This is an automatically generated email to let you know that the following patch was queued:
Subject: media: ov8865: Disable only enabled regulators on error path
Author: Sakari Ailus <sakari.ailus(a)linux.intel.com>
Date: Wed Dec 15 09:38:48 2021 +0100
If powering on the sensor failed, the entire power-off sequence was run
regardless of how far the power-on sequence had proceeded before the
error. This led to disabling regulators and/or a clock that had not been
enabled.
Fix this by disabling only the clocks and regulators that were
previously enabled.
Fixes: 11c0d8fdccc5 ("media: i2c: Add support for the OV8865 image sensor")
Cc: stable(a)vger.kernel.org
Signed-off-by: Sakari Ailus <sakari.ailus(a)linux.intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei(a)kernel.org>
drivers/media/i2c/ov8865.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
---
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index ebdb20d3fe9d..d9d016cfa9ac 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -2407,27 +2407,27 @@ static int ov8865_sensor_power(struct ov8865_sensor *sensor, bool on)
if (ret) {
dev_err(sensor->dev,
"failed to enable DOVDD regulator\n");
- goto disable;
+ return ret;
}
ret = regulator_enable(sensor->avdd);
if (ret) {
dev_err(sensor->dev,
"failed to enable AVDD regulator\n");
- goto disable;
+ goto disable_dovdd;
}
ret = regulator_enable(sensor->dvdd);
if (ret) {
dev_err(sensor->dev,
"failed to enable DVDD regulator\n");
- goto disable;
+ goto disable_avdd;
}
ret = clk_prepare_enable(sensor->extclk);
if (ret) {
dev_err(sensor->dev, "failed to enable EXTCLK clock\n");
- goto disable;
+ goto disable_dvdd;
}
gpiod_set_value_cansleep(sensor->reset, 0);
@@ -2436,14 +2436,16 @@ static int ov8865_sensor_power(struct ov8865_sensor *sensor, bool on)
/* Time to enter streaming mode according to power timings. */
usleep_range(10000, 12000);
} else {
-disable:
gpiod_set_value_cansleep(sensor->powerdown, 1);
gpiod_set_value_cansleep(sensor->reset, 1);
clk_disable_unprepare(sensor->extclk);
+disable_dvdd:
regulator_disable(sensor->dvdd);
+disable_avdd:
regulator_disable(sensor->avdd);
+disable_dovdd:
regulator_disable(sensor->dovdd);
}
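
The labelled-unwind pattern the patch switches to is the standard way to
release only what was actually acquired. A minimal standalone C sketch of
the idea; enable_a()/disable_a() and friends are hypothetical stand-ins
for the regulator and clock calls, not driver code:

#include <stdio.h>

static int enable_a(void) { return 0; }
static int enable_b(void) { return 0; }
static int enable_c(void) { return -1; }   /* simulate a failure */

static void disable_a(void) { puts("disable a"); }
static void disable_b(void) { puts("disable b"); }

static int power_on(void)
{
	int ret;

	ret = enable_a();
	if (ret)
		return ret;		/* nothing enabled yet, nothing to undo */

	ret = enable_b();
	if (ret)
		goto disable_a;		/* undo only a */

	ret = enable_c();
	if (ret)
		goto disable_b;		/* undo b, then fall through to a */

	return 0;

disable_b:
	disable_b();
disable_a:
	disable_a();
	return ret;
}

int main(void)
{
	printf("power_on() = %d\n", power_on());
	return 0;
}

Each failure jumps to the label that undoes only the steps that had
already succeeded, so the unwind always matches how far setup got.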
The pointer to the allocated pages (struct page *page) has already been
advanced towards the end of the allocation by the time it is used on the
error path. It is incorrect to call __free_pages(page, order) with this
pointer, as that would free arbitrary pages. Fix this by not modifying
the page pointer.
Fixes: ec185dd3ab25 ("optee: Fix memory leak when failing to register shm pages")
Cc: stable(a)vger.kernel.org
Reported-by: Patrik Lantz <patrik.lantz(a)axis.com>
Signed-off-by: Sumit Garg <sumit.garg(a)linaro.org>
Reviewed-by: Tyler Hicks <tyhicks(a)linux.microsoft.com>
---
Changes since v1:
- Added stable CC tag.
- Picked up Tyler's review tag.
drivers/tee/optee/core.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index ab2edfcc6c70..2a66a5203d2f 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -48,10 +48,8 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
goto err;
}
- for (i = 0; i < nr_pages; i++) {
- pages[i] = page;
- page++;
- }
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
shm->flags |= TEE_SHM_REGISTER;
rc = shm_register(shm->ctx, shm, pages, nr_pages,
--
2.25.1
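
The bug being fixed is the classic "walking pointer" mistake and is easy
to reproduce outside the kernel: once the base pointer has been advanced
while filling the array, it no longer identifies the start of the
allocation when it is time to free it. A small standalone sketch, with
calloc()/free() standing in for alloc_pages()/__free_pages() (a
hypothetical illustration, not the OP-TEE code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t nr_pages = 4, i;
	/* base stands in for the pointer returned by the allocator. */
	int *base = calloc(nr_pages, sizeof(*base));
	int *pages[4];

	if (!base)
		return 1;

	/*
	 * Buggy pattern:  pages[i] = base; base++;
	 * After the loop, base no longer points at the start of the
	 * allocation, so freeing through it releases the wrong memory.
	 *
	 * Fixed pattern: index from base and leave it untouched.
	 */
	for (i = 0; i < nr_pages; i++)
		pages[i] = base + i;

	printf("start %p, last entry %p\n",
	       (void *)base, (void *)pages[nr_pages - 1]);

	/* base still points at the original allocation, so the cleanup
	 * path can free it safely. */
	free(base);
	return 0;
}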
I'm announcing the release of the 5.15.9 kernel.
The only change here is a permission setting on a netfilter selftest file.
No need to upgrade if this problem is not bothering you.
The updated 5.15.y git tree can be found at:
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-5.15.y
and can be browsed at the normal kernel.org git web browser:
https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summary
thanks,
greg k-h
------------
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
Greg Kroah-Hartman (2):
netfilter: selftest: conntrack_vrf.sh: fix file permission
Linux 5.15.9
In some places the current kernel assumes that the DMA zone must have
managed pages if CONFIG_ZONE_DMA is enabled, but this is not always
true. E.g. in the kdump kernel on x86_64, only the low 1M is present and
it is locked down at a very early stage of boot, so there are no managed
pages at all in the DMA zone. This will always cause a page allocation
failure if a page is requested from the DMA zone.
Add the function has_managed_dma() and the relevant helpers to check
whether there is a DMA zone with managed pages. It will be used in later
patches.
Fixes: 6f599d84231f ("x86/kdump: Always reserve the low 1M when the crashkernel option is specified")
Cc: stable(a)vger.kernel.org
Signed-off-by: Baoquan He <bhe(a)redhat.com>
---
v2->v3:
Rewrite has_managed_dma() in a simpler and more efficient way, as
suggested by DavidH.
include/linux/mmzone.h | 9 +++++++++
mm/page_alloc.c | 15 +++++++++++++++
2 files changed, 24 insertions(+)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 58e744b78c2c..6e1b726e9adf 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1046,6 +1046,15 @@ static inline int is_highmem_idx(enum zone_type idx)
#endif
}
+#ifdef CONFIG_ZONE_DMA
+bool has_managed_dma(void);
+#else
+static inline bool has_managed_dma(void)
+{
+ return false;
+}
+#endif
+
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c5952749ad40..7c7a0b5de2ff 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -9460,3 +9460,18 @@ bool take_page_off_buddy(struct page *page)
return ret;
}
#endif
+
+#ifdef CONFIG_ZONE_DMA
+bool has_managed_dma(void)
+{
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat) {
+ struct zone *zone = &pgdat->node_zones[ZONE_DMA];
+
+ if (managed_zone(zone))
+ return true;
+ }
+ return false;
+}
+#endif /* CONFIG_ZONE_DMA */
--
2.17.2
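
A hypothetical caller sketch of how such a check might be used (a
standalone illustration with simplified stand-in gfp flags and a stubbed
has_managed_dma(), not taken from the follow-up patches): only request
DMA-zone memory when the DMA zone can actually satisfy the request.

#include <stdbool.h>
#include <stdio.h>

#define GFP_KERNEL 0x01u
#define GFP_DMA    0x02u	/* simplified stand-ins for the real gfp flags */

static bool has_managed_dma(void)
{
	return false;	/* pretend we run in a kdump kernel: empty DMA zone */
}

static unsigned int pick_gfp_for_low_buffer(void)
{
	unsigned int gfp = GFP_KERNEL;

	/* Only ask for DMA-zone memory if that zone has managed pages,
	 * otherwise the allocation would always fail. */
	if (has_managed_dma())
		gfp |= GFP_DMA;

	return gfp;
}

int main(void)
{
	printf("gfp mask = 0x%x\n", pick_gfp_for_low_buffer());
	return 0;
}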
There is a rare issue where the controller accesses an invalid address
and triggers a devapc or emimpu violation. That is caused by
out-of-order memory accesses leaving the GPD data incorrect.
Make sure the GPD is fully written before handing it over to the
hardware by setting its HWO flag.
Fixes: 48e0d3735aa5 ("usb: mtu3: supports new QMU format")
Cc: stable(a)vger.kernel.org
Reported-by: Eddie Hung <eddie.hung(a)mediatek.com>
Signed-off-by: Chunfeng Yun <chunfeng.yun(a)mediatek.com>
---
drivers/usb/mtu3/mtu3_qmu.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 3f414f91b589..34bb5ac67efe 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -273,6 +273,8 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
}
+ /* make sure GPD is fully written before giving it to HW */
+ mb();
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
@@ -306,6 +308,8 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
gpd->dw3_info = cpu_to_le32(ext_addr);
+ /* make sure GPD is fully written before giving it to HW */
+ mb();
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
@@ -445,7 +449,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
return;
}
mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
-
+ /* make sure GPD is fully written before giving it to HW */
+ mb();
/* by pass the current GDP */
gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
--
2.18.0
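
The requirement is the usual publish-after-barrier pattern: fill in every
descriptor field, issue a full barrier, and only then flip the ownership
flag. A standalone C11 sketch of the same idea; atomic_thread_fence()
orders the writes for another CPU here, whereas the driver needs the
architecture mb() because the consumer is the DMA hardware rather than
another thread, and the struct layout is a simplified stand-in for the
real GPD:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct gpd {
	uint32_t buffer;
	uint32_t length;
	_Atomic uint32_t dw0_info;	/* bit 0: HWO, "owned by hardware" */
};

#define GPD_FLAGS_HWO 0x1u

static void publish_gpd(struct gpd *gpd, uint32_t buf, uint32_t len)
{
	/* Fill in the descriptor first... */
	gpd->buffer = buf;
	gpd->length = len;

	/* ...then a full fence so the writes above are visible before the
	 * ownership flag is set (mb() plays this role in the driver). */
	atomic_thread_fence(memory_order_seq_cst);

	/* Finally hand the descriptor over by setting HWO. */
	atomic_fetch_or_explicit(&gpd->dw0_info, GPD_FLAGS_HWO,
				 memory_order_relaxed);
}

int main(void)
{
	struct gpd g = { 0, 0, 0 };

	publish_gpd(&g, 0x1000, 512);
	printf("dw0_info = 0x%x\n", (unsigned int)atomic_load(&g.dw0_info));
	return 0;
}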
During the SYS_ERR condition, as a response to the MHI_RESET from the
host, some devices tend to issue a BHI interrupt without clearing the
SYS_ERR state in the device. This creates a race condition and causes a
failure in booting up the device.
The issue is seen on the Sierra Wireless EM9191 modem during SYS_ERR
handling in mhi_async_power_up(). Once the host detects that the device
is in SYS_ERR state, it issues MHI_RESET and waits for the device to
process the reset request. During this time, the device triggers the BHI
interrupt to the host without clearing SYS_ERR condition. So the host
starts handling the SYS_ERR condition again.
To fix this issue, let's register the IRQ handler only after the SYS_ERR
condition has been handled, to avoid getting spurious IRQs from the
device.
Cc: stable(a)vger.kernel.org
Fixes: e18d4e9fa79b ("bus: mhi: core: Handle syserr during power_up")
Reported-by: Aleksander Morgado <aleksander(a)aleksander.es>
Tested-by: Aleksander Morgado <aleksander(a)aleksander.es>
Tested-by: Thomas Perrot <thomas.perrot(a)bootlin.com>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam(a)linaro.org>
---
drivers/bus/mhi/core/pm.c | 35 ++++++++++++-----------------------
1 file changed, 12 insertions(+), 23 deletions(-)
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index 7464f5d09973..9ae8532df5a3 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -1038,7 +1038,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- u32 val;
+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */
int ret;
dev_info(dev, "Requested to power ON\n");
@@ -1055,10 +1055,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mutex_lock(&mhi_cntrl->pm_mutex);
mhi_cntrl->pm_state = MHI_PM_DISABLE;
- ret = mhi_init_irq_setup(mhi_cntrl);
- if (ret)
- goto error_setup_irq;
-
/* Setup BHI INTVEC */
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
@@ -1072,7 +1068,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
dev_err(dev, "%s is not a valid EE for power on\n",
TO_MHI_EXEC_STR(current_ee));
ret = -EIO;
- goto error_async_power_up;
+ goto error_exit;
}
state = mhi_get_mhi_state(mhi_cntrl);
@@ -1081,20 +1077,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
if (state == MHI_STATE_SYS_ERR) {
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
- ret = wait_event_timeout(mhi_cntrl->state_event,
- MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
- mhi_read_reg_field(mhi_cntrl,
- mhi_cntrl->regs,
- MHICTRL,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT,
- &val) ||
- !val,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
- if (!ret) {
- ret = -EIO;
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
+ interval_us);
+ if (ret) {
dev_info(dev, "Failed to reset MHI due to syserr state\n");
- goto error_async_power_up;
+ goto error_exit;
}
/*
@@ -1104,6 +1092,10 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
}
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto error_exit;
+
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
@@ -1116,10 +1108,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
return 0;
-error_async_power_up:
- mhi_deinit_free_irq(mhi_cntrl);
-
-error_setup_irq:
+error_exit:
mhi_cntrl->pm_state = MHI_PM_DISABLE;
mutex_unlock(&mhi_cntrl->pm_mutex);
--
2.25.1
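
The syserr path above now polls MHICTRL.RESET at a fixed interval via
mhi_poll_reg_field() instead of sleeping on a wait queue. A standalone
sketch of such a bounded register poll; read_reset_field() and sleep_us()
are hypothetical stand-ins for mhi_read_reg_field() and the kernel's
delay helpers:

#include <stdint.h>
#include <stdio.h>

/* Pretend the RESET bit stays set for a few reads and then clears. */
static uint32_t read_reset_field(void)
{
	static int reads;
	return reads++ < 3 ? 1 : 0;
}

/* No real sleep is needed for the sketch; the driver sleeps here. */
static void sleep_us(unsigned int us)
{
	(void)us;
}

/* Bounded poll: re-read the field every interval_us until it matches
 * "want" or timeout_us has elapsed. */
static int poll_field(uint32_t want, unsigned int interval_us,
		      unsigned int timeout_us)
{
	unsigned int waited = 0;

	while (waited <= timeout_us) {
		if (read_reset_field() == want)
			return 0;
		sleep_us(interval_us);
		waited += interval_us;
	}
	return -1;	/* timed out; the kernel path returns an error here */
}

int main(void)
{
	printf("poll_field() = %d\n", poll_field(0, 25000, 1000000));
	return 0;
}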