From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
[ Upstream commit 0bb7e560f821c7770973a94e346654c4bdccd42c ]
If 'mmc_of_parse()' fails, we must undo the previous 'dma_request_chan()' call.
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Link: https://lore.kernel.org/r/20201208203527.49262-1-christophe.jaillet@wanadoo....
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/mmc/host/mxs-mmc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index c8b8ac66ff7e..687fd68fbbcd 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -651,7 +651,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
 
 	ret = mmc_of_parse(mmc);
 	if (ret)
-		goto out_clk_disable;
+		goto out_free_dma;
 
 	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 
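For context, a minimal sketch of the goto-unwind ordering the one-line fix restores. It is illustrative only, not the exact mxs-mmc probe: the clock handling and the way mmc is obtained are assumptions made for the sketch.

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev); /* assumed set up earlier */
	struct dma_chan *dmach;
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);			/* step 1 */
	if (ret)
		return ret;

	dmach = dma_request_chan(&pdev->dev, "rx-tx");	/* step 2 */
	if (IS_ERR(dmach)) {
		ret = PTR_ERR(dmach);
		goto out_clk_disable;			/* only step 1 to undo */
	}

	ret = mmc_of_parse(mmc);			/* step 3: the failing call */
	if (ret)
		goto out_free_dma;			/* steps 2 and 1 to undo */

	return 0;

out_free_dma:
	dma_release_channel(dmach);			/* undoes step 2 */
out_clk_disable:
	clk_disable_unprepare(clk);			/* undoes step 1 */
	return ret;
}

Jumping to out_clk_disable on mmc_of_parse() failure, as the old code did, skips dma_release_channel() and leaks the channel.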
From: Chaotian Jing <chaotian.jing@mediatek.com>
[ Upstream commit 0354ca6edd464a2cf332f390581977b8699ed081 ]
When the request SW timeout fires, a CMD/DAT transfer-done interrupt may arrive at the same moment, creating a race between the msdc_request_timeout work and the IRQ handler, in which host->cmd and host->data may be set to NULL by the IRQ handler. Also, the current flow ensures that only one path can reach msdc_request_done(), so there is no need to check the return value of cancel_delayed_work().
Signed-off-by: Chaotian Jing <chaotian.jing@mediatek.com>
Link: https://lore.kernel.org/r/20201218071611.12276-1-chaotian.jing@mediatek.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/mmc/host/mtk-sd.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 5ef25463494f..1770c8df9d1b 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -720,13 +720,13 @@ static void msdc_track_cmd_data(struct msdc_host *host,
 static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
 {
 	unsigned long flags;
-	bool ret;
 
-	ret = cancel_delayed_work(&host->req_timeout);
-	if (!ret) {
-		/* delay work already running */
-		return;
-	}
+	/*
+	 * No need check the return value of cancel_delayed_work, as only ONE
+	 * path will go here!
+	 */
+	cancel_delayed_work(&host->req_timeout);
+
 	spin_lock_irqsave(&host->lock, flags);
 	host->mrq = NULL;
 	spin_unlock_irqrestore(&host->lock, flags);
@@ -747,7 +747,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
 	bool done = false;
 	bool sbc_error;
 	unsigned long flags;
-	u32 *rsp = cmd->resp;
+	u32 *rsp;
 
 	if (mrq->sbc && cmd == mrq->cmd &&
 	    (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
@@ -768,6 +768,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
 
 	if (done)
 		return true;
+	rsp = cmd->resp;
 
 	sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
 
@@ -942,7 +943,7 @@ static void msdc_data_xfer_next(struct msdc_host *host,
 static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
 				struct mmc_request *mrq, struct mmc_data *data)
 {
-	struct mmc_command *stop = data->stop;
+	struct mmc_command *stop;
 	unsigned long flags;
 	bool done;
 	unsigned int check_data = events &
@@ -958,6 +959,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
 
 	if (done)
 		return true;
+	stop = data->stop;
 
 	if (check_data || (stop && stop->error)) {
 		dev_dbg(host->dev, "DMA status: 0x%8X\n",
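The ownership rule that the two rsp/stop moves enforce, as a hedged sketch simplified from the diff above; the locking fields follow the driver, but the function body is condensed and illustrative:

static bool example_cmd_done(struct msdc_host *host, struct mmc_command *cmd)
{
	unsigned long flags;
	bool done;
	u32 *rsp;

	spin_lock_irqsave(&host->lock, flags);
	done = !host->cmd;	/* the other path already completed it */
	host->cmd = NULL;	/* claim the completion */
	spin_unlock_irqrestore(&host->lock, flags);

	if (done)
		return true;	/* lost the race: cmd must not be touched */

	rsp = cmd->resp;	/* safe only on the winning path */
	/* ... latch response registers into rsp ... */
	return false;
}

Initializing rsp at declaration time, as before the patch, dereferences cmd before the ownership check, which is exactly the window in which the other path may already have completed the request.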
From: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
[ Upstream commit d137845c973147a22622cc76c7b0bc16f6206323 ]
While sampling for marked events, we currently record the sample only if the SIAR valid bit of the Sampled Instruction Event Register (SIER) is set. The SIAR_VALID bit indicates whether the instruction address can be fetched from the Sampled Instruction Address Register (SIAR). But there are use cases where the user is interested only in the PMU stats at each counter overflow, and the exact IP of the overflow event is not required. Dropping SIAR-invalid samples fails to record some of the counter overflows in such cases.
An example of such a use case is dumping the PMU stats (event counts) after a regular number of instructions/events from userspace (e.g. via ptrace). Here the counter overflow is indicated to userspace via a signal handler, set up by monitoring and enabling I/O signaling on the event file descriptor. In these cases, we expect to get a sample/overflow indication after each specified sample_period.
The perf event attribute will not have PERF_SAMPLE_IP set in sample_type if the exact IP of the overflow event is not requested. So while profiling, if PERF_SAMPLE_IP is not set, just record the counter overflow irrespective of the SIAR_VALID check.
Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
[mpe: Reflow comment and if formatting]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1612516492-1428-1-git-send-email-atrajeev@linux.vn...
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/perf/core-book3s.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index e593e7f856ed..7a80e1cff6e2 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2008,7 +2008,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 			left += period;
 			if (left <= 0)
 				left = period;
-			record = siar_valid(regs);
+
+			/*
+			 * If address is not requested in the sample via
+			 * PERF_SAMPLE_IP, just record that sample irrespective
+			 * of SIAR valid check.
+			 */
+			if (event->attr.sample_type & PERF_SAMPLE_IP)
+				record = siar_valid(regs);
+			else
+				record = 1;
+
 			event->hw.last_period = event->hw.sample_period;
 		}
 		if (left < 0x80000000LL)
@@ -2026,9 +2036,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	 * MMCR2. Check attr.exclude_kernel and address to drop the sample in
 	 * these cases.
 	 */
-	if (event->attr.exclude_kernel && record)
-		if (is_kernel_addr(mfspr(SPRN_SIAR)))
-			record = 0;
+	if (event->attr.exclude_kernel &&
+	    (event->attr.sample_type & PERF_SAMPLE_IP) &&
+	    is_kernel_addr(mfspr(SPRN_SIAR)))
+		record = 0;
 
 	/*
 	 * Finally record data if requested.
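To make the use case concrete, a hedged userspace sketch of the overflow-signal pattern described above: count overflows via SIGIO on the event fd without ever requesting PERF_SAMPLE_IP. The event choice, period, and busy loop are arbitrary, and error handling is trimmed.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

static volatile sig_atomic_t overflows;

static void on_overflow(int sig)
{
	(void)sig;
	overflows++;			/* one PMU stats point per overflow */
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.sample_period = 10000000;	/* overflow every 10M instructions */
	attr.wakeup_events = 1;		/* signal on every overflow */
	/* attr.sample_type deliberately leaves PERF_SAMPLE_IP unset */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	signal(SIGIO, on_overflow);
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

	for (volatile long i = 0; i < 500000000; i++)
		;			/* burn instructions */

	printf("overflows seen: %d\n", (int)overflows);
	close(fd);
	return 0;
}

Before this patch, any of these overflows that happened to have SIAR_VALID clear would simply be dropped.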
From: Andreas Larsson <andreas@gaisler.com>
[ Upstream commit bda166930c37604ffa93f2425426af6921ec575a ]
Commit cca079ef8ac29a7c02192d2bad2ffe4c0c5ffdd0 changed sparc32 to use memblocks instead of bootmem, but also made high memory available via memblock allocation, which does not work together with e.g. phys_to_virt() and can lead to a kernel panic.

This changes back to allowing only low memory to be allocated in the early stages, now via memblock allocation.
Signed-off-by: Andreas Larsson <andreas@gaisler.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/sparc/mm/init_32.c | 3 +++
 1 file changed, 3 insertions(+)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 3b7092d9ea8f..4abe4bf08377 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -240,6 +240,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
 	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
+	/* Only allow low memory to be allocated via memblock allocation */
+	memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
+
 	return max_pfn;
 }
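A hedged sketch of why the cap matters; memblock_alloc() here follows the current kernel API and is illustrative, not taken from the sparc32 code:

/*
 * phys_to_virt() is only valid for the kernel's linear mapping, which
 * covers low memory (up to max_low_pfn).  Capping memblock keeps early
 * allocations inside that range.
 */
void __init early_alloc_example(void)
{
	void *buf;

	/* Without the cap, memblock may return a high-memory block whose
	 * phys_to_virt() result points into an unmapped area.
	 */
	memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);

	buf = memblock_alloc(PAGE_SIZE, PAGE_SIZE);	/* low memory only now */
	if (!buf)
		panic("early allocation failed");
}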
On 2021-03-02 12:59, Sasha Levin wrote:
> From: Andreas Larsson <andreas@gaisler.com>
>
> [ Upstream commit bda166930c37604ffa93f2425426af6921ec575a ]
>
> Commit cca079ef8ac29a7c02192d2bad2ffe4c0c5ffdd0 changed sparc32 to use
> memblocks instead of bootmem, but also made high memory available via
> memblock allocation, which does not work together with e.g.
> phys_to_virt() and can lead to a kernel panic.
>
> This changes back to allowing only low memory to be allocated in the
> early stages, now via memblock allocation.
>
> Signed-off-by: Andreas Larsson <andreas@gaisler.com>
> Acked-by: Mike Rapoport <rppt@linux.ibm.com>
> Signed-off-by: David S. Miller <davem@davemloft.net>
> Signed-off-by: Sasha Levin <sashal@kernel.org>
> ---
>  arch/sparc/mm/init_32.c | 3 +++
>  1 file changed, 3 insertions(+)
>
> diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
> index 3b7092d9ea8f..4abe4bf08377 100644
> --- a/arch/sparc/mm/init_32.c
> +++ b/arch/sparc/mm/init_32.c
> @@ -240,6 +240,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
>  	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
>  	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
>
> +	/* Only allow low memory to be allocated via memblock allocation */
> +	memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
> +
>  	return max_pfn;
>  }
This is not needed for 4.4, and will not compile, as the problem it fixes was introduced in 4.19.
From: Sami Tolvanen <samitolvanen@google.com>
[ Upstream commit 6dafca97803309c3cb5148d449bfa711e41ddef2 ]
Select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION is selected to use objtool to generate __mcount_loc sections for dynamic ftrace with Clang and gcc <5 (later versions of gcc use -mrecord-mcount).
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/Kconfig | 1 +
 1 file changed, 1 insertion(+)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1bee1c6a9891..2e2ddc864ea9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -94,6 +94,7 @@ config X86
 	select HAVE_CONTEXT_TRACKING		if X86_64
 	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
+	select HAVE_OBJTOOL_MCOUNT		if STACK_VALIDATION
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DMA_API_DEBUG
From: Martin Kaiser <martin@kaiser.cx>
[ Upstream commit a93c00e5f975f23592895b7e83f35de2d36b7633 ]
Fix a race where a pending interrupt could be received and the handler called before the handler's data has been set up, by converting to irq_set_chained_handler_and_data().
See also 2cf5a03cb29d ("PCI/keystone: Fix race in installing chained IRQ handler").
Based on the mail discussion, it seems ok to drop the error handling.
Link: https://lore.kernel.org/r/20210115212435.19940-3-martin@kaiser.cx
Signed-off-by: Martin Kaiser <martin@kaiser.cx>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/pci/host/pci-xgene-msi.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index a6456b578269..b6a099371ad2 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -393,13 +393,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
 		if (!msi_group->gic_irq)
 			continue;
 
-		irq_set_chained_handler(msi_group->gic_irq,
-					xgene_msi_isr);
-		err = irq_set_handler_data(msi_group->gic_irq, msi_group);
-		if (err) {
-			pr_err("failed to register GIC IRQ handler\n");
-			return -EINVAL;
-		}
+		irq_set_chained_handler_and_data(msi_group->gic_irq,
+						 xgene_msi_isr, msi_group);
+
 		/*
 		 * Statically allocate MSI GIC IRQs to each CPU core.
 		 * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
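The window being closed, as a sketch; the two wrapper functions are illustrative, while the irq_set_* calls are the real genirq API:

/* Old, racy two-step installation: */
static void racy_setup(unsigned int irq, struct xgene_msi_group *msi_group)
{
	irq_set_chained_handler(irq, xgene_msi_isr);
	/* A pending MSI can fire right here: xgene_msi_isr() runs while
	 * irq_desc_get_handler_data() still returns stale or NULL data.
	 */
	irq_set_handler_data(irq, msi_group);
}

/* Fixed: handler and data are installed together under the irq
 * descriptor lock, so the handler never sees the IRQ armed without
 * its data.
 */
static void fixed_setup(unsigned int irq, struct xgene_msi_group *msi_group)
{
	irq_set_chained_handler_and_data(irq, xgene_msi_isr, msi_group);
}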
From: Heiko Carstens <hca@linux.ibm.com>
[ Upstream commit 62c8dca9e194326802b43c60763f856d782b225c ]
Avoid a potentially large stack frame and overflow by making "cpumask_t avail" a static variable. There is no concurrent access due to the existing locking.
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/s390/kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f113fcd781d8..486f0d4f9aee 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -738,7 +738,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
 static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
 {
 	struct sclp_core_entry *core;
-	cpumask_t avail;
+	static cpumask_t avail;
 	bool configured;
 	u16 core_id;
 	int nr, i;
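For scale, a hedged sketch of what the one-word change buys; the NR_CPUS figure is an example rather than the s390 default, and the mask arithmetic is illustrative:

/*
 * sizeof(cpumask_t) is NR_CPUS / 8 bytes when the mask is inline, e.g.
 * 1 KiB with NR_CPUS=8192 -- a large slice of the kernel stack.  As a
 * static it lives in .bss instead.  This is only safe because callers
 * of __smp_rescan_cpus() already serialize, so there is no concurrent
 * access to the mask.
 */
static int example_rescan(void)
{
	static cpumask_t avail;		/* off the stack; access serialized */

	cpumask_andnot(&avail, cpu_possible_mask, cpu_present_mask);
	return cpumask_weight(&avail);	/* CPUs that could still be added */
}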
From: Mike Christie <michael.christie@oracle.com>
[ Upstream commit d28d48c699779973ab9a3bd0e5acfa112bd4fdef ]
If iscsi_prep_scsi_cmd_pdu() fails, we try to add the task back to the cmdqueue, but we leave it partially set up. We don't have functions that can undo the PDU and init-task setup separately; we only have cleanup_task, which cleans up both parts. So just fail the command, go through the standard cleanup routine, and have the SCSI midlayer retry it, as is done when a command fails in the queuecommand path.
Link: https://lore.kernel.org/r/20210207044608.27585-2-michael.christie@oracle.com
Reviewed-by: Lee Duncan <lduncan@suse.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/scsi/libiscsi.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 36e415487fe5..444f58589f33 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1568,14 +1568,9 @@ check_mgmt:
 		}
 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
 		if (rc) {
-			if (rc == -ENOMEM || rc == -EACCES) {
-				spin_lock_bh(&conn->taskqueuelock);
-				list_add_tail(&conn->task->running,
-					      &conn->cmdqueue);
-				conn->task = NULL;
-				spin_unlock_bh(&conn->taskqueuelock);
-				goto done;
-			} else
+			if (rc == -ENOMEM || rc == -EACCES)
+				fail_scsi_task(conn->task, DID_IMM_RETRY);
+			else
 				fail_scsi_task(conn->task, DID_ABORT);
 			spin_lock_bh(&conn->taskqueuelock);
 			continue;
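A hedged sketch of what failing with DID_IMM_RETRY means; fail_scsi_task()'s internals are condensed here from the standard SCSI host-byte convention, and ->scsi_done matches the scsi_cmnd layout of this kernel generation:

static void example_fail_for_retry(struct iscsi_task *task)
{
	struct scsi_cmnd *sc = task->sc;

	/* Tear down both the PDU and the init-task state first (the
	 * cleanup_task callout); no partial undo helpers are needed.
	 */
	sc->result = DID_IMM_RETRY << 16;	/* host byte: retry at once */
	sc->scsi_done(sc);			/* midlayer requeues the cmd */
}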