From: Takashi Iwai tiwai@suse.de
[ Upstream commit d7f910bfedd863d13ea320030fe98e42d0938ed5 ]
For accessing the snd_timer_user queue indices, we take tu->qlock. But it's forgotten in a couple of places.
The one in snd_timer_user_params() should be safe without the spinlock as the timer is already stopped. But it's better for consistency.
The one in poll is just a read-out, so it isn't strictly needed, but taking the lock there keeps the returned state consistent, too.
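For readability, here is a minimal sketch of the resulting poll callback, assembled from the hunk below (in the context of sound/core/timer.c; illustrative, not a verbatim copy of the file):

  static unsigned int snd_timer_user_poll_sketch(struct file *file,
                                                 poll_table *wait)
  {
          struct snd_timer_user *tu = file->private_data;
          unsigned int mask = 0;

          poll_wait(file, &tu->qchange_sleep, wait);

          /* Guard the queue read-out so POLLIN matches the queue state. */
          spin_lock_irq(&tu->qlock);
          if (tu->qused)
                  mask |= POLLIN | POLLRDNORM;
          if (tu->disconnected)
                  mask |= POLLERR;
          spin_unlock_irq(&tu->qlock);

          return mask;
  }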
Tested-by: Alexander Potapenko glider@google.com Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- sound/core/timer.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/sound/core/timer.c b/sound/core/timer.c index 48eaccba82a3..fd622aa0bb93 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1771,6 +1771,7 @@ static int snd_timer_user_params(struct file *file, } } } + spin_lock_irq(&tu->qlock); tu->qhead = tu->qtail = tu->qused = 0; if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) { if (tu->tread) { @@ -1791,6 +1792,7 @@ static int snd_timer_user_params(struct file *file, } tu->filter = params.filter; tu->ticks = params.ticks; + spin_unlock_irq(&tu->qlock); err = 0; _end: if (copy_to_user(_params, ¶ms, sizeof(params))) @@ -2029,10 +2031,12 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait) poll_wait(file, &tu->qchange_sleep, wait);
mask = 0; + spin_lock_irq(&tu->qlock); if (tu->qused) mask |= POLLIN | POLLRDNORM; if (tu->disconnected) mask |= POLLERR; + spin_unlock_irq(&tu->qlock);
return mask; }
From: Chris Wilson chris@chris-wilson.co.uk
[ Upstream commit 833521ebc65b1c3092e5c0d8a97092f98eec595d ]
An error during suspend (e1000e_pm_suspend),
[ 429.994338] ACPI : EC: event blocked
[ 429.994633] e1000e: EEE TX LPI TIMER: 00000011
[ 430.955451] pci_pm_suspend(): e1000e_pm_suspend+0x0/0x30 [e1000e] returns -2
[ 430.955454] dpm_run_callback(): pci_pm_suspend+0x0/0x140 returns -2
[ 430.955458] PM: Device 0000:00:19.0 failed to suspend async: error -2
[ 430.955581] PM: Some devices failed to suspend, or early wake event detected
[ 430.957709] ACPI : EC: event unblocked
led to complete failure:
[ 432.585002] ------------[ cut here ]------------
[ 432.585013] WARNING: CPU: 3 PID: 8372 at kernel/irq/manage.c:1478 __free_irq+0x9f/0x280
[ 432.585015] Trying to free already-free IRQ 20
[ 432.585016] Modules linked in: cdc_ncm usbnet x86_pkg_temp_thermal intel_powerclamp coretemp mii crct10dif_pclmul crc32_pclmul ghash_clmulni_intel snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic snd_hda_intel snd_hda_codec snd_hwdep lpc_ich snd_hda_core snd_pcm mei_me mei sdhci_pci sdhci i915 mmc_core e1000e ptp pps_core prime_numbers
[ 432.585042] CPU: 3 PID: 8372 Comm: kworker/u16:40 Tainted: G U 4.10.0-rc8-CI-Patchwork_3870+ #1
[ 432.585044] Hardware name: LENOVO 2356GCG/2356GCG, BIOS G7ET31WW (1.13 ) 07/02/2012
[ 432.585050] Workqueue: events_unbound async_run_entry_fn
[ 432.585051] Call Trace:
[ 432.585058]  dump_stack+0x67/0x92
[ 432.585062]  __warn+0xc6/0xe0
[ 432.585065]  warn_slowpath_fmt+0x4a/0x50
[ 432.585070]  ? _raw_spin_lock_irqsave+0x49/0x60
[ 432.585072]  __free_irq+0x9f/0x280
[ 432.585075]  free_irq+0x34/0x80
[ 432.585089]  e1000_free_irq+0x65/0x70 [e1000e]
[ 432.585098]  e1000e_pm_freeze+0x7a/0xb0 [e1000e]
[ 432.585106]  e1000e_pm_suspend+0x21/0x30 [e1000e]
[ 432.585113]  pci_pm_suspend+0x71/0x140
[ 432.585118]  dpm_run_callback+0x6f/0x330
[ 432.585122]  ? pci_pm_freeze+0xe0/0xe0
[ 432.585125]  __device_suspend+0xea/0x330
[ 432.585128]  async_suspend+0x1a/0x90
[ 432.585132]  async_run_entry_fn+0x34/0x160
[ 432.585137]  process_one_work+0x1f4/0x6d0
[ 432.585140]  ? process_one_work+0x16e/0x6d0
[ 432.585143]  worker_thread+0x49/0x4a0
[ 432.585145]  kthread+0x107/0x140
[ 432.585148]  ? process_one_work+0x6d0/0x6d0
[ 432.585150]  ? kthread_create_on_node+0x40/0x40
[ 432.585154]  ret_from_fork+0x2e/0x40
[ 432.585156] ---[ end trace 6712df7f8c4b9124 ]---
The unwind failure stems from commit 2800209994f8 ("e1000e: Refactor PM flows"), but it may be a later patch that introduced the non-recoverable behaviour.
Fixes: 2800209994f8 ("e1000e: Refactor PM flows") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=99847 Signed-off-by: Chris Wilson chris@chris-wilson.co.uk Signed-off-by: Jani Nikula jani.nikula@intel.com Tested-by: Aaron Brown aaron.f.brown@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/ethernet/intel/e1000e/netdev.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e356e9187e84..12bdb7b5241a 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6589,12 +6589,17 @@ static int e1000e_pm_thaw(struct device *dev) static int e1000e_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + int rc;
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
- return __e1000_shutdown(pdev, false); + rc = __e1000_shutdown(pdev, false); + if (rc) + e1000e_pm_thaw(dev); + + return rc; }
static int e1000e_pm_resume(struct device *dev)
From: Peter Zijlstra peterz@infradead.org
[ Upstream commit ba5213ae6b88fb170c4771fef6553f759c7d8cdd ]
Andi was asking about PERF_FORMAT_GROUP vs inherited events, which led to the discovery of a bug from commit:
3dab77fb1bf8 ("perf: Rework/fix the whole read vs group stuff")
-	PERF_SAMPLE_GROUP			= 1U << 4,
+	PERF_SAMPLE_READ			= 1U << 4,

-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
is a clear fail :/
While this changes user-visible behaviour (it was previously possible to create an inherited event with PERF_SAMPLE_READ), this is deemed acceptable because its results were always incorrect.
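To make the user-visible change concrete, here is a hedged userspace sketch (not part of the patch; a kernel carrying this fix is assumed): opening an inherited event that requests PERF_SAMPLE_READ is expected to fail at perf_event_open() time.

  #include <linux/perf_event.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static long open_inherited_sample_read(void)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.inherit = 1;                     /* counts in child tasks too */
          attr.sample_type = PERF_SAMPLE_READ;  /* now rejected together with inherit */

          /* Expected to return -1 (with errno set) on fixed kernels. */
          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  }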
Reported-by: Andi Kleen ak@linux.intel.com Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Arnaldo Carvalho de Melo acme@kernel.org Cc: Jiri Olsa jolsa@kernel.org Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Peter Zijlstra peterz@infradead.org Cc: Stephane Eranian eranian@google.com Cc: Thomas Gleixner tglx@linutronix.de Cc: Vince Weaver vince@deater.net Fixes: 3dab77fb1bf8 ("perf: Rework/fix the whole read vs group stuff") Link: http://lkml.kernel.org/r/20170530094512.dy2nljns2uq7qa3j@hirez.programming.k... Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- kernel/events/core.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c index 8f75386e61a7..835ac4d9f349 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5271,9 +5271,6 @@ static void perf_output_read_one(struct perf_output_handle *handle, __output_copy(handle, values, n * sizeof(u64)); }
-/* - * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. - */ static void perf_output_read_group(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) @@ -5318,6 +5315,13 @@ static void perf_output_read_group(struct perf_output_handle *handle, #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ PERF_FORMAT_TOTAL_TIME_RUNNING)
+/* + * XXX PERF_SAMPLE_READ vs inherited events seems difficult. + * + * The problem is that its both hard and excessively expensive to iterate the + * child list, not to mention that its impossible to IPI the children running + * on another CPU, from interrupt/NMI context. + */ static void perf_output_read(struct perf_output_handle *handle, struct perf_event *event) { @@ -7958,9 +7962,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, local64_set(&hwc->period_left, hwc->sample_period);
/* - * we currently do not support PERF_FORMAT_GROUP on inherited events + * We currently do not support PERF_SAMPLE_READ on inherited events. + * See perf_output_read(). */ - if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) + if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)) goto err_ns;
if (!has_branch_stack(event))
From: Marcin Nowakowski marcin.nowakowski@imgtec.com
[ Upstream commit 71eb989ab5a110df8bcbb9609bacde73feacbedd ]
fixrange_init operates at PMD-granularity and expects the addresses to be PMD-size aligned, but currently that might not be the case for PKMAP_BASE unless it is defined properly, so ensure a correct alignment is used before passing the address to fixrange_init.
For the fixed mappings, only align the start address that is passed to fixrange_init rather than the value before adding the size, as we may otherwise end up with the upper part of the range uninitialised.
Signed-off-by: Marcin Nowakowski marcin.nowakowski@imgtec.com Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15948/ Signed-off-by: Ralf Baechle ralf@linux-mips.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/mips/mm/pgtable-32.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index adc6911ba748..b19a3c506b1e 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -51,15 +51,15 @@ void __init pagetable_init(void) /* * Fixed mappings: */ - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); + fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
#ifdef CONFIG_HIGHMEM /* * Permanent kmaps: */ vaddr = PKMAP_BASE; - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); + fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
pgd = swapper_pg_dir + __pgd_offset(vaddr); pud = pud_offset(pgd, vaddr);
From: Marcin Nowakowski marcin.nowakowski@imgtec.com
[ Upstream commit 698b851073ddf5a894910d63ca04605e0473414e ]
When ftrace is used with kprobes, it is possible for a kprobe to contain an invalid location (i.e. only initialised to 0 and not to a specific location in the code). Trying to perform a cache flush on such a location leads to a crash in r4k_flush_icache_range().
Fixes: c1bf207d6ee1 ("MIPS: kprobe: Add support.") Signed-off-by: Marcin Nowakowski marcin.nowakowski@imgtec.com Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16296/ Signed-off-by: Ralf Baechle ralf@linux-mips.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/mips/include/asm/kprobes.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h index daba1f9a4f79..174aedce3167 100644 --- a/arch/mips/include/asm/kprobes.h +++ b/arch/mips/include/asm/kprobes.h @@ -40,7 +40,8 @@ typedef union mips_instruction kprobe_opcode_t;
#define flush_insn_slot(p) \ do { \ - flush_icache_range((unsigned long)p->addr, \ + if (p->addr) \ + flush_icache_range((unsigned long)p->addr, \ (unsigned long)p->addr + \ (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ } while (0)
From: James Wang jnwang@suse.com
[ Upstream commit 6460495709aeb651896bc8e5c134b2e4ca7d34a8 ]
While installing SLES-12 (based on v4.4), I found that the installer will stall for 60+ seconds during LVM disk scan. The root cause was determined to be the removal of a bound device check in loop_flush() by commit b5dd2f6047ca ("block: loop: improve performance via blk-mq").
Restoring this check, now examining ->lo_state as set by loop_set_fd(), eliminates the bad behavior.
Test method:

modprobe loop max_loop=64
dd if=/dev/zero of=disk bs=512 count=200K
for((i=0;i<4;i++))do losetup -f disk; done
mkfs.ext4 -F /dev/loop0
for((i=0;i<4;i++))do mkdir t$i; mount /dev/loop$i t$i;done
for f in `ls /dev/loop[0-9]*|sort`; do \
	echo $f; dd if=$f of=/dev/null bs=512 count=1; \
done
Test output:

             stock        patched
/dev/loop0   18.1217e-05  8.3842e-05
/dev/loop1   6.1114e-05   0.000147979
/dev/loop10  0.414701     0.000116564
/dev/loop11  0.7474       6.7942e-05
/dev/loop12  0.747986     8.9082e-05
/dev/loop13  0.746532     7.4799e-05
/dev/loop14  0.480041     9.3926e-05
/dev/loop15  1.26453      7.2522e-05
Note that from loop10 onward, the device is not mounted, yet the stock kernel consumes several orders of magnitude more wall time than it does for a mounted device. (Thanks to Mike Galbraith efault@gmx.de for the changelog review.)
Reviewed-by: Hannes Reinecke hare@suse.com Reviewed-by: Ming Lei ming.lei@redhat.com Signed-off-by: James Wang jnwang@suse.com Fixes: b5dd2f6047ca ("block: loop: improve performance via blk-mq") Signed-off-by: Jens Axboe axboe@fb.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/block/loop.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3e65ae144fde..1dd16f26e77d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -623,6 +623,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
  */
 static int loop_flush(struct loop_device *lo)
 {
+	/* loop not yet configured, no running thread, nothing to flush */
+	if (lo->lo_state != Lo_bound)
+		return 0;
 	return loop_switch(lo, NULL);
 }
From: "Paul E. McKenney" paulmck@linux.vnet.ibm.com
[ Upstream commit 68ab0b4263224157f4d0c0e42854169a183d7534 ]
Currently, doing synchronize_rcu_mult(call_rcu, call_rcu) might (or might not) wait for two RCU grace periods. One approach is of course "don't do that!", but in CONFIG_PREEMPT=n kernels, synchronize_rcu_mult(call_rcu, call_rcu_sched) does exactly that. This results in an ugly #ifdef in sched_cpu_deactivate().
This commit therefore makes __wait_rcu_gp() check for duplicates, which in turn allows duplicates to be passed to synchronize_rcu_mult() without risk of waiting twice on the same type of grace period.
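A hedged usage sketch of the behaviour described above (kernel C; the synchronize_rcu_mult() definition of this kernel era in <linux/rcupdate.h> is assumed): after this change, passing the same flavor twice only waits for one grace period of that flavor, so the CONFIG_PREEMPT=n case where call_rcu_sched collapses onto call_rcu no longer waits twice.

  #include <linux/rcupdate.h>

  static void example_wait_rcu_and_sched(void)
  {
          /*
           * Waits for both an RCU and an RCU-sched grace period; with the
           * duplicate check in __wait_rcu_gp(), identical flavors are folded
           * into a single wait instead of two.
           */
          synchronize_rcu_mult(call_rcu, call_rcu_sched);
  }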
Signed-off-by: Paul E. McKenney paulmck@linux.vnet.ibm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- kernel/rcu/update.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 5f748c5a40f0..d98acb903325 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -324,6 +324,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array) { int i; + int j;
/* Initialize and register callbacks for each flavor specified. */ for (i = 0; i < n; i++) { @@ -335,7 +336,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, } init_rcu_head_on_stack(&rs_array[i].head); init_completion(&rs_array[i].completion); - (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu); + for (j = 0; j < i; j++) + if (crcu_array[j] == crcu_array[i]) + break; + if (j == i) + (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu); }
/* Wait for all callbacks to be invoked. */ @@ -344,7 +349,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, (crcu_array[i] == call_rcu || crcu_array[i] == call_rcu_bh)) continue; - wait_for_completion(&rs_array[i].completion); + for (j = 0; j < i; j++) + if (crcu_array[j] == crcu_array[i]) + break; + if (j == i) + wait_for_completion(&rs_array[i].completion); destroy_rcu_head_on_stack(&rs_array[i].head); } }
From: Christian Lamparter chunkeey@googlemail.com
[ Upstream commit 19d90ece81da802207a9b91ce95a29fbdc40626e ]
This patch fixes a problem where the AR8035 PHY can't be detected on a Cisco Meraki MR24 if the ethernet cable is not connected at boot.
Russell Senior provided steps to reproduce the issue:

|Disconnect ethernet cable, apply power, wait until device has booted,
|plug in ethernet, check for interfaces, no eth0 is listed.
|
|This appears to be a problem during probing of the AR8035 Phy chip.
|When ethernet has no link, the phy detection fails, and eth0 is not
|created. Plugging ethernet later has no effect, because there is no
|interface as far as the kernel is concerned. The relevant part of
|the boot log looks like this:
|this is the failing case:
|
|[ 0.876611] /plb/opb/emac-rgmii@ef601500: input 0 in RGMII mode
|[ 0.882532] /plb/opb/ethernet@ef600c00: reset timeout
|[ 0.888546] /plb/opb/ethernet@ef600c00: can't find PHY!
|and the succeeding case:
|
|[ 0.876672] /plb/opb/emac-rgmii@ef601500: input 0 in RGMII mode
|[ 0.883952] eth0: EMAC-0 /plb/opb/ethernet@ef600c00, MAC 00:01:..
|[ 0.890822] eth0: found Atheros 8035 Gigabit Ethernet PHY (0x01)
Based on the comment and the commit message of commit 23fbb5a87c56 ("emac: Fix EMAC soft reset on 460EX/GT"), this is because the AR8035 PHY doesn't provide the TX clock if the ethernet cable is not attached. This causes the reset to time out, and the PHY detection code in emac_init_phy() is unable to detect the AR8035 PHY. As a result, the emac driver bails out early and the user is left with no ethernet.
In order to stay compatible with existing configurations, the driver tries the current reset approach first. Only if that first attempt times out does it perform one more retry, with the clock temporarily switched to the internal source for just the duration of the reset.
LEDE-Bug: #687 https://bugs.lede-project.org/index.php?do=details&task_id=687
Cc: Chris Blake chrisrblake93@gmail.com Reported-by: Russell Senior russell@personaltelco.net Fixes: 23fbb5a87c56e98 ("emac: Fix EMAC soft reset on 460EX/GT") Signed-off-by: Christian Lamparter chunkeey@googlemail.com Reviewed-by: Andrew Lunn andrew@lunn.ch Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/ethernet/ibm/emac/core.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 5d7db6c01c46..f301c03c527b 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; int n = 20; + bool __maybe_unused try_internal_clock = false;
DBG(dev, "reset" NL);
@@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev) }
#ifdef CONFIG_PPC_DCR_NATIVE +do_retry: /* * PPC460EX/GT Embedded Processor Advanced User's Manual * section 28.10.1 Mode Register 0 (EMACx_MR0) states: @@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev) * of the EMAC. If none is present, select the internal clock * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). * After a soft reset, select the external clock. + * + * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the + * ethernet cable is not attached. This causes the reset to timeout + * and the PHY detection code in emac_init_phy() is unable to + * communicate and detect the AR8035-A PHY. As a result, the emac + * driver bails out early and the user has no ethernet. + * In order to stay compatible with existing configurations, the + * driver will temporarily switch to the internal clock, after + * the first reset fails. */ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: select internal loop clock before reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, 0, SDR0_ETH_CFG_ECS << dev->cell_index); @@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev)
#ifdef CONFIG_PPC_DCR_NATIVE if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (!n && !try_internal_clock) { + /* first attempt has timed out. */ + n = 20; + try_internal_clock = true; + goto do_retry; + } + + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: restore external clock source after reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, SDR0_ETH_CFG_ECS << dev->cell_index, 0);
From: Namhyung Kim namhyung@kernel.org
[ Upstream commit 94df1040b1e6aacd8dec0ba3c61d7e77cd695f26 ]
If a kernel module is compressed, it should be decompressed before running objdump so the binary data is parsed correctly. This fixes a failure of the object code reading test for me.
Signed-off-by: Namhyung Kim namhyung@kernel.org Acked-by: Adrian Hunter adrian.hunter@intel.com Acked-by: Jiri Olsa jolsa@kernel.org Cc: David Ahern dsahern@gmail.com Cc: Peter Zijlstra a.p.zijlstra@chello.nl Cc: Wang Nan wangnan0@huawei.com Cc: kernel-team@lge.com Link: http://lkml.kernel.org/r/20170608073109.30699-8-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/perf/tests/code-reading.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index a767a6400c5c..6ea4fcfaab36 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -182,6 +182,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, unsigned char buf2[BUFSZ]; size_t ret_len; u64 objdump_addr; + const char *objdump_name; + char decomp_name[KMOD_DECOMP_LEN]; int ret;
pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); @@ -242,9 +244,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, state->done[state->done_cnt++] = al.map->start; }
+ objdump_name = al.map->dso->long_name; + if (dso__needs_decompress(al.map->dso)) { + if (dso__decompress_kmodule_path(al.map->dso, objdump_name, + decomp_name, + sizeof(decomp_name)) < 0) { + pr_debug("decompression failed\n"); + return -1; + } + + objdump_name = decomp_name; + } + /* Read the object code using objdump */ objdump_addr = map__rip_2objdump(al.map, al.addr); - ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); + ret = read_via_objdump(objdump_name, objdump_addr, buf2, len); + + if (dso__needs_decompress(al.map->dso)) + unlink(objdump_name); + if (ret > 0) { /* * The kernel maps are inaccurate - assume objdump is right in
From: Arnd Bergmann arnd@arndb.de
[ Upstream commit 9cc91f212111cdcbefa02dcdb7dd443f224bf52c ]
The improved type-checking version of container_of() triggers a warning for xchg_xen_ulong, pointing out that 'xen_ulong_t' is unsigned, but atomic64_t contains a signed value:
drivers/xen/events/events_2l.c: In function 'evtchn_2l_handle_events':
drivers/xen/events/events_2l.c:187:1020: error: call to '__compiletime_assert_187' declared with attribute error: pointer type mismatch in container_of()
This adds a cast to work around the warning.
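A hedged illustration of the type check being worked around (generic names, not the Xen code; it assumes a 32-bit ARM build where atomic64_t's counter is a long long): the stricter container_of() compares the type of the passed pointer against the member's type, so an unsigned long pointer no longer matches unless it is cast.

  #include <linux/atomic.h>
  #include <linux/kernel.h>

  static atomic64_t *to_atomic64(unsigned long *word)
  {
          /*
           * Without the cast, the typeof() comparison in the stricter
           * container_of() trips a compile-time assertion, since "counter"
           * is a (signed) long long while "word" is an unsigned long.
           */
          return container_of((long long *)word, atomic64_t, counter);
  }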
Cc: Ian Abbott abbotti@mev.co.uk Fixes: 85323a991d40 ("xen: arm: mandate EABI and use generic atomic operations.") Fixes: daa2ac80834d ("kernel.h: handle pointers to arrays better in container_of()") Signed-off-by: Arnd Bergmann arnd@arndb.de Signed-off-by: Stefano Stabellini sstabellini@kernel.org Reviewed-by: Stefano Stabellini sstabellini@kernel.org Acked-by: Ian Abbott abbotti@mev.co.uk Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/arm/include/asm/xen/events.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h index 71e473d05fcc..620dc75362e5 100644 --- a/arch/arm/include/asm/xen/events.h +++ b/arch/arm/include/asm/xen/events.h @@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) return raw_irqs_disabled_flags(regs->ARM_cpsr); }
-#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr),	\
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
 						    atomic64_t,	\
 						    counter), (val))
From: "Mintz, Yuval" Yuval.Mintz@cavium.com
[ Upstream commit 92f85f05caa51d844af6ea14ffbc7a786446a644 ]
VF clients are configured as enforced, meaning the firmware validates the correctness of their ethertype/vid during transmission. Once txvlan is disabled, the VF would start getting SKBs for transmission where the vlan is in the payload - but it would pass the packet's ethertype instead of the vid, leading to the firmware declaring it malicious.
Signed-off-by: Yuval Mintz Yuval.Mintz@cavium.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index d1103d612d8b..949a82458a29 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3943,15 +3943,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ + u16 vlan_tci = 0; #ifndef BNX2X_STOP_ON_ERROR - if (IS_VF(bp)) + if (IS_VF(bp)) { #endif - tx_start_bd->vlan_or_ethertype = - cpu_to_le16(ntohs(eth->h_proto)); + /* Still need to consider inband vlan for enforced */ + if (__vlan_get_tag(skb, &vlan_tci)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); + } else { + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_INBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(vlan_tci); + } #ifndef BNX2X_STOP_ON_ERROR - else + } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } #endif }
From: Eric Biggers ebiggers@google.com
[ Upstream commit d636bd9f12a66ea3775c9fabbf3f8e118253467a ]
In join_session_keyring(), if install_session_keyring_to_cred() were to fail, we would leak the keyring reference, just like in the bug fixed by commit 23567fd052a9 ("KEYS: Fix keyring ref leak in join_session_keyring()"). Fortunately this cannot happen currently, but we really should be more careful. Do this by adding and using a new error label at which the keyring reference is dropped.
Signed-off-by: Eric Biggers ebiggers@google.com Signed-off-by: David Howells dhowells@redhat.com Signed-off-by: James Morris james.l.morris@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- security/keys/process_keys.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index ac1d5b2b1626..a7095372701e 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -808,15 +808,14 @@ long join_session_keyring(const char *name) ret = PTR_ERR(keyring); goto error2; } else if (keyring == new->session_keyring) { - key_put(keyring); ret = 0; - goto error2; + goto error3; }
/* we've got a keyring - now to install it */ ret = install_session_keyring_to_cred(new, keyring); if (ret < 0) - goto error2; + goto error3;
commit_creds(new); mutex_unlock(&key_session_mutex); @@ -826,6 +825,8 @@ long join_session_keyring(const char *name) okay: return ret;
+error3: + key_put(keyring); error2: mutex_unlock(&key_session_mutex); error:
From: Xin Long lucien.xin@gmail.com
[ Upstream commit 6dfe4b97e08ec3d1a593fdaca099f0ef0a3a19e6 ]
Dmitry got the following recursive locking report while running the syzkaller fuzzer; the Call Trace:

 __dump_stack lib/dump_stack.c:16 [inline]
 dump_stack+0x2ee/0x3ef lib/dump_stack.c:52
 print_deadlock_bug kernel/locking/lockdep.c:1729 [inline]
 check_deadlock kernel/locking/lockdep.c:1773 [inline]
 validate_chain kernel/locking/lockdep.c:2251 [inline]
 __lock_acquire+0xef2/0x3430 kernel/locking/lockdep.c:3340
 lock_acquire+0x2a1/0x630 kernel/locking/lockdep.c:3755
 lock_sock_nested+0xcb/0x120 net/core/sock.c:2536
 lock_sock include/net/sock.h:1460 [inline]
 sctp_close+0xcd/0x9d0 net/sctp/socket.c:1497
 inet_release+0xed/0x1c0 net/ipv4/af_inet.c:425
 inet6_release+0x50/0x70 net/ipv6/af_inet6.c:432
 sock_release+0x8d/0x1e0 net/socket.c:597
 __sock_create+0x38b/0x870 net/socket.c:1226
 sock_create+0x7f/0xa0 net/socket.c:1237
 sctp_do_peeloff+0x1a2/0x440 net/sctp/socket.c:4879
 sctp_getsockopt_peeloff net/sctp/socket.c:4914 [inline]
 sctp_getsockopt+0x111a/0x67e0 net/sctp/socket.c:6628
 sock_common_getsockopt+0x95/0xd0 net/core/sock.c:2690
 SYSC_getsockopt net/socket.c:1817 [inline]
 SyS_getsockopt+0x240/0x380 net/socket.c:1799
 entry_SYSCALL_64_fastpath+0x1f/0xc2
This warning is triggered because the lock held by sctp_getsockopt() is on one socket, while the other lock that sctp_close() later acquires is on the newly created (and then failed) socket of the peeloff operation.

This patch avoids the warning by using lock_sock() with subclass SINGLE_DEPTH_NESTING, as suggested by Wang Cong and Marcelo.
Reported-by: Dmitry Vyukov dvyukov@google.com Suggested-by: Marcelo Ricardo Leitner marcelo.leitner@gmail.com Suggested-by: Cong Wang xiyou.wangcong@gmail.com Signed-off-by: Xin Long lucien.xin@gmail.com Acked-by: Marcelo Ricardo Leitner marcelo.leitner@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/sctp/socket.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index df6a4b2d0728..8efdf7fc1044 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1518,7 +1518,7 @@ static void sctp_close(struct sock *sk, long timeout)
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
- lock_sock(sk); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_state = SCTP_SS_CLOSING;
@@ -1569,7 +1569,7 @@ static void sctp_close(struct sock *sk, long timeout) * held and that should be grabbed before socket lock. */ spin_lock_bh(&net->sctp.addr_wq_lock); - bh_lock_sock(sk); + bh_lock_sock_nested(sk);
/* Hold the sock, since sk_common_release() will put sock_put() * and we have just a little more cleanup.
From: Lorenzo Bianconi lorenzo.bianconi83@gmail.com
[ Upstream commit c83761ff0aac954aa368c623bb0f0d1a3214e834 ]
Remove LSM303DLHC and LSM303DLM from st_magn_id_table, since the LSM303DL series does not support the SPI interface.
Fixes: 872e79add756 (iio: magn: Add STMicroelectronics magn driver) Signed-off-by: Lorenzo Bianconi lorenzo.bianconi@st.com Signed-off-by: Jonathan Cameron jic23@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/iio/magnetometer/st_magn_spi.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c index 6325e7dc8e03..f3cb4dc05391 100644 --- a/drivers/iio/magnetometer/st_magn_spi.c +++ b/drivers/iio/magnetometer/st_magn_spi.c @@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi) }
 static const struct spi_device_id st_magn_id_table[] = {
-	{ LSM303DLHC_MAGN_DEV_NAME },
-	{ LSM303DLM_MAGN_DEV_NAME },
 	{ LIS3MDL_MAGN_DEV_NAME },
 	{ LSM303AGR_MAGN_DEV_NAME },
 	{},
From: Jag Raman jag.raman@oracle.com
[ Upstream commit 6c95483b768c62f8ee933ae08a1bdbcb78b5410f ]
Orabug: 20902628
When an ldc control-only packet is received during data exchange in read_nonraw(), a new rx head is calculated but the rx queue head is not actually advanced (rx_set_head() is not called) and a branch is taken to 'no_data' at which point two things can happen depending on the value of the newly calculated rx head and the current rx tail:
- If the rx queue is determined to be not empty, then the wrong packet is picked up.
- If the rx queue is determined to be empty, then a read error (EAGAIN) is eventually returned since it is falsely assumed that more data was expected.
The fix is to update the rx head and return in case of a control only packet during data exchange.
Signed-off-by: Jagannathan Raman jag.raman@oracle.com Reviewed-by: Aaron Young aaron.young@oracle.com Reviewed-by: Alexandre Chartre alexandre.chartre@oracle.com Reviewed-by: Bijan Mottahedeh bijan.mottahedeh@oracle.com Reviewed-by: Liam Merwick liam.merwick@oracle.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/sparc/kernel/ldc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 59d503866431..9cc600b2d68c 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1733,9 +1733,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
lp->rcv_nxt = p->seqid;
+ /* + * If this is a control-only packet, there is nothing + * else to do but advance the rx queue since the packet + * was already processed above. + */ if (!(p->type & LDC_DATA)) { new = rx_advance(lp, new); - goto no_data; + break; } if (p->stype & (LDC_ACK | LDC_NACK)) { err = data_ack_nack(lp, p);
From: Marcel Holtmann marcel@holtmann.org
[ Upstream commit 313f6888c8fbb1bc8b36c9012ce4e1de848df696 ]
The Broadcom BCM20702 Bluetooth controller in ThinkPad-T530 devices reports support for the Set Event Mask Page 2 command, but actually returns an error when trying to use it.
< HCI Command: Read Local Supported Commands (0x04|0x0002) plen 0
> HCI Event: Command Complete (0x0e) plen 68
      Read Local Supported Commands (0x04|0x0002) ncmd 1
        Status: Success (0x00)
        Commands: 162 entries
          ...
          Set Event Mask Page 2 (Octet 22 - Bit 2)
          ...
< HCI Command: Set Event Mask Page 2 (0x03|0x0063) plen 8
        Mask: 0x0000000000000000
> HCI Event: Command Complete (0x0e) plen 4
      Set Event Mask Page 2 (0x03|0x0063) ncmd 1
        Status: Unknown HCI Command (0x01)
Since these controllers do not support any feature that would require the event mask page 2 to be modified, it is safe to not send this command at all. The default value is all bits set to zero.
T: Bus=01 Lev=02 Prnt=02 Port=03 Cnt=03 Dev#= 9 Spd=12 MxCh= 0
D: Ver= 2.00 Cls=ff(vend.) Sub=01 Prot=01 MxPS=64 #Cfgs= 1
P: Vendor=0a5c ProdID=21e6 Rev= 1.12
S: Manufacturer=Broadcom Corp
S: Product=BCM20702A0
S: SerialNumber=F82FA8E8CFC0
C:* #Ifs= 4 Cfg#= 1 Atr=e0 MxPwr= 0mA
I:* If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb
E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms
E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms
E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms
I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb
E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms
E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms
I: If#= 1 Alt= 1 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb
E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms
E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms
I: If#= 1 Alt= 2 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb
E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms
E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms
I: If#= 1 Alt= 3 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb
E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms
E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms
I: If#= 1 Alt= 4 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb
E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms
E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms
I: If#= 1 Alt= 5 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb
E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms
E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms
I:* If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=btusb
E: Ad=84(I) Atr=02(Bulk) MxPS= 32 Ivl=0ms
E: Ad=04(O) Atr=02(Bulk) MxPS= 32 Ivl=0ms
I:* If#= 3 Alt= 0 #EPs= 0 Cls=fe(app. ) Sub=01 Prot=01 Driver=(none)
Signed-off-by: Marcel Holtmann marcel@holtmann.org Reported-by: Sedat Dilek sedat.dilek@gmail.com Tested-by: Sedat Dilek sedat.dilek@gmail.com Signed-off-by: Szymon Janc szymon.janc@codecoup.pl Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/bluetooth/hci_core.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 62edbf1b114e..5d0b1358c754 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -716,6 +716,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + bool changed = false;
/* If Connectionless Slave Broadcast master role is supported * enable all necessary events for it. @@ -725,6 +726,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req) events[1] |= 0x80; /* Synchronization Train Complete */ events[2] |= 0x10; /* Slave Page Response Timeout */ events[2] |= 0x20; /* CSB Channel Map Change */ + changed = true; }
/* If Connectionless Slave Broadcast slave role is supported @@ -735,13 +737,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req) events[2] |= 0x02; /* CSB Receive */ events[2] |= 0x04; /* CSB Timeout */ events[2] |= 0x08; /* Truncated Page Complete */ + changed = true; }
/* Enable Authenticated Payload Timeout Expired event if supported */ - if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) + if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { events[2] |= 0x80; + changed = true; + }
- hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events); + /* Some Broadcom based controllers indicate support for Set Event + * Mask Page 2 command, but then actually do not support it. Since + * the default value is all bits set to zero, the command is only + * required if the event mask has to be changed. In case no change + * to the event mask is needed, skip this command. + */ + if (changed) + hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, + sizeof(events), events); }
static void hci_init3_req(struct hci_request *req, unsigned long opt)
From: Christophe Jaillet christophe.jaillet@wanadoo.fr
[ Upstream commit b2cdd8e1b54849477a32d820acc2e87828a38f3d ]
'of_node_put()' should be called on the pointer returned by 'of_parse_phandle()' when done. In this function it is done on every path except this 'continue', so add it.
Fixes: 97735da074fd (drivers: cpuidle: Add status property to ARM idle states) Signed-off-by: Christophe Jaillet christophe.jaillet@wanadoo.fr Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/cpuidle/dt_idle_states.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index a5c111b67f37..ea11a33e7fff 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, if (!state_node) break;
- if (!of_device_is_available(state_node)) + if (!of_device_is_available(state_node)) { + of_node_put(state_node); continue; + }
if (!idle_state_valid(state_node, i, cpumask)) { pr_warn("%s idle state not valid, bailing out\n",
From: Lv Zheng lv.zheng@intel.com
[ Upstream commit 861ba6351c520328e94a78c923b415faa9116287 ]
ACPICA commit 99bc3beca92c6574ea1d69de42e54f872e6373ce
It is reported that on Linux, the RTC driver complains with spurious errors on hardware-reduced platforms:

[ 4.085420] ACPI Warning: Could not enable fixed event - real_time_clock (4) (20160422/evxface-654)
This patch fixes this by adding the missing runtime check for reduced hardware. Reported by Chandan Tagore, fixed by Lv Zheng.
Link: https://github.com/acpica/acpica/commit/99bc3bec Tested-by: Chandan Tagore tagore.chandan@gmail.com Signed-off-by: Lv Zheng lv.zheng@intel.com Signed-off-by: Bob Moore robert.moore@intel.com Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/acpi/acpica/evxfevnt.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+)
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 10ce48e16ebf..d830705f8a18 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
ACPI_FUNCTION_TRACE(acpi_enable_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) { @@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
ACPI_FUNCTION_TRACE(acpi_disable_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) { @@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event)
ACPI_FUNCTION_TRACE(acpi_clear_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
From: Bob Moore robert.moore@intel.com
[ Upstream commit 6f0527b77d9e0129dd8e50945b0d610ed943d6b2 ]
ACPICA commit ed0389cb11a61e63c568ac1f67948fc6a7bd1aeb
An invalid opcode indicates something seriously wrong with the input AML file. The AML parser is immediately confused and lost, causing the resulting parse tree to be ill-formed. The actual disassembly can then cause numerous unrelated errors and faults.
This change aborts the disassembly upon discovery of such an opcode during the AML parse phase.
Link: https://github.com/acpica/acpica/commit/ed0389cb Signed-off-by: Bob Moore robert.moore@intel.com Signed-off-by: Lv Zheng lv.zheng@intel.com Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/acpi/acpica/psobject.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+)
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index e54bc2aa7a88..a05b3b79b987 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -121,6 +121,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) (u32)(aml_offset + sizeof(struct acpi_table_header)));
+ ACPI_ERROR((AE_INFO, + "Aborting disassembly, AML byte code is corrupt")); + /* Dump the context surrounding the invalid opcode */
acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. @@ -129,6 +132,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) sizeof(struct acpi_table_header) - 16)); acpi_os_printf(" */\n"); + + /* + * Just abort the disassembly, cannot continue because the + * parser is essentially lost. The disassembler can then + * randomly fail because an ill-constructed parse tree + * can result. + */ + return_ACPI_STATUS(AE_AML_BAD_OPCODE); #endif }
@@ -293,6 +304,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state, if (status == AE_CTRL_PARSE_CONTINUE) { return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); } + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + }
/* Create Op structure and append to parent's argument list */
From: Jan Höppner hoeppner@linux.vnet.ibm.com
[ Upstream commit b487a914f853545842a0899329b6b72fe56c4081 ]
We have two flags, DASD_FLAG_DEVICE_RO and DASD_FEATURE_READONLY, that tell us whether a device is read-only. DASD_FLAG_DEVICE_RO is set when a device is attached as read-only to z/VM and DASD_FEATURE_READONLY is set when either the corresponding kernel parameter is configured, or the read-only state is changed via sysfs. This is valuable information in any case. However, only the feature flag is being checked at the moment when we display the current state.
Fix this by checking both flags.
Reviewed-by: Stefan Haberland sth@linux.vnet.ibm.com Signed-off-by: Jan Höppner hoeppner@linux.vnet.ibm.com Signed-off-by: Martin Schwidefsky schwidefsky@de.ibm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/s390/block/dasd_devmap.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 8286f742436b..f61a8e0ae7c8 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -758,13 +758,22 @@ static ssize_t dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dasd_devmap *devmap; - int ro_flag; + struct dasd_device *device; + int ro_flag = 0;
devmap = dasd_find_busid(dev_name(dev)); - if (!IS_ERR(devmap)) - ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0; - else - ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0; + if (IS_ERR(devmap)) + goto out; + + ro_flag = !!(devmap->features & DASD_FEATURE_READONLY); + + spin_lock(&dasd_devmap_lock); + device = devmap->device; + if (device) + ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags); + spin_unlock(&dasd_devmap_lock); + +out: return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n"); }
From: Stefan Haberland sth@linux.vnet.ibm.com
[ Upstream commit e8ac01555d9e464249e8bb122337d6d6e5589ccc ]
The safe offline processing may hang forever because it waits for I/O that cannot be started, since the offline flag prevents new I/O from being started.
Allow I/O to be started during safe offline processing because in this special case we take care that the queues are empty before throwing away the device.
Signed-off-by: Stefan Haberland sth@linux.vnet.ibm.com Signed-off-by: Martin Schwidefsky schwidefsky@de.ibm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/s390/block/dasd.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index e7a6f1222642..b76a85d14ef0 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1881,8 +1881,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device, { int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
- if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { - /* dasd is being set offline. */ + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && + !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + /* + * dasd is being set offline + * but it is no safe offline where we have to allow I/O + */ return 1; } if (device->stopped) {
From: Roopa Prabhu roopa@cumulusnetworks.com
[ Upstream commit e0090a9e979de5202c7d16c635dea2f005221073 ]
This patch fixes vxlan_snoop to not move permanent fdb entries on learn events. This is consistent with the bridge fdb handling of permanent entries.
Fixes: 26a41ae60438 ("vxlan: only migrate dynamic FDB entries") Signed-off-by: Roopa Prabhu roopa@cumulusnetworks.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/vxlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e4ff1e45c02e..c41378214ede 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -962,7 +962,7 @@ static bool vxlan_snoop(struct net_device *dev, return false;
 	/* Don't migrate static entries, drop packets */
-	if (f->state & NUD_NOARP)
+	if (f->state & (NUD_PERMANENT | NUD_NOARP))
 		return true;
if (net_ratelimit())
From: Kees Cook keescook@chromium.org
[ Upstream commit 42c335f7e67029d2e01711f2f2bc6252277c8993 ]
When copying attributes, the len argument was padded out and the resulting memcpy() would copy beyond the end of the source buffer. Avoid this, and use size_t for val_len to avoid all the casts. Similarly, avoid source buffer casts and use void *.
Additionally, this enforces that val_len can be represented by a u16 and that the DMA buffer is not overflowed. It also fixes the size used for mfs, which is not FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN (but it will be padded up to 4). This was noticed by the future CONFIG_FORTIFY_SOURCE checks.
Cc: Daniel Micay danielmicay@gmail.com Signed-off-by: Kees Cook keescook@chromium.org Acked-by: Varun Prakash varun@chelsio.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/scsi/csiostor/csio_lnode.c | 43 +++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index c00b2ff72b55..be5ee2d37815 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -238,14 +238,23 @@ csio_osname(uint8_t *buf, size_t buf_len) }
static inline void -csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len) +csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len) { + uint16_t len; struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr; + + if (WARN_ON(val_len > U16_MAX)) + return; + + len = val_len; + ae->type = htons(type); len += 4; /* includes attribute type and length */ len = (len + 3) & ~3; /* should be multiple of 4 bytes */ ae->len = htons(len); - memcpy(ae->value, val, len); + memcpy(ae->value, val, val_len); + if (len > val_len) + memset(ae->value + val_len, 0, len - val_len); *ptr += len; }
@@ -335,7 +344,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) numattrs++; val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, - (uint8_t *)&val, + &val, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); numattrs++;
@@ -346,23 +355,22 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) else val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN); csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, - (uint8_t *)&val, - FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); + &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); numattrs++;
mfs = ln->ln_sparm.csp.sp_bb_data; csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE, - (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN); + &mfs, sizeof(mfs)); numattrs++;
strcpy(buf, "csiostor"); csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf, - (uint16_t)strlen(buf)); + strlen(buf)); numattrs++;
if (!csio_hostname(buf, sizeof(buf))) { csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME, - buf, (uint16_t)strlen(buf)); + buf, strlen(buf)); numattrs++; } attrib_blk->numattrs = htonl(numattrs); @@ -444,33 +452,32 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
strcpy(buf, "Chelsio Communications"); csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf, - (uint16_t)strlen(buf)); + strlen(buf)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER, - hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn)); + hw->vpd.sn, sizeof(hw->vpd.sn)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id, - (uint16_t)sizeof(hw->vpd.id)); + sizeof(hw->vpd.id)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION, - hw->model_desc, (uint16_t)strlen(hw->model_desc)); + hw->model_desc, strlen(hw->model_desc)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION, - hw->hw_ver, (uint16_t)sizeof(hw->hw_ver)); + hw->hw_ver, sizeof(hw->hw_ver)); numattrs++; csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION, - hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str)); + hw->fwrev_str, strlen(hw->fwrev_str)); numattrs++;
if (!csio_osname(buf, sizeof(buf))) { csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION, - buf, (uint16_t)strlen(buf)); + buf, strlen(buf)); numattrs++; }
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD, - (uint8_t *)&maxpayload, - FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); + &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); len = (uint32_t)(pld - (uint8_t *)cmd); numattrs++; attrib_blk->numattrs = htonl(numattrs); @@ -1794,6 +1801,8 @@ csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); int rv;
+ BUG_ON(pld_len > pld->len); + io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */ io_req->fw_handle = (uintptr_t) (io_req); io_req->eq_idx = mgmtm->eq_idx;
From: James Smart jsmart2021@gmail.com
[ Upstream commit 522dceeb62ded1a7b538d2f1f61cc69a1402537d ]
On hbacmd reset failure, a wrong string ("nline") is observed in the kernel log.

On failure, a non-negative value (1) is returned from the sysfs store routine. The kernel interprets it as the number of bytes consumed, and the store routine is called again with the remaining characters as input.

Fix: return a negative error code (-EIO) in case of failure.
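A hedged sketch of the sysfs ->store() contract being relied on here (generic, hypothetical names, not the lpfc code):

  #include <linux/device.h>
  #include <linux/errno.h>

  /* Hypothetical operation standing in for the board reset; may fail. */
  static int example_do_reset(struct device *dev)
  {
          return 0;
  }

  static ssize_t example_mode_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
  {
          int status = example_do_reset(dev);

          /*
           * A non-negative return is taken as "bytes consumed"; returning a
           * partial count makes the core call ->store() again with the rest
           * of the buffer, which is how the stray "nline" string appeared.
           */
          return status ? -EIO : count;
  }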
Signed-off-by: Dick Kennedy dick.kennedy@broadcom.com Signed-off-by: James Smart james.smart@broadcom.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/scsi/lpfc/lpfc_attr.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4639dac64e7f..5708bbc056c6 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1090,6 +1090,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, goto board_mode_out; } wait_for_completion(&online_compl); + if (status) + status = -EIO; } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
From: Jia-Ju Bai baijiaju1990@163.com
[ Upstream commit 896f6966fc815abe71f85fb26f0193875df8a035 ]
The driver may sleep under a spin lock, and the function call path is:

mraid_mm_attach_buf (acquire the lock by spin_lock_irqsave)
  pci_pool_alloc(GFP_KERNEL) --> may sleep
To fix it, the "GFP_KERNEL" is replaced with "GFP_ATOMIC".
[mkp: fixed whitespace]
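A minimal hedged illustration of the constraint (generic names, not the megaraid code): an allocation made while a spinlock is held with interrupts disabled must not sleep, so GFP_ATOMIC is required in place of GFP_KERNEL.

  #include <linux/dmapool.h>
  #include <linux/gfp.h>
  #include <linux/spinlock.h>

  static void *alloc_buf_under_lock(struct dma_pool *pool, spinlock_t *lock,
                                    dma_addr_t *handle)
  {
          unsigned long flags;
          void *vaddr;

          spin_lock_irqsave(lock, flags);
          /* GFP_KERNEL could sleep here and deadlock/warn; GFP_ATOMIC cannot sleep. */
          vaddr = dma_pool_alloc(pool, GFP_ATOMIC, handle);
          spin_unlock_irqrestore(lock, flags);

          return vaddr;
  }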
Signed-off-by: Jia-Ju Bai baijiaju1990@163.com Acked-by: Sumit Saxena sumit.saxena@broadcom.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/scsi/megaraid/megaraid_mm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index a70692779a16..bfc7984a1c17 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -570,7 +570,7 @@ mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
 	kioc->pool_index = right_pool;
 	kioc->free_buf = 1;
-	kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+	kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_ATOMIC,
 					 &kioc->buf_paddr);
 	spin_unlock_irqrestore(&pool->lock, flags);
From: Yuyang Du yuyang.du@intel.com
[ Upstream commit c3509715fc9484a48b69a9f0196b728c960840c9 ]
If we get a nonpositive number of ports, there is no sense in continuing, so fail gracefully.

In addition, the commit 0775a9cbc694e8c72 ("usbip: vhci extension: modifications to vhci driver") introduced configurable numbers of controllers and ports, but we have a static port number maximum, MAXNPORT. If it is exceeded, the idev array will be overflowed. We fix it by validating nports to make sure the maximum port number is not exceeded.
Reviewed-by: Krzysztof Opasiak k.opasiak@samsung.com Signed-off-by: Yuyang Du yuyang.du@intel.com Acked-by: Shuah Khan shuahkh@osg.samsung.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/usb/usbip/libsrc/vhci_driver.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index 1274f326242c..c589cfbd1cfe 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c @@ -238,9 +238,16 @@ int usbip_vhci_driver_open(void) }
vhci_driver->nports = get_nports(); - dbg("available ports: %d", vhci_driver->nports);
+ if (vhci_driver->nports <= 0) { + err("no available ports"); + goto err; + } else if (vhci_driver->nports > MAXNPORT) { + err("port number exceeds %d", MAXNPORT); + goto err; + } + if (refresh_imported_device_list()) goto err;
From: Yuyang Du yuyang.du@intel.com
[ Upstream commit fd92b7deb98a4edd31ffcc2d64cee36103805ff5 ]
The commit 0775a9cbc694e8c7 ("usbip: vhci extension: modifications to vhci driver") introduced multiple controllers, but the status of the ports is only extracted from the first status file; fix it.
Reviewed-by: Krzysztof Opasiak k.opasiak@samsung.com Signed-off-by: Yuyang Du yuyang.du@intel.com Acked-by: Shuah Khan shuahkh@osg.samsung.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/usb/usbip/libsrc/vhci_driver.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index c589cfbd1cfe..1305a36f95d8 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c @@ -107,18 +107,33 @@ static int parse_status(const char *value) return 0; }
+#define MAX_STATUS_NAME 16 + static int refresh_imported_device_list(void) { const char *attr_status; + char status[MAX_STATUS_NAME+1] = "status"; + int i, ret;
- attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device, - "status"); - if (!attr_status) { - err("udev_device_get_sysattr_value failed"); - return -1; + for (i = 0; i < vhci_driver->ncontrollers; i++) { + if (i > 0) + snprintf(status, sizeof(status), "status.%d", i); + + attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device, + status); + if (!attr_status) { + err("udev_device_get_sysattr_value failed"); + return -1; + } + + dbg("controller %d", i); + + ret = parse_status(attr_status); + if (ret != 0) + return ret; }
- return parse_status(attr_status); + return 0; }
static int get_nports(void)
From: Sujith Pandel sujithpshankar@gmail.com
[ Upstream commit 6c51c82c60991bdbfb937f3bf0cdbe68d042073d ]
The function find_smbios_instance_string() does not consider the PCI domain number. As a result, the SMBIOS type 41 device type instance would be exported to sysfs for every PCI domain that has a PCI device with the same bus/device/function, even though the SMBIOS type 41 device type instance is defined only for the bus/device/function in one specific PCI domain.
Address the issue by making find_smbios_instance_string() check PCI domain number as well.
Reported-by: Shai Fultheim Shai@ScaleMP.com Suggested-by: Shai Fultheim Shai@ScaleMP.com Tested-by: Shai Fultheim Shai@ScaleMP.com Signed-off-by: Sujith Pandel sujithpshankar@gmail.com Signed-off-by: Narendra K Narendra_K@Dell.com Signed-off-by: Bjorn Helgaas bhelgaas@google.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/pci/pci-label.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index 024b5c179348..5986aad76088 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -43,9 +43,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, { const struct dmi_device *dmi; struct dmi_dev_onboard *donboard; + int domain_nr; int bus; int devfn;
+ domain_nr = pci_domain_nr(pdev->bus); bus = pdev->bus->number; devfn = pdev->devfn;
@@ -53,8 +55,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, NULL, dmi)) != NULL) { donboard = dmi->device_data; - if (donboard && donboard->bus == bus && - donboard->devfn == devfn) { + if (donboard && donboard->segment == domain_nr && + donboard->bus == bus && + donboard->devfn == devfn) { if (buf) { if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) return scnprintf(buf, PAGE_SIZE,
From: Bjorn Helgaas bhelgaas@google.com
[ Upstream commit 2f686f1d9beee135de6d08caea707ec7bfc916d4 ]
PCI_STD_RESOURCE_END is (confusingly) the index of the last valid BAR, not the *number* of BARs. To iterate through all possible BARs, we need to include PCI_STD_RESOURCE_END.
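A tiny stand-alone example of the off-by-one, with LAST playing the role of a last-valid-index constant such as PCI_STD_RESOURCE_END (the values are invented):

#include <stdio.h>

#define LAST 5		/* index of the last valid entry, not the entry count */

int main(void)
{
	int i, visited_lt = 0, visited_le = 0;

	for (i = 0; i < LAST; i++)	/* skips the last valid entry */
		visited_lt++;
	for (i = 0; i <= LAST; i++)	/* visits every valid entry */
		visited_le++;

	printf("< visits %d entries, <= visits %d entries\n",
	       visited_lt, visited_le);	/* prints 5 and 6 */
	return 0;
}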
Fixes: 9fe373f9997b ("PCI: Increase IBM ipr SAS Crocodile BARs to at least system page size") Signed-off-by: Bjorn Helgaas bhelgaas@google.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/pci/quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 4eb1cf0ed00c..ae6aa7b385c2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -304,7 +304,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) { int i;
- for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { struct resource *r = &dev->resource[i];
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
From: Chris Packham chris.packham@alliedtelesis.co.nz
[ Upstream commit 1eeef2d7483a7e3f8d2dd2a5b9939b3b814dc549 ]
erasesize is meaningful for flash devices, but for SRAM there is no concept of an erase block, so erasesize is set to 0. When partitioning these devices, instead of ensuring that partitions fall on erasesize boundaries, we ensure they fall on writesize boundaries.
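A rough user-space sketch of the rounding logic described above, assuming example sizes (the real code uses do_div() on 64-bit offsets):

#include <stdio.h>
#include <stdint.h>

static uint64_t align_up(uint64_t offset, uint32_t alignment)
{
	uint32_t remainder = offset % alignment;

	return remainder ? offset + alignment - remainder : offset;
}

int main(void)
{
	uint32_t erasesize = 0;		/* SRAM-like device: no erase block */
	uint32_t writesize = 4;
	uint32_t alignment = erasesize ? erasesize : writesize;

	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)0x1002,
	       (unsigned long long)align_up(0x1002, alignment));	/* 0x1004 */
	return 0;
}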
Helped-by: Boris Brezillon boris.brezillon@free-electrons.com Signed-off-by: Chris Packham chris.packham@alliedtelesis.co.nz Acked-by: Boris Brezillon boris.brezillon@free-electrons.com Signed-off-by: Brian Norris computersforpeace@gmail.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/mtd/mtdpart.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index f8ba153f63bf..3b7ac1989f90 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -353,8 +353,12 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, const struct mtd_partition *part, int partno, uint64_t cur_offset) { + int wr_alignment = (master->flags & MTD_NO_ERASE) ? master->writesize: + master->erasesize; struct mtd_part *slave; + u32 remainder; char *name; + u64 tmp;
/* allocate the partition structure */ slave = kzalloc(sizeof(*slave), GFP_KERNEL); @@ -449,10 +453,11 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, if (slave->offset == MTDPART_OFS_APPEND) slave->offset = cur_offset; if (slave->offset == MTDPART_OFS_NXTBLK) { + tmp = cur_offset; slave->offset = cur_offset; - if (mtd_mod_by_eb(cur_offset, master) != 0) { - /* Round up to next erasesize */ - slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize; + remainder = do_div(tmp, wr_alignment); + if (remainder) { + slave->offset += wr_alignment - remainder; printk(KERN_NOTICE "Moving partition %d: " "0x%012llx -> 0x%012llx\n", partno, (unsigned long long)cur_offset, (unsigned long long)slave->offset); @@ -517,19 +522,22 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, slave->mtd.erasesize = master->erasesize; }
- if ((slave->mtd.flags & MTD_WRITEABLE) && - mtd_mod_by_eb(slave->offset, &slave->mtd)) { + tmp = slave->offset; + remainder = do_div(tmp, wr_alignment); + if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { /* Doesn't start on a boundary of major erase size */ /* FIXME: Let it be writable if it is on a boundary of * _minor_ erase size though */ slave->mtd.flags &= ~MTD_WRITEABLE; - printk(KERN_WARNING"mtd: partition "%s" doesn't start on an erase block boundary -- force read-only\n", + printk(KERN_WARNING"mtd: partition "%s" doesn't start on an erase/write block boundary -- force read-only\n", part->name); } - if ((slave->mtd.flags & MTD_WRITEABLE) && - mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) { + + tmp = slave->mtd.size; + remainder = do_div(tmp, wr_alignment); + if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { slave->mtd.flags &= ~MTD_WRITEABLE; - printk(KERN_WARNING"mtd: partition "%s" doesn't end on an erase block -- force read-only\n", + printk(KERN_WARNING"mtd: partition "%s" doesn't end on an erase/write block -- force read-only\n", part->name); }
From: Jonas Gorski jonas.gorski@gmail.com
[ Upstream commit 4f02b50ece11dcf75263fb7a4cfe8a5df1cfabea ]
Each nibble represents 4 LEDs, and in the case of the higher register, bit 0 represents LED 4, so we need to apply the modulus to the LED number as well.
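A minimal illustration of the computation, with the register layout simplified and the LED/sel numbers invented; the point is only that both the bit index and the nibble index need the modulo-4 treatment:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 5, sel = 5;	/* hypothetical LED in the upper register */
	unsigned int val;

	/* bit within the nibble and nibble within the register both wrap at 4 */
	val = (1u << (reg % 4)) << (((sel % 4) * 4) + 16);
	printf("mask for LED %u: 0x%08x\n", reg, val);
	return 0;
}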
Fixes: fd7b025a238d0a5440bfa26c585eb78097bf48dc ("leds: add BCM6328 LED driver") Signed-off-by: Jonas Gorski jonas.gorski@gmail.com Acked-by: Álvaro Fernández Rojas noltari@gmail.com Signed-off-by: Jacek Anaszewski jacek.anaszewski@gmail.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/leds/leds-bcm6328.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index c7ea5c626331..833ccd167d44 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c @@ -224,7 +224,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
spin_lock_irqsave(lock, flags); val = bcm6328_led_read(addr); - val |= (BIT(reg) << (((sel % 4) * 4) + 16)); + val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16)); bcm6328_led_write(addr, val); spin_unlock_irqrestore(lock, flags); } @@ -251,7 +251,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
spin_lock_irqsave(lock, flags); val = bcm6328_led_read(addr); - val |= (BIT(reg) << ((sel % 4) * 4)); + val |= (BIT(reg % 4) << ((sel % 4) * 4)); bcm6328_led_write(addr, val); spin_unlock_irqrestore(lock, flags); }
From: João Paulo Rechi Vita jprvita@gmail.com
[ Upstream commit 3e2bc5c5b3274ec7402fabbfba557ea58084985e ]
If a machine reports an RF Button in the communication button device bitmap, we need to remove it before calling Get Device Status, otherwise it will return the "Undefined device" (0xE2) error code.
Although this may be a BIOS bug, we don't really need to get or set the RF Button status. The status indicator LED embedded in the button is controlled by firmware logic, depending on the status of the wireless radios present on the machine (WiFi || WWAN).
This commit fixes the wireless status indicator LED on the Acer TravelMate P648-G2-MG, and cleans the following message from the kernel log: "Get Current Device Status failed: 0xe2 - 0x0".
Signed-off-by: João Paulo Rechi Vita jprvita@endlessm.com Reviewed-by: "Lee, Chun-Yi" joeyli.kernel@gmail.com Signed-off-by: Andy Shevchenko andriy.shevchenko@linux.intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/platform/x86/acer-wmi.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index b2cdc1a1ad4f..208690289da3 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -148,6 +148,8 @@ struct event_return_value { #define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */ #define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */ #define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */ +#define ACER_WMID3_GDS_RFBTN (1<<14) /* RF Button */ + #define ACER_WMID3_GDS_TOUCHPAD (1<<1) /* Touchpad */
struct lm_input_params { @@ -205,6 +207,7 @@ struct hotkey_function_type_aa { #define ACER_CAP_BRIGHTNESS (1<<3) #define ACER_CAP_THREEG (1<<4) #define ACER_CAP_ACCEL (1<<5) +#define ACER_CAP_RFBTN (1<<6) #define ACER_CAP_ANY (0xFFFFFFFF)
/* @@ -1218,6 +1221,10 @@ static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d) interface->capability |= ACER_CAP_THREEG; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH) interface->capability |= ACER_CAP_BLUETOOTH; + if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) { + interface->capability |= ACER_CAP_RFBTN; + commun_func_bitmap &= ~ACER_WMID3_GDS_RFBTN; + }
commun_fn_key_number = type_aa->commun_fn_key_number; }
From: Mateusz Jurczyk mjurczyk@google.com
[ Upstream commit 20a3d5bf5e5b13c02450ab6178ec374abd830686 ]
Verify that the caller-provided sockaddr structure is large enough to contain the sa_family field, before accessing it in the connect() handler of the AF_CAIF socket. Since the syscall doesn't enforce a minimum size of the corresponding memory region, very short sockaddrs (zero or one byte long) result in operating on uninitialized memory while referencing sa_family.
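A minimal user-space sketch of the check, assuming nothing about the rest of caif_connect(); offsetofend() is redefined here only for illustration (the kernel provides it):

#include <stdio.h>
#include <stddef.h>
#include <sys/socket.h>

#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

static int validate_addr_len(size_t addr_len)
{
	/* too short to even hold sa_family: reject before reading it */
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", validate_addr_len(0),
	       validate_addr_len(sizeof(struct sockaddr)));	/* -1 and 0 */
	return 0;
}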
Signed-off-by: Mateusz Jurczyk mjurczyk@google.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/caif/caif_socket.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index aa209b1066c9..bbc80e4e08f5 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -754,6 +754,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
lock_sock(sk);
+ err = -EINVAL; + if (addr_len < offsetofend(struct sockaddr, sa_family)) + goto out; + err = -EAFNOSUPPORT; if (uaddr->sa_family != AF_CAIF) goto out;
From: Jeff Mahoney jeffm@suse.com
[ Upstream commit a09c0fc3f5d775231f1884e0e66c495065a461ee ]
We call pci_set_drvdata() immediately after calling register_netdev(), which leaves a window where tasks writing to the sriov_numvfs sysfs attribute can sneak in and crash the kernel. register_netdev() cleans up after itself, so placing pci_set_drvdata() immediately before it should preserve the intent of commit 0fb6a55cc31f ("ixgbe: fix crash on rmmod after probe fail").
Fixes: 0fb6a55cc31f ("ixgbe: fix crash on rmmod after probe fail") Signed-off-by: Jeff Mahoney jeffm@suse.com Tested-by: Andrew Bowers andrewx.bowers@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3283e8e1f409..72faaf4411b5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8963,11 +8963,11 @@ skip_sriov: "hardware.\n"); } strcpy(netdev->name, "eth%d"); + pci_set_drvdata(pdev, adapter); err = register_netdev(netdev); if (err) goto err_register;
- pci_set_drvdata(pdev, adapter);
/* power down the optics for 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser)
From: Jacob Keller jacob.e.keller@intel.com
[ Upstream commit 5fef124d9c75942dc5c2445a3faa8ad37cbf4c82 ]
The ixgbe driver uses a state bit lock to avoid handling more than one Tx timestamp request at once. This is required because hardware is limited to a single set of registers for Tx timestamps.
The state bit lock is not properly cleaned up during ixgbe_xmit_frame_ring() if the transmit fails, such as due to a DMA or TSO failure. On some hardware this blocks timestamps until the service task times out. On other hardware this results in a permanent lock of the timestamp bit, because we never receive an interrupt indicating the timestamp occurred, since indeed the packet was never transmitted.
Fix this by checking for DMA and TSO errors in ixgbe_xmit_frame_ring() and properly cleaning up after ourselves when these occur.
Reported-by: David Mirabito davidm@metamako.com Signed-off-by: Jacob Keller jacob.e.keller@intel.com Tested-by: Andrew Bowers andrewx.bowers@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a5b443171b8b..3283e8e1f409 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7180,9 +7180,9 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS)
-static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - const u8 hdr_len) +static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + const u8 hdr_len) { struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; @@ -7309,7 +7309,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, mmiowb(); }
- return; + return 0; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -7325,6 +7325,8 @@ dma_error: }
tx_ring->next_to_use = i; + + return -1; }
static void ixgbe_atr(struct ixgbe_ring *ring, @@ -7611,13 +7613,21 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #ifdef IXGBE_FCOE xmit_fcoe: #endif /* IXGBE_FCOE */ - ixgbe_tx_map(tx_ring, first, hdr_len); + if (ixgbe_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_timestamp;
return NETDEV_TX_OK;
out_drop: dev_kfree_skb_any(first->skb); first->skb = NULL; +cleanup_tx_timestamp: + if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + }
return NETDEV_TX_OK; }
From: Yazen Ghannam yazen.ghannam@amd.com
[ Upstream commit ec33838244c8535b23b8d24b167996fd1318bb68 ]
AMD systems have non-core, shared MCA banks within a die. These banks are controlled by a master CPU per die. If this CPU is offlined then all the shared banks are disabled in addition to the CPU's core banks.
Also, Fam17h systems may have SMT enabled. The MCA_CTL register is shared between SMT thread siblings. If a CPU is offlined then all its sibling's MCA banks are also disabled.
Extend the existing vendor check to AMD too.
Signed-off-by: Yazen Ghannam yazen.ghannam@amd.com [ Fix up comment. ] Signed-off-by: Borislav Petkov bp@suse.de Cc: Borislav Petkov bp@alien8.de Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Peter Zijlstra peterz@infradead.org Cc: Thomas Gleixner tglx@linutronix.de Cc: Tony Luck tony.luck@intel.com Cc: linux-edac linux-edac@vger.kernel.org Link: http://lkml.kernel.org/r/20170613162835.30750-8-bp@alien8.de Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/x86/kernel/cpu/mcheck/mce.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 6edb9530d7e9..8a79833e627d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -2081,12 +2081,13 @@ static void mce_disable_error_reporting(void) static void vendor_disable_error_reporting(void) { /* - * Don't clear on Intel CPUs. Some of these MSRs are socket-wide. + * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide. * Disabling them for just a single offlined CPU is bad, since it will * inhibit reporting for all shared resources on the socket like the * last level cache (LLC), the integrated memory controller (iMC), etc. */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || + boot_cpu_data.x86_vendor == X86_VENDOR_AMD) return;
mce_disable_error_reporting();
From: WANG Cong xiyou.wangcong@gmail.com
[ Upstream commit 74030603dfd9f76c0f279f19f1dd1ee3028fee7a ]
Laura reported a sleep-in-atomic kernel warning inside tcf_act_police_init() which calls gen_replace_estimator() with spinlock protection.
It is not necessary in this case: we already hold the RTNL lock here, which is enough to protect against concurrent writers. The reader, i.e. tcf_act_police(), needs to make decisions based on this rate estimator; in the worst case we drop more or fewer packets than necessary while changing the rate in parallel, which is still acceptable.
Reported-by: Laura Abbott labbott@redhat.com Reported-by: Nick Huber nicholashuber@gmail.com Cc: Jamal Hadi Salim jhs@mojatatu.com Signed-off-by: Cong Wang xiyou.wangcong@gmail.com Acked-by: Jamal Hadi Salim jhs@mojatatu.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/sched/act_police.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 9a1c42a43f92..c5d5650fd14b 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -176,21 +176,21 @@ override: } }
- spin_lock_bh(&police->tcf_lock); if (est) { err = gen_replace_estimator(&police->tcf_bstats, NULL, &police->tcf_rate_est, &police->tcf_lock, est); if (err) - goto failure_unlock; + goto failure; } else if (tb[TCA_POLICE_AVRATE] && (ret == ACT_P_CREATED || !gen_estimator_active(&police->tcf_bstats, &police->tcf_rate_est))) { err = -EINVAL; - goto failure_unlock; + goto failure; }
+ spin_lock_bh(&police->tcf_lock); /* No failure allowed after this point */ police->tcfp_mtu = parm->mtu; if (police->tcfp_mtu == 0) { @@ -242,8 +242,6 @@ override: a->priv = police; return ret;
-failure_unlock: - spin_unlock_bh(&police->tcf_lock); failure: qdisc_put_rtab(P_tab); qdisc_put_rtab(R_tab);
From: Christoph Hellwig hch@lst.de
[ Upstream commit b014e96d1abbd67404bbe2018937b46466299e9e ]
Every method in struct device_driver, or in structures derived from it like struct pci_driver, MUST provide exclusion against the driver's ->remove() method, usually by using device_lock().
Protect use of pci_error_handlers->reset_notify() by holding the device lock while calling it.
Note:
- pci_dev_lock() calls device_lock() in addition to blocking user-space config accesses.
- pci_err_handlers->reset_notify() is used inside pci_dev_save_and_disable() and pci_dev_restore(). We could hold the device lock directly in pci_reset_notify(), but we expand the region since we have several calls following each other.
Without this, ->reset_notify() may race with ->remove() calls, which can be easily triggered in NVMe.
[bhelgaas: changelog, add pci_reset_notify() comment] [bhelgaas: fold in fix from Dan Carpenter dan.carpenter@oracle.com: http://lkml.kernel.org/r/20170701135323.x5vaj4e2wcs2mcro@mwanda] Link: http://lkml.kernel.org/r/20170601111039.8913-2-hch@lst.de Reported-by: Rakesh Pandit rakesh@tuxera.com Tested-by: Rakesh Pandit rakesh@tuxera.com Signed-off-by: Christoph Hellwig hch@lst.de Signed-off-by: Bjorn Helgaas bhelgaas@google.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/pci/pci.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 295bf1472d02..84854f522291 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3672,6 +3672,12 @@ static void pci_reset_notify(struct pci_dev *dev, bool prepare) { const struct pci_error_handlers *err_handler = dev->driver ? dev->driver->err_handler : NULL; + + /* + * dev->driver->err_handler->reset_notify() is protected against + * races with ->remove() by the device lock, which must be held by + * the caller. + */ if (err_handler && err_handler->reset_notify) err_handler->reset_notify(dev, prepare); } @@ -3807,11 +3813,13 @@ int pci_reset_function(struct pci_dev *dev) if (rc) return rc;
+ pci_dev_lock(dev); pci_dev_save_and_disable(dev);
- rc = pci_dev_reset(dev, 0); + rc = __pci_dev_reset(dev, 0);
pci_dev_restore(dev); + pci_dev_unlock(dev);
return rc; } @@ -3831,16 +3839,14 @@ int pci_try_reset_function(struct pci_dev *dev) if (rc) return rc;
- pci_dev_save_and_disable(dev); + if (!pci_dev_trylock(dev)) + return -EAGAIN;
- if (pci_dev_trylock(dev)) { - rc = __pci_dev_reset(dev, 0); - pci_dev_unlock(dev); - } else - rc = -EAGAIN; + pci_dev_save_and_disable(dev); + rc = __pci_dev_reset(dev, 0); + pci_dev_unlock(dev);
pci_dev_restore(dev); - return rc; } EXPORT_SYMBOL_GPL(pci_try_reset_function); @@ -3994,7 +4000,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus) struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); pci_dev_save_and_disable(dev); + pci_dev_unlock(dev); if (dev->subordinate) pci_bus_save_and_disable(dev->subordinate); } @@ -4009,7 +4017,9 @@ static void pci_bus_restore(struct pci_bus *bus) struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); pci_dev_restore(dev); + pci_dev_unlock(dev); if (dev->subordinate) pci_bus_restore(dev->subordinate); }
From: Jean Delvare jdelvare@suse.de
[ Upstream commit c9aba14362a6eec583819ec8f4b872c1816f5cbe ]
Since version 3.0.0 of the SMBIOS specification, there can be multiple entry points in memory, pointing to one or two DMI tables. If both a 32-bit ("_SM_") entry point and a 64-bit ("_SM3_") entry point are present, the specification requires that the latter points to a table which is a super-set of the table pointed to by the former. Therefore we should give preference to the 64-bit ("_SM3_") entry point.
However, currently the code is picking the first valid entry point it finds. Per specification, we should look for a 64-bit ("_SM3_") entry point first, and if we can't find any, look for a 32-bit ("_SM_" or "_DMI_") entry point. Modify the code to do that.
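A simplified user-space illustration of that lookup order, with an invented buffer standing in for the scanned memory region; the real code also validates checksums and lengths:

#include <stdio.h>
#include <string.h>

/* scan paragraph-aligned (16-byte) offsets for an anchor string */
static const char *find_entry_point(const char *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 16 <= len; i += 16)	/* 64-bit entry point first */
		if (!memcmp(buf + i, "_SM3_", 5))
			return "64-bit (_SM3_)";
	for (i = 0; i + 16 <= len; i += 16)	/* only then fall back to 32-bit */
		if (!memcmp(buf + i, "_SM_", 4) || !memcmp(buf + i, "_DMI_", 5))
			return "32-bit (_SM_/_DMI_)";
	return "none";
}

int main(void)
{
	char region[64] = { 0 };

	memcpy(region + 16, "_SM_", 4);		/* 32-bit anchor appears first in memory */
	memcpy(region + 48, "_SM3_", 5);	/* 64-bit anchor appears later */
	printf("picked: %s\n", find_entry_point(region, sizeof(region)));
	return 0;
}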
Signed-off-by: Jean Delvare jdelvare@suse.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/firmware/dmi_scan.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 0e08e665f715..f1d4881dd2fc 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -626,6 +626,21 @@ void __init dmi_scan_machine(void) if (p == NULL) goto error;
+ /* + * Same logic as above, look for a 64-bit entry point + * first, and if not found, fall back to 32-bit entry point. + */ + memcpy_fromio(buf, p, 16); + for (q = p + 16; q < p + 0x10000; q += 16) { + memcpy_fromio(buf + 16, q, 16); + if (!dmi_smbios3_present(buf)) { + dmi_available = 1; + dmi_early_unmap(p, 0x10000); + goto out; + } + memcpy(buf, buf + 16, 16); + } + /* * Iterate over all possible DMI header addresses q. * Maintain the 32 bytes around q in buf. On the @@ -636,7 +651,7 @@ void __init dmi_scan_machine(void) memset(buf, 0, 16); for (q = p; q < p + 0x10000; q += 16) { memcpy_fromio(buf + 16, q, 16); - if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { + if (!dmi_present(buf)) { dmi_available = 1; dmi_early_unmap(p, 0x10000); goto out;
From: Jean Delvare jdelvare@suse.de
[ Upstream commit a814c3597a6b6040e2ef9459748081a6d5b7312d ]
Before accessing DMI data to record it for later, we should ensure that the DMI structures are large enough to contain the data in question.
Signed-off-by: Jean Delvare jdelvare@suse.de Reviewed-by: Mika Westerberg mika.westerberg@linux.intel.com Cc: Dmitry Torokhov dmitry.torokhov@gmail.com Cc: Andy Shevchenko andy.shevchenko@gmail.com Cc: Linus Walleij linus.walleij@linaro.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/firmware/dmi_scan.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index f1d4881dd2fc..e8eea181a601 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, const char *d = (const char *) dm; const char *p;
- if (dmi_ident[slot]) + if (dmi_ident[slot] || dm->length <= string) return;
p = dmi_string(dm, d[string]); @@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index) { - const u8 *d = (u8 *) dm + index; + const u8 *d; char *s; int is_ff = 1, is_00 = 1, i;
- if (dmi_ident[slot]) + if (dmi_ident[slot] || dm->length <= index + 16) return;
+ d = (u8 *) dm + index; for (i = 0; i < 16 && (is_ff || is_00); i++) { if (d[i] != 0x00) is_00 = 0; @@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, static void __init dmi_save_type(const struct dmi_header *dm, int slot, int index) { - const u8 *d = (u8 *) dm + index; + const u8 *d; char *s;
- if (dmi_ident[slot]) + if (dmi_ident[slot] || dm->length <= index) return;
s = dmi_alloc(4); if (!s) return;
+ d = (u8 *) dm + index; sprintf(s, "%u", *d & 0x7F); dmi_ident[slot] = s; } @@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) { - int i, count = *(u8 *)(dm + 1); + int i, count; struct dmi_device *dev;
+ if (dm->length < 0x05) + return; + + count = *(u8 *)(dm + 1); for (i = 1; i <= count; i++) { const char *devname = dmi_string(dm, i);
@@ -347,6 +353,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm) { const u8 *d = (u8 *) dm + 5;
+ if (dm->length < 0x0B) + return; + /* Skip disabled device */ if ((*d & 0x80) == 0) return; @@ -368,7 +377,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) const char *d = (const char *)dm; static int nr;
- if (dm->type != DMI_ENTRY_MEM_DEVICE) + if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12) return; if (nr >= dmi_memdev_nr) { pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
From: Liwei Song liwei.song@windriver.com
[ Upstream commit 17e83549e199d89aace7788a9f11c108671eecf5 ]
Fix the following kernel bug:
kernel BUG at drivers/iommu/intel-iommu.c:3260! invalid opcode: 0000 [#5] PREEMPT SMP Hardware name: Intel Corp. Harcuvar/Server, BIOS HAVLCRB0.X64.0013.D39.1608311820 08/31/2016 task: ffff880175389950 ti: ffff880176bec000 task.ti: ffff880176bec000 RIP: 0010:[<ffffffff8150a83b>] [<ffffffff8150a83b>] intel_unmap+0x25b/0x260 RSP: 0018:ffff880176bef5e8 EFLAGS: 00010296 RAX: 0000000000000024 RBX: ffff8800773c7c88 RCX: 000000000000ce04 RDX: 0000000080000000 RSI: 0000000000000000 RDI: 0000000000000009 RBP: ffff880176bef638 R08: 0000000000000010 R09: 0000000000000004 R10: ffff880175389c78 R11: 0000000000000a4f R12: ffff8800773c7868 R13: 00000000ffffac88 R14: ffff8800773c7818 R15: 0000000000000001 FS: 00007fef21258700(0000) GS:ffff88017b5c0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000000066d6d8 CR3: 000000007118c000 CR4: 00000000003406e0 Stack: 00000000ffffac88 ffffffff8199867f ffff880176bef5f8 ffff880100000030 ffff880176bef668 ffff8800773c7c88 ffff880178288098 ffff8800772c0010 ffff8800773c7818 0000000000000001 ffff880176bef648 ffffffff8150a86e Call Trace: [<ffffffff8199867f>] ? printk+0x46/0x48 [<ffffffff8150a86e>] intel_unmap_page+0xe/0x10 [<ffffffffa039d99b>] ismt_access+0x27b/0x8fa [i2c_ismt] [<ffffffff81554420>] ? __pm_runtime_suspend+0xa0/0xa0 [<ffffffff815544a0>] ? pm_suspend_timer_fn+0x80/0x80 [<ffffffff81554420>] ? __pm_runtime_suspend+0xa0/0xa0 [<ffffffff815544a0>] ? pm_suspend_timer_fn+0x80/0x80 [<ffffffff8143dfd0>] ? pci_bus_read_dev_vendor_id+0xf0/0xf0 [<ffffffff8172b36c>] i2c_smbus_xfer+0xec/0x4b0 [<ffffffff810aa4d5>] ? vprintk_emit+0x345/0x530 [<ffffffffa038936b>] i2cdev_ioctl_smbus+0x12b/0x240 [i2c_dev] [<ffffffff810aa829>] ? vprintk_default+0x29/0x40 [<ffffffffa0389b33>] i2cdev_ioctl+0x63/0x1ec [i2c_dev] [<ffffffff811b04c8>] do_vfs_ioctl+0x328/0x5d0 [<ffffffff8119d8ec>] ? vfs_write+0x11c/0x190 [<ffffffff8109d449>] ? rt_up_read+0x19/0x20 [<ffffffff811b07f1>] SyS_ioctl+0x81/0xa0 [<ffffffff819a351b>] system_call_fastpath+0x16/0x6e
This happens when running "i2cdetect -y 0" to detect the SMBus iSMT adapter.
After an I2C block read/write finishes, when the data buffer is unmapped, the wrong device address was passed to dma_unmap_single().
To fix this, pass dma_unmap_single() the same "dev" parameter that dma_map_single() uses, so the unmap can find the right device.
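As a hedged sketch of the general rule (this is not the ismt driver code), the same struct device used for dma_map_single() must be used for dma_unmap_single():

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int do_transfer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... the hardware transfer would happen here ... */

	/* unmap with the very same dev that was used for mapping */
	dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
	return 0;
}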
Fixes: 13f35ac14cd0 ("i2c: Adding support for Intel iSMT SMBus 2.0 host controller") Signed-off-by: Liwei Song liwei.song@windriver.com Reviewed-by: Andy Shevchenko andy.shevchenko@gmail.com Signed-off-by: Wolfram Sang wsa@the-dreams.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/i2c/busses/i2c-ismt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 1111cb966a44..fa2b58142cde 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -587,7 +587,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
/* unmap the data buffer */ if (dma_size != 0) - dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); + dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
if (unlikely(!time_left)) { dev_err(dev, "completion wait timed out\n");
From: hayeswang hayeswang@realtek.com
[ Upstream commit d8fbd27469fc02049c674de296a3263bef089131 ]
Add a byte_enable mask in ocp_read_word() to replace reading 4 bytes of data with reading only the desired 2 bytes.
This is used to avoid the issue described in commit b4d99def0938 ("r8152: remove sram_read"). The original method always reads 4 bytes of data, and that may cause problems when reading the PHY registers.
The new method is supported since RTL8153B, but it doesn't affect the previous chips: the byte_enable bits are reserved bits on those chips, and the hardware ignores them.
Signed-off-by: Hayes Wang hayeswang@realtek.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/usb/r8152.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 89950f5cea71..d91d094c11d1 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -853,11 +853,13 @@ static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) { u32 data; __le32 tmp; + u16 byen = BYTE_EN_WORD; u8 shift = index & 2;
index &= ~3; + byen <<= shift;
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); + generic_ocp_read(tp, index, sizeof(tmp), &tmp, type | byen);
data = __le32_to_cpu(tmp); data >>= (shift * 8);
From: Mathieu Larouche mathieu.larouche@matrox.com
[ Upstream commit 0cbb738108927916a659b5b0b96e386fcd7cc6e1 ]
- Changed the HiPri value for G200e4 to always be 0.
- Added a bandwidth limitation to block resolutions above 1920x1200x60Hz.
Signed-off-by: Mathieu Larouche mathieu.larouche@matrox.com Acked-by: Dave Airlie airlied@redhat.com [seanpaul removed some trailing whitespace from the patch] Signed-off-by: Sean Paul seanpaul@chromium.org Link: http://patchwork.freedesktop.org/patch/msgid/ec0f8568d7ec41904dfe593c5deccf3... Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/gpu/drm/mgag200/mgag200_mode.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index e5bb40e58020..5884cf77ccdb 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1145,7 +1145,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (IS_G200_SE(mdev)) { - if (mdev->unique_rev_id >= 0x02) { + if (mdev->unique_rev_id >= 0x04) { + WREG8(MGAREG_CRTCEXT_INDEX, 0x06); + WREG8(MGAREG_CRTCEXT_DATA, 0); + } else if (mdev->unique_rev_id >= 0x02) { u8 hi_pri_lvl; u32 bpp; u32 mb; @@ -1618,6 +1621,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector, if (mga_vga_calculate_mode_bandwidth(mode, bpp) > (30100 * 1024)) return MODE_BANDWIDTH; + } else { + if (mga_vga_calculate_mode_bandwidth(mode, bpp) + > (55000 * 1024)) + return MODE_BANDWIDTH; } } else if (mdev->type == G200_WB) { if (mode->hdisplay > 1280)
From: Haishuang Yan yanhaishuang@cmss.chinamobile.com
[ Upstream commit 469f87e158628fe66dcbbce9dd5e7b7acfe934a9 ]
When ip_tunnel_rcv() fails, the tun_dst won't be freed, so call dst_release() to free it in the error code path.
Fixes: 2e15ea390e6f ("ip_gre: Add support to collect tunnel metadata.") Acked-by: Eric Dumazet edumazet@google.com Acked-by: Pravin B Shelar pshelar@ovn.org Tested-by: Zhang Shengju zhangshengju@cmss.chinamobile.com Signed-off-by: Haishuang Yan yanhaishuang@cmss.chinamobile.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/ipv4/ip_tunnel.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 80e2d1b0c08c..f26749d450f7 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -439,6 +439,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, return 0;
drop: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); kfree_skb(skb); return 0; }
From: Johan Hovold johan@kernel.org
[ Upstream commit d0607aa4aee88cb097b694caa619e68f1e0a39c6 ]
Make sure to release the device-node reference when done parsing the node.
Fixes: e097dc624f78 ("NFC: nfcmrvl: add UART driver") Cc: Vincent Cuissard cuissard@marvell.com Signed-off-by: Johan Hovold johan@kernel.org Signed-off-by: Samuel Ortiz sameo@linux.intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/nfc/nfcmrvl/uart.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c index 6c0c301611c4..91162f8e0366 100644 --- a/drivers/nfc/nfcmrvl/uart.c +++ b/drivers/nfc/nfcmrvl/uart.c @@ -84,6 +84,7 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node, ret = nfcmrvl_parse_dt(matched_node, pdata); if (ret < 0) { pr_err("Failed to get generic entries\n"); + of_node_put(matched_node); return ret; }
@@ -97,6 +98,8 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node, else pdata->break_control = 0;
+ of_node_put(matched_node); + return 0; }
From: Liu Bo bo.li.liu@oracle.com
[ Upstream commit 28785f70ef882e4798cd5706066a55dbf7adf80e ]
We commit the transaction in order to reclaim space from pinned bytes, because doing so can process delayed refs. In may_commit_transaction(), we first check whether the pinned bytes alone are enough for the required space, and then check whether the pinned bytes plus the bytes reserved for delayed inserts are enough for the required space.
This changes the code to the above logic.
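A small worked illustration of that comparison with invented numbers (the real code uses percpu_counter_compare() against bytes - delayed_rsv->size):

#include <stdio.h>

static int may_commit(long long pinned, long long delayed_rsv_size,
		      long long bytes)
{
	/* equivalent to: pinned < bytes - delayed_rsv_size */
	if (pinned + delayed_rsv_size < bytes)
		return -1;	/* not enough reclaimable space: ENOSPC */
	return 0;		/* worth committing the transaction */
}

int main(void)
{
	printf("%d\n", may_commit(16, 8, 32));	/* -1: 24 < 32 */
	printf("%d\n", may_commit(16, 8, 20));	/*  0: 24 >= 20 */
	return 0;
}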
Fixes: b150a4f10d87 ("Btrfs: use a percpu to keep track of possibly pinned bytes") Tested-by: Nikolay Borisov nborisov@suse.com Reported-by: Nikolay Borisov nborisov@suse.com Reviewed-by: Nikolay Borisov nborisov@suse.com Signed-off-by: Liu Bo bo.li.liu@oracle.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/btrfs/extent-tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 260f94b019c9..875e179bb2d5 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4672,7 +4672,7 @@ static int may_commit_transaction(struct btrfs_root *root,
spin_lock(&delayed_rsv->lock); if (percpu_counter_compare(&space_info->total_bytes_pinned, - bytes - delayed_rsv->size) >= 0) { + bytes - delayed_rsv->size) < 0) { spin_unlock(&delayed_rsv->lock); return -ENOSPC; }
From: Liu Bo bo.li.liu@oracle.com
[ Upstream commit e3d37faba2eb19a1d459917bbf54ac1c65711510 ]
With the raid1 profile, dio read does not tolerate IO errors if the read length is less than the stripe length (64K).
Our bio doesn't get split in btrfs_submit_direct_hook() if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) is true, and that happens when the read length is less than 64K. In this case, if the underlying device somehow returns an error, bio->bi_error has recorded that error.
If we can recover the correct data from another copy in the raid1/10/5/6 profiles, btrfs_subio_endio_read() returns 0 and the bio has the correct data in its vector, but bio->bi_error is not updated accordingly, so the following dio_end_io(dio_bio, bio->bi_error) makes directIO think this read has failed.
This fixes the problem by setting bio's error to 0 if a good copy has been found.
Signed-off-by: Liu Bo bo.li.liu@oracle.com Reviewed-by: David Sterba dsterba@suse.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/btrfs/inode.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 81b5a461d94e..d56520e52dce 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8029,8 +8029,11 @@ static void btrfs_endio_direct_read(struct bio *bio) struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); int err = bio->bi_error;
- if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) + if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) { err = btrfs_subio_endio_read(inode, io_bio, err); + if (!err) + bio->bi_error = 0; + }
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, dip->logical_offset + dip->bytes - 1);
From: Dinh Nguyen dinguyen@kernel.org
[ Upstream commit b7f8101d6e75fefd22c39624a30c9ed3d7a72463 ]
The smplsel bits for the SDMMC clock on Arria10 and Stratix10 platforms are offset by 1 additional bit.
Add a new macro SYSMGR_SDMMC_CTRL_SET_AS10 for usage on the Arria10 and Stratix10 platforms.
Fixes: 5611a5ba8e54 ("clk: socfpga: update clk.h so for Arria10 platform to use") Signed-off-by: Dinh Nguyen dinguyen@kernel.org Signed-off-by: Stephen Boyd sboyd@codeaurora.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/clk/socfpga/clk-gate-a10.c | 2 +- drivers/clk/socfpga/clk.h | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c index 1cebf253e8fd..f55b36bbb7ae 100644 --- a/drivers/clk/socfpga/clk-gate-a10.c +++ b/drivers/clk/socfpga/clk-gate-a10.c @@ -86,7 +86,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk) } }
- hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1]); + hs_timing = SYSMGR_SDMMC_CTRL_SET_AS10(clk_phase[0], clk_phase[1]); if (!IS_ERR(socfpgaclk->sys_mgr_base_addr)) regmap_write(socfpgaclk->sys_mgr_base_addr, SYSMGR_SDMMCGRP_CTRL_OFFSET, hs_timing); diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h index 814c7247bf73..9cf1230115b1 100644 --- a/drivers/clk/socfpga/clk.h +++ b/drivers/clk/socfpga/clk.h @@ -32,6 +32,9 @@ #define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \ ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))
+#define SYSMGR_SDMMC_CTRL_SET_AS10(smplsel, drvsel) \ + ((((smplsel) & 0x7) << 4) | (((drvsel) & 0x7) << 0)) + extern void __iomem *clk_mgr_base_addr; extern void __iomem *clk_mgr_a10_base_addr;
From: Takashi Iwai tiwai@suse.de
[ Upstream commit eb8d0eaaf84b0398533a7c091a0b65663f2fd7ea ]
When the codec device is unregistered / freed, it may release the resource while being used in an unsolicited event like the jack detection work. This leads to use-after-free.
The fix here is to unregister the device first, i.e. remove the codec from the list, and then flush the pending works to ensure that all unsol events are gone. After this point, we're free from accessing the codec via unsol events, and thus can release the resources gracefully.
The issue was spotted originally by Intel CI, but it couldn't be reproduced reliably by its nature. So let's hope this fix really addresses the whole issues.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=196045 Reported-by: Martin Peres martin.peres@free.fr Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- sound/hda/hdac_bus.c | 1 + sound/hda/hdac_device.c | 1 + 2 files changed, 2 insertions(+)
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c index 0e81ea89a596..714a51721a31 100644 --- a/sound/hda/hdac_bus.c +++ b/sound/hda/hdac_bus.c @@ -212,5 +212,6 @@ void snd_hdac_bus_remove_device(struct hdac_bus *bus, bus->caddr_tbl[codec->addr] = NULL; clear_bit(codec->addr, &bus->codec_powered); bus->num_codecs--; + flush_work(&bus->unsol_work); } EXPORT_SYMBOL_GPL(snd_hdac_bus_remove_device); diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index e361024eabb6..4a7400ae8af3 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -159,6 +159,7 @@ void snd_hdac_device_unregister(struct hdac_device *codec) if (device_is_registered(&codec->dev)) { hda_widget_sysfs_exit(codec); device_del(&codec->dev); + snd_hdac_bus_remove_device(codec->bus, codec); } } EXPORT_SYMBOL_GPL(snd_hdac_device_unregister);
From: James Smart jsmart2021@gmail.com
[ Upstream commit 569dbe84a3e769009aa4a5d1030d000168889580 ]
The OS crashes after firmware download completes.
Posting SCSI SGL buffers fails because the number of SGL buffers is less than the total count. Some of the pending IOs are not completed by the driver, so the SGL buffers for these IOs are not added back to the list. The pending IOs are not completed because the lpfc_wq_list list is initialized before the pending IOs complete.
Postpone lpfc_wq_list reinitialization by moving lpfc_sli4_queue_destroy() after lpfc_hba_down_post().
Signed-off-by: Dick Kennedy dick.kennedy@broadcom.com Signed-off-by: James Smart james.smart@broadcom.com Reviewed-by: Hannes Reinecke hare@suse.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/scsi/lpfc/lpfc_sli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index ef43847153ea..f31ad4f5c7e5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4101,7 +4101,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
/* Perform FCoE PCI function reset before freeing queue memory */ rc = lpfc_pci_function_reset(phba); - lpfc_sli4_queue_destroy(phba);
/* Restore PCI cmd register */ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); @@ -4224,6 +4223,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) pci_disable_pcie_error_reporting(phba->pcidev);
lpfc_hba_down_post(phba); + lpfc_sli4_queue_destroy(phba);
return rc; }
From: Phong LE ple@baylibre.com
[ Upstream commit 16f2e0c6ffdfaf964bb0a6d5e67253a1c8116f0e ]
The size passed to the dma_free_coherent() call for the DMA gpd is invalid.
Fixes: 208489032bdd ("mmc: mediatek: Add Mediatek MMC driver") Signed-off-by: Phong LE ple@baylibre.com Signed-off-by: Neil Armstrong narmstrong@baylibre.com Signed-off-by: Ulf Hansson ulf.hansson@linaro.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/mmc/host/mtk-sd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 0bf0d0e9dbdb..f701cbac2061 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -1626,7 +1626,7 @@ static int msdc_drv_remove(struct platform_device *pdev) pm_runtime_disable(host->dev); pm_runtime_put_noidle(host->dev); dma_free_coherent(&pdev->dev, - sizeof(struct mt_gpdma_desc), + 2 * sizeof(struct mt_gpdma_desc), host->dma.gpd, host->dma.gpd_addr); dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc), host->dma.bd, host->dma.bd_addr);
From: Benoît Thébaudeau benoit@wsystem.com
[ Upstream commit cbb4509374963bea440c15ff26e2501d15e7927a ]
The eSDHC can only DMA from 32-bit-aligned addresses.
This fixes the following test cases of mmc_test: 11: Badly aligned write 12: Badly aligned read 13: Badly aligned multi-block write 14: Badly aligned multi-block read
Signed-off-by: Benoît Thébaudeau benoit@wsystem.com Acked-by: Adrian Hunter adrian.hunter@intel.com Reviewed-by: Fabio Estevam fabio.estevam@nxp.com Signed-off-by: Ulf Hansson ulf.hansson@linaro.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/mmc/host/sdhci-esdhc.h | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index de132e281753..3d6a005ad896 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -19,6 +19,7 @@ */
#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ + SDHCI_QUIRK_32BIT_DMA_ADDR | \ SDHCI_QUIRK_NO_BUSY_IRQ | \ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ SDHCI_QUIRK_PIO_NEEDS_DELAY | \
From: Dan Carpenter dan.carpenter@oracle.com
[ Upstream commit c133c7615751008f6c32ccae7cdfc5ff6e989c35 ]
We're supposed to exit the loop with "timeout" set to zero.
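A tiny illustration of the post- versus pre-decrement difference, which is all this fix changes:

#include <stdio.h>

int main(void)
{
	int a = 3, b = 3;

	while (a--)
		;	/* post-decrement: value tested, then decremented */
	while (--b)
		;	/* pre-decrement: decremented, then tested */

	printf("post: %d, pre: %d\n", a, b);	/* prints -1 and 0 */
	return 0;
}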
Signed-off-by: Dan Carpenter dan.carpenter@oracle.com Acked-by: Don Zickus dzickus@redhat.com Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Peter Zijlstra peterz@infradead.org Cc: Thomas Gleixner tglx@linutronix.de Cc: kernel-janitors@vger.kernel.org Fixes: 99e8b9ca90d6 ("x86, NMI: Add NMI IPI selftest") Link: http://lkml.kernel.org/r/20170619105304.GA23995@elgon.mountain Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/x86/kernel/nmi_selftest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c index 6d9582ec0324..d27f8d84c4ff 100644 --- a/arch/x86/kernel/nmi_selftest.c +++ b/arch/x86/kernel/nmi_selftest.c @@ -78,7 +78,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
/* Don't wait longer than a second */ timeout = USEC_PER_SEC; - while (!cpumask_empty(mask) && timeout--) + while (!cpumask_empty(mask) && --timeout) udelay(1);
/* What happens if we timeout, do we still unregister?? */
From: Luc Van Oostenryck luc.vanoostenryck@gmail.com
[ Upstream commit f5d284900c0f960e318a063f4c40826b6e3aa6a8 ]
When using sparse on the arm64 tree we get many thousands of warnings like 'constant ... is so big it is unsigned long long' or 'shift too big (32) for type unsigned long'. This happens because by default sparse considers the machine as 32bit and defines the size of the types accordingly.
Fix this by passing the '-m64' flag to sparse so that sparse can correctly define longs as being 64bit.
CC: Catalin Marinas catalin.marinas@arm.com CC: Will Deacon will.deacon@arm.com CC: linux-arm-kernel@lists.infradead.org Signed-off-by: Luc Van Oostenryck luc.vanoostenryck@gmail.com Signed-off-by: Will Deacon will.deacon@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/arm64/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index b6c90e5006e4..119d1885a3c0 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -40,7 +40,7 @@ AS += -EL LD += -EL endif
-CHECKFLAGS += -D__aarch64__ +CHECKFLAGS += -D__aarch64__ -m64
ifeq ($(CONFIG_ARM64_ERRATUM_843419), y) KBUILD_CFLAGS_MODULE += -mcmodel=large
From: Serhey Popovych serhe.popovych@gmail.com
[ Upstream commit bdaf32c3ced3d111b692f0af585f880f82d686c5 ]
We should avoid marking goto rules unresolved when their target is actually reachable after rule deletion.
Consider the following sample scenario:
# ip -4 ru sh 0: from all lookup local 32000: from all goto 32100 32100: from all lookup main 32100: from all lookup default 32766: from all lookup main 32767: from all lookup default
# ip -4 ru del pref 32100 table main # ip -4 ru sh 0: from all lookup local 32000: from all goto 32100 [unresolved] 32100: from all lookup default 32766: from all lookup main 32767: from all lookup default
After removal of the first rule with preference 32100, we mark all goto rules as unreachable, even when a rule with the same preference as the removed one is still present.
Check whether the next rule with the same preference is available, and make all rules with a goto action point to it.
Signed-off-by: Serhey Popovych serhe.popovych@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/core/fib_rules.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 365de66436ac..8df55176c9b8 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -430,7 +430,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh) struct net *net = sock_net(skb->sk); struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; - struct fib_rule *rule, *tmp; + struct fib_rule *rule, *r; struct nlattr *tb[FRA_MAX+1]; int err = -EINVAL;
@@ -510,16 +510,23 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
/* * Check if this rule is a target to any of them. If so, + * adjust to the next one with the same preference or * disable them. As this operation is eventually very - * expensive, it is only performed if goto rules have - * actually been added. + * expensive, it is only performed if goto rules, except + * current if it is goto rule, have actually been added. */ if (ops->nr_goto_rules > 0) { - list_for_each_entry(tmp, &ops->rules_list, list) { - if (rtnl_dereference(tmp->ctarget) == rule) { - RCU_INIT_POINTER(tmp->ctarget, NULL); + struct fib_rule *n; + + n = list_next_entry(rule, list); + if (&n->list == &ops->rules_list || n->pref != rule->pref) + n = NULL; + list_for_each_entry(r, &ops->rules_list, list) { + if (rtnl_dereference(r->ctarget) != rule) + continue; + rcu_assign_pointer(r->ctarget, n); + if (!n) ops->unresolved_rules++; - } } }
From: Xin Long lucien.xin@gmail.com
[ Upstream commit d5494acb88aa9dd1325079c9b8855008a52c19b3 ]
Now dccp_ipv4 works as a kernel module. While this module is being loaded, if a dccp packet is received after inet_add_protocol() but before register_pernet_subsys(), in which v4_ctl_sk is initialized, a null pointer dereference may be triggered because init_net.dccp.v4_ctl_sk is still 0x0.
Jianlin found this issue when the following call trace occurred:
[ 171.950177] BUG: unable to handle kernel NULL pointer dereference at 0000000000000110 [ 171.951007] IP: [<ffffffffc0558364>] dccp_v4_ctl_send_reset+0xc4/0x220 [dccp_ipv4] [...] [ 171.984629] Call Trace: [ 171.984859] <IRQ> [ 171.985061] [ 171.985213] [<ffffffffc0559a53>] dccp_v4_rcv+0x383/0x3f9 [dccp_ipv4] [ 171.985711] [<ffffffff815ca054>] ip_local_deliver_finish+0xb4/0x1f0 [ 171.986309] [<ffffffff815ca339>] ip_local_deliver+0x59/0xd0 [ 171.986852] [<ffffffff810cd7a4>] ? update_curr+0x104/0x190 [ 171.986956] [<ffffffff815c9cda>] ip_rcv_finish+0x8a/0x350 [ 171.986956] [<ffffffff815ca666>] ip_rcv+0x2b6/0x410 [ 171.986956] [<ffffffff810c83b4>] ? task_cputime+0x44/0x80 [ 171.986956] [<ffffffff81586f22>] __netif_receive_skb_core+0x572/0x7c0 [ 171.986956] [<ffffffff810d2c51>] ? trigger_load_balance+0x61/0x1e0 [ 171.986956] [<ffffffff81587188>] __netif_receive_skb+0x18/0x60 [ 171.986956] [<ffffffff8158841e>] process_backlog+0xae/0x180 [ 171.986956] [<ffffffff8158799d>] net_rx_action+0x16d/0x380 [ 171.986956] [<ffffffff81090b7f>] __do_softirq+0xef/0x280 [ 171.986956] [<ffffffff816b6a1c>] call_softirq+0x1c/0x30
This patch moves inet_add_protocol() after register_pernet_subsys() in dccp_v4_init(), so that v4_ctl_sk is initialized before any incoming dccp packets are processed.
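A generic sketch of that ordering rule with invented helper names (not the dccp code itself): make the state the receive path depends on ready before the protocol handler is hooked up, and unwind in reverse order on failure:

#include <stdio.h>

static int init_ctl_sock(void)    { return 0; }	/* stand-in for per-net state setup */
static void exit_ctl_sock(void)   { }
static int hook_protocol(void)    { return 0; }	/* stand-in: packets may arrive after this */
static void unhook_protocol(void) { }

static int proto_init(void)
{
	int err;

	err = init_ctl_sock();		/* state must exist before any packet */
	if (err)
		return err;

	err = hook_protocol();		/* only now may packets come in */
	if (err)
		exit_ctl_sock();	/* unwind in reverse order */
	return err;
}

static void proto_exit(void)
{
	unhook_protocol();		/* stop new packets first */
	exit_ctl_sock();		/* then tear down the state they used */
}

int main(void)
{
	if (proto_init() == 0) {
		puts("initialized in a safe order");
		proto_exit();
	}
	return 0;
}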
Reported-by: Jianlin Shi jishi@redhat.com Signed-off-by: Xin Long lucien.xin@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/dccp/ipv4.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 6eb2bbf9873b..dfda437cd86b 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -1033,33 +1033,34 @@ static int __init dccp_v4_init(void) { int err = proto_register(&dccp_v4_prot, 1);
- if (err != 0) + if (err) goto out;
- err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP); - if (err != 0) - goto out_proto_unregister; - inet_register_protosw(&dccp_v4_protosw);
err = register_pernet_subsys(&dccp_v4_ops); if (err) goto out_destroy_ctl_sock; + + err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP); + if (err) + goto out_proto_unregister; + out: return err; +out_proto_unregister: + unregister_pernet_subsys(&dccp_v4_ops); out_destroy_ctl_sock: inet_unregister_protosw(&dccp_v4_protosw); - inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); -out_proto_unregister: proto_unregister(&dccp_v4_prot); goto out; }
static void __exit dccp_v4_exit(void) { + inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); unregister_pernet_subsys(&dccp_v4_ops); inet_unregister_protosw(&dccp_v4_protosw); - inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); proto_unregister(&dccp_v4_prot); }
From: Kai Heng Feng kai.heng.feng@canonical.com
[ Upstream commit 1b1ffc57a1b2edf58e803e463980b326765a237f ]
The Dell Latitude 3160 does not have a keyboard backlight, but there is a sysfs interface for it, which does nothing at all.
KBD_LED_ON_TOKEN is the only token that can be found. Since the machine has neither KBD_LED_OFF_TOKEN nor any KBD_LED_AUTO_*_TOKEN, it should be safe to assume that at least two tokens must be present to support a keyboard backlight. Not all models have the ON token - they may have multiple AUTO tokens instead.
Models which do not use an SMBIOS token to control the keyboard backlight also have this issue; the brightness level is 0 on these models. Verified on a Dell Inspiron 3565.
Report that a keyboard backlight is supported only when at least two modes are present.
Signed-off-by: Kai-Heng Feng kai.heng.feng@canonical.com Reviewed-by: Pali Rohár pali.rohar@gmail.com Signed-off-by: Andy Shevchenko andriy.shevchenko@linux.intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/platform/x86/dell-laptop.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index aaeeae81e3a9..c6e5ca2b8ae9 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -1608,7 +1608,11 @@ static void kbd_init(void) ret = kbd_init_info(); kbd_init_tokens();
- if (kbd_token_bits != 0 || ret == 0) + /* + * Only supports keyboard backlight when it has at least two modes. + */ + if ((ret == 0 && (kbd_info.levels != 0 || kbd_mode_levels_count >= 2)) + || kbd_get_valid_token_counts() >= 2) kbd_led_present = true; }
From: Edward Cree ecree@solarflare.com
[ Upstream commit 57f0c9cf58ff7fe479137ab847a886d0eed3ad1d ]
Somehow two copies of the line 'up_write(&vf->efx->filter_sem);' got into efx_ef10_sriov_set_vf_vlan(). This would put the mutex in a bad state and cause all subsequent down attempts to hang.
Fixes: 671b53eec2ed ("sfc: Ensure down_write(&filter_sem) and up_write() are matched before calling efx_net_open()") Signed-off-by: Edward Cree ecree@solarflare.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/ethernet/sfc/ef10_sriov.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 3c17f274e802..a0226b11eab2 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -631,8 +631,6 @@ restore_filters:
up_write(&vf->efx->filter_sem);
- up_write(&vf->efx->filter_sem); - rc2 = efx_net_open(vf->efx->net_dev); if (rc2) goto reset_nic;
From: Miroslav Lichvar mlichvar@redhat.com
[ Upstream commit 7a5de5512296fd2ab7a497e4a576196b1f046e78 ]
When the first timestamp in the list of clock readings was later than the second timestamp and all other timestamps were in order, the inconsistency was not reported because the index of the out-of-order timestamp was equal to the default value.
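A small stand-alone illustration of the sentinel problem with made-up readings: when index 0 is a valid answer, 0 cannot also serve as the "nothing found" default, so -1 is used instead:

#include <stdio.h>

int main(void)
{
	long long t[4] = { 5, 3, 7, 9 };	/* t[0] > t[1]: first pair is out of order */
	int i, inconsistent = -1;		/* -1 means "nothing found"; 0 is a valid index */

	for (i = 0; i < 3; i++)
		if (t[i] > t[i + 1])
			inconsistent = i;

	/* with a default of 0 and a plain "if (inconsistent)" test,
	 * this index-0 case would be silently missed */
	if (inconsistent >= 0)
		printf("inconsistency at index %d\n", inconsistent);
	return 0;
}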
Cc: Thomas Gleixner tglx@linutronix.de Cc: Ingo Molnar mingo@kernel.org Cc: Miroslav Lichvar mlichvar@redhat.com Cc: Richard Cochran richardcochran@gmail.com Cc: Prarit Bhargava prarit@redhat.com Cc: Stephen Boyd stephen.boyd@linaro.org Cc: Shuah Khan shuah@kernel.org Signed-off-by: Miroslav Lichvar mlichvar@redhat.com Signed-off-by: John Stultz john.stultz@linaro.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/testing/selftests/timers/inconsistency-check.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/timers/inconsistency-check.c b/tools/testing/selftests/timers/inconsistency-check.c index caf1bc9257c4..74c60e8759a0 100644 --- a/tools/testing/selftests/timers/inconsistency-check.c +++ b/tools/testing/selftests/timers/inconsistency-check.c @@ -118,7 +118,7 @@ int consistency_test(int clock_type, unsigned long seconds) start_str = ctime(&t);
while (seconds == -1 || now - then < seconds) { - inconsistent = 0; + inconsistent = -1;
/* Fill list */ for (i = 0; i < CALLS_PER_LOOP; i++) @@ -130,7 +130,7 @@ int consistency_test(int clock_type, unsigned long seconds) inconsistent = i;
/* display inconsistency */ - if (inconsistent) { + if (inconsistent >= 0) { unsigned long long delta;
printf("%s\n", start_str);
From: Ping-Ke Shih pkshih@realtek.com
[ Upstream commit f95d95a7cd5514549dcf6ba754f0ee834cce3e1f ]
To make ant_sel work, we should call power_on_setting to set the antenna correctly.
Signed-off-by: Ping-Ke Shih pkshih@realtek.com Signed-off-by: Larry Finger Larry.Finger@lwfinger.net Cc: Yan-Hsuan Chuang yhchuang@realtek.com Cc: Birming Chiu birming@realtek.com Cc: Shaofu shaofu@realtek.com Cc: Steven Ting steventing@realtek.com Signed-off-by: Kalle Valo kvalo@codeaurora.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 3 +++ drivers/net/wireless/realtek/rtlwifi/wifi.h | 1 + 2 files changed, 4 insertions(+)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 89515f02c353..d2ae9c4a3665 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -846,6 +846,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) return false; }
+ if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); + bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index d676d055feda..cf48f014d640 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2533,6 +2533,7 @@ struct bt_coexist_info { struct rtl_btc_ops { void (*btc_init_variables) (struct rtl_priv *rtlpriv); void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); + void (*btc_power_on_setting)(struct rtl_priv *rtlpriv); void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
From: Boris Brezillon boris.brezillon@free-electrons.com
[ Upstream commit edeb729f7929d1372ab426a7f56fd0b337ba5751 ]
VBLANK events are missed when the CRTC is being disabled because the driver does not wait till the end of the frame before stopping the HVS and PV blocks. In this case, we should explicitly issue a VBLANK event if there's one waiting.
Signed-off-by: Boris Brezillon boris.brezillon@free-electrons.com Reviewed-by: Eric Anholt eric@anholt.net Link: http://patchwork.freedesktop.org/patch/msgid/1497601833-24588-1-git-send-ema... Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/gpu/drm/vc4/vc4_crtc.c | 13 +++++++++++++ 1 file changed, 13 insertions(+)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 265064c62d49..6987723c23ed 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -290,6 +290,19 @@ static void vc4_crtc_disable(struct drm_crtc *crtc) WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != SCALER_DISPSTATX_EMPTY); + + /* + * Make sure we issue a vblank event after disabling the CRTC if + * someone was waiting it. + */ + if (crtc->state->event) { + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + } }
static void vc4_crtc_enable(struct drm_crtc *crtc)
From: Michail Georgios Etairidis m.etairidis@beck-ipc.com
[ Upstream commit 6c782a5ea56a799658e213a78dc1455264938afa ]
The i2c-imx driver incorrectly uses readb()/writeb() to read and write the appropriate registers when performing a repeated start. The imx_i2c_read_reg()/imx_i2c_write_reg() functions should be used instead. With the wrong accessors, performing a repeated start results in a kernel panic on the i.MX platform.
Signed-off-by: Michail G Etairidis m.etairidis@beck-ipc.com Fixes: ce1a78840ff7 ("i2c: imx: add DMA support for freescale i2c driver") Fixes: 054b62d9f25c ("i2c: imx: fix the i2c bus hang issue when do repeat restart") Acked-by: Fugang Duan fugang.duan@nxp.com Acked-by: Uwe Kleine-König u.kleine-koenig@pengutronix.de Signed-off-by: Wolfram Sang wsa@the-dreams.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/i2c/busses/i2c-imx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index d4d853680ae4..c011e53c0b4d 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -746,9 +746,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, * the first read operation, otherwise the first read cost * one extra clock cycle. */ - temp = readb(i2c_imx->base + IMX_I2C_I2CR); + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MTX; - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
@@ -869,9 +869,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo * the first read operation, otherwise the first read cost * one extra clock cycle. */ - temp = readb(i2c_imx->base + IMX_I2C_I2CR); + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MTX; - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } } else if (i == (msgs->len - 2)) { dev_dbg(&i2c_imx->adapter.dev,
From: Robin Murphy robin.murphy@arm.com
[ Upstream commit 3403b0259d152c3ca67b32bb710f826bc17c0d16 ]
The call to pci_for_each_dma_alias() in the ITS PCI code has aroused suspicion in the past, and upon closer inspection does turn out to be completely backwards. Rather than iterating through each RID alias of the given device, what we actually want to be doing here is iterating through all the *other* devices which may also alias the same RID, in order to size the table for the worst case.
Do the right thing by ignoring the initial DMA aliases themselves and just using that walk to detect an aliasing bridge, then walking back down the bus topology as necessary to actually count everything else.
Our alias handling still isn't perfect, since we don't account for the cases of certain bridges only taking ownership of transactions under particular circumstances, but without completely reworking the ITS code to cope with the notion of multiple DevIDs per device, it'll have to do.
Signed-off-by: Robin Murphy robin.murphy@arm.com Signed-off-by: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/irqchip/irq-gic-v3-its-pci-msi.c | 35 ++++++++++++++++---------------- 1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index aee60ed025dc..147a29faa34f 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -40,27 +40,22 @@ static struct irq_chip its_msi_irq_chip = { .irq_write_msi_msg = pci_msi_domain_write_msg, };
-struct its_pci_alias { - struct pci_dev *pdev; - u32 count; -}; - -static int its_pci_msi_vec_count(struct pci_dev *pdev) +static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data) { - int msi, msix; + int msi, msix, *count = data;
msi = max(pci_msi_vec_count(pdev), 0); msix = max(pci_msix_vec_count(pdev), 0); + *count += max(msi, msix);
- return max(msi, msix); + return 0; }
static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) { - struct its_pci_alias *dev_alias = data; + struct pci_dev **alias_dev = data;
- if (pdev != dev_alias->pdev) - dev_alias->count += its_pci_msi_vec_count(pdev); + *alias_dev = pdev;
return 0; } @@ -68,9 +63,9 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *info) { - struct pci_dev *pdev; - struct its_pci_alias dev_alias; + struct pci_dev *pdev, *alias_dev; struct msi_domain_info *msi_info; + int alias_count = 0;
if (!dev_is_pci(dev)) return -EINVAL; @@ -78,16 +73,20 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, msi_info = msi_get_domain_info(domain->parent);
pdev = to_pci_dev(dev); - dev_alias.pdev = pdev; - dev_alias.count = nvec; - - pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); + /* + * If pdev is downstream of any aliasing bridges, take an upper + * bound of how many other vectors could map to the same DevID. + */ + pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev); + if (alias_dev != pdev && alias_dev->subordinate) + pci_walk_bus(alias_dev->subordinate, its_pci_msi_vec_count, + &alias_count);
/* ITS specific DeviceID, as the core ITS ignores dev. */ info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
return msi_info->ops->msi_prepare(domain->parent, - dev, dev_alias.count, info); + dev, max(nvec, alias_count), info); }
static struct msi_domain_ops its_pci_msi_ops = {
From: Dave Martin Dave.Martin@arm.com
[ Upstream commit af66b2d88a76574d55e81d712292abd34beb6178 ]
Currently, VFP registers are omitted from coredumps for compat processes, due to a bug in the REGSET_COMPAT_VFP regset implementation.
compat_vfp_get() needs to transfer non-contiguous data from thread_struct.fpsimd_state, and uses put_user() to handle the offending trailing word (FPSCR). This fails when copying to a kernel address (i.e., kbuf && !ubuf), which is what happens when dumping core. As a result, the ELF coredump core code silently omits the NT_ARM_VFP note from the dump.
It would be possible to work around this with additional special case code for the put_user(), but since user_regset_copyout() is explicitly designed to handle this scenario it is cleaner to port the put_user() to a user_regset_copyout() call, which this patch does.
Signed-off-by: Dave Martin Dave.Martin@arm.com Signed-off-by: Will Deacon will.deacon@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/arm64/kernel/ptrace.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 55909b2208cc..183f39384e4c 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -792,7 +792,7 @@ static int compat_vfp_get(struct task_struct *target, { struct user_fpsimd_state *uregs; compat_ulong_t fpscr; - int ret; + int ret, vregs_end_pos;
uregs = &target->thread.fpsimd_state.user_fpsimd;
@@ -800,13 +800,16 @@ static int compat_vfp_get(struct task_struct *target, * The VFP registers are packed into the fpsimd_state, so they all sit * nicely together for us. We just need to create the fpscr separately. */ - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, - VFP_STATE_SIZE - sizeof(compat_ulong_t)); + vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, + 0, vregs_end_pos);
if (count && !ret) { fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) | (uregs->fpcr & VFP_FPSCR_CTRL_MASK); - ret = put_user(fpscr, (compat_ulong_t *)ubuf); + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr, + vregs_end_pos, VFP_STATE_SIZE); }
return ret;
From: Chenbo Feng fengc@google.com
[ Upstream commit 8fac365f63c866a00015fa13932d8ffc584518b8 ]
Currently, in both the IPv4 and IPv6 code paths, the ACK packet received while the sk is in TCP_NEW_SYN_RECV state is not filtered by the socket filter or cgroup filter, since it is handled in tcp_child_process and never reaches the tcp_filter call inside tcp_v4_rcv or tcp_v6_rcv. Adding a tcp_filter hook here makes sure all ingress TCP packets are correctly filtered.
Signed-off-by: Chenbo Feng fengc@google.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/ipv4/tcp_ipv4.c | 2 ++ net/ipv6/tcp_ipv6.c | 2 ++ 2 files changed, 4 insertions(+)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 61c93a93f228..2eb9a1747f25 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1639,6 +1639,8 @@ process: } if (nsk == sk) { reqsk_put(req); + } else if (tcp_filter(sk, skb)) { + goto discard_and_relse; } else if (tcp_child_process(sk, nsk, skb)) { tcp_v4_send_reset(nsk, skb); goto discard_and_relse; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 74cbcc4b399c..7d7f0999a919 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1428,6 +1428,8 @@ process: if (nsk == sk) { reqsk_put(req); tcp_v6_restore_cb(skb); + } else if (tcp_filter(sk, skb)) { + goto discard_and_relse; } else if (tcp_child_process(sk, nsk, skb)) { tcp_v6_send_reset(nsk, skb); goto discard_and_relse;
There is a bug fix for this patch upstream: "d624d276d1ddacbcb12ad96832ce0c7b82cd25db tcp: fix possible deadlock in TCP stack vs BPF filter". Please make sure you include that as well if you want to add this patch to stable.
On Mon, Apr 09, 2018 at 11:00:06AM -0700, Chenbo Feng wrote:
There is a bug fix for this patch upstream: "d624d276d1ddacbcb12ad96832ce0c7b82cd25db tcp: fix possible deadlock in TCP stack vs BPF filter". Please make sure you include that as well if you want to add this patch to stable.
That commit had quite a few conflicts, so I dropped this commit instead. Thanks!
From: Serhey Popovych serhe.popovych@gmail.com
[ Upstream commit 191cdb3822e5df6b3c8b9f8cb8c4bf93f6cc90c7 ]
There are a number of problems with configuring the peer network device in the absence of IFLA_VETH_PEER attributes, where the attributes for the main network device are shared with the peer.
First, it is not feasible to configure both network devices with the same MAC address, since this makes communication in such a configuration problematic.
This case can be reproduced with the following sequence:
# ip link add address 02:11:22:33:44:55 type veth
# ip li sh
...
26: veth0@veth1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc \
   noop state DOWN mode DEFAULT qlen 1000
    link/ether 00:11:22:33:44:55 brd ff:ff:ff:ff:ff:ff
27: veth1@veth0: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc \
   noop state DOWN mode DEFAULT qlen 1000
    link/ether 00:11:22:33:44:55 brd ff:ff:ff:ff:ff:ff
Second, it is not possible to register both the main and peer network devices with the same name; this happens when the name for the main interface is given with IFLA_IFNAME and the same attribute is reused for the peer.
This case can be reproduced with the following sequence:
# ip link add dev veth1a type veth RTNETLINK answers: File exists
To fix both cases, use the IFLA_IFNAME and IFLA_ADDRESS attributes for the peer only when they are actually taken from peer_tb; otherwise fall back to a name based on the rtnl ops kind and a random hardware address.
Signed-off-by: Serhey Popovych serhe.popovych@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/veth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 6b4cc1c2e6b4..5446a1b6480c 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -374,7 +374,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, tbp = tb; }
- if (tbp[IFLA_IFNAME]) { + if (ifmp && tbp[IFLA_IFNAME]) { nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { @@ -393,7 +393,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, return PTR_ERR(peer); }
- if (tbp[IFLA_ADDRESS] == NULL) + if (!ifmp || !tbp[IFLA_ADDRESS]) eth_hw_addr_random(peer);
if (ifmp && (dev->ifindex != 0))
From: Vlad Yasevich vyasevich@gmail.com
[ Upstream commit e26f43faa0d79dd06e9e94829696b68b9940c2ee ]
The user currently gets an EBUSY error when attempting to set the MAC address on a macvlan device to its current value.
This should really be a no-op as nothing changes. Catch the condition and return early.
Signed-off-by: Vladislav Yasevich vyasevic@redhat.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/macvlan.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 9897cabec371..c0d143df4c79 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -693,6 +693,10 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
+ /* If the addresses are the same, this is a no-op */ + if (ether_addr_equal(dev->dev_addr, addr->sa_data)) + return 0; + if (vlan->mode == MACVLAN_MODE_PASSTHRU) { dev_set_mac_address(vlan->lowerdev, addr); return 0;
From: Matt Weber matthew.weber@rockwellcollins.com
[ Upstream commit 8064c616984eaa015f018dba595d78cd24a0cc8c ]
The driver was clearing the hold bit in the control register before writing to the address register which resulted in a stop condition being generated rather than a repeated start.
This issue was only observed when the system was running much more slowly than a processor normally would. The IP data sheet specifies that the address register must be written before the hold is cleared.
Fixes: df8eb5691c4 ("i2c: Add driver for Cadence I2C controller") Signed-off-by: John Linn john.linn@xilinx.com Signed-off-by: Paresh Chaudhary paresh.chaudhary@rockwellcollins.com Signed-off-by: Matthew Weber matthew.weber@rockwellcollins.com Signed-off-by: Wolfram Sang wsa@the-dreams.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/i2c/busses/i2c-cadence.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 84deed6571bd..a2f51087d8ab 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -401,14 +401,14 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET); }
+ /* Set the slave address in address register - triggers operation */ + cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, + CDNS_I2C_ADDR_OFFSET); /* Clear the bus hold flag if bytes to receive is less than FIFO size */ if (!id->bus_hold_flag && ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) && (id->recv_count <= CDNS_I2C_FIFO_DEPTH)) cdns_i2c_clear_bus_hold(id); - /* Set the slave address in address register - triggers operation */ - cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, - CDNS_I2C_ADDR_OFFSET); cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); }
From: Mateusz Jurczyk mjurczyk@google.com
[ Upstream commit e3c42b61ff813921ba58cfc0019e3fd63f651190 ]
Verify that the caller-provided sockaddr structure is large enough to contain the sa_family field, before accessing it in bind() and connect() handlers of the AF_IUCV socket. Since neither syscall enforces a minimum size of the corresponding memory region, very short sockaddrs (zero or one byte long) result in operating on uninitialized memory while referencing .sa_family.
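A generic illustration of the rule (a minimal userspace sketch, not the af_iucv code; checked_family() is a made-up helper): nothing in the address may be read before addr_len has been checked to cover it.

#include <stddef.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Hypothetical helper: returns 0 only if addr is long enough and matches. */
static int checked_family(const struct sockaddr *addr, socklen_t addr_len,
			  sa_family_t expected)
{
	/* shorter than sa_family itself: reading it would touch memory we don't own */
	if (addr_len < (socklen_t)sizeof(addr->sa_family))
		return -1;
	return addr->sa_family == expected ? 0 : -1;
}

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };

	return checked_family((struct sockaddr *)&sun, sizeof(sun), AF_UNIX);
}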
Fixes: 52a82e23b9f2 ("af_iucv: Validate socket address length in iucv_sock_bind()") Signed-off-by: Mateusz Jurczyk mjurczyk@google.com [jwi: removed unneeded null-check for addr] Signed-off-by: Julian Wiedmann jwi@linux.vnet.ibm.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/iucv/af_iucv.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index aeffb65181f5..5984cc35d508 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -705,10 +705,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, char uid[9];
/* Verify the input sockaddr */ - if (!addr || addr->sa_family != AF_IUCV) - return -EINVAL; - - if (addr_len < sizeof(struct sockaddr_iucv)) + if (addr_len < sizeof(struct sockaddr_iucv) || + addr->sa_family != AF_IUCV) return -EINVAL;
lock_sock(sk); @@ -852,7 +850,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, struct iucv_sock *iucv = iucv_sk(sk); int err;
- if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) + if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV) return -EINVAL;
if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
From: Eric Ren zren@suse.com
[ Upstream commit 8818efaaacb78c60a9d90c5705b6c99b75d7d442 ]
Another deadlock path caused by recursive locking has been reported. This kind of issue was introduced by commit 743b5f1434f5 ("ocfs2: take inode lock in ocfs2_iop_set/get_acl()"). Two deadlock paths have already been fixed by commit b891fa5024a9 ("ocfs2: fix deadlock issue when taking inode lock at vfs entry points"). Yes, we intend to fix this kind of case in an incremental way, because it's hard to find all possible paths at once.
This one can be reproduced like this: on node1, cp a large file from the home directory to the ocfs2 mountpoint, while on node2 running setfacl/getfacl. Both nodes hang there. The backtraces:
On node1:
__ocfs2_cluster_lock.isra.39+0x357/0x740 [ocfs2]
ocfs2_inode_lock_full_nested+0x17d/0x840 [ocfs2]
ocfs2_write_begin+0x43/0x1a0 [ocfs2]
generic_perform_write+0xa9/0x180
__generic_file_write_iter+0x1aa/0x1d0
ocfs2_file_write_iter+0x4f4/0xb40 [ocfs2]
__vfs_write+0xc3/0x130
vfs_write+0xb1/0x1a0
SyS_write+0x46/0xa0
On node2:
__ocfs2_cluster_lock.isra.39+0x357/0x740 [ocfs2]
ocfs2_inode_lock_full_nested+0x17d/0x840 [ocfs2]
ocfs2_xattr_set+0x12e/0xe80 [ocfs2]
ocfs2_set_acl+0x22d/0x260 [ocfs2]
ocfs2_iop_set_acl+0x65/0xb0 [ocfs2]
set_posix_acl+0x75/0xb0
posix_acl_xattr_set+0x49/0xa0
__vfs_setxattr+0x69/0x80
__vfs_setxattr_noperm+0x72/0x1a0
vfs_setxattr+0xa7/0xb0
setxattr+0x12d/0x190
path_setxattr+0x9f/0xb0
SyS_setxattr+0x14/0x20
Fix this one by using ocfs2_inode_{lock|unlock}_tracker, which is exported by commit 439a36b8ef38 ("ocfs2/dlmglue: prepare tracking logic to avoid recursive cluster lock").
Link: http://lkml.kernel.org/r/20170622014746.5815-1-zren@suse.com Fixes: 743b5f1434f5 ("ocfs2: take inode lock in ocfs2_iop_set/get_acl()") Signed-off-by: Eric Ren zren@suse.com Reported-by: Thomas Voegtle tv@lio96.de Tested-by: Thomas Voegtle tv@lio96.de Reviewed-by: Joseph Qi jiangqi903@gmail.com Cc: Mark Fasheh mfasheh@versity.com Cc: Joel Becker jlbec@evilplan.org Cc: Junxiao Bi junxiao.bi@oracle.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/ocfs2/dlmglue.c | 4 ++++ fs/ocfs2/xattr.c | 23 +++++++++++++---------- 2 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 555b57a16499..b41bbd133103 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2608,6 +2608,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode, struct ocfs2_lock_res *lockres;
lockres = &OCFS2_I(inode)->ip_inode_lockres; + /* had_lock means that the currect process already takes the cluster + * lock previously. If had_lock is 1, we have nothing to do here, and + * it will get unlocked where we got the lock. + */ if (!had_lock) { ocfs2_remove_holder(lockres, oh); ocfs2_inode_unlock(inode, ex); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 877830b05e12..d2b7192c0937 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1307,20 +1307,21 @@ static int ocfs2_xattr_get(struct inode *inode, void *buffer, size_t buffer_size) { - int ret; + int ret, had_lock; struct buffer_head *di_bh = NULL; + struct ocfs2_lock_holder oh;
- ret = ocfs2_inode_lock(inode, &di_bh, 0); - if (ret < 0) { - mlog_errno(ret); - return ret; + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh); + if (had_lock < 0) { + mlog_errno(had_lock); + return had_lock; } down_read(&OCFS2_I(inode)->ip_xattr_sem); ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, name, buffer, buffer_size); up_read(&OCFS2_I(inode)->ip_xattr_sem);
- ocfs2_inode_unlock(inode, 0); + ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
brelse(di_bh);
@@ -3516,11 +3517,12 @@ int ocfs2_xattr_set(struct inode *inode, { struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; - int ret, credits, ref_meta = 0, ref_credits = 0; + int ret, credits, had_lock, ref_meta = 0, ref_credits = 0; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; struct ocfs2_refcount_tree *ref_tree = NULL; + struct ocfs2_lock_holder oh;
struct ocfs2_xattr_info xi = { .xi_name_index = name_index, @@ -3551,8 +3553,9 @@ int ocfs2_xattr_set(struct inode *inode, return -ENOMEM; }
- ret = ocfs2_inode_lock(inode, &di_bh, 1); - if (ret < 0) { + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh); + if (had_lock < 0) { + ret = had_lock; mlog_errno(ret); goto cleanup_nolock; } @@ -3649,7 +3652,7 @@ cleanup: if (ret) mlog_errno(ret); } - ocfs2_inode_unlock(inode, 1); + ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); cleanup_nolock: brelse(di_bh); brelse(xbs.xattr_bh);
From: Marcelo Ricardo Leitner marcelo.leitner@gmail.com
[ Upstream commit a02d036c027e9070ca27c156f59eb445d5405480 ]
RFC 4960 Errata 3.27 identifies that ssthresh should be adjusted to cwnd because otherwise it could cause the transport to lock into the congestion avoidance phase, especially if ssthresh was previously reduced by some packet drop, leading to poor performance.
The Errata says to adjust ssthresh to cwnd only once, though the same goal is achieved by updating it every time we update cwnd too. The caveat is that we could take longer to get back up to speed, but that should be compensated by the fact that we don't adjust on an RTO basis (as the RFC says) but based on Heartbeats, which are usually way longer.
See-also: https://tools.ietf.org/html/draft-ietf-tsvwg-rfc4960-errata-01#section-3.27 Signed-off-by: Marcelo Ricardo Leitner marcelo.leitner@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/sctp/transport.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index aab9e3f29755..d0889e560615 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -546,6 +546,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, */ transport->cwnd = max(transport->cwnd/2, 4*asoc->pathmtu); + /* RFC 4960 Errata 3.27.2: also adjust sshthresh */ + transport->ssthresh = transport->cwnd; break; }
From: Luc Van Oostenryck luc.vanoostenryck@gmail.com
[ Upstream commit bcde519e8c325f3cc1fcf443eb6466e6bb3a3aca ]
ARM64 depends on the macro __AARCH64EB__ being defined or not to correctly select or define endian-specific macros, structures or pieces of code.
This macro is predefined by the compiler but sparse knows nothing about it and thus may pre-process files differently from what gcc would.
Fix this by passing '-D__AARCH64EL__' or '-D__AARCH64EB__' to sparse depending on the endianness of the kernel, as defined by GCC.
Note: In most cases it won't change anything, since most arm64 kernels are little-endian (but an allyesconfig would use big-endian!).
CC: Catalin Marinas catalin.marinas@arm.com CC: Will Deacon will.deacon@arm.com CC: linux-arm-kernel@lists.infradead.org Signed-off-by: Luc Van Oostenryck luc.vanoostenryck@gmail.com Signed-off-by: Will Deacon will.deacon@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/arm64/Makefile | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 119d1885a3c0..95c667328b5d 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -32,10 +32,12 @@ KBUILD_AFLAGS += $(lseinstr)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian +CHECKFLAGS += -D__AARCH64EB__ AS += -EB LD += -EB else KBUILD_CPPFLAGS += -mlittle-endian +CHECKFLAGS += -D__AARCH64EL__ AS += -EL LD += -EL endif
From: Kees Cook keescook@chromium.org
[ Upstream commit 93bd70e3330be45542c455dde11d8dc657ab3044 ]
While glibc's pthread implementation is rather forgiving about repeat thread joining, Bionic has recently become much more strict. To deal with this, actually track which threads have been successfully joined and kill the rest at teardown.
Based on a patch from Paul Lawrence.
Cc: Paul Lawrence paullawrence@google.com Signed-off-by: Kees Cook keescook@chromium.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/testing/selftests/seccomp/seccomp_bpf.c | 51 ++++++++++++++++++--------- 1 file changed, 34 insertions(+), 17 deletions(-)
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 882fe83a3554..821aa0a408dc 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1615,6 +1615,23 @@ struct tsync_sibling { struct __test_metadata *metadata; };
+/* + * To avoid joining joined threads (which is not allowed by Bionic), + * make sure we both successfully join and clear the tid to skip a + * later join attempt during fixture teardown. Any remaining threads + * will be directly killed during teardown. + */ +#define PTHREAD_JOIN(tid, status) \ + do { \ + int _rc = pthread_join(tid, status); \ + if (_rc) { \ + TH_LOG("pthread_join of tid %u failed: %d\n", \ + (unsigned int)tid, _rc); \ + } else { \ + tid = 0; \ + } \ + } while (0) + FIXTURE_DATA(TSYNC) { struct sock_fprog root_prog, apply_prog; struct tsync_sibling sibling[TSYNC_SIBLINGS]; @@ -1683,14 +1700,14 @@ FIXTURE_TEARDOWN(TSYNC)
for ( ; sib < self->sibling_count; ++sib) { struct tsync_sibling *s = &self->sibling[sib]; - void *status;
if (!s->tid) continue; - if (pthread_kill(s->tid, 0)) { - pthread_cancel(s->tid); - pthread_join(s->tid, &status); - } + /* + * If a thread is still running, it may be stuck, so hit + * it over the head really hard. + */ + pthread_kill(s->tid, 9); } pthread_mutex_destroy(&self->mutex); pthread_cond_destroy(&self->cond); @@ -1780,9 +1797,9 @@ TEST_F(TSYNC, siblings_fail_prctl) pthread_mutex_unlock(&self->mutex);
/* Ensure diverging sibling failed to call prctl. */ - pthread_join(self->sibling[0].tid, &status); + PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status); - pthread_join(self->sibling[1].tid, &status); + PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); }
@@ -1822,9 +1839,9 @@ TEST_F(TSYNC, two_siblings_with_ancestor) } pthread_mutex_unlock(&self->mutex); /* Ensure they are both killed and don't exit cleanly. */ - pthread_join(self->sibling[0].tid, &status); + PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(0x0, (long)status); - pthread_join(self->sibling[1].tid, &status); + PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(0x0, (long)status); }
@@ -1848,9 +1865,9 @@ TEST_F(TSYNC, two_sibling_want_nnp) pthread_mutex_unlock(&self->mutex);
/* Ensure they are both upset about lacking nnp. */ - pthread_join(self->sibling[0].tid, &status); + PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); - pthread_join(self->sibling[1].tid, &status); + PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); }
@@ -1888,9 +1905,9 @@ TEST_F(TSYNC, two_siblings_with_no_filter) pthread_mutex_unlock(&self->mutex);
/* Ensure they are both killed and don't exit cleanly. */ - pthread_join(self->sibling[0].tid, &status); + PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(0x0, (long)status); - pthread_join(self->sibling[1].tid, &status); + PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(0x0, (long)status); }
@@ -1933,9 +1950,9 @@ TEST_F(TSYNC, two_siblings_with_one_divergence) pthread_mutex_unlock(&self->mutex);
/* Ensure they are both unkilled. */ - pthread_join(self->sibling[0].tid, &status); + PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); - pthread_join(self->sibling[1].tid, &status); + PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); }
@@ -1992,7 +2009,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter) TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); - pthread_join(self->sibling[sib].tid, &status); + PTHREAD_JOIN(self->sibling[sib].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); /* Poll for actual task death. pthread_join doesn't guarantee it. */ while (!kill(self->sibling[sib].system_tid, 0)) @@ -2017,7 +2034,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter) TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); - pthread_join(self->sibling[sib].tid, &status); + PTHREAD_JOIN(self->sibling[sib].tid, &status); EXPECT_EQ(0, (long)status); /* Poll for actual task death. pthread_join doesn't guarantee it. */ while (!kill(self->sibling[sib].system_tid, 0))
From: Dan Carpenter dan.carpenter@oracle.com
[ Upstream commit 1d32a62c74b3bcb69822b0f4745af5410cfec3a7 ]
If bnx2i_map_ep_dbell_regs() fails then we accidentally return NULL instead of an error pointer. It results in a NULL dereference in iscsi_if_ep_connect().
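A standalone sketch (a userspace re-implementation of the kernel's ERR_PTR()/IS_ERR() convention, illustrative values only) of why NULL is the wrong failure value here: callers only treat ERR_PTR() values as errors, so a NULL return slips past the check and is later dereferenced.

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *bad = NULL;		/* what the buggy error path returned */
	void *good = ERR_PTR(-5);	/* what an error path should return (-EIO) */

	printf("IS_ERR(NULL)        = %d\n", IS_ERR(bad));	/* 0: error goes unnoticed */
	printf("IS_ERR(ERR_PTR(-5)) = %d\n", IS_ERR(good));	/* 1: caller sees the error */
	return 0;
}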
Fixes: cf4e6363859d ("[SCSI] bnx2i: Add bnx2i iSCSI driver.") Signed-off-by: Dan Carpenter dan.carpenter@oracle.com Reviewed-by: Johannes Thumshirn jthumshirn@suse.de Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/scsi/bnx2i/bnx2i_iscsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 72894378ffcf..94ee4bbc0505 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1909,7 +1909,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
bnx2i_ep_active_list_add(hba, bnx2i_ep);
- if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) + rc = bnx2i_map_ep_dbell_regs(bnx2i_ep); + if (rc) goto del_active_ep;
mutex_unlock(&hba->net_dev_lock);
From: Michael Neuling mikey@neuling.org
[ Upstream commit 64ebb9a208c6e66316329a6d9101815d1ee06fa9 ]
The P9 PVR bits 12-15 don't indicate a revision but instead different chip configurations. From BookIV we have:

   Bits      Configuration
    0 :      Scale out 12 cores
    1 :      Scale out 24 cores
    2 :      Scale up 12 cores
    3 :      Scale up 24 cores
DD1 doesn't use this but DD2 does. Linux will mostly use the "Scale out 24 core" configuration (ie. SMT4 not SMT8) which results in a PVR of 0x004e1200. The revision in /proc/cpuinfo is hence incorrectly reported as "18.0".
This patch fixes this to mask off only the relevant bits for the major revision (ie. bits 8-11) for POWER9.
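A standalone sketch of the arithmetic, using the PVR value 0x004e1200 quoted above: masking the major revision with 0xFF picks up the configuration bits and yields 18, while masking with 0x0F yields the intended 2.

#include <stdio.h>

int main(void)
{
	unsigned int pvr = 0x004e1200;	/* POWER9, "Scale out 24 cores" */
	unsigned int maj_old, maj_new, min;

	maj_old = (pvr >> 8) & 0xFF;	/* generic rule: 0x12 -> reported as "18" */
	maj_new = (pvr >> 8) & 0x0F;	/* POWER9 rule, bits 8-11 only: 2 */
	min = pvr & 0xFF;		/* 0 */

	printf("old: %u.%u  new: %u.%u\n", maj_old, min, maj_new, min);
	return 0;
}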
Signed-off-by: Michael Neuling mikey@neuling.org Signed-off-by: Michael Ellerman mpe@ellerman.id.au Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/powerpc/kernel/setup-common.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 44c8d03558ac..aa25af44a971 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -310,6 +310,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) maj = ((pvr >> 8) & 0xFF) - 1; min = pvr & 0xFF; break; + case 0x004e: /* POWER9 bits 12-15 give chip type */ + maj = (pvr >> 8) & 0x0F; + min = pvr & 0xFF; + break; default: maj = (pvr >> 8) & 0xFF; min = pvr & 0xFF;
From: Gal Pressman galp@mellanox.com
[ Upstream commit 8ce59b16b4b6eacedaec1f7b652b4781cdbfe15f ]
When waiting for firmware init fails, the previous code would mistakenly return success and leave the driver in an inconsistent state.
Fixes: 6c780a0267b8 ("net/mlx5: Wait for FW readiness before initializing command interface") Signed-off-by: Gal Pressman galp@mellanox.com Signed-off-by: Saeed Mahameed saeedm@mellanox.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f5c1f4acc57b..e9636c0117c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -941,7 +941,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) if (err) { dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", FW_PRE_INIT_TIMEOUT_MILI); - goto out; + goto out_err; }
err = mlx5_cmd_init(dev);
From: Lv Zheng lv.zheng@intel.com
[ Upstream commit 4625d752e600c116e6327f9d7fc16e8f5be107e9 ]
acpi_ec_cmd_string() is currently only enabled when the "DEBUG" macro is defined, but users tend to use CONFIG_DYNAMIC_DEBUG and enable the ec.c pr_debug() print-outs with "dyndbg='file ec.c +p'". In this use case, all command names are turned into UNDEF and the log is confusing. This affects bugzilla triage work.
This patch fixes this issue by enabling acpi_ec_cmd_string() for CONFIG_DYNAMIC_DEBUG.
Tested-by: Wang Wendy wendy.wang@intel.com Tested-by: Feng Chenzhou chenzhoux.feng@intel.com Signed-off-by: Lv Zheng lv.zheng@intel.com Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/acpi/ec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 43f20328f830..aaec2efbe64b 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -287,7 +287,7 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) ec->timestamp = jiffies; }
-#ifdef DEBUG +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) static const char *acpi_ec_cmd_string(u8 cmd) { switch (cmd) {
From: Johan Hovold johan@kernel.org
[ Upstream commit f62f9ffdb5ef683ef8cffb43932fa72cc3713e94 ]
Make sure to drop the reference to the dma device taken by of_find_device_by_node() on probe errors and on driver unbind.
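A minimal sketch of the reference-counting rule being applied (not the sun_esp code itself; example_probe() and do_setup() are made-up names): of_find_device_by_node() returns the device with its reference count raised, so every path that stops using it, including error paths and the unbind path, must call put_device().

#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int do_setup(struct platform_device *pdev)
{
	return 0;			/* stand-in for the real setup work */
}

static int example_probe(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	int ret;

	if (!pdev)
		return -ENODEV;		/* lookup failed: nothing to put */

	ret = do_setup(pdev);
	if (ret)
		put_device(&pdev->dev);	/* drop the reference on the error path */

	return ret;			/* on success, the unbind path must put_device() later */
}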
Fixes: 334ae614772b ("sparc: Kill SBUS DVMA layer.") Signed-off-by: Johan Hovold johan@kernel.org Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/scsi/sun_esp.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 7b6d4c2087d7..747ee64a78e1 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -566,6 +566,7 @@ static int esp_sbus_probe(struct platform_device *op) struct device_node *dp = op->dev.of_node; struct platform_device *dma_of = NULL; int hme = 0; + int ret;
if (dp->parent && (!strcmp(dp->parent->name, "espdma") || @@ -580,7 +581,11 @@ static int esp_sbus_probe(struct platform_device *op) if (!dma_of) return -ENODEV;
- return esp_sbus_probe_one(op, dma_of, hme); + ret = esp_sbus_probe_one(op, dma_of, hme); + if (ret) + put_device(&dma_of->dev); + + return ret; }
static int esp_sbus_remove(struct platform_device *op) @@ -613,6 +618,8 @@ static int esp_sbus_remove(struct platform_device *op)
dev_set_drvdata(&op->dev, NULL);
+ put_device(&dma_of->dev); + return 0; }
From: Sebastian Ott sebott@linux.vnet.ibm.com
[ Upstream commit 4dfbd3efe3f0cf9ff1325b87491e1b1fe07afaf1 ]
When we ask a function to stop creating interrupts this may fail due to the function being already gone (e.g. after hot-unplug).
Consequently we don't free associated resources like summary bits and bit vectors used for irq processing. This could lead to situations where we run out of these resources and fail to set up new interrupts.
The fix is to just ignore the errors in cases where we can be sure no new interrupts are generated.
Signed-off-by: Sebastian Ott sebott@linux.vnet.ibm.com Reviewed-by: Gerald Schaefer gerald.schaefer@de.ibm.com Signed-off-by: Martin Schwidefsky schwidefsky@de.ibm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/s390/include/asm/pci_insn.h | 2 +- arch/s390/pci/pci.c | 29 +++++++++++++++++++---------- arch/s390/pci/pci_insn.c | 10 +++++----- 3 files changed, 25 insertions(+), 16 deletions(-)
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index 9e02cb7955c1..a74efc02ad2c 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -76,7 +76,7 @@ struct zpci_fib { u32 gd; } __packed __aligned(8);
-int zpci_mod_fc(u64 req, struct zpci_fib *fib); +u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status); int zpci_refresh_trans(u64 fn, u64 addr, u64 range); int zpci_load(u64 *data, u64 req, u64 offset); int zpci_store(u64 data, u64 req, u64 offset); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index ef0499b76c50..d95bfffdcc2e 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -113,6 +113,7 @@ static int zpci_set_airq(struct zpci_dev *zdev) { u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); struct zpci_fib fib = {0}; + u8 status;
fib.isc = PCI_ISC; fib.sum = 1; /* enable summary notifications */ @@ -122,7 +123,22 @@ static int zpci_set_airq(struct zpci_dev *zdev) fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8; fib.aisbo = zdev->aisb & 63;
- return zpci_mod_fc(req, &fib); + return zpci_mod_fc(req, &fib, &status) ? -EIO : 0; +} + +/* Modify PCI: Unregister adapter interruptions */ +static int zpci_clear_airq(struct zpci_dev *zdev) +{ + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT); + struct zpci_fib fib = {0}; + u8 cc, status; + + cc = zpci_mod_fc(req, &fib, &status); + if (cc == 3 || (cc == 1 && status == 24)) + /* Function already gone or IRQs already deregistered. */ + cc = 0; + + return cc ? -EIO : 0; }
struct mod_pci_args { @@ -136,13 +152,14 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args { u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn); struct zpci_fib fib = {0}; + u8 status;
fib.pba = args->base; fib.pal = args->limit; fib.iota = args->iota; fib.fmb_addr = args->fmb_addr;
- return zpci_mod_fc(req, &fib); + return zpci_mod_fc(req, &fib, &status) ? -EIO : 0; }
/* Modify PCI: Register I/O address translation parameters */ @@ -164,14 +181,6 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas) return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args); }
-/* Modify PCI: Unregister adapter interruptions */ -static int zpci_clear_airq(struct zpci_dev *zdev) -{ - struct mod_pci_args args = { 0, 0, 0, 0 }; - - return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args); -} - /* Modify PCI: Set PCI function measurement parameters */ int zpci_fmb_enable_device(struct zpci_dev *zdev) { diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index bc065392f7ab..c005dbb01563 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -41,20 +41,20 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) return cc; }
-int zpci_mod_fc(u64 req, struct zpci_fib *fib) +u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status) { - u8 cc, status; + u8 cc;
do { - cc = __mpcifc(req, fib, &status); + cc = __mpcifc(req, fib, status); if (cc == 2) msleep(ZPCI_INSN_BUSY_DELAY); } while (cc == 2);
if (cc) - zpci_err_insn(cc, status, req, 0); + zpci_err_insn(cc, *status, req, 0);
- return (cc) ? -EIO : 0; + return cc; }
/* Refresh PCI Translations */
From: Hari Bathini hbathini@linux.vnet.ibm.com
[ Upstream commit a77af552ccc9d4d54459a39f9e5f7ad307aeb4f9 ]
fadump sets up crash memory ranges to be used for creating PT_LOAD program headers in the elfcore header. The memory chunk from RMA_START through the boot memory area size is added as the first memory range because firmware, at the time of crash, moves this memory chunk to a different location specified during fadump registration, making it necessary to create a separate program header for it with the correct offset. This memory chunk is skipped while setting up the remaining memory ranges. But currently, there is a possibility that some of this memory may have duplicate entries, for example when it is hot-removed and added again. Ensure that no two memory ranges represent the same memory.
When 5 LMBs are hot-removed and then hot-plugged before registering fadump, here is how the program headers in /proc/vmcore exported by fadump look
without this change:
Program Headers:
  Type   Offset             VirtAddr           PhysAddr
         FileSiz            MemSiz              Flags  Align
  NOTE   0x0000000000010000 0x0000000000000000 0x0000000000000000
         0x0000000000001894 0x0000000000001894         0
  LOAD   0x0000000000021020 0xc000000000000000 0x0000000000000000
         0x0000000040000000 0x0000000040000000  RWE    0
  LOAD   0x0000000040031020 0xc000000000000000 0x0000000000000000
         0x0000000010000000 0x0000000010000000  RWE    0
  LOAD   0x0000000050040000 0xc000000010000000 0x0000000010000000
         0x0000000050000000 0x0000000050000000  RWE    0
  LOAD   0x00000000a0040000 0xc000000060000000 0x0000000060000000
         0x000000019ffe0000 0x000000019ffe0000  RWE    0
and with this change:
Program Headers:
  Type   Offset             VirtAddr           PhysAddr
         FileSiz            MemSiz              Flags  Align
  NOTE   0x0000000000010000 0x0000000000000000 0x0000000000000000
         0x0000000000001894 0x0000000000001894         0
  LOAD   0x0000000000021020 0xc000000000000000 0x0000000000000000
         0x0000000040000000 0x0000000040000000  RWE    0
  LOAD   0x0000000040030000 0xc000000040000000 0x0000000040000000
         0x0000000020000000 0x0000000020000000  RWE    0
  LOAD   0x0000000060030000 0xc000000060000000 0x0000000060000000
         0x000000019ffe0000 0x000000019ffe0000  RWE    0
Signed-off-by: Hari Bathini hbathini@linux.vnet.ibm.com Reviewed-by: Mahesh J Salgaonkar mahesh@linux.vnet.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/powerpc/kernel/fadump.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 26d091a1a54c..5df362630ffc 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -812,8 +812,19 @@ static void fadump_setup_crash_memory_ranges(void) for_each_memblock(memory, reg) { start = (unsigned long long)reg->base; end = start + (unsigned long long)reg->size; - if (start == RMA_START && end >= fw_dump.boot_memory_size) - start = fw_dump.boot_memory_size; + + /* + * skip the first memory chunk that is already added (RMA_START + * through boot_memory_size). This logic needs a relook if and + * when RMA_START changes to a non-zero value. + */ + BUILD_BUG_ON(RMA_START != 0); + if (start < fw_dump.boot_memory_size) { + if (end > fw_dump.boot_memory_size) + start = fw_dump.boot_memory_size; + else + continue; + }
/* add this range excluding the reserved dump area. */ fadump_exclude_reserved_area(start, end);
From: Sebastian Ott sebott@linux.vnet.ibm.com
[ Upstream commit 725708349172f00b86e2bac5c03de360b79aaf65 ]
DMA tables are freed in zpci_dma_exit_device regardless of the return code of zpci_unregister_ioat. This could lead to a use after free. On the other hand during function hot-unplug, zpci_unregister_ioat will always fail since the function is already gone.
So let zpci_unregister_ioat report success when the function is gone, but don't clean up the DMA table when the function could still access it.
Signed-off-by: Sebastian Ott sebott@linux.vnet.ibm.com Reviewed-by: Gerald Schaefer gerald.schaefer@de.ibm.com Signed-off-by: Martin Schwidefsky schwidefsky@de.ibm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/s390/pci/pci.c | 19 ++++++++++++++----- arch/s390/pci/pci_dma.c | 4 +++- 2 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index d95bfffdcc2e..a23140fd666b 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -166,19 +166,28 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas, u64 base, u64 limit, u64 iota) { - struct mod_pci_args args = { base, limit, iota, 0 }; + u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT); + struct zpci_fib fib = {0}; + u8 status;
WARN_ON_ONCE(iota & 0x3fff); - args.iota |= ZPCI_IOTA_RTTO_FLAG; - return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args); + fib.pba = base; + fib.pal = limit; + fib.iota = iota | ZPCI_IOTA_RTTO_FLAG; + return zpci_mod_fc(req, &fib, &status) ? -EIO : 0; }
/* Modify PCI: Unregister I/O address translation parameters */ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas) { - struct mod_pci_args args = { 0, 0, 0, 0 }; + u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT); + struct zpci_fib fib = {0}; + u8 cc, status;
- return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args); + cc = zpci_mod_fc(req, &fib, &status); + if (cc == 3) /* Function already gone. */ + cc = 0; + return cc ? -EIO : 0; }
/* Modify PCI: Set PCI function measurement parameters */ diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 4004e03267cd..49ff18f8cf99 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -503,7 +503,9 @@ void zpci_dma_exit_device(struct zpci_dev *zdev) */ WARN_ON(zdev->s390_domain);
- zpci_unregister_ioat(zdev, 0); + if (zpci_unregister_ioat(zdev, 0)) + return; + dma_cleanup_tables(zdev->dma_table); zdev->dma_table = NULL; vfree(zdev->iommu_bitmap);
From: Chuck Lever chuck.lever@oracle.com
[ Upstream commit 06eb8a56af23ae32e90fdd6b27fec30930364b52 ]
Running a multi-threaded 8KB fio test (70/30 mix), three or four out of twelve of the jobs fail when using krb5i. The failure is an EIO on a read.
Troubleshooting confirmed the EIO results when the client fails to verify the MIC of an NFS READ reply. Bruce suggested the problem could be due to the data payload changing between the time the reply's MIC was computed on the server and the time the reply was actually sent.
krb5p gets around this problem by disabling RQ_SPLICE_OK. Use the same mechanism for krb5i RPCs.
"iozone -i0 -i1 -s128m -y1k -az -I", export is tmpfs, mount is sec=krb5i,vers=3,proto=rdma. The important numbers are the read / reread column.
Here's without the RQ_SPLICE_OK patch:
    kB  reclen    write  rewrite     read   reread
131072       1     7546     7929     8396     8267
131072       2    14375    14600    15843    15639
131072       4    19280    19248    21303    21410
131072       8    32350    31772    35199    34883
131072      16    36748    37477    49365    51706
131072      32    55669    56059    57475    57389
131072      64    74599    75190    74903    75550
131072     128    99810   101446   102828   102724
131072     256   122042   122612   124806   125026
131072     512   137614   138004   141412   141267
131072    1024   146601   148774   151356   151409
131072    2048   180684   181727   293140   292840
131072    4096   206907   207658   552964   549029
131072    8192   223982   224360   454493   473469
131072   16384   228927   228390   654734   632607
And here's with it:
    kB  reclen    write  rewrite     read   reread
131072       1     7700     7365     7958     8011
131072       2    13211    13303    14937    14414
131072       4    19001    19265    20544    20657
131072       8    30883    31097    34255    33566
131072      16    36868    34908    51499    49944
131072      32    56428    55535    58710    56952
131072      64    73507    74676    75619    74378
131072     128   100324   101442   103276   102736
131072     256   122517   122995   124639   124150
131072     512   137317   139007   140530   140830
131072    1024   146807   148923   151246   151072
131072    2048   179656   180732   292631   292034
131072    4096   206216   208583   543355   541951
131072    8192   223738   224273   494201   489372
131072   16384   229313   229840   691719   668427
I would say that there is not much difference in this test.
For good measure, here's the same test with sec=krb5p:
    kB  reclen    write  rewrite     read   reread
131072       1     5982     5881     6137     6218
131072       2    10216    10252    10850    10932
131072       4    12236    12575    15375    15526
131072       8    15461    15462    23821    22351
131072      16    25677    25811    27529    27640
131072      32    31903    32354    34063    33857
131072      64    42989    43188    45635    45561
131072     128    52848    53210    56144    56141
131072     256    59123    59214    62691    62933
131072     512    63140    63277    66887    67025
131072    1024    65255    65299    69213    69140
131072    2048    76454    76555   133767   133862
131072    4096    84726    84883   251925   250702
131072    8192    89491    89482   270821   276085
131072   16384    91572    91597   361768   336868
BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=307 Signed-off-by: Chuck Lever chuck.lever@oracle.com Reviewed-by: Jeff Layton jlayton@redhat.com Signed-off-by: J. Bruce Fields bfields@redhat.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/sunrpc/auth_gss/svcauth_gss.c | 8 ++++++++ net/sunrpc/svc.c | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 036bbf2b44c1..5a52e37dc395 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -832,6 +832,14 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g struct xdr_netobj mic; struct xdr_buf integ_buf;
+ /* NFS READ normally uses splice to send data in-place. However + * the data in cache can change after the reply's MIC is computed + * but before the RPC reply is sent. To prevent the client from + * rejecting the server-computed MIC in this somewhat rare case, + * do not use splice with the GSS integrity service. + */ + clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags); + /* Did we already verify the signature on the original pass through? */ if (rqstp->rq_deferred) return 0; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index c5b0cb4f4056..2221a52870dc 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1084,7 +1084,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) if (argv->iov_len < 6*4) goto err_short_len;
- /* Will be turned off only in gss privacy case: */ + /* Will be turned off by GSS integrity and privacy services */ set_bit(RQ_SPLICE_OK, &rqstp->rq_flags); /* Will be turned off only when NFSv4 Sessions are used */ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
From: Bjorn Helgaas bhelgaas@google.com
[ Upstream commit 675734baa361cf044033bb60594dea33d8d8da36 ]
John reported that an Intel QuickAssist crypto accelerator didn't work in a Dell PowerEdge R730. The problem seems to be that we enabled ECRC when the device doesn't support it:
85:00.0 Co-processor [0b40]: Intel Corporation DH895XCC Series QAT [8086:0435] Capabilities: [100 v1] Advanced Error Reporting AERCap: First Error Pointer: 00, GenCap- CGenEn+ ChkCap- ChkEn+
1302fcf0d03e ("PCI: Configure *all* devices, not just hot-added ones") exposed the problem because it applies settings from the _HPX method to all devices, not just hot-added ones. The R730 supplies an _HPX method that allows the kernel to enable ECRC.
Only enable ECRC if the device advertises support for it.
Link: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1571798 Fixes: 1302fcf0d03e ("PCI: Configure *all* devices, not just hot-added ones") Reported-by: John Mazzie john_mazzie@dell.com Signed-off-by: Bjorn Helgaas bhelgaas@google.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/pci/probe.c | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 193ac13de49b..dead38f99068 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1511,6 +1511,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) /* Initialize Advanced Error Capabilities and Control Register */ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32); reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; + /* Don't enable ECRC generation or checking if unsupported */ + if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) + reg32 &= ~PCI_ERR_CAP_ECRC_GENE; + if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) + reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
/*
From: Dan Carpenter dan.carpenter@oracle.com
[ Upstream commit 69551f5f370cc20342fab17ca54716b6ec7e332d ]
The kstrtoul() test was reversed so this always returned -ENOTSUPP.
Fixes: 27d7f47756f4 ("net: wireless: replace strict_strtoul() with kstrtoul()") Signed-off-by: Dan Carpenter dan.carpenter@oracle.com Reviewed-by: James Cameron quozl@laptop.org Signed-off-by: Kalle Valo kvalo@codeaurora.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/wireless/libertas/mesh.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c index d0c881dd5846..a78c4d18fe4b 100644 --- a/drivers/net/wireless/libertas/mesh.c +++ b/drivers/net/wireless/libertas/mesh.c @@ -239,8 +239,9 @@ static ssize_t lbs_prb_rsp_limit_set(struct device *dev, memset(&mesh_access, 0, sizeof(mesh_access)); mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
- if (!kstrtoul(buf, 10, &retry_limit)) - return -ENOTSUPP; + ret = kstrtoul(buf, 10, &retry_limit); + if (ret) + return ret; if (retry_limit > 15) return -ENOTSUPP;
From: Paul Burton paul.burton@imgtec.com
[ Upstream commit 5570ba2ee920de4e7760a2802b842771845b2c32 ]
Systems using the MIPS Coherence Manager (CM) cannot support multi-core SMP with dcache aliasing. This is because CPU caches are VIPT, but interventions in CM-based systems provide only the physical address to remote caches. This means that interventions may behave incorrectly in the presence of an aliasing dcache, since the physical address used when handling an intervention may lead to operation on an aliased cache line rather than the correct line.
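As a rough illustration of why a physical address alone cannot locate every copy of a line in a VIPT cache (all cache geometry below is assumed for the example, not taken from the patch):

#include <stdio.h>

/*
 * Hypothetical sketch, not kernel code: assume 4 KB pages and a 32 KB,
 * 4-way VIPT dcache with 64-byte lines, i.e. an 8 KB way with 128 sets.
 * Set-index bit 12 then lies above the 4 KB page offset and is taken
 * from the virtual address.
 */
#define LINE_SHIFT      6
#define NUM_SETS        128

static unsigned int set_index(unsigned long addr)
{
        return (addr >> LINE_SHIFT) & (NUM_SETS - 1);
}

int main(void)
{
        unsigned long pa  = 0x41000;    /* one physical line...               */
        unsigned long va1 = 0x10000;    /* ...mapped at two virtual addresses */
        unsigned long va2 = 0x11000;    /* that differ in bit 12              */

        /* Prints "VA1 -> set 0, VA2 -> set 64, PA -> set 64": the two
         * aliases sit in different sets, so an intervention carrying only
         * the PA indexes a single set and can miss the aliased copy. */
        printf("VA1 -> set %u, VA2 -> set %u, PA -> set %u\n",
               set_index(va1), set_index(va2), set_index(pa));
        return 0;
}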
Prevent us from running into this issue by refusing to boot secondary cores in systems where dcache aliasing may occur.
Signed-off-by: Paul Burton paul.burton@imgtec.com Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16196/ Signed-off-by: Ralf Baechle ralf@linux-mips.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/mips/kernel/smp-cps.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index e04c8057b882..ff0993dfeb08 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -122,9 +122,11 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
/* Warn the user if the CCA prevents multi-core */ ncores = mips_cm_numcores(); - if (cca_unsuitable && ncores > 1) { - pr_warn("Using only one core due to unsuitable CCA 0x%x\n", - cca); + if ((cca_unsuitable || cpu_has_dc_aliases) && ncores > 1) { + pr_warn("Using only one core due to %s%s%s\n", + cca_unsuitable ? "unsuitable CCA" : "", + (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", + cpu_has_dc_aliases ? "dcache aliasing" : "");
for_each_present_cpu(c) { if (cpu_data[c].core)
From: Goran Ferenc goran.ferenc@imgtec.com
[ Upstream commit 8ec7f15b8cca4f790df5cdf33f26e2926d4ee2fd ]
Fix the incorrect calculation in the do_monotonic() and do_monotonic_coarse() functions that in turn caused incorrect values to be returned by the vdso version of the clock_gettime() system call on mips64 when its clock ID parameter was CLOCK_MONOTONIC or CLOCK_MONOTONIC_COARSE.
Consider these variables and their types on mips32 and mips64:
tk->wall_to_monotonic.tv_sec s64, s64 (kernel/vdso.c) vdso_data.wall_to_mono_sec u32, u32 (kernel/vdso.c) to_mono_sec u32, u32 (vdso/gettimeofday.c) ts->tv_sec s32, s64 (vdso/gettimeofday.c)
In the mips64 case, the u32 vdso_data.wall_to_mono_sec variable is updated from the 64-bit signed variable tk->wall_to_monotonic.tv_sec (kernel/vdso.c:76), which is a negative number holding the time passed from 1970-01-01 to the time boot started. This 64-bit signed value currently corresponds to around 47+ years, expressed in seconds. For instance, let this value be:
-1489757461
or
11111111111111111111111111111111 10100111001101000001101011101011
By updating the 32-bit vdso_data.wall_to_mono_sec variable, we lose the upper 32 bits (the sign-extension 1's).
The to_mono_sec variable is a parameter of the do_monotonic() and do_monotonic_coarse() functions and holds the vdso_data.wall_to_mono_sec value. It needs to be added to the current time passed since 1970-01-01 (ts->tv_sec); because it holds the negative value from tk->wall_to_monotonic.tv_sec, the addition is effectively a subtraction. ts->tv_sec is again something like 47+ years, increased by the time passed from boot to the current moment, and it is 32 bits long on a 32-bit architecture and 64 bits long on a 64-bit architecture. Consider the update of ts->tv_sec (vdso/gettimeofday.c:55 & 167):
ts->tv_sec += to_mono_sec;
mips32 case: This update will be performed correctly, since both ts->tv_sec and to_mono_sec are 32 bits long and the sign in to_mono_sec is preserved. The implicit conversion from u32 to s32 is done correctly.
mips64 case: This update will be wrong, since the implicit conversion is not done correctly. The reason is that the conversion here is from u32 to s64: to_mono_sec is 32 bits long on both mips32 and mips64, so after the conversion the upper 32 bits (bits 63..32) of to_mono_sec are zeros and the negative offset turns into a large positive one.
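A minimal userspace sketch of the conversion difference (not part of the patch; the negative offset is the example value from above and 1500000000 is an arbitrary stand-in for the current time):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t  wall_to_mono = -1489757461LL;          /* example offset from above   */
        uint32_t to_mono_sec  = (uint32_t)wall_to_mono; /* truncated, sign bits lost   */

        int32_t sec32 = 1500000000;     /* "mips32": ts->tv_sec is s32 */
        int64_t sec64 = 1500000000;     /* "mips64": ts->tv_sec is s64 */

        sec32 += to_mono_sec;   /* 32-bit modular arithmetic keeps the negative offset */
        sec64 += to_mono_sec;   /* u32 is zero-extended to s64: result is off by 2^32  */

        printf("s32 += u32: %d\n", sec32);                 /* 10242539, correct        */
        printf("s64 += u32: %lld\n", (long long)sec64);    /* 4305209835, wrong        */
        return 0;
}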
So, in order to make the MIPS64 implementation work properly for the MONOTONIC and MONOTONIC_COARSE clock IDs, the size of the wall_to_mono_sec variable in the mips_vdso_data union and of the respective parameters of the do_monotonic() and do_monotonic_coarse() functions should be changed from u32 to u64. For consistency, this size change from u32 to u64 is also done for the wall_to_mono_nsec variable and the corresponding function parameters.
As far as similar situations on other architectures are concerned, let's take a look at ARM. ARM has two distinct vdso_data structures for the 32-bit and 64-bit cases, and its wall_to_mono_sec and wall_to_mono_nsec are u32 for 32-bit and u64 for 64-bit. MIPS, on the other hand, has only one structure (mips_vdso_data), hence the need to change the size of the above-mentioned parameters.
Signed-off-by: Goran Ferenc goran.ferenc@imgtec.com Signed-off-by: Miodrag Dinic miodrag.dinic@imgtec.com Signed-off-by: Aleksandar Markovic aleksandar.markovic@imgtec.com Cc: Douglas Leung douglas.leung@imgtec.com Cc: James Hogan james.hogan@imgtec.com Cc: Paul Burton paul.burton@imgtec.com Cc: Petar Jovanovic petar.jovanovic@imgtec.com Cc: Raghu Gandham raghu.gandham@imgtec.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/16638/ Signed-off-by: Ralf Baechle ralf@linux-mips.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/mips/include/asm/vdso.h | 4 ++-- arch/mips/vdso/gettimeofday.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h index 8f4ca5dd992b..b7cd6cf77b83 100644 --- a/arch/mips/include/asm/vdso.h +++ b/arch/mips/include/asm/vdso.h @@ -79,8 +79,8 @@ union mips_vdso_data { struct { u64 xtime_sec; u64 xtime_nsec; - u32 wall_to_mono_sec; - u32 wall_to_mono_nsec; + u64 wall_to_mono_sec; + u64 wall_to_mono_nsec; u32 seq_count; u32 cs_shift; u8 clock_mode; diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c index ce89c9e294f9..fd7d433970bf 100644 --- a/arch/mips/vdso/gettimeofday.c +++ b/arch/mips/vdso/gettimeofday.c @@ -39,8 +39,8 @@ static __always_inline int do_monotonic_coarse(struct timespec *ts, const union mips_vdso_data *data) { u32 start_seq; - u32 to_mono_sec; - u32 to_mono_nsec; + u64 to_mono_sec; + u64 to_mono_nsec;
do { start_seq = vdso_data_read_begin(data); @@ -148,8 +148,8 @@ static __always_inline int do_monotonic(struct timespec *ts, { u32 start_seq; u64 ns; - u32 to_mono_sec; - u32 to_mono_nsec; + u64 to_mono_sec; + u64 to_mono_nsec;
do { start_seq = vdso_data_read_begin(data);
From: Paul Burton paul.burton@imgtec.com
[ Upstream commit f39878cc5b09c75d35eaf52131e920b872e3feb4 ]
In systems where there are multiple actors updating the TLB, the potential exists for a race condition wherein a CPU hits a TLB exception but by the time it reaches a TLBP instruction the affected TLB entry may have been replaced. This can happen if, for example, a CPU shares the TLB between hardware threads (VPs) within a core and one of them replaces the entry that another has just taken a TLB exception for.
We handle this race in the case of the Hardware Table Walker (HTW) being the other actor already, but didn't take into account the potential for multiple threads racing. Include the code for aborting TLB exception handling in affected multi-threaded systems, those being the I6400 & I6500 CPUs which share TLB entries between VPs.
In the case of using RiXi without dedicated exceptions we have never handled this race even for HTW. This patch adds WARN()s to these cases which ought never to be hit because all CPUs with either HTW or shared FTLB RAMs also include dedicated RiXi exceptions, but the WARN()s will ensure this is always the case.
Signed-off-by: Paul Burton paul.burton@imgtec.com Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16203/ Signed-off-by: Ralf Baechle ralf@linux-mips.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/mips/mm/tlbex.c | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 63b7d6f82d24..b639e12867ef 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1876,6 +1876,26 @@ static void build_r3000_tlb_modify_handler(void) } #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
+static bool cpu_has_tlbex_tlbp_race(void) +{ + /* + * When a Hardware Table Walker is running it can replace TLB entries + * at any time, leading to a race between it & the CPU. + */ + if (cpu_has_htw) + return true; + + /* + * If the CPU shares FTLB RAM with its siblings then our entry may be + * replaced at any time by a sibling performing a write to the FTLB. + */ + if (cpu_has_shared_ftlb_ram) + return true; + + /* In all other cases there ought to be no race condition to handle */ + return false; +} + /* * R4000 style TLB load/store/modify handlers. */ @@ -1912,7 +1932,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ if (!m4kc_tlbp_war()) { build_tlb_probe_entry(p); - if (cpu_has_htw) { + if (cpu_has_tlbex_tlbp_race()) { /* race condition happens, leaving */ uasm_i_ehb(p); uasm_i_mfc0(p, wr.r3, C0_INDEX); @@ -1986,6 +2006,14 @@ static void build_r4000_tlb_load_handler(void) } uasm_i_nop(&p);
+ /* + * Warn if something may race with us & replace the TLB entry + * before we read it here. Everything with such races should + * also have dedicated RiXi exception handlers, so this + * shouldn't be hit. + */ + WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path"); + uasm_i_tlbr(&p);
switch (current_cpu_type()) { @@ -2053,6 +2081,14 @@ static void build_r4000_tlb_load_handler(void) } uasm_i_nop(&p);
+ /* + * Warn if something may race with us & replace the TLB entry + * before we read it here. Everything with such races should + * also have dedicated RiXi exception handlers, so this + * shouldn't be hit. + */ + WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path"); + uasm_i_tlbr(&p);
switch (current_cpu_type()) {
From: Goran Ferenc goran.ferenc@imgtec.com
[ Upstream commit 180902e08f051f72c89ffa366f4e4f7a8e9c753e ]
This patch adds clock_gettime_fallback() function that wraps assembly invocation of clock_gettime() syscall using __NR_clock_gettime.
This function is used if the pure VDSO implementation of clock_gettime() does not succeed for any reason. For example, it is called if the clkid parameter of clock_gettime() is not one of the clkids listed in the switch-case block of the function __vdso_clock_gettime() (one such clkid is CLOCK_BOOTTIME).
If syscall invocation via __NR_clock_gettime fails, register a3 will be set. So, after the syscall, register a3 is tested and the return value is negated if it's set.
Signed-off-by: Goran Ferenc goran.ferenc@imgtec.com Signed-off-by: Miodrag Dinic miodrag.dinic@imgtec.com Signed-off-by: Aleksandar Markovic aleksandar.markovic@imgtec.com Cc: Douglas Leung douglas.leung@imgtec.com Cc: James Hogan james.hogan@imgtec.com Cc: Paul Burton paul.burton@imgtec.com Cc: Petar Jovanovic petar.jovanovic@imgtec.com Cc: Raghu Gandham raghu.gandham@imgtec.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/16639/ Signed-off-by: Ralf Baechle ralf@linux-mips.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/mips/vdso/gettimeofday.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c index fd7d433970bf..5f6337545ee2 100644 --- a/arch/mips/vdso/gettimeofday.c +++ b/arch/mips/vdso/gettimeofday.c @@ -20,6 +20,24 @@ #include <asm/unistd.h> #include <asm/vdso.h>
+static __always_inline long clock_gettime_fallback(clockid_t _clkid, + struct timespec *_ts) +{ + register struct timespec *ts asm("a1") = _ts; + register clockid_t clkid asm("a0") = _clkid; + register long ret asm("v0"); + register long nr asm("v0") = __NR_clock_gettime; + register long error asm("a3"); + + asm volatile( + " syscall\n" + : "=r" (ret), "=r" (error) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return error ? -ret : ret; +} + static __always_inline int do_realtime_coarse(struct timespec *ts, const union mips_vdso_data *data) { @@ -207,7 +225,7 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts) { const union mips_vdso_data *data = get_vdso_data(); - int ret; + int ret = -1;
switch (clkid) { case CLOCK_REALTIME_COARSE: @@ -223,10 +241,11 @@ int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts) ret = do_monotonic(ts, data); break; default: - ret = -ENOSYS; break; }
- /* If we return -ENOSYS libc should fall back to a syscall. */ + if (ret) + ret = clock_gettime_fallback(clkid, ts); + return ret; }
From: Goran Ferenc goran.ferenc@imgtec.com
[ Upstream commit 0b523a85e134d41f57ddd8c5193bd9f0a5e20b0d ]
This patch adds gettimeofday_fallback() function that wraps assembly invocation of gettimeofday() syscall using __NR_gettimeofday.
This function is used if the pure VDSO implementation of gettimeofday() does not succeed for any reason. Its implementation is enclosed in "#ifdef CONFIG_MIPS_CLOCK_VSYSCALL" to be in sync with the similar arrangement for __vdso_gettimeofday().
If syscall invocation via __NR_gettimeofday fails, register a3 will be set. So, after the syscall, register a3 is tested and the return value is negated if it's set.
Signed-off-by: Goran Ferenc goran.ferenc@imgtec.com Signed-off-by: Miodrag Dinic miodrag.dinic@imgtec.com Signed-off-by: Aleksandar Markovic aleksandar.markovic@imgtec.com Cc: Douglas Leung douglas.leung@imgtec.com Cc: James Hogan james.hogan@imgtec.com Cc: Paul Burton paul.burton@imgtec.com Cc: Petar Jovanovic petar.jovanovic@imgtec.com Cc: Raghu Gandham raghu.gandham@imgtec.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/16640/ Signed-off-by: Ralf Baechle ralf@linux-mips.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/mips/vdso/gettimeofday.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c index 5f6337545ee2..23305bf6c7a2 100644 --- a/arch/mips/vdso/gettimeofday.c +++ b/arch/mips/vdso/gettimeofday.c @@ -20,6 +20,28 @@ #include <asm/unistd.h> #include <asm/vdso.h>
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +static __always_inline long gettimeofday_fallback(struct timeval *_tv, + struct timezone *_tz) +{ + register struct timezone *tz asm("a1") = _tz; + register struct timeval *tv asm("a0") = _tv; + register long ret asm("v0"); + register long nr asm("v0") = __NR_gettimeofday; + register long error asm("a3"); + + asm volatile( + " syscall\n" + : "=r" (ret), "=r" (error) + : "r" (tv), "r" (tz), "r" (nr) + : "memory"); + + return error ? -ret : ret; +} + +#endif + static __always_inline long clock_gettime_fallback(clockid_t _clkid, struct timespec *_ts) { @@ -205,7 +227,7 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
ret = do_realtime(&ts, data); if (ret) - return ret; + return gettimeofday_fallback(tv, tz);
if (tv) { tv->tv_sec = ts.tv_sec;
From: Dave Martin Dave.Martin@arm.com
[ Upstream commit 53b1a742ed251780267a57415bc955bd50f40c3d ]
If get_user() fails when reading the new FPSCR value from userspace in compat_vfp_get(), then garbage* will be written to the task's FPSR and FPCR registers.
This patch prevents this by checking the return from get_user() first.
[*] Actually, zero, due to the behaviour of get_user() on error, but that's still not what userspace expects.
Fixes: 478fcb2cdb23 ("arm64: Debugging support") Signed-off-by: Dave Martin Dave.Martin@arm.com Signed-off-by: Will Deacon will.deacon@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/arm64/kernel/ptrace.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 183f39384e4c..b81fa63bc834 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -834,8 +834,10 @@ static int compat_vfp_set(struct task_struct *target,
if (count && !ret) { ret = get_user(fpscr, (compat_ulong_t *)ubuf); - uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; - uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; + if (!ret) { + uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; + uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; + } }
fpsimd_flush_task_state(target);
From: Dave Martin Dave.Martin@arm.com
[ Upstream commit 5fbd5fc49fc39ac8433da62d16682a1d0217ea4f ]
Now that compat_vfp_get() uses the regset API to copy the FPSCR value out to userspace, compat_vfp_set() looks inconsistent. In particular, compat_vfp_set() will fail if called with kbuf != NULL && ubuf == NULL (which is valid usage according to the regset API).
This patch fixes compat_vfp_set() to use user_regset_copyin(), similarly to compat_vfp_get().
This also squashes a sparse warning triggered by the cast that drops __user when calling get_user().
Signed-off-by: Dave Martin Dave.Martin@arm.com Signed-off-by: Will Deacon will.deacon@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/arm64/kernel/ptrace.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index b81fa63bc834..f67bb64d2640 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -822,18 +822,20 @@ static int compat_vfp_set(struct task_struct *target, { struct user_fpsimd_state *uregs; compat_ulong_t fpscr; - int ret; + int ret, vregs_end_pos;
if (pos + count > VFP_STATE_SIZE) return -EIO;
uregs = &target->thread.fpsimd_state.user_fpsimd;
+ vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, - VFP_STATE_SIZE - sizeof(compat_ulong_t)); + vregs_end_pos);
if (count && !ret) { - ret = get_user(fpscr, (compat_ulong_t *)ubuf); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr, + vregs_end_pos, VFP_STATE_SIZE); if (!ret) { uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
From: Omar Sandoval osandov@fb.com
[ Upstream commit 0a16c7d7aecfae8987197e50116ebfc338cbe0a2 ]
Currently, we only increment total_bytes_pinned in btrfs_free_tree_block() when dropping the last reference on the block. However, when the delayed ref is run later, we will decrement total_bytes_pinned regardless of whether it was the last reference or not. This causes the counter to underflow when the reference we dropped was not the last reference. Fix it by incrementing the counter unconditionally, which is what btrfs_free_extent() does. This makes total_bytes_pinned an overestimate when references to shared extents are dropped, but in the worst case this will just make us try to commit the transaction to try to free up space and find we didn't free enough.
Signed-off-by: Omar Sandoval osandov@fb.com Tested-by: Holger Hoffstätte holger@applied-asynchrony.com Reviewed-by: Liu Bo bo.li.liu@oracle.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/btrfs/extent-tree.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 875e179bb2d5..2d59b3927223 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6770,10 +6770,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, BUG_ON(ret); /* -ENOMEM */ }
- if (!last_ref) - return; - - if (btrfs_header_generation(buf) == trans->transid) { + if (last_ref && btrfs_header_generation(buf) == trans->transid) { struct btrfs_block_group_cache *cache;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { @@ -6804,11 +6801,13 @@ out: btrfs_header_level(buf), root->root_key.objectid);
- /* - * Deleting the buffer, clear the corrupt flag since it doesn't matter - * anymore. - */ - clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); + if (last_ref) { + /* + * Deleting the buffer, clear the corrupt flag since it doesn't + * matter anymore. + */ + clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); + } }
/* Can return -ENOMEM */
From: Michael Grzeschik m.grzeschik@pengutronix.de
[ Upstream commit 5b85840320151f61e04d83a23ef2567a07094503 ]
This patch prevents the arcnet driver from the following deadlock.
[ 41.273910] ====================================================== [ 41.280397] [ INFO: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected ] [ 41.287433] 4.4.0-00034-gc0ae784 #536 Not tainted [ 41.292366] ------------------------------------------------------ [ 41.298863] arcecho/233 [HC0[0]:SC0[2]:HE0:SE0] is trying to acquire: [ 41.305628] (&(&lp->lock)->rlock){+.+...}, at: [<bf083bc8>] arcnet_send_packet+0x60/0x1c0 [arcnet] [ 41.315199] [ 41.315199] and this task is already holding: [ 41.321324] (_xmit_ARCNET#2){+.-...}, at: [<c06b934c>] packet_direct_xmit+0xfc/0x1c8 [ 41.329593] which would create a new lock dependency: [ 41.334893] (_xmit_ARCNET#2){+.-...} -> (&(&lp->lock)->rlock){+.+...} [ 41.341801] [ 41.341801] but this new dependency connects a SOFTIRQ-irq-safe lock: [ 41.350108] (_xmit_ARCNET#2){+.-...} ... which became SOFTIRQ-irq-safe at: [ 41.357539] [<c06f8fc8>] _raw_spin_lock+0x30/0x40 [ 41.362677] [<c063ab8c>] dev_watchdog+0x5c/0x264 [ 41.367723] [<c0094edc>] call_timer_fn+0x6c/0xf4 [ 41.372759] [<c00950b8>] run_timer_softirq+0x154/0x210 [ 41.378340] [<c0036b30>] __do_softirq+0x144/0x298 [ 41.383469] [<c0036fb4>] irq_exit+0xcc/0x130 [ 41.388138] [<c0085c50>] __handle_domain_irq+0x60/0xb4 [ 41.393728] [<c0014578>] __irq_svc+0x58/0x78 [ 41.398402] [<c0010274>] arch_cpu_idle+0x24/0x3c [ 41.403443] [<c007127c>] cpu_startup_entry+0x1f8/0x25c [ 41.409029] [<c09adc90>] start_kernel+0x3c0/0x3cc [ 41.414170] [ 41.414170] to a SOFTIRQ-irq-unsafe lock: [ 41.419931] (&(&lp->lock)->rlock){+.+...} ... which became SOFTIRQ-irq-unsafe at: [ 41.427996] ... [<c06f8fc8>] _raw_spin_lock+0x30/0x40 [ 41.433409] [<bf083d54>] arcnet_interrupt+0x2c/0x800 [arcnet] [ 41.439646] [<c0089120>] handle_nested_irq+0x8c/0xec [ 41.445063] [<c03c1170>] regmap_irq_thread+0x190/0x314 [ 41.450661] [<c0087244>] irq_thread_fn+0x1c/0x34 [ 41.455700] [<c0087548>] irq_thread+0x13c/0x1dc [ 41.460649] [<c0050f10>] kthread+0xe4/0xf8 [ 41.465158] [<c000f810>] ret_from_fork+0x14/0x24 [ 41.470207] [ 41.470207] other info that might help us debug this: [ 41.470207] [ 41.478627] Possible interrupt unsafe locking scenario: [ 41.478627] [ 41.485763] CPU0 CPU1 [ 41.490521] ---- ---- [ 41.495279] lock(&(&lp->lock)->rlock); [ 41.499414] local_irq_disable(); [ 41.505636] lock(_xmit_ARCNET#2); [ 41.511967] lock(&(&lp->lock)->rlock); [ 41.518741] <Interrupt> [ 41.521490] lock(_xmit_ARCNET#2); [ 41.525356] [ 41.525356] *** DEADLOCK *** [ 41.525356] [ 41.531587] 1 lock held by arcecho/233: [ 41.535617] #0: (_xmit_ARCNET#2){+.-...}, at: [<c06b934c>] packet_direct_xmit+0xfc/0x1c8 [ 41.544355] the dependencies between SOFTIRQ-irq-safe lock and the holding lock: [ 41.552362] -> (_xmit_ARCNET#2){+.-...} ops: 27 { [ 41.557357] HARDIRQ-ON-W at: [ 41.560664] [<c06f8fc8>] _raw_spin_lock+0x30/0x40 [ 41.567445] [<c063ba28>] dev_deactivate_many+0x114/0x304 [ 41.574866] [<c063bc3c>] dev_deactivate+0x24/0x38 [ 41.581646] [<c0630374>] linkwatch_do_dev+0x40/0x74 [ 41.588613] [<c06305d8>] __linkwatch_run_queue+0xec/0x140 [ 41.596120] [<c0630658>] linkwatch_event+0x2c/0x34 [ 41.602991] [<c004af30>] process_one_work+0x188/0x40c [ 41.610131] [<c004b200>] worker_thread+0x4c/0x480 [ 41.616912] [<c0050f10>] kthread+0xe4/0xf8 [ 41.623048] [<c000f810>] ret_from_fork+0x14/0x24 [ 41.629735] IN-SOFTIRQ-W at: [ 41.633039] [<c06f8fc8>] _raw_spin_lock+0x30/0x40 [ 41.639820] [<c063ab8c>] dev_watchdog+0x5c/0x264 [ 41.646508] [<c0094edc>] call_timer_fn+0x6c/0xf4 [ 41.653190] [<c00950b8>] run_timer_softirq+0x154/0x210 [ 41.660425] [<c0036b30>] __do_softirq+0x144/0x298 
[ 41.667201] [<c0036fb4>] irq_exit+0xcc/0x130 [ 41.673518] [<c0085c50>] __handle_domain_irq+0x60/0xb4 [ 41.680754] [<c0014578>] __irq_svc+0x58/0x78 [ 41.687077] [<c0010274>] arch_cpu_idle+0x24/0x3c [ 41.693769] [<c007127c>] cpu_startup_entry+0x1f8/0x25c [ 41.701006] [<c09adc90>] start_kernel+0x3c0/0x3cc [ 41.707791] INITIAL USE at: [ 41.711003] [<c06f8fc8>] _raw_spin_lock+0x30/0x40 [ 41.717696] [<c063ba28>] dev_deactivate_many+0x114/0x304 [ 41.725026] [<c063bc3c>] dev_deactivate+0x24/0x38 [ 41.731718] [<c0630374>] linkwatch_do_dev+0x40/0x74 [ 41.738593] [<c06305d8>] __linkwatch_run_queue+0xec/0x140 [ 41.746011] [<c0630658>] linkwatch_event+0x2c/0x34 [ 41.752789] [<c004af30>] process_one_work+0x188/0x40c [ 41.759847] [<c004b200>] worker_thread+0x4c/0x480 [ 41.766541] [<c0050f10>] kthread+0xe4/0xf8 [ 41.772596] [<c000f810>] ret_from_fork+0x14/0x24 [ 41.779198] } [ 41.780945] ... key at: [<c124d620>] netdev_xmit_lock_key+0x38/0x1c8 [ 41.788192] ... acquired at: [ 41.791309] [<c007bed8>] lock_acquire+0x70/0x90 [ 41.796361] [<c06f9140>] _raw_spin_lock_irqsave+0x40/0x54 [ 41.802324] [<bf083bc8>] arcnet_send_packet+0x60/0x1c0 [arcnet] [ 41.808844] [<c06b9380>] packet_direct_xmit+0x130/0x1c8 [ 41.814622] [<c06bc7e4>] packet_sendmsg+0x3b8/0x680 [ 41.820034] [<c05fe8b0>] sock_sendmsg+0x14/0x24 [ 41.825091] [<c05ffd68>] SyS_sendto+0xb8/0xe0 [ 41.829956] [<c05ffda8>] SyS_send+0x18/0x20 [ 41.834638] [<c000f780>] ret_fast_syscall+0x0/0x1c [ 41.839954] [ 41.841514] the dependencies between the lock to be acquired and SOFTIRQ-irq-unsafe lock: [ 41.850302] -> (&(&lp->lock)->rlock){+.+...} ops: 5 { [ 41.855644] HARDIRQ-ON-W at: [ 41.858945] [<c06f8fc8>] _raw_spin_lock+0x30/0x40 [ 41.865726] [<bf083d54>] arcnet_interrupt+0x2c/0x800 [arcnet] [ 41.873607] [<c0089120>] handle_nested_irq+0x8c/0xec [ 41.880666] [<c03c1170>] regmap_irq_thread+0x190/0x314 [ 41.887901] [<c0087244>] irq_thread_fn+0x1c/0x34 [ 41.894593] [<c0087548>] irq_thread+0x13c/0x1dc [ 41.901195] [<c0050f10>] kthread+0xe4/0xf8 [ 41.907338] [<c000f810>] ret_from_fork+0x14/0x24 [ 41.914025] SOFTIRQ-ON-W at: [ 41.917328] [<c06f8fc8>] _raw_spin_lock+0x30/0x40 [ 41.924106] [<bf083d54>] arcnet_interrupt+0x2c/0x800 [arcnet] [ 41.931981] [<c0089120>] handle_nested_irq+0x8c/0xec [ 41.939028] [<c03c1170>] regmap_irq_thread+0x190/0x314 [ 41.946264] [<c0087244>] irq_thread_fn+0x1c/0x34 [ 41.952954] [<c0087548>] irq_thread+0x13c/0x1dc [ 41.959548] [<c0050f10>] kthread+0xe4/0xf8 [ 41.965689] [<c000f810>] ret_from_fork+0x14/0x24 [ 41.972379] INITIAL USE at: [ 41.975595] [<c06f8fc8>] _raw_spin_lock+0x30/0x40 [ 41.982283] [<bf083d54>] arcnet_interrupt+0x2c/0x800 [arcnet] [ 41.990063] [<c0089120>] handle_nested_irq+0x8c/0xec [ 41.997027] [<c03c1170>] regmap_irq_thread+0x190/0x314 [ 42.004172] [<c0087244>] irq_thread_fn+0x1c/0x34 [ 42.010766] [<c0087548>] irq_thread+0x13c/0x1dc [ 42.017267] [<c0050f10>] kthread+0xe4/0xf8 [ 42.023314] [<c000f810>] ret_from_fork+0x14/0x24 [ 42.029903] } [ 42.031648] ... key at: [<bf0854cc>] __key.42091+0x0/0xfffff0f8 [arcnet] [ 42.039255] ... 
acquired at: [ 42.042372] [<c007bed8>] lock_acquire+0x70/0x90 [ 42.047413] [<c06f9140>] _raw_spin_lock_irqsave+0x40/0x54 [ 42.053364] [<bf083bc8>] arcnet_send_packet+0x60/0x1c0 [arcnet] [ 42.059872] [<c06b9380>] packet_direct_xmit+0x130/0x1c8 [ 42.065634] [<c06bc7e4>] packet_sendmsg+0x3b8/0x680 [ 42.071030] [<c05fe8b0>] sock_sendmsg+0x14/0x24 [ 42.076069] [<c05ffd68>] SyS_sendto+0xb8/0xe0 [ 42.080926] [<c05ffda8>] SyS_send+0x18/0x20 [ 42.085601] [<c000f780>] ret_fast_syscall+0x0/0x1c [ 42.090918] [ 42.092481] [ 42.092481] stack backtrace: [ 42.097065] CPU: 0 PID: 233 Comm: arcecho Not tainted 4.4.0-00034-gc0ae784 #536 [ 42.104751] Hardware name: Generic AM33XX (Flattened Device Tree) [ 42.111183] [<c0017ec8>] (unwind_backtrace) from [<c00139d0>] (show_stack+0x10/0x14) [ 42.119337] [<c00139d0>] (show_stack) from [<c02a82c4>] (dump_stack+0x8c/0x9c) [ 42.126937] [<c02a82c4>] (dump_stack) from [<c0078260>] (check_usage+0x4bc/0x63c) [ 42.134815] [<c0078260>] (check_usage) from [<c0078438>] (check_irq_usage+0x58/0xb0) [ 42.142964] [<c0078438>] (check_irq_usage) from [<c007aaa0>] (__lock_acquire+0x1524/0x20b0) [ 42.151740] [<c007aaa0>] (__lock_acquire) from [<c007bed8>] (lock_acquire+0x70/0x90) [ 42.159886] [<c007bed8>] (lock_acquire) from [<c06f9140>] (_raw_spin_lock_irqsave+0x40/0x54) [ 42.168768] [<c06f9140>] (_raw_spin_lock_irqsave) from [<bf083bc8>] (arcnet_send_packet+0x60/0x1c0 [arcnet]) [ 42.179115] [<bf083bc8>] (arcnet_send_packet [arcnet]) from [<c06b9380>] (packet_direct_xmit+0x130/0x1c8) [ 42.189182] [<c06b9380>] (packet_direct_xmit) from [<c06bc7e4>] (packet_sendmsg+0x3b8/0x680) [ 42.198059] [<c06bc7e4>] (packet_sendmsg) from [<c05fe8b0>] (sock_sendmsg+0x14/0x24) [ 42.206199] [<c05fe8b0>] (sock_sendmsg) from [<c05ffd68>] (SyS_sendto+0xb8/0xe0) [ 42.213978] [<c05ffd68>] (SyS_sendto) from [<c05ffda8>] (SyS_send+0x18/0x20) [ 42.221388] [<c05ffda8>] (SyS_send) from [<c000f780>] (ret_fast_syscall+0x0/0x1c)
Signed-off-by: Michael Grzeschik m.grzeschik@pengutronix.de
--- v1 -> v2: removed unneeded zero assignment of flags Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/arcnet/arcnet.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 6ea963e3b89a..915d55337e4c 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -756,6 +756,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) struct net_device *dev = dev_id; struct arcnet_local *lp; int recbuf, status, diagstatus, didsomething, boguscount; + unsigned long flags; int retval = IRQ_NONE;
arc_printk(D_DURING, dev, "\n"); @@ -765,7 +766,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) lp = netdev_priv(dev); BUG_ON(!lp);
- spin_lock(&lp->lock); + spin_lock_irqsave(&lp->lock, flags);
/* RESET flag was enabled - if device is not running, we must * clear it right away (but nothing else). @@ -774,7 +775,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) if (lp->hw.status(dev) & RESETflag) lp->hw.command(dev, CFLAGScmd | RESETclear); lp->hw.intmask(dev, 0); - spin_unlock(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); return retval; }
@@ -998,7 +999,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) udelay(1); lp->hw.intmask(dev, lp->intmask);
- spin_unlock(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); return retval; } EXPORT_SYMBOL(arcnet_interrupt);
From: Nicholas Piggin npiggin@gmail.com
[ Upstream commit 827880ec260ba048f95fe646b96a205c394fa0f0 ]
The linker does not like vdso-syms.lds in input archive files. Make it an extra-y instead.
Cc: Jeff Dike jdike@addtoit.com Cc: Richard Weinberger richard@nod.at Cc: user-mode-linux-devel@lists.sourceforge.net Signed-off-by: Nicholas Piggin npiggin@gmail.com Signed-off-by: Masahiro Yamada yamada.masahiro@socionext.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/x86/um/vdso/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile index 6c803ca49b5d..486f62c3bd04 100644 --- a/arch/x86/um/vdso/Makefile +++ b/arch/x86/um/vdso/Makefile @@ -50,7 +50,7 @@ CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-arcs -ftest-coverage CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-arcs -ftest-coverage
targets += vdso-syms.lds -obj-$(VDSO64-y) += vdso-syms.lds +extra-$(VDSO64-y) += vdso-syms.lds
# # Match symbols in the DSO that look like VDSO*; produce a file of constants.
From: "Luis R. Rodriguez" mcgrof@kernel.org
[ Upstream commit 41124db869b7e00e12052555f8987867ac01d70c ]
kmod <= v19 was broken -- it could return 0 to modprobe calls, incorrectly assuming that a kernel module was built-in, whereas in reality the module was just forming in the kernel. The reason for this is an incorrect userspace heuristics. A userspace kmod fix is available for it [0], however should userspace break again we could go on with an failed get_fs_type() which is hard to debug as the request_module() is detected as returning 0. The first suspect would be that there is something worth with the kernel's module loader and obviously in this case that is not the issue.
Since these issues are painful to debug complain when we know userspace has outright lied to us.
[0] http://git.kernel.org/cgit/utils/kernel/kmod/kmod.git/commit/libkmod/libkmod...
Suggested-by: Rusty Russell rusty@rustcorp.com.au Cc: Jessica Yu jeyu@redhat.com Signed-off-by: Luis R. Rodriguez mcgrof@kernel.org Signed-off-by: Al Viro viro@zeniv.linux.org.uk Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/filesystems.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/filesystems.c b/fs/filesystems.c index 5797d45a78cb..2a7ae03f821e 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -275,8 +275,10 @@ struct file_system_type *get_fs_type(const char *name) int len = dot ? dot - name : strlen(name);
fs = __get_fs_type(name, len); - if (!fs && (request_module("fs-%.*s", len, name) == 0)) + if (!fs && (request_module("fs-%.*s", len, name) == 0)) { fs = __get_fs_type(name, len); + WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name); + }
if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { put_filesystem(fs);
From: Thiago Jung Bauermann bauerman@linux.vnet.ibm.com
[ Upstream commit 12bf85a71000af7419b19b5e90910919f36f336c ]
H_GET_24X7_CATALOG_PAGE needs to be passed the version number obtained from the first catalog page obtained previously. This is a 64 bit number, but create_events_from_catalog truncates it to 32-bit.
This worked on POWER8, but POWER9 actually uses the upper bits so the call fails with H_P3 because the hypervisor doesn't recognize the version.
This patch also adds the hcall return code to the error message, which is helpful when debugging the problem.
Fixes: 5c5cd7b50259 ("powerpc/perf/hv-24x7: parse catalog and populate sysfs with events") Reviewed-by: Sukadev Bhattiprolu sukadev@linux.vnet.ibm.com Signed-off-by: Thiago Jung Bauermann bauerman@linux.vnet.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/powerpc/perf/hv-24x7.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index e8ca0fad2e69..e267f4c3e51d 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -668,7 +668,7 @@ static int create_events_from_catalog(struct attribute ***events_, event_data_bytes, junk_events, event_idx, event_attr_ct, i, attr_max, event_idx_last, desc_ct, long_desc_ct; ssize_t ct, ev_len; - uint32_t catalog_version_num; + uint64_t catalog_version_num; struct attribute **events, **event_descs, **event_long_descs; struct hv_24x7_catalog_page_0 *page_0 = kmem_cache_alloc(hv_page_cache, GFP_KERNEL); @@ -704,8 +704,8 @@ static int create_events_from_catalog(struct attribute ***events_, event_data_offs = be16_to_cpu(page_0->event_data_offs); event_data_len = be16_to_cpu(page_0->event_data_len);
- pr_devel("cv %zu cl %zu eec %zu edo %zu edl %zu\n", - (size_t)catalog_version_num, catalog_len, + pr_devel("cv %llu cl %zu eec %zu edo %zu edl %zu\n", + catalog_version_num, catalog_len, event_entry_count, event_data_offs, event_data_len);
if ((MAX_4K < event_data_len) @@ -760,8 +760,8 @@ static int create_events_from_catalog(struct attribute ***events_, catalog_version_num, i + event_data_offs); if (hret) { - pr_err("failed to get event data in page %zu\n", - i + event_data_offs); + pr_err("Failed to get event data in page %zu: rc=%ld\n", + i + event_data_offs, hret); ret = -EIO; goto e_event_data; }
From: Adrian Hunter adrian.hunter@intel.com
[ Upstream commit 38b65b0891dc129dc0a5ce148a21c481e667b395 ]
CBR (core-to-bus ratio) packets provide an indication of CPU frequency. A more accurate measure can be made by counting the cycles (given by CYC packets) in between other timing packets (either MTC or TSC). Using TSC packets has at least 2 issues: 1) timing might have stopped (e.g. mwait) or 2) TSC packets within PSB+ might slip past CYC packets. For now, simply do not use TSC packets for calculating CPU cycles to TSC. That leaves the case where 2 MTC packets are used, otherwise falling back to the CBR value.
Signed-off-by: Adrian Hunter adrian.hunter@intel.com Cc: Andi Kleen ak@linux.intel.com Link: http://lkml.kernel.org/r/1495786658-18063-37-git-send-email-adrian.hunter@in... Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+)
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index eeeae0629ad3..fbfe97dcdaf0 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -696,6 +696,12 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) break;
case INTEL_PT_TSC: + /* + * For now, do not support using TSC packets - refer + * intel_pt_calc_cyc_to_tsc(). + */ + if (data->from_mtc) + return 1; timestamp = pkt_info->packet.payload | (data->timestamp & (0xffULL << 56)); if (data->from_mtc && timestamp < data->timestamp && @@ -808,6 +814,14 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder, .cbr_cyc_to_tsc = 0, };
+ /* + * For now, do not support using TSC packets for at least the reasons: + * 1) timing might have stopped + * 2) TSC packets within PSB+ can slip against CYC packets + */ + if (!from_mtc) + return; + intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data); }
From: Thiago Jung Bauermann bauerman@linux.vnet.ibm.com
[ Upstream commit 36c8fb2c616d9373758b155d9723774353067a87 ]
request_buffer can hold 254 requests, so if it already has that number of entries we can't add a new one.
Also, define a constant to show where the number comes from.
Fixes: e3ee15dc5d19 ("powerpc/perf/hv-24x7: Define add_event_to_24x7_request()") Reviewed-by: Sukadev Bhattiprolu sukadev@linux.vnet.ibm.com Signed-off-by: Thiago Jung Bauermann bauerman@linux.vnet.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/powerpc/perf/hv-24x7.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index e267f4c3e51d..8e80882d8f6d 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -160,6 +160,10 @@ DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw); DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096); DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+#define MAX_NUM_REQUESTS ((H24x7_DATA_BUFFER_SIZE - \ + sizeof(struct hv_24x7_request_buffer)) \ + / sizeof(struct hv_24x7_request)) + static char *event_name(struct hv_24x7_event_data *ev, int *len) { *len = be16_to_cpu(ev->event_name_len) - 2; @@ -1083,7 +1087,7 @@ static int add_event_to_24x7_request(struct perf_event *event, int i; struct hv_24x7_request *req;
- if (request_buffer->num_requests > 254) { + if (request_buffer->num_requests >= MAX_NUM_REQUESTS) { pr_devel("Too many requests for 24x7 HCALL %d\n", request_buffer->num_requests); return -EINVAL;
From: Tahsin Erdogan tahsin@google.com
[ Upstream commit 407cd7fb83c0ebabb490190e673d8c71ee7df97e ]
ext4_inode_info->i_data is the storage area for 4 types of data:
a) Extents data b) Inline data c) Block map d) Fast symlink data (symlink length < 60)
Extents data case is positively identified by EXT4_INODE_EXTENTS flag. Inline data case is also obvious because of EXT4_INODE_INLINE_DATA flag.
Distinguishing c) and d) however requires additional logic. This currently relies on i_blocks count. After subtracting external xattr block from i_blocks, if it is greater than 0 then we know that some data blocks exist, so there must be a block map.
This logic got broken after ea_inode feature was added. That feature charges the data blocks of external xattr inodes to the referencing inode and so adds them to the i_blocks. To fix this, we could subtract ea_inode blocks by iterating through all xattr entries and then check whether remaining i_blocks count is zero. Besides being complicated, this won't change the fact that the current way of distinguishing between c) and d) is fragile.
The alternative solution is to test whether i_size is less than 60 to determine the fast symlink case; ext4_symlink() uses the same test to decide whether to store the symlink in i_data (which is EXT4_N_BLOCKS * 4 = 60 bytes). There is one caveat to address before this can work, though.
If an inode's i_nlink is zero during eviction, its i_size is set to zero and its data is truncated. If the system crashes before the inode is removed from the orphan list, the next boot's orphan cleanup may find the inode with zero i_size. So, a symlink that had its data stored in a block may now appear to be a fast symlink. The solution used in this patch is to treat i_size = 0 as a non-fast-symlink case. A zero-sized symlink is not legal, so the only time this can happen is in the scenario mentioned above. This is also logically correct because an i_size = 0 symlink has no data stored in i_data.
Suggested-by: Andreas Dilger adilger@dilger.ca Signed-off-by: Tahsin Erdogan tahsin@google.com Signed-off-by: Theodore Ts'o tytso@mit.edu Reviewed-by: Andreas Dilger adilger@dilger.ca Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/ext4/inode.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index f0cabc8c96cb..e62afd15b35b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -143,16 +143,12 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
/* * Test whether an inode is a fast symlink. + * A fast symlink has its symlink data stored in ext4_inode_info->i_data. */ int ext4_inode_is_fast_symlink(struct inode *inode) { - int ea_blocks = EXT4_I(inode)->i_file_acl ? - EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; - - if (ext4_has_inline_data(inode)) - return 0; - - return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); + return S_ISLNK(inode->i_mode) && inode->i_size && + (inode->i_size < EXT4_N_BLOCKS * 4); }
/* @@ -256,6 +252,16 @@ void ext4_evict_inode(struct inode *inode)
if (IS_SYNC(inode)) ext4_handle_sync(handle); + + /* + * Set inode->i_size to 0 before calling ext4_truncate(). We need + * special handling of symlinks here because i_size is used to + * determine whether ext4_inode_info->i_data contains symlink data or + * block mappings. Setting i_size to 0 will remove its fast symlink + * status. Erase i_data so that it becomes a valid empty block map. + */ + if (ext4_inode_is_fast_symlink(inode)) + memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data)); inode->i_size = 0; err = ext4_mark_inode_dirty(handle, inode); if (err) {
From: Nikolay Aleksandrov nikolay@cumulusnetworks.com
[ Upstream commit 7597b266c56feaad7d4e6e65822766e929407da2 ]
Current code silently ignores a change of port in the request message. This patch makes sure the port is modified and a notification is sent to userspace.
Fixes: cf6b8e1eedff ("bridge: add API to notify bridge driver of learned FBD on offloaded device") Signed-off-by: Nikolay Aleksandrov nikolay@cumulusnetworks.com Signed-off-by: Roopa Prabhu roopa@cumulusnetworks.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/bridge/br_fdb.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 09442e0f7f67..50213ff8ac73 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -1092,8 +1092,9 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p) int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid) { - struct hlist_head *head; struct net_bridge_fdb_entry *fdb; + struct hlist_head *head; + bool modified = false; int err = 0;
ASSERT_RTNL(); @@ -1109,14 +1110,25 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, } fdb->added_by_external_learn = 1; fdb_notify(br, fdb, RTM_NEWNEIGH); - } else if (fdb->added_by_external_learn) { - /* Refresh entry */ - fdb->updated = fdb->used = jiffies; - } else if (!fdb->added_by_user) { - /* Take over SW learned entry */ - fdb->added_by_external_learn = 1; + } else { fdb->updated = jiffies; - fdb_notify(br, fdb, RTM_NEWNEIGH); + + if (fdb->dst != p) { + fdb->dst = p; + modified = true; + } + + if (fdb->added_by_external_learn) { + /* Refresh entry */ + fdb->used = jiffies; + } else if (!fdb->added_by_user) { + /* Take over SW learned entry */ + fdb->added_by_external_learn = 1; + modified = true; + } + + if (modified) + fdb_notify(br, fdb, RTM_NEWNEIGH); }
err_unlock:
From: Suzuki K Poulose suzuki.poulose@arm.com
[ Upstream commit 456c59c31c5126fe31c64956c43670060ea9debd ]
If the GIC cannot map an IRQ via irq_domain_ops->alloc(), it doesn't return an error code. This can cause a problem with drivers, where a driver thinks it has successfully got an IRQ for the device, but requesting that IRQ later fails with -ENOSYS (as the IRQ's chip is not set).
Fixes: commit 9a1091ef0017c ("irqchip: gic: Support hierarchy irq domain.") Cc: Yingjoe Chen yingjoe.chen@mediatek.com Cc: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Suzuki K Poulose suzuki.poulose@arm.com Signed-off-by: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/irqchip/irq-gic.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index cebd8efe651a..c3ddd80d16e0 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1023,8 +1023,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret;
- for (i = 0; i < nr_irqs; i++) - gic_irq_domain_map(domain, virq + i, hwirq + i); + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + }
return 0; }
From: Suzuki K Poulose suzuki.poulose@arm.com
[ Upstream commit 63c16c6eacb69d0cbdaee5dea0dd56d238375fe6 ]
If the GIC cannot map an IRQ via irq_domain_ops->alloc(), it doesn't return an error code. This can cause a problem with drivers, where a driver thinks it has successfully got an IRQ for the device, but requesting that IRQ later fails with -ENOSYS (as the IRQ's chip is not set).
Fixes: commit 443acc4f37f6 ("irqchip: GICv3: Convert to domain hierarchy") Cc: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Suzuki K Poulose suzuki.poulose@arm.com Signed-off-by: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/irqchip/irq-gic-v3.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 3f1c4dea8866..5d93e0254d70 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -802,8 +802,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret;
- for (i = 0; i < nr_irqs; i++) - gic_irq_domain_map(domain, virq + i, hwirq + i); + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + }
return 0; }
From: Suzuki K Poulose suzuki.poulose@arm.com
[ Upstream commit 65a30f8b300107266f316d550f060ccc186201a3 ]
Honor the 'force' flag for set_affinity, by selecting a CPU from the given mask (which may not be reported "online" by the cpu_online_mask). Some drivers, like ARM PMU, rely on it.
Cc: Marc Zyngier marc.zyngier@arm.com Reported-by: Mark Rutland mark.rutland@arm.com Signed-off-by: Suzuki K Poulose suzuki.poulose@arm.com Signed-off-by: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/irqchip/irq-gic-v3.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 5d93e0254d70..eed31f9bee05 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -627,11 +627,16 @@ static void gic_smp_init(void) static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + unsigned int cpu; void __iomem *reg; int enabled; u64 val;
+ if (force) + cpu = cpumask_first(mask_val); + else + cpu = cpumask_any_and(mask_val, cpu_online_mask); + if (cpu >= nr_cpu_ids) return -EINVAL;
From: Alexey Khoroshilov khoroshilov@ispras.ru
[ Upstream commit 6af574e826740bf17663b48ba3f8fadb81d2113f ]
There is an error path where iomemory is left mapped.
Found by Linux Driver Verification project (linuxtesting.org).
Signed-off-by: Alexey Khoroshilov khoroshilov@ispras.ru Cc: Alan Hourihane alanh@fairlite.demon.co.uk Signed-off-by: Bartlomiej Zolnierkiewicz b.zolnierkie@samsung.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/video/fbdev/vermilion/cr_pll.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c index ebc6e6e0dd0f..ba105c876bed 100644 --- a/drivers/video/fbdev/vermilion/cr_pll.c +++ b/drivers/video/fbdev/vermilion/cr_pll.c @@ -185,6 +185,7 @@ static int __init cr_pll_init(void) if (err) { printk(KERN_ERR "Carillo Ranch failed to initialize vml_sys.\n"); + iounmap(mch_regs_base); pci_dev_put(mch_dev); return err; }
From: Hector Martin marcan@marcan.st
[ Upstream commit 188775181bc05f29372b305ef96485840e351fde ]
At least some JMicron controllers issue buggy oversized DMA reads when fetching context descriptors, always fetching 0x20 bytes at once for descriptors which are only 0x10 bytes long. This is often harmless, but can cause page faults on modern systems with IOMMUs:
DMAR: [DMA Read] Request device [05:00.0] fault addr fff56000 [fault reason 06] PTE Read access is not set firewire_ohci 0000:05:00.0: DMA context IT0 has stopped, error code: evt_descriptor_read
This works around the problem by always leaving 0x10 padding bytes at the end of descriptor buffer pages, which should be harmless to do unconditionally for controllers in case others have the same behavior.
Signed-off-by: Hector Martin marcan@marcan.st Reviewed-by: Clemens Ladisch clemens@ladisch.de Signed-off-by: Stefan Richter stefanr@s5r6.in-berlin.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/firewire/ohci.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index c2f5117fd8cb..5545a7f3a98f 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -1130,7 +1130,13 @@ static int context_add_buffer(struct context *ctx) return -ENOMEM;
offset = (void *)&desc->buffer - (void *)desc; - desc->buffer_size = PAGE_SIZE - offset; + /* + * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads + * for descriptors, even 0x10-byte ones. This can cause page faults when + * an IOMMU is in use and the oversized read crosses a page boundary. + * Work around this by always leaving at least 0x10 bytes of padding. + */ + desc->buffer_size = PAGE_SIZE - offset - 0x10; desc->buffer_bus = bus_addr + offset; desc->used = 0;
From: NeilBrown neilb@suse.com
[ Upstream commit dce2630c7da73b0634686bca557cc8945cc450c8 ]
There are 2 comments in the NFSv4 code which suggest that SIGLOST should possibly be sent to a process. In these cases a lock has been lost. The current practice is to set NFS_LOCK_LOST so that read/write returns EIO when a lock is lost. So change these comments into code that sets NFS_LOCK_LOST.
One case is when lock recovery after an apparent server restart fails with NFS4ERR_DENIED, NFS4ERR_RECLAIM_BAD, or NFS4ERR_RECLAIM_CONFLICT. The other case is when a lock attempt as part of lease recovery fails with NFS4ERR_DENIED.
In an ideal world, these should not happen. However I have a packet trace showing an NFSv4.1 session getting NFS4ERR_BADSESSION after an extended network partition. The NFSv4.1 client treats this like server reboot until/unless it gets NFS4ERR_NO_GRACE, in which case it switches over to "nograce" recovery mode. In this network trace, the client attempts to recover a lock and the server (incorrectly) reports NFS4ERR_DENIED rather than NFS4ERR_NO_GRACE. This reaches the code path containing the ineffective comment, and the client then continues to write using the OPEN stateid.
Signed-off-by: NeilBrown neilb@suse.com Signed-off-by: Trond Myklebust trond.myklebust@primarydata.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/nfs/nfs4proc.c | 12 ++++++++---- fs/nfs/nfs4state.c | 5 ++++- 2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8ef6f70c9e25..681b8c217a85 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1780,7 +1780,7 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta return ret; }
-static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err) +static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) { switch (err) { default: @@ -1827,7 +1827,11 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct return -EAGAIN; case -ENOMEM: case -NFS4ERR_DENIED: - /* kill_proc(fl->fl_pid, SIGLOST, 1); */ + if (fl) { + struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; + if (lsp) + set_bit(NFS_LOCK_LOST, &lsp->ls_flags); + } return 0; } return err; @@ -1863,7 +1867,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, err = nfs4_open_recover_helper(opendata, FMODE_READ); } nfs4_opendata_put(opendata); - return nfs4_handle_delegation_recall_error(server, state, stateid, err); + return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); }
static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) @@ -6151,7 +6155,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, if (err != 0) return err; err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); - return nfs4_handle_delegation_recall_error(server, state, stateid, err); + return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); }
struct nfs_release_lockowner_data { diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9a0b219ff74d..eef49200caa7 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1386,6 +1386,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_ struct inode *inode = state->inode; struct nfs_inode *nfsi = NFS_I(inode); struct file_lock *fl; + struct nfs4_lock_state *lsp; int status = 0; struct file_lock_context *flctx = inode->i_flctx; struct list_head *list; @@ -1426,7 +1427,9 @@ restart: case -NFS4ERR_DENIED: case -NFS4ERR_RECLAIM_BAD: case -NFS4ERR_RECLAIM_CONFLICT: - /* kill_proc(fl->fl_pid, SIGLOST, 1); */ + lsp = fl->fl_u.nfs4_fl.owner; + if (lsp) + set_bit(NFS_LOCK_LOST, &lsp->ls_flags); status = 0; } spin_lock(&flctx->flc_lock);
From: Takashi Iwai tiwai@suse.de
[ Upstream commit c469652bb5e8fb715db7d152f46d33b3740c9b87 ]
The commit ffcd28d88e4f ("ALSA: hda - Select INPUT for Realtek HD-audio codec") introduced the reverse-selection of CONFIG_INPUT for the Realtek codec in order to avoid the mess with dependencies between built-in and modules. Later on, we obtained the IS_REACHABLE() macro for exactly this kind of problem, and now we can remove the INPUT selection in Kconfig and put IS_REACHABLE(CONFIG_INPUT) in the appropriate places in the code, so that the driver doesn't need to forcibly select another subsystem.
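As a quick illustration of why this works, here is a sketch of the usual kernel pattern (illustrative names, not part of the patch itself): IS_REACHABLE() lets built-in code compile out calls into a subsystem that is only available as a module.

/* The branch is taken when CONFIG_INPUT is built-in, or when it is a module
 * and this code is also built as a module; otherwise the input-dependent
 * fixups are compiled out and replaced with NULL stubs. */
#if IS_REACHABLE(CONFIG_INPUT)
static void example_hotkey_fixup(void)
{
        /* safe to call input_*() helpers here */
}
#else
#define example_hotkey_fixup NULL
#endif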
Fixes: ffcd28d88e4f ("ALSA: hda - Select INPUT for Realtek HD-audio codec") Reported-by: Randy Dunlap rdunlap@infradead.org Acked-by: Randy Dunlap rdunlap@infradead.org # and build-tested Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- sound/pci/hda/Kconfig | 1 - sound/pci/hda/patch_realtek.c | 5 +++++ 2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig index e94cfd5c69f7..ebec1a1ae543 100644 --- a/sound/pci/hda/Kconfig +++ b/sound/pci/hda/Kconfig @@ -84,7 +84,6 @@ config SND_HDA_PATCH_LOADER config SND_HDA_CODEC_REALTEK tristate "Build Realtek HD-audio codec support" select SND_HDA_GENERIC - select INPUT help Say Y or M here to include Realtek HD-audio codec support in snd-hda-intel driver, such as ALC880. diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8cb14e27988b..1b172a33bca3 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3494,6 +3494,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec, } }
+#if IS_REACHABLE(CONFIG_INPUT) static void gpio2_mic_hotkey_event(struct hda_codec *codec, struct hda_jack_callback *event) { @@ -3626,6 +3627,10 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, spec->kb_dev = NULL; } } +#else /* INPUT */ +#define alc280_fixup_hp_gpio2_mic_hotkey NULL +#define alc233_fixup_lenovo_line2_mic_hotkey NULL +#endif /* INPUT */
static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, const struct hda_fixup *fix, int action)
From: Dan Carpenter dan.carpenter@oracle.com
[ Upstream commit 123af9043e93cb6f235207d260d50f832cdb5439 ]
The loop timeout doesn't work because it's a post op and ends with "tmo" set to -1. I changed it from a post-op to a pre-op and changed the starting value from 5 to 6 so we still iterate 5 times. I left the other as it was because it's a large number.
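To make the off-by-one concrete, here is a minimal standalone sketch (busy() is a hypothetical poll helper, not from the driver):

int tmo;

/* Post-decrement, starting at 5: the body runs 5 times, but on timeout the
 * final "tmo--" leaves tmo at -1, so a later "if (!tmo)" check never fires. */
tmo = 5;
while (busy() && tmo--)
        udelay(21);

/* Pre-decrement, starting at 6: the body still runs at most 5 times, and on
 * timeout tmo ends at exactly 0, so "if (!tmo)" correctly reports it. */
tmo = 6;
while (busy() && --tmo)
        udelay(21);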
Fixes: b3c70c9ea62a ("ASoC: Alchemy AC97C/I2SC audio support") Signed-off-by: Dan Carpenter dan.carpenter@oracle.com Signed-off-by: Mark Brown broonie@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- sound/soc/au1x/ac97c.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c index 29a97d52e8ad..66d6c52e7761 100644 --- a/sound/soc/au1x/ac97c.c +++ b/sound/soc/au1x/ac97c.c @@ -91,8 +91,8 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97, do { mutex_lock(&ctx->lock);
- tmo = 5; - while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--) + tmo = 6; + while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo) udelay(21); /* wait an ac97 frame time */ if (!tmo) { pr_debug("ac97rd timeout #1\n"); @@ -105,7 +105,7 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97, * poll, Forrest, poll... */ tmo = 0x10000; - while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--) + while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo) asm volatile ("nop"); data = RD(ctx, AC97_CMDRESP);
From: Paolo Bonzini pbonzini@redhat.com
[ Upstream commit 51776043afa415435c7e4636204fbe4f7edc4501 ]
This ioctl is obsolete (it was used by Xenner as far as I know) but still let's not break it gratuitously... Its handler is copying directly into struct kvm. Go through a bounce buffer instead, with the added benefit that we can actually do something useful with the flags argument---the previous code was exiting with -EINVAL but still doing the copy.
This technically is a userspace ABI breakage, but since no one should be using the ioctl, it's a good occasion to see if someone actually complains.
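Restated as a minimal sketch (simplified from the hunk below; straight returns stand in for the ioctl's goto-based error handling): the user-supplied struct is validated in a local bounce buffer before anything touches long-lived kernel state.

struct kvm_xen_hvm_config xhc;

if (copy_from_user(&xhc, argp, sizeof(xhc)))
        return -EFAULT;          /* nothing copied into struct kvm yet */
if (xhc.flags)
        return -EINVAL;          /* reject unknown flags before committing */
memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
return 0;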
Cc: kernel-hardening@lists.openwall.com Cc: Kees Cook keescook@chromium.org Cc: Radim Krčmář rkrcmar@redhat.com Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Kees Cook keescook@chromium.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/x86/kvm/x86.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f37f0c72b22a..9cea09597d66 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3973,13 +3973,14 @@ long kvm_arch_vm_ioctl(struct file *filp, mutex_unlock(&kvm->lock); break; case KVM_XEN_HVM_CONFIG: { + struct kvm_xen_hvm_config xhc; r = -EFAULT; - if (copy_from_user(&kvm->arch.xen_hvm_config, argp, - sizeof(struct kvm_xen_hvm_config))) + if (copy_from_user(&xhc, argp, sizeof(xhc))) goto out; r = -EINVAL; - if (kvm->arch.xen_hvm_config.flags) + if (xhc.flags) goto out; + memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); r = 0; break; }
From: Anna-Maria Gleixner anna-maria@linutronix.de
[ Upstream commit 91633eed73a3ac37aaece5c8c1f93a18bae616a9 ]
So far, only CLOCK_MONOTONIC and CLOCK_REALTIME, as well as HRTIMER_MODE_ABS/REL, were taken into account in the hrtimer_init tracepoint. The query for detecting the ABS or REL timer modes is not valid anymore; it got broken by the introduction of HRTIMER_MODE_PINNED.
HRTIMER_MODE_PINNED is not evaluated in the hrtimer_init() call, but for the sake of completeness print all given modes.
Signed-off-by: Anna-Maria Gleixner anna-maria@linutronix.de Cc: Christoph Hellwig hch@lst.de Cc: John Stultz john.stultz@linaro.org Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Peter Zijlstra peterz@infradead.org Cc: Thomas Gleixner tglx@linutronix.de Cc: keescook@chromium.org Link: http://lkml.kernel.org/r/20171221104205.7269-9-anna-maria@linutronix.de Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- include/trace/events/timer.h | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 073b9ac245ba..e844556794dc 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -125,6 +125,20 @@ DEFINE_EVENT(timer_class, timer_cancel, TP_ARGS(timer) );
+#define decode_clockid(type) \ + __print_symbolic(type, \ + { CLOCK_REALTIME, "CLOCK_REALTIME" }, \ + { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \ + { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \ + { CLOCK_TAI, "CLOCK_TAI" }) + +#define decode_hrtimer_mode(mode) \ + __print_symbolic(mode, \ + { HRTIMER_MODE_ABS, "ABS" }, \ + { HRTIMER_MODE_REL, "REL" }, \ + { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \ + { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }) + /** * hrtimer_init - called when the hrtimer is initialized * @hrtimer: pointer to struct hrtimer @@ -151,10 +165,8 @@ TRACE_EVENT(hrtimer_init, ),
TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, - __entry->clockid == CLOCK_REALTIME ? - "CLOCK_REALTIME" : "CLOCK_MONOTONIC", - __entry->mode == HRTIMER_MODE_ABS ? - "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL") + decode_clockid(__entry->clockid), + decode_hrtimer_mode(__entry->mode)) );
/**
From: "Steven Rostedt (VMware)" rostedt@goodmis.org
[ Upstream commit 38d70b7ca1769f26c0b79f3c08ff2cc949712b59 ]
When processing %pX in pretty_print(), simplify the logic slightly by incrementing the ptr to the format string if isalnum(ptr[1]) is true. This follows the kernel's logic a bit more closely.
Also, this fixes a small bug where %pF was not giving the offset of the function.
Signed-off-by: Steven Rostedt rostedt@goodmis.org Acked-by: Namhyung Kim namhyung@kernel.org Cc: Andrew Morton akpm@linux-foundation.org Link: http://lkml.kernel.org/r/20180112004822.260262257@goodmis.org Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/lib/traceevent/event-parse.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 68276f35e323..6e4a10fe9dd0 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -4905,21 +4905,22 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event else ls = 2;
- if (*(ptr+1) == 'F' || *(ptr+1) == 'f' || - *(ptr+1) == 'S' || *(ptr+1) == 's') { + if (isalnum(ptr[1])) ptr++; + + if (*ptr == 'F' || *ptr == 'f' || + *ptr == 'S' || *ptr == 's') { show_func = *ptr; - } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') { - print_mac_arg(s, *(ptr+1), data, size, event, arg); - ptr++; + } else if (*ptr == 'M' || *ptr == 'm') { + print_mac_arg(s, *ptr, data, size, event, arg); arg = arg->next; break; - } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') { + } else if (*ptr == 'I' || *ptr == 'i') { int n;
- n = print_ip_arg(s, ptr+1, data, size, event, arg); + n = print_ip_arg(s, ptr, data, size, event, arg); if (n > 0) { - ptr += n; + ptr += n - 1; arg = arg->next; break; }
From: Alex Williamson alex.williamson@redhat.com
[ Upstream commit aa008206634363ef800fbd5f0262016c9ff81dea ]
The Marvell 9128 is the original device generating bug 42679, from which many other Marvell DMA alias quirks have been sourced, but we didn't have positive confirmation of the fix on 9128 until now.
Link: https://bugzilla.kernel.org/show_bug.cgi?id=42679 Link: https://www.spinics.net/lists/kvm/msg161459.html Reported-by: Binarus lists@binarus.de Tested-by: Binarus lists@binarus.de Signed-off-by: Alex Williamson alex.williamson@redhat.com Signed-off-by: Bjorn Helgaas bhelgaas@google.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/pci/quirks.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ae6aa7b385c2..6373b6fe422d 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3614,6 +3614,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, quirk_dma_func1_alias);
From: Arnaldo Carvalho de Melo acme@redhat.com
[ Upstream commit 249d98e567e25dd03e015e2d31e1b7b9648f34df ]
When setting the "dwarf" unwinder for a specific event and not specifying the max-stack, the attr.sample_max_stack ended up using an uninitialized callchain_param.max_stack, fix it by using designated initializers for that callchain_param variable, zeroing all non explicitely initialized struct members.
Here is what happened:
# perf trace -vv --no-syscalls --max-stack 4 -e probe_libc:inet_pton/call-graph=dwarf/ ping -6 -c 1 ::1 callchain: type DWARF callchain: stack dump size 8192 perf_event_attr: type 2 size 112 config 0x730 { sample_period, sample_freq } 1 sample_type IP|TID|TIME|ADDR|CALLCHAIN|CPU|PERIOD|RAW|REGS_USER|STACK_USER|DATA_SRC exclude_callchain_user 1 { wakeup_events, wakeup_watermark } 1 sample_regs_user 0xff0fff sample_stack_user 8192 sample_max_stack 50656 sys_perf_event_open failed, error -75 Value too large for defined data type # perf trace -vv --no-syscalls --max-stack 4 -e probe_libc:inet_pton/call-graph=dwarf/ ping -6 -c 1 ::1 callchain: type DWARF callchain: stack dump size 8192 perf_event_attr: type 2 size 112 config 0x730 sample_type IP|TID|TIME|ADDR|CALLCHAIN|CPU|PERIOD|RAW|REGS_USER|STACK_USER|DATA_SRC exclude_callchain_user 1 sample_regs_user 0xff0fff sample_stack_user 8192 sample_max_stack 30448 sys_perf_event_open failed, error -75 Value too large for defined data type #
Now the attr.sample_max_stack is set to zero and the above works as expected:
# perf trace --no-syscalls --max-stack 4 -e probe_libc:inet_pton/call-graph=dwarf/ ping -6 -c 1 ::1 PING ::1(::1) 56 data bytes 64 bytes from ::1: icmp_seq=1 ttl=64 time=0.072 ms
--- ::1 ping statistics --- 1 packets transmitted, 1 received, 0% packet loss, time 0ms rtt min/avg/max/mdev = 0.072/0.072/0.072/0.000 ms 0.000 probe_libc:inet_pton:(7feb7a998350)) __inet_pton (inlined) gaih_inet.constprop.7 (/usr/lib64/libc-2.26.so) __GI_getaddrinfo (inlined) [0xffffaa39b6108f3f] (/usr/bin/ping) #
Cc: Adrian Hunter adrian.hunter@intel.com Cc: David Ahern dsahern@gmail.com Cc: Hendrick Brueckner brueckner@linux.vnet.ibm.com Cc: Jiri Olsa jolsa@kernel.org Cc: Namhyung Kim namhyung@kernel.org Cc: Thomas Richter tmricht@linux.vnet.ibm.com Cc: Wang Nan wangnan0@huawei.com Link: https://lkml.kernel.org/n/tip-is9tramondqa9jlxxsgcm9iz@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/perf/util/evsel.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 397fb4ed3c97..f0bd4825f95a 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -624,13 +624,13 @@ static void apply_config_terms(struct perf_evsel *evsel, struct perf_evsel_config_term *term; struct list_head *config_terms = &evsel->config_terms; struct perf_event_attr *attr = &evsel->attr; - struct callchain_param param; + /* callgraph default */ + struct callchain_param param = { + .record_mode = callchain_param.record_mode, + }; u32 dump_size = 0; char *callgraph_buf = NULL;
- /* callgraph default */ - param.record_mode = callchain_param.record_mode; - list_for_each_entry(term, config_terms, list) { switch (term->type) { case PERF_EVSEL__CONFIG_TERM_PERIOD:
From: "Steven Rostedt (VMware)" rostedt@goodmis.org
[ Upstream commit d777f8de99b05d399c0e4e51cdce016f26bd971b ]
If a field is a dynamic string, get_field_str() returned just the offset/size value and not the string. Have it parse the offset/size correctly to return the actual string. Otherwise filtering fails when trying to filter fields that are dynamic strings.
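For reference, a sketch of how the packed offset/size value is decoded (mirroring the hunk below; "record" and "field" stand for the pevent_record and the format field, as in parse-filter.c):

/* A FIELD_IS_DYNAMIC string field stores a 32-bit descriptor at the field's
 * own offset: the low 16 bits are the real offset of the string within the
 * record, the high 16 bits are its size. */
unsigned int desc = *(unsigned int *)(record->data + field->offset);
const char  *str  = record->data + (desc & 0xffff);
unsigned int size = desc >> 16;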
Reported-by: Gopanapalli Pradeep prap_hai@yahoo.com Signed-off-by: Steven Rostedt rostedt@goodmis.org Acked-by: Namhyung Kim namhyung@kernel.org Cc: Andrew Morton akpm@linux-foundation.org Link: http://lkml.kernel.org/r/20180112004823.146333275@goodmis.org Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/lib/traceevent/parse-filter.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c index 88cccea3ca99..64309d73921b 100644 --- a/tools/lib/traceevent/parse-filter.c +++ b/tools/lib/traceevent/parse-filter.c @@ -1867,17 +1867,25 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r struct pevent *pevent; unsigned long long addr; const char *val = NULL; + unsigned int size; char hex[64];
/* If the field is not a string convert it */ if (arg->str.field->flags & FIELD_IS_STRING) { val = record->data + arg->str.field->offset; + size = arg->str.field->size; + + if (arg->str.field->flags & FIELD_IS_DYNAMIC) { + addr = *(unsigned int *)val; + val = record->data + (addr & 0xffff); + size = addr >> 16; + }
/* * We need to copy the data since we can't be sure the field * is null terminated. */ - if (*(val + arg->str.field->size - 1)) { + if (*(val + size - 1)) { /* copy it */ memcpy(arg->str.buffer, val, arg->str.field->size); /* the buffer is already NULL terminated */
From: mulhern amulhern@redhat.com
[ Upstream commit 9b28a1102efc75d81298198166ead87d643a29ce ]
Fixes:
1. The use of "exceeds" when the opposite of exceeds, falls below, was meant.
2. Properly speaking, a table can not exceed a threshold.
It emphasizes the important point, which is that it is the userspace daemon's responsibility to check for low free space when a device is resumed, since it won't get a special event indicating low free space in that situation.
Signed-off-by: mulhern amulhern@redhat.com Signed-off-by: Mike Snitzer snitzer@redhat.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- Documentation/device-mapper/thin-provisioning.txt | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt index 1699a55b7b70..ef639960b272 100644 --- a/Documentation/device-mapper/thin-provisioning.txt +++ b/Documentation/device-mapper/thin-provisioning.txt @@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If free space on the data device drops below this level then a dm event will be triggered which a userspace daemon should catch allowing it to extend the pool device. Only one such event will be sent. -Resuming a device with a new table itself triggers an event so the -userspace daemon can use this to detect a situation where a new table -already exceeds the threshold. + +No special event is triggered if a just resumed device's free space is below +the low water mark. However, resuming a device always triggers an +event; a userspace daemon should verify that free space exceeds the low +water mark when handling this event.
A low water mark for the metadata device is maintained in the kernel and will trigger a dm event if free space on the metadata device drops below
From: Peter Xu peterx@redhat.com
[ Upstream commit 9d2e6505f6d6934e681aed502f566198cb25c74a ]
After commit a1ddcbe93010 ("iommu/vt-d: Pass dmar_domain directly into iommu_flush_iotlb_psi", 2015-08-12), we have the domain pointer as a parameter to iommu_flush_iotlb_psi(), so there is no need to fetch it from the cache again.
More importantly, a NULL pointer dereference bug is reported on RHEL7 (and it can be reproduced on some old upstream kernels too, e.g., v4.13) by unplugging a 40G NIC from a VM (it is hard to test unplug on a real host, but it should be the same):
https://bugzilla.redhat.com/show_bug.cgi?id=1531367
[ 24.391863] pciehp 0000:00:03.0:pcie004: Slot(0): Attention button pressed [ 24.393442] pciehp 0000:00:03.0:pcie004: Slot(0): Powering off due to button press [ 29.721068] i40evf 0000:01:00.0: Unable to send opcode 2 to PF, err I40E_ERR_QUEUE_EMPTY, aq_err OK [ 29.783557] iommu: Removing device 0000:01:00.0 from group 3 [ 29.784662] BUG: unable to handle kernel NULL pointer dereference at 0000000000000304 [ 29.785817] IP: iommu_flush_iotlb_psi+0xcf/0x120 [ 29.786486] PGD 0 [ 29.786487] P4D 0 [ 29.786812] [ 29.787390] Oops: 0000 [#1] SMP [ 29.787876] Modules linked in: ip6t_rpfilter ip6t_REJECT nf_reject_ipv6 xt_conntrack ip_set nfnetlink ebtable_nat ebtable_broute bridge stp llc ip6table_ng [ 29.795371] CPU: 0 PID: 156 Comm: kworker/0:2 Not tainted 4.13.0 #14 [ 29.796366] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.11.0-1.el7 04/01/2014 [ 29.797593] Workqueue: pciehp-0 pciehp_power_thread [ 29.798328] task: ffff94f5745b4a00 task.stack: ffffb326805ac000 [ 29.799178] RIP: 0010:iommu_flush_iotlb_psi+0xcf/0x120 [ 29.799919] RSP: 0018:ffffb326805afbd0 EFLAGS: 00010086 [ 29.800666] RAX: ffff94f5bc56e800 RBX: 0000000000000000 RCX: 0000000200000025 [ 29.801667] RDX: ffff94f5bc56e000 RSI: 0000000000000082 RDI: 0000000000000000 [ 29.802755] RBP: ffffb326805afbf8 R08: 0000000000000000 R09: ffff94f5bc86bbf0 [ 29.803772] R10: ffffb326805afba8 R11: 00000000000ffdc4 R12: ffff94f5bc86a400 [ 29.804789] R13: 0000000000000000 R14: 00000000ffdc4000 R15: 0000000000000000 [ 29.805792] FS: 0000000000000000(0000) GS:ffff94f5bfc00000(0000) knlGS:0000000000000000 [ 29.806923] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 29.807736] CR2: 0000000000000304 CR3: 000000003499d000 CR4: 00000000000006f0 [ 29.808747] Call Trace: [ 29.809156] flush_unmaps_timeout+0x126/0x1c0 [ 29.809800] domain_exit+0xd6/0x100 [ 29.810322] device_notifier+0x6b/0x70 [ 29.810902] notifier_call_chain+0x4a/0x70 [ 29.812822] __blocking_notifier_call_chain+0x47/0x60 [ 29.814499] blocking_notifier_call_chain+0x16/0x20 [ 29.816137] device_del+0x233/0x320 [ 29.817588] pci_remove_bus_device+0x6f/0x110 [ 29.819133] pci_stop_and_remove_bus_device+0x1a/0x20 [ 29.820817] pciehp_unconfigure_device+0x7a/0x1d0 [ 29.822434] pciehp_disable_slot+0x52/0xe0 [ 29.823931] pciehp_power_thread+0x8a/0xa0 [ 29.825411] process_one_work+0x18c/0x3a0 [ 29.826875] worker_thread+0x4e/0x3b0 [ 29.828263] kthread+0x109/0x140 [ 29.829564] ? process_one_work+0x3a0/0x3a0 [ 29.831081] ? kthread_park+0x60/0x60 [ 29.832464] ret_from_fork+0x25/0x30 [ 29.833794] Code: 85 ed 74 0b 5b 41 5c 41 5d 41 5e 41 5f 5d c3 49 8b 54 24 60 44 89 f8 0f b6 c4 48 8b 04 c2 48 85 c0 74 49 45 0f b6 ff 4a 8b 3c f8 <80> bf [ 29.838514] RIP: iommu_flush_iotlb_psi+0xcf/0x120 RSP: ffffb326805afbd0 [ 29.840362] CR2: 0000000000000304 [ 29.841716] ---[ end trace b10ec0d6900868d3 ]---
This patch fixes that problem if applied to the v4.13 kernel.
The bug does not exist on latest upstream kernel since it's fixed as a side effect of commit 13cf01744608 ("iommu/vt-d: Make use of iova deferred flushing", 2017-08-15). But IMHO it's still good to have this patch upstream.
CC: Alex Williamson alex.williamson@redhat.com Signed-off-by: Peter Xu peterx@redhat.com Fixes: a1ddcbe93010 ("iommu/vt-d: Pass dmar_domain directly into iommu_flush_iotlb_psi") Reviewed-by: Alex Williamson alex.williamson@redhat.com Signed-off-by: Joerg Roedel jroedel@suse.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/iommu/intel-iommu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 4efec2db4ee2..0a63a8bd6a8f 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1573,8 +1573,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, * flush. However, device IOTLB doesn't need to be flushed in this case. */ if (!cap_caching_mode(iommu->cap) || !map) - iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), - addr, mask); + iommu_flush_dev_iotlb(domain, addr, mask); }
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
From: Jan Chochol jan@chochol.info
[ Upstream commit cbebc6ef4fc830f4040d4140bf53484812d5d5d9 ]
Since commit 57e62324e469 ("NFS: Store the legacy idmapper result in the keyring") nfs_idmap_cache_timeout changed units from jiffies to seconds. Unfortunately, the sysctl interface was not updated accordingly.
As a result, updating /proc/sys/fs/nfs/idmap_cache_timeout with some value will incorrectly multiply this value by HZ. Also, reading /proc/sys/fs/nfs/idmap_cache_timeout will show the real value divided by HZ.
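A worked example of the mismatch (HZ=250 is an assumed value for illustration): writing 600 through proc_dointvec_jiffies stores 600 * 250 = 150000, which the idmapper now interprets as 150000 seconds (roughly 41 hours) instead of 10 minutes; reading the file back shows 150000 / 250 = 600, so the error is invisible from userspace. With plain proc_dointvec, the value 600 is stored and read back unchanged.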
Fixes: 57e62324e469 ("NFS: Store the legacy idmapper result in the keyring") Signed-off-by: Jan Chochol jan@chochol.info Signed-off-by: Trond Myklebust trond.myklebust@primarydata.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/nfs/nfs4sysctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c index 0fbd3ab1be22..44a7bbbf92f8 100644 --- a/fs/nfs/nfs4sysctl.c +++ b/fs/nfs/nfs4sysctl.c @@ -31,7 +31,7 @@ static struct ctl_table nfs4_cb_sysctls[] = { .data = &nfs_idmap_cache_timeout, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec_jiffies, + .proc_handler = proc_dointvec, }, { } };
From: Paul Cercueil paul@crapouillou.net
[ Upstream commit e6cfa64375d34a6c8c1861868a381013b2d3b921 ]
Previously, clocks with a fixed divider would report their rate as being the same as that of their parent, independently of the divider in use. This commit fixes this behaviour.
This went unnoticed as neither the jz4740 nor the jz4780 CGU code have clocks with fixed dividers yet.
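A minimal sketch of the intended recalc behaviour (illustrative function, not the cgu.c code):

/* A fixed-divider clock must report parent_rate / div, not parent_rate.
 * E.g. a 48 MHz parent with a fixed divider of 4 runs at 12 MHz. */
static unsigned long fixdiv_recalc_rate(unsigned long parent_rate,
                                        unsigned int div)
{
        return parent_rate / div;
}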
Signed-off-by: Paul Cercueil paul@crapouillou.net Acked-by: Stephen Boyd sboyd@codeaurora.org Cc: Ralf Baechle ralf@linux-mips.org Cc: Maarten ter Huurne maarten@treewalker.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/18477/ Signed-off-by: James Hogan jhogan@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/clk/ingenic/cgu.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index 7cfb7b2a2ed6..e5b1bf4dadcc 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -327,6 +327,8 @@ ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) div += 1;
rate /= div; + } else if (clk_info->type & CGU_CLK_FIXDIV) { + rate /= clk_info->fixdiv.div; }
return rate;
From: Guenter Roeck linux@roeck-us.net
[ Upstream commit f541c09ebfc61697b586b38c9ebaf4b70defb278 ]
According to all published information, the watchdog disable bit for SB800 compatible controllers is bit 1 of PM register 0x48, not bit 2. For the most part that doesn't matter in practice, since the bit has to be cleared to enable watchdog address decoding, which is the default setting, but it still needs to be fixed.
Cc: Zoltán Böszörményi zboszor@pr.hu Signed-off-by: Guenter Roeck linux@roeck-us.net Signed-off-by: Wim Van Sebroeck wim@iguana.be Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/watchdog/sp5100_tco.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h index 2b28c00da0df..dfe20b81ced5 100644 --- a/drivers/watchdog/sp5100_tco.h +++ b/drivers/watchdog/sp5100_tco.h @@ -54,7 +54,7 @@ #define SB800_PM_WATCHDOG_CONFIG 0x4C
#define SB800_PCI_WATCHDOG_DECODE_EN (1 << 0) -#define SB800_PM_WATCHDOG_DISABLE (1 << 2) +#define SB800_PM_WATCHDOG_DISABLE (1 << 1) #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0) #define SB800_ACPI_MMIO_DECODE_EN (1 << 0) #define SB800_ACPI_MMIO_SEL (1 << 1)
From: Ulf Magnusson ulfalizer@gmail.com
[ Upstream commit ae7440ef0c8013d68c00dad6900e7cce5311bb1c ]
expr_trans_compare() always allocates and returns a new expression, giving the following leak outline:
...
*Allocate*
basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no);
...
for (menu = parent->next; menu; menu = menu->next) {
	...
	*Copy*
	dep2 = expr_copy(basedep);
	...
	*Free copy*
	expr_free(dep2);
}
*basedep lost!*
Fix by freeing 'basedep' after the loop.
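Putting the outline and the fix together (a simplified sketch of menu_finalize(); the elided bodies are marked "..."):

basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no); /* allocates */

for (menu = parent->next; menu; menu = menu->next) {
        dep2 = expr_copy(basedep);   /* each iteration works on a copy */
        /* ... */
        expr_free(dep2);             /* the copy is freed */
}
expr_free(basedep);                  /* new: the original is freed too */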
Summary from Valgrind on 'menuconfig' (ARCH=x86) before the fix:
LEAK SUMMARY: definitely lost: 344,376 bytes in 14,349 blocks ...
Summary after the fix:
LEAK SUMMARY: definitely lost: 44,448 bytes in 1,852 blocks ...
Signed-off-by: Ulf Magnusson ulfalizer@gmail.com Signed-off-by: Masahiro Yamada yamada.masahiro@socionext.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- scripts/kconfig/menu.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index b05cc3d4a9be..8360feaf51ce 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c @@ -364,6 +364,7 @@ void menu_finalize(struct menu *parent) menu->parent = parent; last_menu = menu; } + expr_free(basedep); if (last_menu) { parent->list = parent->next; parent->next = last_menu->next;
From: Ulf Magnusson ulfalizer@gmail.com
[ Upstream commit 0724a7c32a54e3e50d28e19e30c59014f61d4e2c ]
If a 'mainmenu' entry appeared in the Kconfig files, two things would leak:
- The 'struct property' allocated for the default "Linux Kernel Configuration" prompt.
- The string for the T_WORD/T_WORD_QUOTE prompt after the T_MAINMENU token, allocated on the heap in zconf.l.
To fix it, introduce a new 'no_mainmenu_stmt' nonterminal that matches if there's no 'mainmenu' and adds the default prompt. That means the prompt only gets allocated once regardless of whether there's a 'mainmenu' statement or not, and managing it becomes simple.
Summary from Valgrind on 'menuconfig' (ARCH=x86) before the fix:
LEAK SUMMARY: definitely lost: 344,568 bytes in 14,352 blocks ...
Summary after the fix:
LEAK SUMMARY: definitely lost: 344,440 bytes in 14,350 blocks ...
Signed-off-by: Ulf Magnusson ulfalizer@gmail.com Signed-off-by: Masahiro Yamada yamada.masahiro@socionext.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- scripts/kconfig/zconf.y | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y index 71bf8bff696a..5122ed2d839a 100644 --- a/scripts/kconfig/zconf.y +++ b/scripts/kconfig/zconf.y @@ -107,7 +107,27 @@ static struct menu *current_menu, *current_entry; %% input: nl start | start;
-start: mainmenu_stmt stmt_list | stmt_list; +start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list; + +/* mainmenu entry */ + +mainmenu_stmt: T_MAINMENU prompt nl +{ + menu_add_prompt(P_MENU, $2, NULL); +}; + +/* Default main menu, if there's no mainmenu entry */ + +no_mainmenu_stmt: /* empty */ +{ + /* + * Hack: Keep the main menu title on the heap so we can safely free it + * later regardless of whether it comes from the 'prompt' in + * mainmenu_stmt or here + */ + menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL); +}; +
stmt_list: /* empty */ @@ -344,13 +364,6 @@ if_block: | if_block choice_stmt ;
-/* mainmenu entry */ - -mainmenu_stmt: T_MAINMENU prompt nl -{ - menu_add_prompt(P_MENU, $2, NULL); -}; - /* menu entry */
menu: T_MENU prompt T_EOL @@ -495,6 +508,7 @@ word_opt: /* empty */ { $$ = NULL; }
void conf_parse(const char *name) { + const char *tmp; struct symbol *sym; int i;
@@ -502,7 +516,6 @@ void conf_parse(const char *name)
sym_init(); _menu_init(); - rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
if (getenv("ZCONF_DEBUG")) zconfdebug = 1; @@ -512,8 +525,10 @@ void conf_parse(const char *name) if (!modules_sym) modules_sym = sym_find( "n" );
+ tmp = rootmenu.prompt->text; rootmenu.prompt->text = _(rootmenu.prompt->text); rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text); + free((char*)tmp);
menu_finalize(&rootmenu); for_all_symbols(i, sym) {
From: Ulf Magnusson ulfalizer@gmail.com
[ Upstream commit 5b1374b3b3c2fc4f63a398adfa446fb8eff791a4 ]
Only the E_NOT operand and not the E_NOT node itself was freed, due to accidentally returning too early in expr_free(). Outline of leak:
switch (e->type) {
	...
	case E_NOT:
		expr_free(e->left.expr);
		return;
	...
}
*Never reached, 'e' leaked*
free(e);
Fix by changing the 'return' to a 'break'.
Summary from Valgrind on 'menuconfig' (ARCH=x86) before the fix:
LEAK SUMMARY: definitely lost: 44,448 bytes in 1,852 blocks ...
Summary after the fix:
LEAK SUMMARY: definitely lost: 1,608 bytes in 67 blocks ...
Signed-off-by: Ulf Magnusson ulfalizer@gmail.com Signed-off-by: Masahiro Yamada yamada.masahiro@socionext.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- scripts/kconfig/expr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c index cbf4996dd9c1..ed29bad1f03a 100644 --- a/scripts/kconfig/expr.c +++ b/scripts/kconfig/expr.c @@ -113,7 +113,7 @@ void expr_free(struct expr *e) break; case E_NOT: expr_free(e->left.expr); - return; + break; case E_EQUAL: case E_GEQ: case E_GTH:
From: "weiyongjun (A)" weiyongjun1@huawei.com
[ Upstream commit 0ddcff49b672239dda94d70d0fcf50317a9f4b51 ]
'hwname' is malloced in hwsim_new_radio_nl() and should be freed before leaving from the error handling cases; otherwise it will cause a memory leak.
Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length") Signed-off-by: Wei Yongjun weiyongjun1@huawei.com Reviewed-by: Ben Hutchings ben.hutchings@codethink.co.uk Signed-off-by: Johannes Berg johannes.berg@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/wireless/mac80211_hwsim.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 8a9164da6c50..e8b770a95f7a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2925,8 +2925,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
- if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) + if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) { + kfree(hwname); return -EINVAL; + } param.regd = hwsim_world_regdom_custom[idx]; }
From: Wei Yongjun weiyongjun1@huawei.com
[ Upstream commit e749d328b0b450aa78d562fa26a0cd8872325dd9 ]
Fix to return a negative error code from the request_irq() error handling case instead of 0, as done elsewhere in this function.
Fixes: dce143c3381c ("ipmi/powernv: Convert to irq event interface") Signed-off-by: Wei Yongjun weiyongjun1@huawei.com Reviewed-by: Alexey Kardashevskiy aik@ozlabs.ru Signed-off-by: Corey Minyard cminyard@mvista.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/char/ipmi/ipmi_powernv.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index 6e658aa114f1..a70518a4fcec 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -251,8 +251,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev) ipmi->irq = opal_event_request(prop); }
- if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH, - "opal-ipmi", ipmi)) { + rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH, + "opal-ipmi", ipmi); + if (rc) { dev_warn(dev, "Unable to request irq\n"); goto err_dispose; }
From: Liu Bo bo.li.liu@oracle.com
[ Upstream commit 343e4fc1c60971b0734de26dbbd475d433950982 ]
Setting plug can merge adjacent IOs before dispatching IOs to the disk driver.
Without a plug, this is not a problem for single-disk use cases, but for multiple disks using a raid profile, a large IO can be split into several IOs of stripe length, and a plug can be helpful to bring them together for each disk so that we can save several disk accesses.
Moreover, fsync issues synchronous writes, so plug can really take effect.
Signed-off-by: Liu Bo bo.li.liu@oracle.com Reviewed-by: David Sterba dsterba@suse.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/btrfs/file.c | 9 +++++++++ 1 file changed, 9 insertions(+)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index d4a6eef31854..052973620595 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1861,10 +1861,19 @@ int btrfs_release_file(struct inode *inode, struct file *filp) static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end) { int ret; + struct blk_plug plug;
+ /* + * This is only called in fsync, which would do synchronous writes, so + * a plug can merge adjacent IOs as much as possible. Esp. in case of + * multiple disks using raid profile, a large IO can be split to + * several segments of stripe length (currently 64K). + */ + blk_start_plug(&plug); atomic_inc(&BTRFS_I(inode)->sync_writers); ret = btrfs_fdatawrite_range(inode, start, end); atomic_dec(&BTRFS_I(inode)->sync_writers); + blk_finish_plug(&plug);
return ret; }
From: Nikolay Borisov nborisov@suse.com
[ Upstream commit 9ea2c7c9da13c9073e371c046cbbc45481ecb459 ]
When modifying a tree where the root is at BTRFS_MAX_LEVEL - 1, the level variable is going to be 7 (this is the max height of the tree). On the other hand, btrfs_cow_block is always called with "level + 1" as an index into the nodes and slots arrays. This leads to an out-of-bounds access. Admittedly this will be benign, since an OOB access of the nodes array will likely read the 0th element from the slots array, which in this case is going to be 0 (since we start CoW at the top of the tree). The OOB access into the slots array in turn will read the 0th and 1st values of the locks array, which would both be 0 at the time. However, this benign behavior relies on the fact that the path being passed hasn't been initialised; if it has already been used to query a btree, then it could potentially have populated the nodes/slots arrays.
Fix it by explicitly checking if we are at level 7 (the maximum allowed index in nodes/slots arrays) and explicitly call the CoW routine with NULL for parent's node/slot.
Signed-off-by: Nikolay Borisov nborisov@suse.com Fixes-coverity-id: 711515 Reviewed-by: David Sterba dsterba@suse.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/btrfs/ctree.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 0f2b7c622ce3..ce1861620ef7 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2769,6 +2769,8 @@ again: * contention with the cow code */ if (cow) { + bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); + /* * if we don't really need to cow this block * then we don't want to set the path blocking, @@ -2793,9 +2795,13 @@ again: }
btrfs_set_path_blocking(p); - err = btrfs_cow_block(trans, root, b, - p->nodes[level + 1], - p->slots[level + 1], &b); + if (last_level) + err = btrfs_cow_block(trans, root, b, NULL, 0, + &b); + else + err = btrfs_cow_block(trans, root, b, + p->nodes[level + 1], + p->slots[level + 1], &b); if (err) { ret = err; goto done;
From: Liu Bo bo.li.liu@oracle.com
[ Upstream commit 762221f095e3932669093466aaf4b85ed9ad2ac1 ]
The raid6 corruption scenario is this: suppose all disks can be read without problems, but the content that was read out doesn't match its checksum. Currently, raid6 btrfs retries at most twice:
- the 1st retry is to rebuild with all other stripes, it'll eventually be a raid5 xor rebuild,
- if the 1st fails, the 2nd retry will deliberately fail parity p so that it will do raid6 style rebuild,
however, the chances are that another non-parity stripe's content is also corrupted, so the above retries are not able to return correct content.
We've fixed normal reads to rebuild raid6 correctly with more retries in the patch "Btrfs: make raid6 rebuild retry more"[1]; this fixes scrub to do exactly the same rebuild process.
[1]: https://patchwork.kernel.org/patch/10091755/
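A worked example of the retry selection in the hunk below, for a 6-device raid6 stripe (real_stripes == 6: data stripes 0-3, P == 4, Q == 5), with faila already set to the data stripe that failed its checksum:

mirror_num == 2: reconstruct from all other stripes (ends up as a raid5 xor rebuild).
mirror_num == 3: failb = 6 - (3 - 1) = 4, i.e. deliberately fail P and rebuild from Q.
mirror_num == 4: failb = 6 - (4 - 1) = 3, i.e. additionally fail data stripe 3.
mirror_num == 5: failb = 6 - (5 - 1) = 2, i.e. additionally fail data stripe 2.

If failb lands on faila itself, it is decremented once so that a different stripe is excluded on that retry; the btrfs_num_copies() change below makes enough mirror numbers available to cover every stripe.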
Signed-off-by: Liu Bo bo.li.liu@oracle.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/btrfs/raid56.c | 18 ++++++++++++++---- fs/btrfs/volumes.c | 9 ++++++++- 2 files changed, 22 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 1a33d3eb36de..b9fa99577bf7 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -2160,11 +2160,21 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, }
/* - * reconstruct from the q stripe if they are - * asking for mirror 3 + * Loop retry: + * for 'mirror == 2', reconstruct from all other stripes. + * for 'mirror_num > 2', select a stripe to fail on every retry. */ - if (mirror_num == 3) - rbio->failb = rbio->real_stripes - 2; + if (mirror_num > 2) { + /* + * 'mirror == 3' is to fail the p stripe and + * reconstruct from the q stripe. 'mirror > 3' is to + * fail a data stripe and reconstruct from p+q stripe. + */ + rbio->failb = rbio->real_stripes - (mirror_num - 1); + ASSERT(rbio->failb > 0); + if (rbio->failb <= rbio->faila) + rbio->failb--; + }
ret = lock_stripe_add(rbio);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6d874b1cd53c..992ed3e7efb2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5047,7 +5047,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) else if (map->type & BTRFS_BLOCK_GROUP_RAID5) ret = 2; else if (map->type & BTRFS_BLOCK_GROUP_RAID6) - ret = 3; + /* + * There could be two corrupted data stripes, we need + * to loop retry in order to rebuild the correct data. + * + * Fail a stripe at a time on every retry except the + * stripe under reconstruction. + */ + ret = map->num_stripes; else ret = 1; free_extent_map(em);
From: Xose Vazquez Perez xose.vazquez@gmail.com
[ Upstream commit 3f884a0a8bdf28cfd1e9987d54d83350096cdd46 ]
Replace "" with NULL for product revision level, and merge TEXEL duplicate entries.
Cc: Hannes Reinecke hare@suse.de Cc: Martin K. Petersen martin.petersen@oracle.com Cc: James E.J. Bottomley jejb@linux.vnet.ibm.com Cc: SCSI ML linux-scsi@vger.kernel.org Signed-off-by: Xose Vazquez Perez xose.vazquez@gmail.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/scsi/scsi_devinfo.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 6b61b09b3226..9a25e798e195 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -108,8 +108,8 @@ static struct { * seagate controller, which causes SCSI code to reset bus. */ {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */ - {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */ - {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */ + {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */ + {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */ {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */ {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */ {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */ @@ -119,7 +119,7 @@ static struct { {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */ {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ - {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN}, + {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN}, {"transtec", "T5008", "0001", BLIST_NOREPORTLUN }, {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ @@ -249,7 +249,6 @@ static struct { {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR}, {"SUN", "T300", "*", BLIST_SPARSELUN}, {"SUN", "T4", "*", BLIST_SPARSELUN}, - {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN}, {"Tornado-", "F4", "*", BLIST_NOREPORTLUN}, {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
From: Arnd Bergmann arnd@arndb.de
[ Upstream commit 96d5eaa9bb74d299508d811d865c2c41b38b0301 ]
While testing with the ARM specific memset() macro removed, I ran into a compiler warning that shows an old bug:
drivers/scsi/arm/fas216.c: In function 'fas216_rq_sns_done': drivers/scsi/arm/fas216.c:2014:40: error: argument to 'sizeof' in 'memset' call is the same expression as the destination; did you mean to provide an explicit length? [-Werror=sizeof-pointer-memaccess]
It turns out that the definition of the scsi_cmd structure changed back in linux-2.6.25, so now we clear only four bytes (sizeof(pointer)) instead of 96 (SCSI_SENSE_BUFFERSIZE). I did not check whether we actually need to initialize the buffer here, but it's clear that if we do it, we should use the correct size.
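A minimal standalone illustration of the pitfall (SENSE_SIZE and struct cmd are hypothetical stand-ins for SCSI_SENSE_BUFFERSIZE and the scsi_cmnd layout):

#include <string.h>

#define SENSE_SIZE 96    /* stands in for SCSI_SENSE_BUFFERSIZE */

struct cmd {
        unsigned char *sense_buffer;   /* was an embedded array before 2.6.25 */
};

void clear_sense(struct cmd *c)
{
        /* sizeof(c->sense_buffer) is now sizeof(unsigned char *), i.e. 4 on
         * 32-bit ARM, so only the first 4 bytes get cleared: */
        memset(c->sense_buffer, 0, sizeof(c->sense_buffer));   /* bug */

        /* the intended clear uses the real buffer length: */
        memset(c->sense_buffer, 0, SENSE_SIZE);
}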
Fixes: de25deb18016 ("[SCSI] use dynamically allocated sense buffer") Signed-off-by: Arnd Bergmann arnd@arndb.de Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/scsi/arm/fas216.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index decdc71b6b86..f6d7c4712e66 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2009,7 +2009,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, * have valid data in the sense buffer that could * confuse the higher levels. */ - memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); + memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); //printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id); //{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); } /*
From: Dan Carpenter dan.carpenter@oracle.com
[ Upstream commit 7ad81482cad67cbe1ec808490d1ddfc420c42008 ]
We get the "new_profile_index" value from the mouse device when we're handling raw events. Smatch taints it as untrusted data and complains that we need a bounds check. This seems like a reasonable warning otherwise there is a small read beyond the end of the array.
Fixes: 0e70f97f257e ("HID: roccat: Add support for Kova[+] mouse") Signed-off-by: Dan Carpenter dan.carpenter@oracle.com Acked-by: Silvan Jegen s.jegen@gmail.com Signed-off-by: Jiri Kosina jkosina@suse.cz Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/hid/hid-roccat-kovaplus.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 966047711fbf..1073c0d1fae5 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value) static void kovaplus_profile_activated(struct kovaplus_device *kovaplus, uint new_profile_index) { + if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings)) + return; kovaplus->actual_profile = new_profile_index; kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level; kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
From: Jake Daryll Obina jake.obina@gmail.com
[ Upstream commit 5bdd0c6f89fba430e18d636493398389dadc3b17 ]
If jffs2_iget() fails for a newly-allocated inode, jffs2_do_clear_inode() can get called twice in the error handling path, the first call in jffs2_iget() itself and the second through iget_failed(). This can result in a use-after-free error in the second jffs2_do_clear_inode() call, such as shown by the oops below, wherein the second jffs2_do_clear_inode() call was trying to free node fragments that were already freed in the first jffs2_do_clear_inode() call.
[ 78.178860] jffs2: error: (1904) jffs2_do_read_inode_internal: CRC failed for read_inode of inode 24 at physical location 0x1fc00c [ 78.178914] Unable to handle kernel paging request at virtual address 6b6b6b6b6b6b6b7b [ 78.185871] pgd = ffffffc03a567000 [ 78.188794] [6b6b6b6b6b6b6b7b] *pgd=0000000000000000, *pud=0000000000000000 [ 78.194968] Internal error: Oops: 96000004 [#1] PREEMPT SMP ... [ 78.513147] PC is at rb_first_postorder+0xc/0x28 [ 78.516503] LR is at jffs2_kill_fragtree+0x28/0x90 [jffs2] [ 78.520672] pc : [<ffffff8008323d28>] lr : [<ffffff8000eb1cc8>] pstate: 60000105 [ 78.526757] sp : ffffff800cea38f0 [ 78.528753] x29: ffffff800cea38f0 x28: ffffffc01f3f8e80 [ 78.532754] x27: 0000000000000000 x26: ffffff800cea3c70 [ 78.536756] x25: 00000000dc67c8ae x24: ffffffc033d6945d [ 78.540759] x23: ffffffc036811740 x22: ffffff800891a5b8 [ 78.544760] x21: 0000000000000000 x20: 0000000000000000 [ 78.548762] x19: ffffffc037d48910 x18: ffffff800891a588 [ 78.552764] x17: 0000000000000800 x16: 0000000000000c00 [ 78.556766] x15: 0000000000000010 x14: 6f2065646f6e695f [ 78.560767] x13: 6461657220726f66 x12: 2064656c69616620 [ 78.564769] x11: 435243203a6c616e x10: 7265746e695f6564 [ 78.568771] x9 : 6f6e695f64616572 x8 : ffffffc037974038 [ 78.572774] x7 : bbbbbbbbbbbbbbbb x6 : 0000000000000008 [ 78.576775] x5 : 002f91d85bd44a2f x4 : 0000000000000000 [ 78.580777] x3 : 0000000000000000 x2 : 000000403755e000 [ 78.584779] x1 : 6b6b6b6b6b6b6b6b x0 : 6b6b6b6b6b6b6b6b ... [ 79.038551] [<ffffff8008323d28>] rb_first_postorder+0xc/0x28 [ 79.042962] [<ffffff8000eb5578>] jffs2_do_clear_inode+0x88/0x100 [jffs2] [ 79.048395] [<ffffff8000eb9ddc>] jffs2_evict_inode+0x3c/0x48 [jffs2] [ 79.053443] [<ffffff8008201ca8>] evict+0xb0/0x168 [ 79.056835] [<ffffff8008202650>] iput+0x1c0/0x200 [ 79.060228] [<ffffff800820408c>] iget_failed+0x30/0x3c [ 79.064097] [<ffffff8000eba0c0>] jffs2_iget+0x2d8/0x360 [jffs2] [ 79.068740] [<ffffff8000eb0a60>] jffs2_lookup+0xe8/0x130 [jffs2] [ 79.073434] [<ffffff80081f1a28>] lookup_slow+0x118/0x190 [ 79.077435] [<ffffff80081f4708>] walk_component+0xfc/0x28c [ 79.081610] [<ffffff80081f4dd0>] path_lookupat+0x84/0x108 [ 79.085699] [<ffffff80081f5578>] filename_lookup+0x88/0x100 [ 79.089960] [<ffffff80081f572c>] user_path_at_empty+0x58/0x6c [ 79.094396] [<ffffff80081ebe14>] vfs_statx+0xa4/0x114 [ 79.098138] [<ffffff80081ec44c>] SyS_newfstatat+0x58/0x98 [ 79.102227] [<ffffff800808354c>] __sys_trace_return+0x0/0x4 [ 79.106489] Code: d65f03c0 f9400001 b40000e1 aa0103e0 (f9400821)
The jffs2_do_clear_inode() call in jffs2_iget() is unnecessary since iget_failed() will eventually call jffs2_do_clear_inode() if needed, so just remove it.
Fixes: 5451f79f5f81 ("iget: stop JFFS2 from using iget() and read_inode()") Reviewed-by: Richard Weinberger richard@nod.at Signed-off-by: Jake Daryll Obina jake.obina@gmail.com Signed-off-by: Al Viro viro@zeniv.linux.org.uk Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/jffs2/fs.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 2caf1682036d..85e2594fe95c 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -361,7 +361,6 @@ error_io: ret = -EIO; error: mutex_unlock(&f->sem); - jffs2_do_clear_inode(c, f); iget_failed(inode); return ERR_PTR(ret); }
From: Michael Bringmann mwb@linux.vnet.ibm.com
[ Upstream commit a346137e9142b039fd13af2e59696e3d40c487ef ]
On powerpc systems which allow 'hot-add' of CPU or memory resources, it may occur that the new resources are to be inserted into nodes that were not used for these resources at bootup. In the kernel, any node that is used must be defined and initialized. These empty nodes may occur when:
* Dedicated vs. shared resources. Shared resources require information such as the VPHN hcall for CPU assignment to nodes. Associativity decisions made based on dedicated resource rules, such as associativity properties in the device tree, may vary from decisions made using the values returned by the VPHN hcall.
* memoryless nodes at boot. Nodes need to be defined as 'possible' at boot for operation with other code modules. Previously, the powerpc code would limit the set of possible nodes to those which have memory assigned at boot, and were thus online. Subsequent add/remove of CPUs or memory would only work with this subset of possible nodes.
* memoryless nodes with CPUs at boot. Due to the previous restriction on nodes, nodes that had CPUs but no memory were being collapsed into other nodes that did have memory at boot. In practice this meant that the node assignment presented by the runtime kernel differed from the affinity and associativity attributes presented by the device tree or VPHN hcalls. Nodes that might be known to the pHyp were not 'possible' in the runtime kernel because they did not have memory at boot.
This patch ensures that sufficient nodes are defined to support configuration requirements after boot, as well as at boot. This patch set fixes a couple of problems.
* Nodes known to powerpc to be memoryless at boot, but to have CPUs in them are allowed to be 'possible' and 'online'. Memory allocations for those nodes are taken from another node that does have memory until and if memory is hot-added to the node. * Nodes which have no resources assigned at boot, but which may still be referenced subsequently by affinity or associativity attributes, are kept in the list of 'possible' nodes for powerpc. Hot-add of memory or CPUs to the system can reference these nodes and bring them online instead of redirecting to one of the set of nodes that were known to have memory at boot.
This patch extracts the value of the lowest domain level (number of allocable resources) from the device tree property "ibm,max-associativity-domains" to use as the maximum number of nodes to set up as possibly available in the system. This new setting will override the instruction:
nodes_and(node_possible_map, node_possible_map, node_online_map);
presently seen in the function arch/powerpc/mm/numa.c:initmem_init().
If the "ibm,max-associativity-domains" property is not present at boot, no operation will be performed to define or enable additional nodes, or enable the above 'nodes_and()'.
Signed-off-by: Michael Bringmann mwb@linux.vnet.ibm.com Reviewed-by: Nathan Fontenot nfont@linux.vnet.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/powerpc/mm/numa.c | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 669a15e7fa76..8fd8425af27f 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -951,6 +951,34 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) NODE_DATA(nid)->node_spanned_pages = spanned_pages; }
+static void __init find_possible_nodes(void) +{ + struct device_node *rtas; + u32 numnodes, i; + + if (min_common_depth <= 0) + return; + + rtas = of_find_node_by_path("/rtas"); + if (!rtas) + return; + + if (of_property_read_u32_index(rtas, + "ibm,max-associativity-domains", + min_common_depth, &numnodes)) + goto out; + + for (i = 0; i < numnodes; i++) { + if (!node_possible(i)) { + setup_node_data(i, 0, 0); + node_set(i, node_possible_map); + } + } + +out: + of_node_put(rtas); +} + void __init initmem_init(void) { int nid, cpu; @@ -966,12 +994,15 @@ void __init initmem_init(void) memblock_dump_all();
/* - * Reduce the possible NUMA nodes to the online NUMA nodes, - * since we do not support node hotplug. This ensures that we - * lower the maximum NUMA node ID to what is actually present. + * Modify the set of possible NUMA nodes to reflect information + * available about the set of online nodes, and the set of nodes + * that we expect to make use of for this platform's affinity + * calculations. */ nodes_and(node_possible_map, node_possible_map, node_online_map);
+ find_possible_nodes(); + for_each_online_node(nid) { unsigned long start_pfn, end_pfn;
From: Leon Romanovsky leonro@mellanox.com
[ Upstream commit b081808a66345ba725b77ecd8d759bee874cd937 ]
Failure in the XRCD FW deallocation command leaks memory and returns an error to the user, who can't do anything about it.
This patch changes behavior to always free memory and always return success to the user.
Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Reviewed-by: Majd Dibbiny majd@mellanox.com Signed-off-by: Leon Romanovsky leonro@mellanox.com Reviewed-by: Yuval Shaia yuval.shaia@oracle.com Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/infiniband/hw/mlx5/qp.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index cfcfbb6b84d7..3d3952ab013a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3157,12 +3157,9 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) int err;
err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn); - if (err) { + if (err) mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); - return err; - }
kfree(xrcd); - return 0; }
From: Michael Bringmann mwb@linux.vnet.ibm.com
[ Upstream commit ea05ba7c559c8e5a5946c3a94a2a266e9a6680a6 ]
This patch fixes some problems encountered at runtime with configurations that support memory-less nodes, or that hot-add CPUs into nodes that are memoryless during system execution after boot. The problems of interest include:
* Nodes known to powerpc to be memoryless at boot, but to have CPUs in them are allowed to be 'possible' and 'online'. Memory allocations for those nodes are taken from another node that does have memory until and if memory is hot-added to the node.
* Nodes which have no resources assigned at boot, but which may still be referenced subsequently by affinity or associativity attributes, are kept in the list of 'possible' nodes for powerpc. Hot-add of memory or CPUs to the system can reference these nodes and bring them online instead of redirecting the references to one of the set of nodes known to have memory at boot.
Note that this software operates under the context of CPU hotplug. We are not doing memory hotplug in this code, but rather updating the kernel's CPU topology (i.e. arch_update_cpu_topology / numa_update_cpu_topology). We are initializing a node that may be used by CPUs or memory before it can be referenced as invalid by a CPU hotplug operation. CPU hotplug operations are protected by a range of APIs including cpu_maps_update_begin/cpu_maps_update_done, cpus_read/write_lock / cpus_read/write_unlock, device locks, and more. Memory hotplug operations, including try_online_node, are protected by mem_hotplug_begin/mem_hotplug_done, device locks, and more. In the case of CPUs being hot-added to a previously memoryless node, the try_online_node operation occurs wholly within the CPU locks with no overlap. Using HMC hot-add/hot-remove operations, we have been able to add and remove CPUs to any possible node without failures. HMC operations involve a degree of self-serialization, though.
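For readability, the helper introduced by the diff below is reproduced here as a condensed sketch (taken from the hunk adding find_and_online_cpu_nid(); comments shortened):

    static inline int find_and_online_cpu_nid(int cpu)
    {
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        int new_nid;

        /* Use associativity from first thread for all siblings */
        vphn_get_associativity(cpu, associativity);
        new_nid = associativity_to_nid(associativity);
        if (new_nid < 0 || !node_possible(new_nid))
            new_nid = first_online_node;

        if (NODE_DATA(new_nid) == NULL) {
    #ifdef CONFIG_MEMORY_HOTPLUG
            /* try to init the node; fall back to a node that has memory */
            if (try_online_node(new_nid))
                new_nid = first_online_node;
    #else
            /* no memory hotplug: fall back to a node that has memory */
            new_nid = first_online_node;
    #endif
        }

        return new_nid;
    }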
Signed-off-by: Michael Bringmann mwb@linux.vnet.ibm.com Reviewed-by: Nathan Fontenot nfont@linux.vnet.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/powerpc/mm/numa.c | 47 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 8fd8425af27f..3c4faa4c2742 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -551,7 +551,7 @@ static int numa_setup_cpu(unsigned long lcpu) nid = of_node_to_nid_single(cpu);
out_present: - if (nid < 0 || !node_online(nid)) + if (nid < 0 || !node_possible(nid)) nid = first_online_node;
map_cpu_to_node(lcpu, nid); @@ -969,10 +969,8 @@ static void __init find_possible_nodes(void) goto out;
for (i = 0; i < numnodes; i++) { - if (!node_possible(i)) { - setup_node_data(i, 0, 0); + if (!node_possible(i)) node_set(i, node_possible_map); - } }
out: @@ -1335,6 +1333,40 @@ static long vphn_get_associativity(unsigned long cpu, return rc; }
+static inline int find_and_online_cpu_nid(int cpu) +{ + __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; + int new_nid; + + /* Use associativity from first thread for all siblings */ + vphn_get_associativity(cpu, associativity); + new_nid = associativity_to_nid(associativity); + if (new_nid < 0 || !node_possible(new_nid)) + new_nid = first_online_node; + + if (NODE_DATA(new_nid) == NULL) { +#ifdef CONFIG_MEMORY_HOTPLUG + /* + * Need to ensure that NODE_DATA is initialized for a node from + * available memory (see memblock_alloc_try_nid). If unable to + * init the node, then default to nearest node that has memory + * installed. + */ + if (try_online_node(new_nid)) + new_nid = first_online_node; +#else + /* + * Default to using the nearest node that has memory installed. + * Otherwise, it would be necessary to patch the kernel MM code + * to deal with more memoryless-node error conditions. + */ + new_nid = first_online_node; +#endif + } + + return new_nid; +} + /* * Update the CPU maps and sysfs entries for a single CPU when its NUMA * characteristics change. This function doesn't perform any locking and is @@ -1400,7 +1432,6 @@ int arch_update_cpu_topology(void) { unsigned int cpu, sibling, changed = 0; struct topology_update_data *updates, *ud; - __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; cpumask_t updated_cpus; struct device *dev; int weight, new_nid, i = 0; @@ -1435,11 +1466,7 @@ int arch_update_cpu_topology(void) continue; }
- /* Use associativity from first thread for all siblings */ - vphn_get_associativity(cpu, associativity); - new_nid = associativity_to_nid(associativity); - if (new_nid < 0 || !node_online(new_nid)) - new_nid = first_online_node; + new_nid = find_and_online_cpu_nid(cpu);
if (new_nid == numa_cpu_lookup_table[cpu]) { cpumask_andnot(&cpu_associativity_changes_mask,
From: Logan Gunthorpe logang@deltatee.com
[ Upstream commit cbd27448faff4843ac4b66cc71445a10623ff48d ]
When using the max_mw_size parameter of ntb_transport to limit the size of the Memory windows, communication cannot be established and the queues freeze.
This is because the mw_size that's reported to the peer is correctly limited but the size used locally is not. So the MW is initialized with a buffer smaller than the window but the TX side is using the full window. This means the TX side will be writing to a region of the window that points nowhere.
This is easily fixed by applying the same limit to tx_size in ntb_transport_init_queue().
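Concretely, the hunk below clamps the locally used window size with the same limit already applied to the size advertised to the peer, before tx_size is derived from it:

    /* apply the same limit that is applied to the advertised MW size */
    if (max_mw_size && mw_size > max_mw_size)
        mw_size = max_mw_size;

    tx_size = (unsigned int)mw_size / num_qps_mw;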
Fixes: e26a5843f7f5 ("NTB: Split ntb_hw_intel and ntb_transport drivers") Signed-off-by: Logan Gunthorpe logang@deltatee.com Acked-by: Allen Hubbe Allen.Hubbe@dell.com Cc: Dave Jiang dave.jiang@intel.com Signed-off-by: Jon Mason jdmason@kudzu.us Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/ntb/ntb_transport.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 3bbdf60f8908..49f3fba75f4d 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -955,6 +955,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, mw_base = nt->mw_vec[mw_num].phys_addr; mw_size = nt->mw_vec[mw_num].phys_size;
+ if (max_mw_size && mw_size > max_mw_size) + mw_size = max_mw_size; + tx_size = (unsigned int)mw_size / num_qps_mw; qp_offset = tx_size * (qp_num / mw_count);
From: piaojun piaojun@huawei.com
[ Upstream commit 025bcbde3634b2c9b316f227fed13ad6ad6817fb ]
If metadata is corrupted, for example an 'invalid inode block', mount() will fail and the filesystem will be set read-only as below:
ocfs2_mount
  ocfs2_initialize_super
    ocfs2_init_global_system_inodes
      ocfs2_iget
        ocfs2_read_locked_inode
          ocfs2_validate_inode_block
            ocfs2_error
              ocfs2_handle_error
                ocfs2_set_ro_flag(osb, 0);  // set readonly
In this situation we need to return -EROFS to 'mount.ocfs2', so that the user can fix the filesystem with fsck and then mount again. In addition, 'mount.ocfs2' should be updated correspondingly, as it currently returns 1 for every errno; a patch for 'mount.ocfs2' will be posted as well.
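The error code selection in both the global and local system-inode paths then becomes (as in the hunks below):

    /* report EROFS when ocfs2_error() already flagged the fs soft read-only,
     * so mount.ocfs2 can tell the user to run fsck; otherwise keep EINVAL */
    status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;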
Link: http://lkml.kernel.org/r/5A4302FA.2010606@huawei.com Signed-off-by: Jun Piao piaojun@huawei.com Reviewed-by: Alex Chen alex.chen@huawei.com Reviewed-by: Joseph Qi jiangqi903@gmail.com Reviewed-by: Changwei Ge ge.changwei@h3c.com Reviewed-by: Gang He ghe@suse.com Cc: Mark Fasheh mfasheh@versity.com Cc: Joel Becker jlbec@evilplan.org Cc: Junxiao Bi junxiao.bi@oracle.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/ocfs2/super.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 2de4c8a9340c..4f5141350af8 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -477,9 +477,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); if (!new) { ocfs2_release_system_inodes(osb); - status = -EINVAL; + status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL; mlog_errno(status); - /* FIXME: Should ERROR_RO_FS */ mlog(ML_ERROR, "Unable to load system inode %d, " "possibly corrupt fs?", i); goto bail; @@ -508,7 +507,7 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb) new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); if (!new) { ocfs2_release_system_inodes(osb); - status = -EINVAL; + status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL; mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n", status, i, osb->slot_num); goto bail;
From: piaojun piaojun@huawei.com
[ Upstream commit 16c8d569f5704a84164f30ff01b29879f3438065 ]
The race between *set_acl and *get_acl will cause getting incomplete xattr data as below:
processA                                 processB

ocfs2_set_acl
  ocfs2_xattr_set
    __ocfs2_xattr_set_handle
                                         ocfs2_get_acl_nolock
                                           ocfs2_xattr_get_nolock:
processB may get incomplete xattr data if processA has not finished set_acl yet.

So we should use 'ip_xattr_sem' to protect reading the extended attribute in ocfs2_get_acl_nolock(), as other processes could be changing it concurrently.
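The pattern applied by the hunks below is simply to take the xattr semaphore for reading around each nolock ACL lookup, e.g. in ocfs2_iop_get_acl():

    down_read(&OCFS2_I(inode)->ip_xattr_sem);
    acl = ocfs2_get_acl_nolock(inode, type, di_bh);
    up_read(&OCFS2_I(inode)->ip_xattr_sem);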
Link: http://lkml.kernel.org/r/5A5DDCFF.7030001@huawei.com Signed-off-by: Jun Piao piaojun@huawei.com Reviewed-by: Alex Chen alex.chen@huawei.com Cc: Mark Fasheh mfasheh@versity.com Cc: Joel Becker jlbec@evilplan.org Cc: Junxiao Bi junxiao.bi@oracle.com Cc: Joseph Qi jiangqi903@gmail.com Cc: Changwei Ge ge.changwei@h3c.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/ocfs2/acl.c | 6 ++++++ fs/ocfs2/xattr.c | 2 ++ 2 files changed, 8 insertions(+)
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 164307b99405..1e0d8da0d3cd 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -314,7 +314,9 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type) return ERR_PTR(ret); }
+ down_read(&OCFS2_I(inode)->ip_xattr_sem); acl = ocfs2_get_acl_nolock(inode, type, di_bh); + up_read(&OCFS2_I(inode)->ip_xattr_sem);
ocfs2_inode_unlock(inode, 0); brelse(di_bh); @@ -333,7 +335,9 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh) if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) return 0;
+ down_read(&OCFS2_I(inode)->ip_xattr_sem); acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh); + up_read(&OCFS2_I(inode)->ip_xattr_sem); if (IS_ERR(acl) || !acl) return PTR_ERR(acl); ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); @@ -364,8 +368,10 @@ int ocfs2_init_acl(handle_t *handle,
if (!S_ISLNK(inode->i_mode)) { if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { + down_read(&OCFS2_I(dir)->ip_xattr_sem); acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT, dir_bh); + up_read(&OCFS2_I(dir)->ip_xattr_sem); if (IS_ERR(acl)) return PTR_ERR(acl); } diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d2b7192c0937..7f78daf88f35 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -639,9 +639,11 @@ int ocfs2_calc_xattr_init(struct inode *dir, si->value_len);
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { + down_read(&OCFS2_I(dir)->ip_xattr_sem); acl_len = ocfs2_xattr_get_nolock(dir, dir_bh, OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT, "", NULL, 0); + up_read(&OCFS2_I(dir)->ip_xattr_sem); if (acl_len > 0) { a_size = ocfs2_xattr_entry_real_size(0, acl_len); if (S_ISDIR(mode))
From: piaojun piaojun@huawei.com
[ Upstream commit d984187e3a1ad7d12447a7ab2c43ce3717a2b5b3 ]
We should not reuse the dirty bh in jbd2 directly due to the following situation:
1. When removing extent rec, we will dirty the bhs of extent rec and truncate log at the same time, and hand them over to jbd2.
2. The bhs are submitted to jbd2 area successfully.
3. The device's write-back thread helps flush the bhs to disk but encounters a write error due to an abnormal storage link.

4. After a while the storage link becomes normal again. The truncate log flush worker, triggered by the next space reclaim, finds the dirty bh of the truncate log, clears its 'BH_Write_EIO' and then sets it uptodate in __ocfs2_journal_access():
ocfs2_truncate_log_worker
  ocfs2_flush_truncate_log
    __ocfs2_flush_truncate_log
      ocfs2_replay_truncate_records
        ocfs2_journal_access_di
          __ocfs2_journal_access  // here we clear io_error and set 'tl_bh' uptodate
5. Then jbd2 will flush the bh of truncate log to disk, but the bh of extent rec is still in error state, and unfortunately nobody will take care of it.
6. In the end the space of the extent rec is not reduced, but the truncate log flush worker has given it back to globalalloc. That will cause a duplicate cluster problem which can be identified by fsck.ocfs2.

Sadly we can hardly revert this, so set the fs read-only to avoid ruining the atomicity and consistency of space reclaim.
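With this patch, __ocfs2_journal_access() no longer clears BH_Write_EIO and retries; it bails out and marks the filesystem read-only instead (sketch of the new branch, taken from the hunk below):

    if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
        unlock_buffer(bh);
        return ocfs2_error(osb->sb,
                           "A previous attempt to write this buffer head failed\n");
    }
    unlock_buffer(bh);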
Link: http://lkml.kernel.org/r/5A6E8092.8090701@huawei.com Fixes: acf8fdbe6afb ("ocfs2: do not BUG if buffer not uptodate in __ocfs2_journal_access") Signed-off-by: Jun Piao piaojun@huawei.com Reviewed-by: Yiwen Jiang jiangyiwen@huawei.com Reviewed-by: Changwei Ge ge.changwei@h3c.com Cc: Mark Fasheh mfasheh@versity.com Cc: Joel Becker jlbec@evilplan.org Cc: Junxiao Bi junxiao.bi@oracle.com Cc: Joseph Qi jiangqi903@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/ocfs2/journal.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 13534f4fe5b5..722eb5bc9b8f 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -666,23 +666,24 @@ static int __ocfs2_journal_access(handle_t *handle, /* we can safely remove this assertion after testing. */ if (!buffer_uptodate(bh)) { mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n"); - mlog(ML_ERROR, "b_blocknr=%llu\n", - (unsigned long long)bh->b_blocknr); + mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n", + (unsigned long long)bh->b_blocknr, bh->b_state);
lock_buffer(bh); /* - * A previous attempt to write this buffer head failed. - * Nothing we can do but to retry the write and hope for - * the best. + * A previous transaction with a couple of buffer heads fail + * to checkpoint, so all the bhs are marked as BH_Write_EIO. + * For current transaction, the bh is just among those error + * bhs which previous transaction handle. We can't just clear + * its BH_Write_EIO and reuse directly, since other bhs are + * not written to disk yet and that will cause metadata + * inconsistency. So we should set fs read-only to avoid + * further damage. */ if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) { - clear_buffer_write_io_error(bh); - set_buffer_uptodate(bh); - } - - if (!buffer_uptodate(bh)) { unlock_buffer(bh); - return -EIO; + return ocfs2_error(osb->sb, "A previous attempt to " + "write this buffer head failed\n"); } unlock_buffer(bh); }
From: Yisheng Xie xieyisheng1@huawei.com
[ Upstream commit 56521e7a02b7b84a5e72691a1fb15570e6055545 ]
As Xiaojun reported, the LTP test migrate_pages01 will fail on an arm64 system which has 4 nodes [0...3] that all have memory, with CONFIG_NODES_SHIFT=2:
migrate_pages01 0 TINFO : test_invalid_nodes migrate_pages01 14 TFAIL : migrate_pages_common.c:45: unexpected failure - returned value = 0, expected: -1 migrate_pages01 15 TFAIL : migrate_pages_common.c:55: call succeeded unexpectedly
In this case the test_invalid_nodes step of migrate_pages01 will call SYSC_migrate_pages as:
migrate_pages(0, , {0x0000000000000001}, 64, , {0x0000000000000010}, 64) = 0
The new_nodes argument specifies one or more node IDs that are greater than the maximum supported node ID; however, errno is not set to EINVAL as expected.

As the man pages of set_mempolicy[1], mbind[2], and migrate_pages[3] state, when the nodemask specifies one or more node IDs that are greater than the maximum supported node ID, errno should be set to EINVAL. However, get_nodes() only checks whether the bits in [BITS_PER_LONG*BITS_TO_LONGS(MAX_NUMNODES), maxnode) are all zero, and leaves the range [MAX_NUMNODES, BITS_PER_LONG*BITS_TO_LONGS(MAX_NUMNODES)) unchecked.

This patch makes get_nodes() also check the bits in [MAX_NUMNODES, maxnode), so that migrate_pages() sets errno to EINVAL when the nodemask specifies one or more node IDs greater than the maximum supported node ID, following the man pages' guidance.
[1] http://man7.org/linux/man-pages/man2/set_mempolicy.2.html [2] http://man7.org/linux/man-pages/man2/mbind.2.html [3] http://man7.org/linux/man-pages/man2/migrate_pages.2.html
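For illustration, a minimal userspace reproducer along the lines of the LTP case above might look like this (hypothetical sketch, not taken from the patch; it assumes libnuma's <numaif.h> wrapper, linking with -lnuma, and a 4-node system with CONFIG_NODES_SHIFT=2 as in the report):

    #include <numaif.h>
    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long old_nodes = 0x1;  /* node 0 */
        unsigned long new_nodes = 0x10; /* node 4: beyond the maximum supported node ID */
        long ret;

        /* pid 0 means the calling process; with the fix this must fail with EINVAL */
        ret = migrate_pages(0, 64, &old_nodes, &new_nodes);
        if (ret == -1 && errno == EINVAL)
            printf("EINVAL, as the man page documents\n");
        return 0;
    }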
Link: http://lkml.kernel.org/r/1510882624-44342-3-git-send-email-xieyisheng1@huawe... Signed-off-by: Yisheng Xie xieyisheng1@huawei.com Reported-by: Tan Xiaojun tanxiaojun@huawei.com Acked-by: Vlastimil Babka vbabka@suse.cz Cc: Andi Kleen ak@linux.intel.com Cc: Chris Salls salls@cs.ucsb.edu Cc: Christopher Lameter cl@linux.com Cc: David Rientjes rientjes@google.com Cc: Ingo Molnar mingo@kernel.org Cc: Naoya Horiguchi n-horiguchi@ah.jp.nec.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- mm/mempolicy.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index c947014d128a..6196505a1119 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1232,6 +1232,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, unsigned long maxnode) { unsigned long k; + unsigned long t; unsigned long nlongs; unsigned long endmask;
@@ -1248,13 +1249,19 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, else endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
- /* When the user specified more nodes than supported just check - if the non supported part is all zero. */ + /* + * When the user specified more nodes than supported just check + * if the non supported part is all zero. + * + * If maxnode have more longs than MAX_NUMNODES, check + * the bits in that area first. And then go through to + * check the rest bits which equal or bigger than MAX_NUMNODES. + * Otherwise, just check bits [MAX_NUMNODES, maxnode). + */ if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { if (nlongs > PAGE_SIZE/sizeof(long)) return -EINVAL; for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { - unsigned long t; if (get_user(t, nmask + k)) return -EFAULT; if (k == nlongs - 1) { @@ -1267,6 +1274,16 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, endmask = ~0UL; }
+ if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { + unsigned long valid_mask = endmask; + + valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); + if (get_user(t, nmask + nlongs - 1)) + return -EFAULT; + if (t & valid_mask) + return -EINVAL; + } + if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) return -EFAULT; nodes_addr(*nodes)[nlongs-1] &= endmask;
From: Yisheng Xie xieyisheng1@huawei.com
[ Upstream commit 0486a38bcc4749808edbc848f1bcf232042770fc ]
As the migrate_pages() man page states, errno should be set to EINVAL when none of the node IDs specified by new_nodes are online and allowed by the process's current cpuset context, or none of the specified nodes contain memory. However, when tested with the following case:
    new_nodes = 0;
    old_nodes = 0xf;
    ret = migrate_pages(pid, old_nodes, new_nodes, MAX);
ret is 0 and no errno is set. Since new_nodes is empty, we should expect EINVAL as documented.

To fix cases like the one above, this patch checks whether the intersection of the target nodes and the current task_nodes is empty, and then whether the further intersection with node_states[N_MEMORY] is empty.
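In kernel terms, the check added by this patch (see the hunk below) boils down to the following sketch; err has already been set to -EINVAL earlier in the syscall, so a bare goto out_put now propagates EINVAL to userspace:

    task_nodes = cpuset_mems_allowed(current);

    /* intersect the requested target nodes with what the task may use ... */
    nodes_and(*new, *new, task_nodes);
    if (nodes_empty(*new))
        goto out_put;

    /* ... and with the set of nodes that actually have memory */
    nodes_and(*new, *new, node_states[N_MEMORY]);
    if (nodes_empty(*new))
        goto out_put;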
Link: http://lkml.kernel.org/r/1510882624-44342-4-git-send-email-xieyisheng1@huawe... Signed-off-by: Yisheng Xie xieyisheng1@huawei.com Acked-by: Vlastimil Babka vbabka@suse.cz Cc: Andi Kleen ak@linux.intel.com Cc: Chris Salls salls@cs.ucsb.edu Cc: Christopher Lameter cl@linux.com Cc: David Rientjes rientjes@google.com Cc: Ingo Molnar mingo@kernel.org Cc: Naoya Horiguchi n-horiguchi@ah.jp.nec.com Cc: Tan Xiaojun tanxiaojun@huawei.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- mm/mempolicy.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 6196505a1119..77d6f750af3c 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1410,10 +1410,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, goto out_put; }
- if (!nodes_subset(*new, node_states[N_MEMORY])) { - err = -EINVAL; + task_nodes = cpuset_mems_allowed(current); + nodes_and(*new, *new, task_nodes); + if (nodes_empty(*new)) + goto out_put; + + nodes_and(*new, *new, node_states[N_MEMORY]); + if (nodes_empty(*new)) goto out_put; - }
err = security_task_movememory(task); if (err)
From: "Kirill A. Shutemov" kirill.shutemov@linux.intel.com
[ Upstream commit c58f0bb77ed8bf93dfdde762b01cb67eebbdfc29 ]
Patch series "Do not lose dirty bit on THP pages", v4.
Vlastimil noted that pmdp_invalidate() is not atomic and we can lose the dirty and access bits if the CPU sets them after the pmdp dereference, but before set_pmd_at().

The bug can lead to data loss, but the race window is tiny and I haven't seen any reports suggesting that it happens in reality. So I don't think it is worth sending to stable.
Unfortunately, there's no way to address the issue in a generic way. We need to fix all architectures that support THP one-by-one.
All architectures that support THP have to provide an atomic pmdp_invalidate() that returns the previous value.

If the generic implementation of pmdp_invalidate() is used, the architecture needs to provide an atomic pmdp_establish().

pmdp_establish() is not used outside the generic implementation of pmdp_invalidate() so far, but I think this can change in the future.
This patch (of 12):
This is an implementation of pmdp_establish() that is only suitable for an architecture that doesn't have hardware dirty/accessed bits. In this case we cannot race with a CPU setting these bits, so a non-atomic approach is fine.
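An architecture without hardware dirty/accessed bits can then simply alias pmdp_establish() to this generic helper; in the follow-up arch patches of this series that typically looks like the line below (illustrative, not part of this particular patch):

    /* in the architecture's asm/pgtable.h */
    #define pmdp_establish generic_pmdp_establish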
Link: http://lkml.kernel.org/r/20171213105756.69879-2-kirill.shutemov@linux.intel.... Signed-off-by: Kirill A. Shutemov kirill.shutemov@linux.intel.com Cc: Vlastimil Babka vbabka@suse.cz Cc: Andrea Arcangeli aarcange@redhat.com Cc: Michal Hocko mhocko@kernel.org Cc: Aneesh Kumar K.V aneesh.kumar@linux.vnet.ibm.com Cc: Catalin Marinas catalin.marinas@arm.com Cc: David Daney david.daney@cavium.com Cc: David Miller davem@davemloft.net Cc: H. Peter Anvin hpa@zytor.com Cc: Hugh Dickins hughd@google.com Cc: Ingo Molnar mingo@kernel.org Cc: Martin Schwidefsky schwidefsky@de.ibm.com Cc: Nitin Gupta nitin.m.gupta@oracle.com Cc: Ralf Baechle ralf@linux-mips.org Cc: Thomas Gleixner tglx@linutronix.de Cc: Vineet Gupta vgupta@synopsys.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- include/asm-generic/pgtable.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 4814cf971048..25b793325b09 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -237,6 +237,21 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * This is an implementation of pmdp_establish() that is only suitable for an + * architecture that doesn't have hardware dirty/accessed bits. In this case we + * can't race with CPU which sets these bits and non-atomic aproach is fine. + */ +static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + return old_pmd; +} +#endif + #ifndef __HAVE_ARCH_PMDP_INVALIDATE extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp);
From: Mel Gorman mgorman@techsingularity.net
[ Upstream commit 69d763fc6d3aee787a3e8c8c35092b4f4960fa5d ]
Minchan Kim asked the following question -- what lock protects the address_space from being destroyed when a race happens between inode truncation and __isolate_lru_page()? Jan Kara clarified by describing the race as follows:
CPU1                                          CPU2

truncate(inode)                               __isolate_lru_page()
  ...
  truncate_inode_page(mapping, page);
    delete_from_page_cache(page)
      spin_lock_irqsave(&mapping->tree_lock, flags);
        __delete_from_page_cache(page, NULL)
          page_cache_tree_delete(..)
            ...                               mapping = page_mapping(page);
            page->mapping = NULL;
            ...
      spin_unlock_irqrestore(&mapping->tree_lock, flags);
  page_cache_free_page(mapping, page)
    put_page(page)
      if (put_page_testzero(page)) -> false
        - inode now has no pages and can be freed
          including embedded address_space
                                              if (mapping && !mapping->a_ops->migratepage)
                                                - we've dereferenced mapping which is
                                                  potentially already free.
The race is theoretically possible but unlikely. Before the delete_from_page_cache, truncate_cleanup_page is called so the page is likely to be !PageDirty or PageWriteback, which gets skipped by the only caller that checks the mapping in __isolate_lru_page. Even if the race occurs, a substantial amount of work has to happen during a tiny window with no preemption, but it could potentially be done using a virtual machine to artificially slow one CPU or halt it during the critical window.

This patch should eliminate the race with truncation by try-locking the page before dereferencing the mapping and aborting if the lock was not acquired. There was a suggestion from Huang Ying to use RCU as a side-effect to prevent the mapping being freed. However, I do not like that solution as it is an unconventional means of preserving a mapping and it is not a context where rcu_read_lock is obviously protecting rcu data.
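The resulting check in __isolate_lru_page() (see the hunk below) stabilises the mapping under the page lock before deciding whether the dirty page can be migrated:

    if (!trylock_page(page))
        return ret;

    /* truncation holds the page lock until the page leaves the page cache,
     * so the mapping cannot be freed while we hold it */
    mapping = page_mapping(page);
    migrate_dirty = mapping && mapping->a_ops->migratepage;
    unlock_page(page);
    if (!migrate_dirty)
        return ret;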
Link: http://lkml.kernel.org/r/20180104102512.2qos3h5vqzeisrek@techsingularity.net Fixes: c82449352854 ("mm: compaction: make isolate_lru_page() filter-aware again") Signed-off-by: Mel Gorman mgorman@techsingularity.net Acked-by: Minchan Kim minchan@kernel.org Cc: "Huang, Ying" ying.huang@intel.com Cc: Jan Kara jack@suse.cz Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- mm/vmscan.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c index 930f7c67a9c1..d9dc6a48936c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1312,6 +1312,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
if (PageDirty(page)) { struct address_space *mapping; + bool migrate_dirty;
/* ISOLATE_CLEAN means only clean pages */ if (mode & ISOLATE_CLEAN) @@ -1320,10 +1321,19 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode) /* * Only pages without mappings or that have a * ->migratepage callback are possible to migrate - * without blocking + * without blocking. However, we can be racing with + * truncation so it's necessary to lock the page + * to stabilise the mapping as truncation holds + * the page lock until after the page is removed + * from the page cache. */ + if (!trylock_page(page)) + return ret; + mapping = page_mapping(page); - if (mapping && !mapping->a_ops->migratepage) + migrate_dirty = mapping && mapping->a_ops->migratepage; + unlock_page(page); + if (!migrate_dirty) return ret; } }
From: Alex Estrin alex.estrin@intel.com
[ Upstream commit 1029361084d18cc270f64dfd39529fafa10cfe01 ]
On reboot the SM can program the port pkey table before ipoib has registered its event handler, which could result in a missed pkey event and leave the root interface with the initial pkey value from index 0.

Since an OPA port starts with an invalid pkey in index 0, the root interface will fail to initialize and stay down with the no-carrier flag.

For IB, the ipoib interface may end up with a pkey different from the value opensm put in pkey table index 0, resulting in connectivity issues (different mcast groups, for example).

Close the window by calling the event handler after registration to make sure the ipoib pkey is in sync with the port pkey table.
Reviewed-by: Mike Marciniszyn mike.marciniszyn@intel.com Reviewed-by: Ira Weiny ira.weiny@intel.com Signed-off-by: Alex Estrin alex.estrin@intel.com Signed-off-by: Dennis Dalessandro dennis.dalessandro@intel.com Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 37b42447045d..fcb18b11db75 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1953,6 +1953,9 @@ static struct net_device *ipoib_add_port(const char *format, goto event_failed; }
+ /* call event handler to ensure pkey in sync */ + queue_work(ipoib_workqueue, &priv->flush_heavy); + result = register_netdev(priv->dev); if (result) { printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
From: Arnd Bergmann arnd@arndb.de
[ Upstream commit 328008a72d38b5bde6491e463405c34a81a65d3e ]
The declaration for swsusp_arch_resume marks it as 'asmlinkage', but the definition in x86-32 does not, and it fails to include the header with the declaration. This leads to a warning when building with link-time-optimizations:
kernel/power/power.h:108:23: error: type of 'swsusp_arch_resume' does not match original declaration [-Werror=lto-type-mismatch] extern asmlinkage int swsusp_arch_resume(void); ^ arch/x86/power/hibernate_32.c:148:0: note: 'swsusp_arch_resume' was previously declared here int swsusp_arch_resume(void)
This moves the declaration into a globally visible header file and fixes up both x86 definitions to match it.
Signed-off-by: Arnd Bergmann arnd@arndb.de Signed-off-by: Thomas Gleixner tglx@linutronix.de Cc: Len Brown len.brown@intel.com Cc: Andi Kleen ak@linux.intel.com Cc: Nicolas Pitre nico@linaro.org Cc: linux-pm@vger.kernel.org Cc: "Rafael J. Wysocki" rjw@rjwysocki.net Cc: Pavel Machek pavel@ucw.cz Cc: Bart Van Assche bart.vanassche@wdc.com Link: https://lkml.kernel.org/r/20180202145634.200291-2-arnd@arndb.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/x86/power/hibernate_32.c | 2 +- arch/x86/power/hibernate_64.c | 2 +- include/linux/suspend.h | 2 ++ kernel/power/power.h | 3 --- 4 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 291226b952a9..77ac4e4deb16 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -142,7 +142,7 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir) #endif }
-int swsusp_arch_resume(void) +asmlinkage int swsusp_arch_resume(void) { int error;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 009947d419a6..0e0c773edffc 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -78,7 +78,7 @@ static int set_up_temporary_mappings(void) return 0; }
-int swsusp_arch_resume(void) +asmlinkage int swsusp_arch_resume(void) { int error;
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 8b6ec7ef0854..4a69bca7c6ab 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -377,6 +377,8 @@ extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); extern unsigned long get_safe_page(gfp_t gfp_mask); +extern asmlinkage int swsusp_arch_suspend(void); +extern asmlinkage int swsusp_arch_resume(void);
extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); extern int hibernate(void); diff --git a/kernel/power/power.h b/kernel/power/power.h index caadb566e82b..25367fc0b152 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -85,9 +85,6 @@ extern int in_suspend; extern dev_t swsusp_resume_device; extern sector_t swsusp_resume_block;
-extern asmlinkage int swsusp_arch_suspend(void); -extern asmlinkage int swsusp_arch_resume(void); - extern int create_basic_memory_bitmaps(void); extern void free_basic_memory_bitmaps(void); extern int hibernate_preallocate_memory(void);
From: Jean Delvare jdelvare@suse.de
[ Upstream commit a7770ae194569e96a93c48aceb304edded9cc648 ]
The handling of empty DMI strings looks quite broken to me:
 * Strings from 1 to 7 spaces are not considered empty.
 * True empty DMI strings (string index set to 0) are not considered empty, and result in allocating a 0-char string.
 * Strings with invalid index also result in allocating a 0-char string.
 * Strings starting with 8 spaces are all considered empty, even if non-space characters follow (sounds like a weird thing to do, but I have actually seen occurrences of this in DMI tables before.)
 * Strings which are considered empty are reported as 8 spaces, instead of being actually empty.
Some of these issues are the result of an off-by-one error in the memcmp, the rest are incorrect by design.
So let's get it square: missing strings and strings made of only spaces, regardless of their length, should be treated as empty and no memory should be allocated for them. All other strings are non-empty and should be allocated.
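The reworked dmi_string_nosave() therefore looks roughly like this (reassembled from the diff below):

    const u8 *bp = ((u8 *) dm) + dm->length;
    const u8 *nsp;

    if (s) {
        while (--s > 0 && *bp)
            bp += strlen(bp) + 1;

        /* Strings containing only spaces are considered empty */
        nsp = bp;
        while (*nsp == ' ')
            nsp++;
        if (*nsp != '\0')
            return bp;
    }

    return dmi_empty_string;

Missing strings (index 0), out-of-range indexes and all-space strings all fall through to the shared empty string, so nothing is allocated for them.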
Signed-off-by: Jean Delvare jdelvare@suse.de Fixes: 79da4721117f ("x86: fix DMI out of memory problems") Cc: Parag Warudkar parag.warudkar@gmail.com Cc: Ingo Molnar mingo@kernel.org Cc: Thomas Gleixner tglx@linutronix.de Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/firmware/dmi_scan.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index e8eea181a601..ad860de53c1f 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -18,7 +18,7 @@ EXPORT_SYMBOL_GPL(dmi_kobj); * of and an antecedent to, SMBIOS, which stands for System * Management BIOS. See further: http://www.dmtf.org/standards */ -static const char dmi_empty_string[] = " "; +static const char dmi_empty_string[] = "";
static u32 dmi_ver __initdata; static u32 dmi_len; @@ -44,25 +44,21 @@ static int dmi_memdev_nr; static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) { const u8 *bp = ((u8 *) dm) + dm->length; + const u8 *nsp;
if (s) { - s--; - while (s > 0 && *bp) { + while (--s > 0 && *bp) bp += strlen(bp) + 1; - s--; - } - - if (*bp != 0) { - size_t len = strlen(bp)+1; - size_t cmp_len = len > 8 ? 8 : len;
- if (!memcmp(bp, dmi_empty_string, cmp_len)) - return dmi_empty_string; + /* Strings containing only spaces are considered empty */ + nsp = bp; + while (*nsp == ' ') + nsp++; + if (*nsp != '\0') return bp; - } }
- return ""; + return dmi_empty_string; }
static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
From: Chen Yu yu.c.chen@intel.com
[ Upstream commit ba1edb9a5125a617d612f98eead14b9b84e75c3a ]
The following warning was triggered after resumed from S3 - if all the nonboot CPUs were put offline before suspend:
[ 1840.329515] unchecked MSR access error: RDMSR from 0x771 at rIP: 0xffffffff86061e3a (native_read_msr+0xa/0x30) [ 1840.329516] Call Trace: [ 1840.329521] __rdmsr_on_cpu+0x33/0x50 [ 1840.329525] generic_exec_single+0x81/0xb0 [ 1840.329527] smp_call_function_single+0xd2/0x100 [ 1840.329530] ? acpi_ds_result_pop+0xdd/0xf2 [ 1840.329532] ? acpi_ds_create_operand+0x215/0x23c [ 1840.329534] rdmsrl_on_cpu+0x57/0x80 [ 1840.329536] ? cpumask_next+0x1b/0x20 [ 1840.329538] ? rdmsrl_on_cpu+0x57/0x80 [ 1840.329541] intel_pstate_update_perf_limits+0xf3/0x220 [ 1840.329544] ? notifier_call_chain+0x4a/0x70 [ 1840.329546] intel_pstate_set_policy+0x4e/0x150 [ 1840.329548] cpufreq_set_policy+0xcd/0x2f0 [ 1840.329550] cpufreq_update_policy+0xb2/0x130 [ 1840.329552] ? cpufreq_update_policy+0x130/0x130 [ 1840.329556] acpi_processor_ppc_has_changed+0x65/0x80 [ 1840.329558] acpi_processor_notify+0x80/0x100 [ 1840.329561] acpi_ev_notify_dispatch+0x44/0x5c [ 1840.329563] acpi_os_execute_deferred+0x14/0x20 [ 1840.329565] process_one_work+0x193/0x3c0 [ 1840.329567] worker_thread+0x35/0x3b0 [ 1840.329569] kthread+0x125/0x140 [ 1840.329571] ? process_one_work+0x3c0/0x3c0 [ 1840.329572] ? kthread_park+0x60/0x60 [ 1840.329575] ? do_syscall_64+0x67/0x180 [ 1840.329577] ret_from_fork+0x25/0x30 [ 1840.329585] unchecked MSR access error: WRMSR to 0x774 (tried to write 0x0000000000000000) at rIP: 0xffffffff86061f78 (native_write_msr+0x8/0x30) [ 1840.329586] Call Trace: [ 1840.329587] __wrmsr_on_cpu+0x37/0x40 [ 1840.329589] generic_exec_single+0x81/0xb0 [ 1840.329592] smp_call_function_single+0xd2/0x100 [ 1840.329594] ? acpi_ds_create_operand+0x215/0x23c [ 1840.329595] ? cpumask_next+0x1b/0x20 [ 1840.329597] wrmsrl_on_cpu+0x57/0x70 [ 1840.329598] ? rdmsrl_on_cpu+0x57/0x80 [ 1840.329599] ? wrmsrl_on_cpu+0x57/0x70 [ 1840.329602] intel_pstate_hwp_set+0xd3/0x150 [ 1840.329604] intel_pstate_set_policy+0x119/0x150 [ 1840.329606] cpufreq_set_policy+0xcd/0x2f0 [ 1840.329607] cpufreq_update_policy+0xb2/0x130 [ 1840.329610] ? cpufreq_update_policy+0x130/0x130 [ 1840.329613] acpi_processor_ppc_has_changed+0x65/0x80 [ 1840.329615] acpi_processor_notify+0x80/0x100 [ 1840.329617] acpi_ev_notify_dispatch+0x44/0x5c [ 1840.329619] acpi_os_execute_deferred+0x14/0x20 [ 1840.329620] process_one_work+0x193/0x3c0 [ 1840.329622] worker_thread+0x35/0x3b0 [ 1840.329624] kthread+0x125/0x140 [ 1840.329625] ? process_one_work+0x3c0/0x3c0 [ 1840.329626] ? kthread_park+0x60/0x60 [ 1840.329628] ? do_syscall_64+0x67/0x180 [ 1840.329631] ret_from_fork+0x25/0x30
This is because if there is only one online CPU, the package-wide MSR_PM_ENABLE cannot be enabled again after resume: intel_pstate_hwp_enable() is only invoked during an AP's online process after resume, so if no AP comes online, HWP remains disabled after resume (the BIOS has disabled it in S3). Then, if a _PPC change notification that touches an HWP register arrives during this stage, the warning is triggered.

Since we don't call acpi_processor_register_performance() when HWP is enabled, pr->performance will be NULL. When it is NULL, we don't need to do the _PPC change notification.
Reported-by: Doug Smythies dsmythies@telus.net Suggested-by: Srinivas Pandruvada srinivas.pandruvada@linux.intel.com Signed-off-by: Yu Chen yu.c.chen@intel.com Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/acpi/processor_perflib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index bb01dea39fdc..9825780a1cd2 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -161,7 +161,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) { int ret;
- if (ignore_ppc) { + if (ignore_ppc || !pr->performance) { /* * Only when it is notification event, the _OST object * will be evaluated. Otherwise it is skipped.
From: Yonghong Song yhs@fb.com
[ Upstream commit 09584b406742413ac4c8d7e030374d4daa045b69 ]
With CONFIG_BPF_JIT_ALWAYS_ON is defined in the config file, tools/testing/selftests/bpf/test_kmod.sh failed like below: [root@localhost bpf]# ./test_kmod.sh sysctl: setting key "net.core.bpf_jit_enable": Invalid argument [ JIT enabled:0 hardened:0 ] [ 132.175681] test_bpf: #297 BPF_MAXINSNS: Jump, gap, jump, ... FAIL to prog_create err=-524 len=4096 [ 132.458834] test_bpf: Summary: 348 PASSED, 1 FAILED, [340/340 JIT'ed] [ JIT enabled:1 hardened:0 ] [ 133.456025] test_bpf: #297 BPF_MAXINSNS: Jump, gap, jump, ... FAIL to prog_create err=-524 len=4096 [ 133.730935] test_bpf: Summary: 348 PASSED, 1 FAILED, [340/340 JIT'ed] [ JIT enabled:1 hardened:1 ] [ 134.769730] test_bpf: #297 BPF_MAXINSNS: Jump, gap, jump, ... FAIL to prog_create err=-524 len=4096 [ 135.050864] test_bpf: Summary: 348 PASSED, 1 FAILED, [340/340 JIT'ed] [ JIT enabled:1 hardened:2 ] [ 136.442882] test_bpf: #297 BPF_MAXINSNS: Jump, gap, jump, ... FAIL to prog_create err=-524 len=4096 [ 136.821810] test_bpf: Summary: 348 PASSED, 1 FAILED, [340/340 JIT'ed] [root@localhost bpf]#
test_kmod.sh loads and removes test_bpf.ko multiple times with different settings of the sysctls net.core.bpf_jit_{enable,harden}. The failing test #297 of test_bpf.ko is designed such that JIT always fails.
Commit 290af86629b2 (bpf: introduce BPF_JIT_ALWAYS_ON config) introduced the following tightening logic:

    ...
    if (!bpf_prog_is_dev_bound(fp->aux)) {
        fp = bpf_int_jit_compile(fp);
    #ifdef CONFIG_BPF_JIT_ALWAYS_ON
        if (!fp->jited) {
            *err = -ENOTSUPP;
            return fp;
        }
    #endif
    ...

With this logic, Test #297 always gets return value -ENOTSUPP when CONFIG_BPF_JIT_ALWAYS_ON is defined, causing the test failure.
This patch fixed the failure by marking Test #297 as expected failure when CONFIG_BPF_JIT_ALWAYS_ON is defined.
Fixes: 290af86629b2 (bpf: introduce BPF_JIT_ALWAYS_ON config) Signed-off-by: Yonghong Song yhs@fb.com Signed-off-by: Daniel Borkmann daniel@iogearbox.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- lib/test_bpf.c | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index b7908d949a5f..b1495f586f29 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -83,6 +83,7 @@ struct bpf_test { __u32 result; } test[MAX_SUBTESTS]; int (*fill_helper)(struct bpf_test *self); + int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */ __u8 frag_data[MAX_DATA]; };
@@ -1780,7 +1781,9 @@ static struct bpf_test tests[] = { }, CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, { }, - { } + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, }, { "check: div_k_0", @@ -1790,7 +1793,9 @@ static struct bpf_test tests[] = { }, CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, { }, - { } + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, }, { "check: unknown insn", @@ -1801,7 +1806,9 @@ static struct bpf_test tests[] = { }, CLASSIC | FLAG_EXPECTED_FAIL, { }, - { } + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, }, { "check: out of range spill/fill", @@ -1811,7 +1818,9 @@ static struct bpf_test tests[] = { }, CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, { }, - { } + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, }, { "JUMPS + HOLES", @@ -1903,6 +1912,8 @@ static struct bpf_test tests[] = { CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, { }, { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, }, { "check: LDX + RET X", @@ -1913,6 +1924,8 @@ static struct bpf_test tests[] = { CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, { }, { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, }, { /* Mainly checking JIT here. */ "M[]: alt STX + LDX", @@ -2087,6 +2100,8 @@ static struct bpf_test tests[] = { CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, { }, { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, }, { /* Passes checker but fails during runtime. */ "LD [SKF_AD_OFF-1]", @@ -4462,6 +4477,7 @@ static struct bpf_test tests[] = { { }, { }, .fill_helper = bpf_fill_maxinsns4, + .expected_errcode = -EINVAL, }, { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Very long jump", @@ -4517,10 +4533,15 @@ static struct bpf_test tests[] = { { "BPF_MAXINSNS: Jump, gap, jump, ...", { }, +#ifdef CONFIG_BPF_JIT_ALWAYS_ON + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, +#else CLASSIC | FLAG_NO_DATA, +#endif { }, { { 0, 0xababcbac } }, .fill_helper = bpf_fill_maxinsns11, + .expected_errcode = -ENOTSUPP, }, { "BPF_MAXINSNS: ld_abs+get_processor_id", @@ -5290,7 +5311,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
*err = bpf_prog_create(&fp, &fprog); if (tests[which].aux & FLAG_EXPECTED_FAIL) { - if (*err == -EINVAL) { + if (*err == tests[which].expected_errcode) { pr_cont("PASS\n"); /* Verifier rejected filter as expected. */ *err = 0;
From: Matt Redfearn matt.redfearn@mips.com
[ Upstream commit 0cde5b44a30f1daaef1c34e08191239dc63271c4 ]
When commit b27311e1cace ("MIPS: TXx9: Add RBTX4939 board support") added board support for the RBTX4939, it added a call to led_classdev_register even if the LED class is built as a module. Built-in arch code cannot call module code directly like this. Commit b33b44073734 ("MIPS: TXX9: use IS_ENABLED() macro") subsequently changed the inclusion of this code to a single check that CONFIG_LEDS_CLASS is either builtin or a module, but the same issue remains.
This leads to MIPS allmodconfig builds failing when CONFIG_MACH_TX49XX=y is set:
arch/mips/txx9/rbtx4939/setup.o: In function `rbtx4939_led_probe': setup.c:(.init.text+0xc0): undefined reference to `of_led_classdev_register' make: *** [Makefile:999: vmlinux] Error 1
Fix this by using the IS_BUILTIN() macro instead.
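For reference, the kconfig helpers from include/linux/kconfig.h differ as follows, which is why IS_BUILTIN() is the correct guard for code that is always built into the kernel (minimal illustration):

    /*
     * IS_BUILTIN(CONFIG_FOO) - true only when CONFIG_FOO=y
     * IS_MODULE(CONFIG_FOO)  - true only when CONFIG_FOO=m
     * IS_ENABLED(CONFIG_FOO) - true when CONFIG_FOO is =y or =m
     *
     * Built-in board code may only call led_classdev_register() when the
     * LED class itself is built in, hence:
     */
    #if IS_BUILTIN(CONFIG_LEDS_CLASS)
    /* ... LED registration code ... */
    #endif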
Fixes: b27311e1cace ("MIPS: TXx9: Add RBTX4939 board support") Signed-off-by: Matt Redfearn matt.redfearn@mips.com Reviewed-by: James Hogan jhogan@kernel.org Cc: Ralf Baechle ralf@linux-mips.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/18544/ Signed-off-by: James Hogan jhogan@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/mips/txx9/rbtx4939/setup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index 37030409745c..586ca7ea3e7c 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)
#define RBTX4939_MAX_7SEGLEDS 8
-#if IS_ENABLED(CONFIG_LEDS_CLASS) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) static u8 led_val[RBTX4939_MAX_7SEGLEDS]; struct rbtx4939_led_data { struct led_classdev cdev; @@ -261,7 +261,7 @@ static inline void rbtx4939_led_setup(void)
static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val) { -#if IS_ENABLED(CONFIG_LEDS_CLASS) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) unsigned long flags; local_irq_save(flags); /* bit7: reserved for LED class */
From: Ross Lagerwall ross.lagerwall@citrix.com
[ Upstream commit f599c64fdf7d9c108e8717fb04bc41c680120da4 ]
When a netfront device is set up it registers a netdev fairly early on, before it has set up the queues and is actually usable. A userspace tool like NetworkManager will immediately try to open it and access its state as soon as it appears. The bug can be reproduced by hotplugging VIFs until the VM runs out of grant refs. It registers the netdev but fails to set up any queues (since there are no more grant refs). In the meantime, NetworkManager opens the device and the kernel crashes trying to access the queues (of which there are none).
Fix this in two ways:

* For initial setup, register the netdev much later, after the queues are set up. This avoids the race entirely.

* During a suspend/resume cycle, the frontend reconnects to the backend and the queues are recreated. It is possible (though highly unlikely) to race with something opening the device and accessing the queues after they have been destroyed but before they have been recreated. Extend the region covered by the rtnl semaphore to protect against this race. There is a possibility that we fail to recreate the queues, so check for this in the open function (see the sketch below).
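That open-function check is the small guard sketched here; it shows how xennet_open() now bails out when the queues are absent (reassembled from the first hunk of the diff; the np local is the usual netdev_priv() pointer):

    static int xennet_open(struct net_device *dev)
    {
        struct netfront_info *np = netdev_priv(dev);
        ...
        /* queues have not been (re)created yet */
        if (!np->queues)
            return -ENODEV;
        ...
    }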
Signed-off-by: Ross Lagerwall ross.lagerwall@citrix.com Reviewed-by: Boris Ostrovsky boris.ostrovsky@oracle.com Signed-off-by: Juergen Gross jgross@suse.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/net/xen-netfront.c | 46 ++++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 0b8d2655985f..d612ed0a821c 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -342,6 +342,9 @@ static int xennet_open(struct net_device *dev) unsigned int i = 0; struct netfront_queue *queue = NULL;
+ if (!np->queues) + return -ENODEV; + for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; napi_enable(&queue->napi); @@ -1363,18 +1366,8 @@ static int netfront_probe(struct xenbus_device *dev, #ifdef CONFIG_SYSFS info->netdev->sysfs_groups[0] = &xennet_dev_group; #endif - err = register_netdev(info->netdev); - if (err) { - pr_warn("%s: register_netdev err=%d\n", __func__, err); - goto fail; - }
return 0; - - fail: - xennet_free_netdev(netdev); - dev_set_drvdata(&dev->dev, NULL); - return err; }
static void xennet_end_access(int ref, void *page) @@ -1743,8 +1736,6 @@ static void xennet_destroy_queues(struct netfront_info *info) { unsigned int i;
- rtnl_lock(); - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { struct netfront_queue *queue = &info->queues[i];
@@ -1753,8 +1744,6 @@ static void xennet_destroy_queues(struct netfront_info *info) netif_napi_del(&queue->napi); }
- rtnl_unlock(); - kfree(info->queues); info->queues = NULL; } @@ -1770,8 +1759,6 @@ static int xennet_create_queues(struct netfront_info *info, if (!info->queues) return -ENOMEM;
- rtnl_lock(); - for (i = 0; i < *num_queues; i++) { struct netfront_queue *queue = &info->queues[i];
@@ -1780,7 +1767,7 @@ static int xennet_create_queues(struct netfront_info *info,
ret = xennet_init_queue(queue); if (ret < 0) { - dev_warn(&info->netdev->dev, + dev_warn(&info->xbdev->dev, "only created %d queues\n", i); *num_queues = i; break; @@ -1794,10 +1781,8 @@ static int xennet_create_queues(struct netfront_info *info,
netif_set_real_num_tx_queues(info->netdev, *num_queues);
- rtnl_unlock(); - if (*num_queues == 0) { - dev_err(&info->netdev->dev, "no queues\n"); + dev_err(&info->xbdev->dev, "no queues\n"); return -EINVAL; } return 0; @@ -1839,6 +1824,7 @@ static int talk_to_netback(struct xenbus_device *dev, goto out; }
+ rtnl_lock(); if (info->queues) xennet_destroy_queues(info);
@@ -1849,6 +1835,7 @@ static int talk_to_netback(struct xenbus_device *dev, info->queues = NULL; goto out; } + rtnl_unlock();
/* Create shared ring, alloc event channel -- for each queue */ for (i = 0; i < num_queues; ++i) { @@ -1945,8 +1932,10 @@ abort_transaction_no_dev_fatal: xenbus_transaction_end(xbt, 1); destroy_ring: xennet_disconnect_backend(info); + rtnl_lock(); xennet_destroy_queues(info); out: + rtnl_unlock(); device_unregister(&dev->dev); return err; } @@ -1982,6 +1971,15 @@ static int xennet_connect(struct net_device *dev) netdev_update_features(dev); rtnl_unlock();
+ if (dev->reg_state == NETREG_UNINITIALIZED) { + err = register_netdev(dev); + if (err) { + pr_warn("%s: register_netdev err=%d\n", __func__, err); + device_unregister(&np->xbdev->dev); + return err; + } + } + /* * All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver @@ -2167,10 +2165,14 @@ static int xennet_remove(struct xenbus_device *dev)
xennet_disconnect_backend(info);
- unregister_netdev(info->netdev); + if (info->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(info->netdev);
- if (info->queues) + if (info->queues) { + rtnl_lock(); xennet_destroy_queues(info); + rtnl_unlock(); + } xennet_free_netdev(info->netdev);
return 0;
From: Ross Lagerwall ross.lagerwall@citrix.com
[ Upstream commit 3ac7292a25db1c607a50752055a18aba32ac2176 ]
The page given to gnttab_end_foreign_access() to free could be a compound page so use put_page() instead of free_page() since it can handle both compound and single pages correctly.
This bug was discovered when migrating a Xen VM with several VIFs and CONFIG_DEBUG_VM enabled. It hits a BUG usually after fewer than 10 iterations. All netfront devices disconnect from the backend during a suspend/resume and this will call gnttab_end_foreign_access() if a netfront queue has an outstanding skb. The mismatch between calling get_page() and free_page() on a compound page causes a reference counting error which is detected when DEBUG_VM is enabled.
Signed-off-by: Ross Lagerwall ross.lagerwall@citrix.com Reviewed-by: Boris Ostrovsky boris.ostrovsky@oracle.com Signed-off-by: Juergen Gross jgross@suse.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/xen/grant-table.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index c49f79ed58c5..4b7ce442d8e5 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -328,7 +328,7 @@ static void gnttab_handle_deferred(unsigned long unused) if (entry->page) { pr_debug("freeing g.e. %#x (pfn %#lx)\n", entry->ref, page_to_pfn(entry->page)); - __free_page(entry->page); + put_page(entry->page); } else pr_info("freeing g.e. %#x\n", entry->ref); kfree(entry); @@ -384,7 +384,7 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly, if (gnttab_end_foreign_access_ref(ref, readonly)) { put_free_entry(ref); if (page != 0) - free_page(page); + put_page(virt_to_page(page)); } else gnttab_add_deferred(ref, readonly, page ? virt_to_page(page) : NULL);
From: Guanglei Li guanglei.li@oracle.com
[ Upstream commit 2c0aa08631b86a4678dbc93b9caa5248014b4458 ]
Scenario:
1. The port goes down and a failover is performed.
2. An application does an rds_bind() syscall.
PID: 47039 TASK: ffff89887e2fe640 CPU: 47 COMMAND: "kworker/u:6" #0 [ffff898e35f159f0] machine_kexec at ffffffff8103abf9 #1 [ffff898e35f15a60] crash_kexec at ffffffff810b96e3 #2 [ffff898e35f15b30] oops_end at ffffffff8150f518 #3 [ffff898e35f15b60] no_context at ffffffff8104854c #4 [ffff898e35f15ba0] __bad_area_nosemaphore at ffffffff81048675 #5 [ffff898e35f15bf0] bad_area_nosemaphore at ffffffff810487d3 #6 [ffff898e35f15c00] do_page_fault at ffffffff815120b8 #7 [ffff898e35f15d10] page_fault at ffffffff8150ea95 [exception RIP: unknown or invalid address] RIP: 0000000000000000 RSP: ffff898e35f15dc8 RFLAGS: 00010282 RAX: 00000000fffffffe RBX: ffff889b77f6fc00 RCX:ffffffff81c99d88 RDX: 0000000000000000 RSI: ffff896019ee08e8 RDI:ffff889b77f6fc00 RBP: ffff898e35f15df0 R8: ffff896019ee08c8 R9:0000000000000000 R10: 0000000000000400 R11: 0000000000000000 R12:ffff896019ee08c0 R13: ffff889b77f6fe68 R14: ffffffff81c99d80 R15: ffffffffa022a1e0 ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 #8 [ffff898e35f15dc8] cma_ndev_work_handler at ffffffffa022a228 [rdma_cm] #9 [ffff898e35f15df8] process_one_work at ffffffff8108a7c6 #10 [ffff898e35f15e58] worker_thread at ffffffff8108bda0 #11 [ffff898e35f15ee8] kthread at ffffffff81090fe6
PID: 45659 TASK: ffff880d313d2500 CPU: 31 COMMAND: "oracle_45659_ap" #0 [ffff881024ccfc98] __schedule at ffffffff8150bac4 #1 [ffff881024ccfd40] schedule at ffffffff8150c2cf #2 [ffff881024ccfd50] __mutex_lock_slowpath at ffffffff8150cee7 #3 [ffff881024ccfdc0] mutex_lock at ffffffff8150cdeb #4 [ffff881024ccfde0] rdma_destroy_id at ffffffffa022a027 [rdma_cm] #5 [ffff881024ccfe10] rds_ib_laddr_check at ffffffffa0357857 [rds_rdma] #6 [ffff881024ccfe50] rds_trans_get_preferred at ffffffffa0324c2a [rds] #7 [ffff881024ccfe80] rds_bind at ffffffffa031d690 [rds] #8 [ffff881024ccfeb0] sys_bind at ffffffff8142a670
PID: 45659                               PID: 47039

rds_ib_laddr_check
  /* create id_priv with a null event_handler */
  rdma_create_id
  rdma_bind_addr
    cma_acquire_dev
      /* add id_priv to cma_dev->id_list */
      cma_attach_to_dev
                                          cma_ndev_work_handler
                                            /* event_handler is null */
                                            id_priv->id.event_handler
Signed-off-by: Guanglei Li guanglei.li@oracle.com Signed-off-by: Honglei Wang honglei.wang@oracle.com Reviewed-by: Junxiao Bi junxiao.bi@oracle.com Reviewed-by: Yanjun Zhu yanjun.zhu@oracle.com Reviewed-by: Leon Romanovsky leonro@mellanox.com Acked-by: Santosh Shilimkar santosh.shilimkar@oracle.com Acked-by: Doug Ledford dledford@redhat.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- net/rds/ib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/net/rds/ib.c b/net/rds/ib.c index f222885ac0c7..ed51ccc84b3a 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -336,7 +336,8 @@ static int rds_ib_laddr_check(struct net *net, __be32 addr) /* Create a CMA ID and try to bind it. This catches both * IB and iWARP capable NICs. */ - cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); + cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, + NULL, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) return PTR_ERR(cm_id);
From: Will Deacon will.deacon@arm.com
[ Upstream commit 202fb4ef81e3ec765c23bd1e6746a5c25b797d0e ]
If the spinlock "next" ticket wraps around between the initial LDR and the cmpxchg in the LSE version of spin_trylock, then we can erroneously think that we have successfuly acquired the lock because we only check whether the next ticket return by the cmpxchg is equal to the owner ticket in our updated lock word.
This patch fixes the issue by performing a full 32-bit check of the lock word when trying to determine whether or not the CASA instruction updated memory.
Reported-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Will Deacon will.deacon@arm.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- arch/arm64/include/asm/spinlock.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 499e8de33a00..fbbd7fb83fd6 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -94,8 +94,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) " cbnz %w1, 1f\n" " add %w1, %w0, %3\n" " casa %w0, %w1, %2\n" - " and %w1, %w1, #0xffff\n" - " eor %w1, %w1, %w0, lsr #16\n" + " sub %w1, %w1, %3\n" + " eor %w1, %w1, %w0\n" "1:") : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) : "I" (1 << TICKET_SHIFT)
From: Alexey Dobriyan adobriyan@gmail.com
[ Upstream commit ac7f1061c2c11bb8936b1b6a94cdb48de732f7a4 ]
Current code does:
if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
However sscanf() is broken garbage.
It silently accepts whitespace between format specifiers (did you know that?).
It silently accepts valid strings which result in integer overflow.
Do not use sscanf() for any even remotely reliable parsing code.
OK
# readlink '/proc/1/map_files/55a23af39000-55a23b05b000'
/lib/systemd/systemd

broken
# readlink '/proc/1/map_files/ 55a23af39000-55a23b05b000'
/lib/systemd/systemd

broken
# readlink '/proc/1/map_files/55a23af39000-55a23b05b000 '
/lib/systemd/systemd

very broken
# readlink '/proc/1/map_files/1000000000000000055a23af39000-55a23b05b000'
/lib/systemd/systemd
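For illustration only, a minimal userspace sketch of the two failure modes above (glibc sscanf(), not the in-kernel vsscanf(), but it misbehaves the same way for these inputs; the hex strings are just sample values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long start, end;

		/* leading whitespace before the first hex field is silently skipped */
		if (sscanf(" 55a23af39000-55a23b05b000", "%lx-%lx", &start, &end) == 2)
			printf("whitespace accepted: %lx-%lx\n", start, end);

		/* a value far too wide for unsigned long still counts as a full match */
		if (sscanf("1000000000000000055a23af39000-55a23b05b000",
			   "%lx-%lx", &start, &end) == 2)
			printf("overflow accepted: %lx-%lx\n", start, end);

		return 0;
	}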
Andrei said:
: This patch breaks criu. It was a bug in criu. And this bug is on a minor
: path, which works when memfd_create() isn't available. It is a reason why
: I ask to not backport this patch to stable kernels.
:
: In CRIU this bug can be triggered, only if this patch will be backported
: to a kernel which version is lower than v3.16.
Link: http://lkml.kernel.org/r/20171120212706.GA14325@avx2 Signed-off-by: Alexey Dobriyan adobriyan@gmail.com Cc: Pavel Emelyanov xemul@openvz.org Cc: Andrei Vagin avagin@virtuozzo.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/proc/base.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/fs/proc/base.c b/fs/proc/base.c index dd732400578e..2e6266944253 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -94,6 +94,8 @@ #include "internal.h" #include "fd.h"
+#include "../../lib/kstrtox.h" + /* NOTE: * Implementing inode permission operations in /proc is almost * certainly an error. Permission checks need to happen during @@ -1829,8 +1831,33 @@ end_instantiate: static int dname_to_vma_addr(struct dentry *dentry, unsigned long *start, unsigned long *end) { - if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2) + const char *str = dentry->d_name.name; + unsigned long long sval, eval; + unsigned int len; + + len = _parse_integer(str, 16, &sval); + if (len & KSTRTOX_OVERFLOW) + return -EINVAL; + if (sval != (unsigned long)sval) return -EINVAL; + str += len; + + if (*str != '-') + return -EINVAL; + str++; + + len = _parse_integer(str, 16, &eval); + if (len & KSTRTOX_OVERFLOW) + return -EINVAL; + if (eval != (unsigned long)eval) + return -EINVAL; + str += len; + + if (*str != '\0') + return -EINVAL; + + *start = sval; + *end = eval;
return 0; }
From: Arnd Bergmann arnd@arndb.de
[ Upstream commit ade7db991b47ab3016a414468164f4966bd08202 ]
This bug was fixed before, but came up again with the latest compiler in another function:
fs/cifs/cifssmb.c: In function 'CIFSSMBSetEA':
fs/cifs/cifssmb.c:6362:3: error: 'strncpy' offset 8 is out of the bounds [0, 4] [-Werror=array-bounds]
   strncpy(parm_data->list[0].name, ea_name, name_len);
Let's apply the same fix that was used for the other instances.
Fixes: b2a3ad9ca502 ("cifs: silence compiler warnings showing up with gcc-4.7.0") Signed-off-by: Arnd Bergmann arnd@arndb.de Signed-off-by: Steve French smfrench@gmail.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/cifs/cifssmb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 0c92af11f4f4..8632380d2b94 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -6421,9 +6421,7 @@ SetEARetry: pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_EA);
- parm_data = - (struct fealist *) (((char *) &pSMB->hdr.Protocol) + - offset); + parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1;
From: Coly Li colyli@suse.de
[ Upstream commit 99361bbf26337186f02561109c17a4c4b1a7536a ]
Kernel thread routine bch_writeback_thread() has the following code block,
447         down_write(&dc->writeback_lock);
448~450     if (check conditions) {
451                 up_write(&dc->writeback_lock);
452                 set_current_state(TASK_INTERRUPTIBLE);
453
454                 if (kthread_should_stop())
455                         return 0;
456
457                 schedule();
458                 continue;
459         }
If the condition check is true, the task state is set to TASK_INTERRUPTIBLE and schedule() is called to wait for others to wake it up.
There are two issues in the current code:
1) The task state is set to TASK_INTERRUPTIBLE after the condition checks. If another process changes the condition and calls wake_up_process(dc->writeback_thread), then at line 452 the task state is set back to TASK_INTERRUPTIBLE and the writeback kernel thread loses its chance to be woken up.
2) At line 454, if kthread_should_stop() is true, the writeback kernel thread returns to kernel/kthread.c:kthread() in TASK_INTERRUPTIBLE state and calls do_exit(). It is not good to enter do_exit() with task state TASK_INTERRUPTIBLE: on the following code path might_sleep() is called, and __might_sleep() reports the warning "WARNING: do not call blocking ops when !TASK_RUNNING; state=1 set at [xxxx]".
For the first issue, the task state should be set before the condition checks. Indeed, because dc->writeback_lock is required when modifying any of the conditions, calling set_current_state() inside the code block where dc->writeback_lock is held is safe. But this is quite implicit, so I still move set_current_state() before all the condition checks.
For the second issue, frankly speaking it does not hurt when a kernel thread exits in TASK_INTERRUPTIBLE state, but the warning message scares users and makes them feel there might be something risky with bcache that could hurt their data. Setting the task state to TASK_RUNNING before returning fixes this problem.
In alloc.c:allocator_wait() there is a similar issue, which is also fixed in this patch.
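In both places the resulting shape is the standard lost-wakeup-safe pattern for kernel threads that sleep on a condition. A condensed sketch of what the fixed loop looks like (work_available() and do_work() are placeholders, not bcache functions):

	while (!kthread_should_stop()) {
		/* publish the intent to sleep before testing the condition,
		 * so a concurrent wake_up_process() cannot be lost */
		set_current_state(TASK_INTERRUPTIBLE);

		if (!work_available()) {
			if (kthread_should_stop()) {
				/* never let the thread exit in TASK_INTERRUPTIBLE */
				set_current_state(TASK_RUNNING);
				return 0;
			}
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);
		do_work();
	}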
Changelog:
v3: merge two similar fixes into one patch
v2: fix the race issue in v1 patch.
v1: initial buggy fix.
Signed-off-by: Coly Li colyli@suse.de Reviewed-by: Hannes Reinecke hare@suse.de Reviewed-by: Michael Lyle mlyle@lyle.org Cc: Michael Lyle mlyle@lyle.org Cc: Junhui Tang tang.junhui@zte.com.cn Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/md/bcache/alloc.c | 4 +++- drivers/md/bcache/writeback.c | 7 +++++-- 2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 4d46f2ce606f..3263658ccedb 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -285,8 +285,10 @@ do { \ break; \ \ mutex_unlock(&(ca)->set->bucket_lock); \ - if (kthread_should_stop()) \ + if (kthread_should_stop()) { \ + set_current_state(TASK_RUNNING); \ return 0; \ + } \ \ try_to_freeze(); \ schedule(); \ diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index bbb1dc9e1639..4639270bf99b 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -425,19 +425,22 @@ static int bch_writeback_thread(void *arg)
while (!kthread_should_stop()) { down_write(&dc->writeback_lock); + set_current_state(TASK_INTERRUPTIBLE); if (!atomic_read(&dc->has_dirty) || (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && !dc->writeback_running)) { up_write(&dc->writeback_lock); - set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop()) + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); return 0; + }
try_to_freeze(); schedule(); continue; } + set_current_state(TASK_RUNNING);
searched_full_index = refill_dirty(dc);
From: Tang Junhui tang.junhui@zte.com.cn
[ Upstream commit 682811b3ce1a5a4e20d700939a9042f01dbc66c4 ]
After a long run of random small-IO writes I rebooted the machine, and after it powered on I found bcache got stuck; the stacks are:
[root@ceph153 ~]# cat /proc/2510/task/*/stack
[<ffffffffa06b2455>] closure_sync+0x25/0x90 [bcache]
[<ffffffffa06b6be8>] bch_journal+0x118/0x2b0 [bcache]
[<ffffffffa06b6dc7>] bch_journal_meta+0x47/0x70 [bcache]
[<ffffffffa06be8f7>] bch_prio_write+0x237/0x340 [bcache]
[<ffffffffa06a8018>] bch_allocator_thread+0x3c8/0x3d0 [bcache]
[<ffffffff810a631f>] kthread+0xcf/0xe0
[<ffffffff8164c318>] ret_from_fork+0x58/0x90
[<ffffffffffffffff>] 0xffffffffffffffff
[root@ceph153 ~]# cat /proc/2038/task/*/stack
[<ffffffffa06b1abd>] __bch_btree_map_nodes+0x12d/0x150 [bcache]
[<ffffffffa06b1bd1>] bch_btree_insert+0xf1/0x170 [bcache]
[<ffffffffa06b637f>] bch_journal_replay+0x13f/0x230 [bcache]
[<ffffffffa06c75fe>] run_cache_set+0x79a/0x7c2 [bcache]
[<ffffffffa06c0cf8>] register_bcache+0xd48/0x1310 [bcache]
[<ffffffff812f702f>] kobj_attr_store+0xf/0x20
[<ffffffff8125b216>] sysfs_write_file+0xc6/0x140
[<ffffffff811dfbfd>] vfs_write+0xbd/0x1e0
[<ffffffff811e069f>] SyS_write+0x7f/0xe0
[<ffffffff8164c3c9>] system_call_fastpath+0x16/0x1
The stacks show that the register thread and the allocator thread got stuck while registering the cache device.
I rebooted the machine several times; the issue always exists on this machine.
I debugged the code and found the call trace below:
register_bcache()
   ==>run_cache_set()
      ==>bch_journal_replay()
         ==>bch_btree_insert()
            ==>__bch_btree_map_nodes()
               ==>btree_insert_fn()
                  ==>btree_split() //node need split
                     ==>btree_check_reserve()
In btree_check_reserve(), it checks whether there are enough buckets of RESERVE_BTREE type. Since the allocator thread has not started working yet, no buckets of RESERVE_BTREE type have been allocated, so the register thread waits on c->btree_cache_wait and goes to sleep.
Then the allocator thread is initialized; the call trace is below:
bch_allocator_thread()
   ==>bch_prio_write()
      ==>bch_journal_meta()
         ==>bch_journal()
            ==>journal_wait_for_write()
In journal_wait_for_write(), it checks whether the journal is full via journal_full(). The long run of random small-IO writes has exhausted the journal buckets (journal.blocks_free = 0), so in order to release journal buckets the allocator calls btree_flush_write() to flush keys to btree nodes, and waits on c->journal.wait until the btree node writes complete or some journal bucket space becomes available; then the allocator thread goes to sleep. But in btree_flush_write(), since bch_journal_replay() has not finished, no btree node carries journal entries (the condition "if (btree_current_write(b)->journal)" is never satisfied), so there is no btree node to flush, no journal bucket is released, and the allocator sleeps forever.
From the above analysis we can see that:
1) the register thread waits for the allocator thread to allocate buckets of RESERVE_BTREE type;
2) the allocator thread waits for the register thread to replay the journal, so it can flush btree nodes and free a journal bucket.
They are both stuck waiting for each other.
Hua Rui provided a patch that allocates some buckets of RESERVE_BTREE type in advance, so the register thread can get a bucket when a btree node splits and does not need to wait for the allocator thread. I tested it: it helps, and the register thread gets one step further, but in the end they still get stuck. The reason is that only 8 buckets of RESERVE_BTREE type were allocated, and in bch_journal_replay(), after 2 btree node splits, only 4 buckets of RESERVE_BTREE type are left, so btree_check_reserve() is no longer satisfied and the register thread goes to sleep again; at the same time, the allocator thread has not flushed enough btree nodes to release a journal bucket, so they all get stuck again.
So we need to allocate more buckets of RESERVE_BTREE type in advance, but how many are enough? From experience and testing, I think it should be as many as there are journal buckets. I modified the code as in this patch, tested it on the machine, and it works.
This patch is based on Hua Rui's patch and allocates more buckets of RESERVE_BTREE type in advance, to avoid the register thread and the allocator thread waiting for each other.
[patch v2] ca->sb.njournal_buckets is 0 the first time after cache creation, when no journal exists yet, so just 8 btree buckets are OK.
Signed-off-by: Hua Rui huarui.dev@gmail.com Signed-off-by: Tang Junhui tang.junhui@zte.com.cn Reviewed-by: Michael Lyle mlyle@lyle.org Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/md/bcache/btree.c | 9 ++++++--- drivers/md/bcache/super.c | 13 ++++++++++++- 2 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index a5a6909280fe..4ed621ad27e4 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1869,14 +1869,17 @@ void bch_initial_gc_finish(struct cache_set *c) */ for_each_cache(ca, c, i) { for_each_bucket(b, ca) { - if (fifo_full(&ca->free[RESERVE_PRIO])) + if (fifo_full(&ca->free[RESERVE_PRIO]) && + fifo_full(&ca->free[RESERVE_BTREE])) break;
if (bch_can_invalidate_bucket(ca, b) && !GC_MARK(b)) { __bch_invalidate_one_bucket(ca, b); - fifo_push(&ca->free[RESERVE_PRIO], - b - ca->buckets); + if (!fifo_push(&ca->free[RESERVE_PRIO], + b - ca->buckets)) + fifo_push(&ca->free[RESERVE_BTREE], + b - ca->buckets); } } } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index be8307550bd7..132d6417c66e 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1822,6 +1822,7 @@ void bch_cache_release(struct kobject *kobj) static int cache_alloc(struct cache_sb *sb, struct cache *ca) { size_t free; + size_t btree_buckets; struct bucket *b;
__module_get(THIS_MODULE); @@ -1831,9 +1832,19 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) ca->journal.bio.bi_max_vecs = 8; ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+ /* + * when ca->sb.njournal_buckets is not zero, journal exists, + * and in bch_journal_replay(), tree node may split, + * so bucket of RESERVE_BTREE type is needed, + * the worst situation is all journal buckets are valid journal, + * and all the keys need to replay, + * so the number of RESERVE_BTREE type buckets should be as much + * as journal buckets + */ + btree_buckets = ca->sb.njournal_buckets ?: 8; free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
- if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || + if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) || !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
From: Tang Junhui tang.junhui@zte.com.cn
[ Upstream commit 73ac105be390c1de42a2f21643c9778a5e002930 ]
The back-end device sdm has already been attached to a cache set with ID f67ebe1f-f8bc-4d73-bfe5-9dc88607f119; then we try to attach it to another cache set, and it returns an error:
[root]# cd /sys/block/sdm/bcache
[root]# echo 5ccd0a63-148e-48b8-afa2-aca9cbd6279f > attach
-bash: echo: write error: Invalid argument
After that, we run a command to modify the label of the bcache device:
[root]# echo data_disk1 > label
Then we reboot the system. When the system powers on, the back-end device cannot attach to the cache set, and a message shows up in the log:
Feb 5 12:05:52 ceph152 kernel: [922385.508498] bcache: bch_cached_dev_attach() couldn't find uuid for sdm in set
In sysfs_attach(), dc->sb.set_uuid is assigned the value passed in through sysfs regardless of whether bch_cached_dev_attach() succeeds. For example, if the back-end device is already attached to a cache set, bch_cached_dev_attach() fails, but dc->sb.set_uuid has already been changed. Modifying the label of the bcache device then calls bch_write_bdev_super(), which writes dc->sb.set_uuid to the super block, so a wrong cache set ID is recorded in the super block. After the system reboots, the cache set cannot find the uuid of the back-end device, so the bcache device can no longer exist or be used.
In this patch, we do not assign the cache set ID to dc->sb.set_uuid in sysfs_attach() directly; instead we pass it into bch_cached_dev_attach(), and dc->sb.set_uuid is set to the cache set ID only after the back-end device has attached to the cache set successfully.
Signed-off-by: Tang Junhui tang.junhui@zte.com.cn Reviewed-by: Michael Lyle mlyle@lyle.org Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/md/bcache/bcache.h | 2 +- drivers/md/bcache/super.c | 10 ++++++---- drivers/md/bcache/sysfs.c | 6 ++++-- 3 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 02619cabda8b..7fe7df56fa33 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -904,7 +904,7 @@ void bcache_write_super(struct cache_set *);
int bch_flash_dev_create(struct cache_set *c, uint64_t size);
-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *); +int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *); void bch_cached_dev_detach(struct cached_dev *); void bch_cached_dev_run(struct cached_dev *); void bcache_device_stop(struct bcache_device *); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 132d6417c66e..4a3ae14d25e0 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -930,7 +930,8 @@ void bch_cached_dev_detach(struct cached_dev *dc) cached_dev_put(dc); }
-int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) +int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, + uint8_t *set_uuid) { uint32_t rtime = cpu_to_le32(get_seconds()); struct uuid_entry *u; @@ -939,7 +940,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
bdevname(dc->bdev, buf);
- if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) + if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || + (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) return -ENOENT;
if (dc->disk.c) { @@ -1183,7 +1185,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
list_add(&dc->list, &uncached_devices); list_for_each_entry(c, &bch_cache_sets, list) - bch_cached_dev_attach(dc, c); + bch_cached_dev_attach(dc, c, NULL);
if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) @@ -1705,7 +1707,7 @@ static void run_cache_set(struct cache_set *c) bcache_write_super(c);
list_for_each_entry_safe(dc, t, &uncached_devices, list) - bch_cached_dev_attach(dc, c); + bch_cached_dev_attach(dc, c, NULL);
flash_devs_run(c);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 4fbb5532f24c..1efe31615281 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -263,11 +263,13 @@ STORE(__cached_dev) }
if (attr == &sysfs_attach) { - if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16) + uint8_t set_uuid[16]; + + if (bch_parse_uuid(buf, set_uuid) < 16) return -EINVAL;
list_for_each_entry(c, &bch_cache_sets, list) { - v = bch_cached_dev_attach(dc, c); + v = bch_cached_dev_attach(dc, c, set_uuid); if (!v) return size; }
From: Tang Junhui tang.junhui@zte.com.cn
[ Upstream commit 7f4fc93d4713394ee8f1cd44c238e046e11b4f15 ]
I attach a back-end device to a cache set while the cache set is not registered yet; the back-end device does not attach successfully, yet no error is returned:
[root]# echo 87859280-fec6-4bcc-20df7ca8f86b > /sys/block/sde/bcache/attach
[root]#
In sysfs_attach(), the return value "v" is initialized to "size" at the beginning. If no cache set exists in bch_cache_sets, "v" never changes and is returned to sysfs, and sysfs regards it as success since "size" is a positive number.
This patch fixes the issue by setting "v" to -ENOENT before the attach loop, so an empty cache set list results in an error being returned.
Signed-off-by: Tang Junhui tang.junhui@zte.com.cn Reviewed-by: Michael Lyle mlyle@lyle.org Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/md/bcache/sysfs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 1efe31615281..5a5c1f1bd8a5 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -191,7 +191,7 @@ STORE(__cached_dev) { struct cached_dev *dc = container_of(kobj, struct cached_dev, disk.kobj); - ssize_t v = size; + ssize_t v; struct cache_set *c; struct kobj_uevent_env *env;
@@ -268,6 +268,7 @@ STORE(__cached_dev) if (bch_parse_uuid(buf, set_uuid) < 16) return -EINVAL;
+ v = -ENOENT; list_for_each_entry(c, &bch_cache_sets, list) { v = bch_cached_dev_attach(dc, c, set_uuid); if (!v) @@ -275,7 +276,7 @@ STORE(__cached_dev) }
pr_err("Can't attach %s: cache set not found", buf); - size = v; + return v; }
if (attr == &sysfs_detach && dc->disk.c)
From: Jesper Dangaard Brouer brouer@redhat.com
[ Upstream commit e3d91b0ca523d53158f435a3e13df7f0cb360ea2 ]
V3: More generic skipping of relo-section (suggested by Daniel)
If clang >= 4.0.1 is invoked without the option '-target bpf', it will cause llc/llvm to create two ELF sections for "Exception Frames", with section names '.eh_frame' and '.rel.eh_frame'.
The BPF ELF loader library libbpf fails when loading files with these sections. The other in-kernel BPF ELF loader, in samples/bpf/bpf_load.c, handles this gracefully, and the iproute2 loader also seems to work with these "eh" sections.
The issue in libbpf is caused by bpf_object__elf_collect() skipping some sections, and later when performing relocation it will be pointing to a skipped section, as these sections cannot be found by bpf_object__find_prog_by_idx() in bpf_object__collect_reloc().
This is a general issue that also occurs for other sections, like debug sections, which are also skipped and can have a relo section.
As suggested by Daniel: to avoid keeping state about all skipped sections, instead perform a direct lookup in the ELF object. Look up the section that the relo section points to and check whether it contains executable machine instructions (denoted by the sh_flags bit SHF_EXECINSTR). Use this check to also skip irrelevant relo sections.
Note, for samples/bpf/ the '-target bpf' parameter to clang cannot be used due to incompatibility with the asm-embedded headers that some of the samples include. This is explained in more detail by Yonghong Song in bpf_devel_QA.
Signed-off-by: Jesper Dangaard Brouer brouer@redhat.com Signed-off-by: Daniel Borkmann daniel@iogearbox.net Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- tools/lib/bpf/libbpf.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e176bad19bcb..ca080a129b33 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -487,6 +487,24 @@ bpf_object__init_maps(struct bpf_object *obj, void *data, return 0; }
+static bool section_have_execinstr(struct bpf_object *obj, int idx) +{ + Elf_Scn *scn; + GElf_Shdr sh; + + scn = elf_getscn(obj->efile.elf, idx); + if (!scn) + return false; + + if (gelf_getshdr(scn, &sh) != &sh) + return false; + + if (sh.sh_flags & SHF_EXECINSTR) + return true; + + return false; +} + static int bpf_object__elf_collect(struct bpf_object *obj) { Elf *elf = obj->efile.elf; @@ -567,6 +585,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj) } else if (sh.sh_type == SHT_REL) { void *reloc = obj->efile.reloc; int nr_reloc = obj->efile.nr_reloc + 1; + int sec = sh.sh_info; /* points to other section */ + + /* Only do relo for section with exec instructions */ + if (!section_have_execinstr(obj, sec)) { + pr_debug("skip relo %s(%d) for section(%d)\n", + name, idx, sec); + continue; + }
reloc = realloc(reloc, sizeof(*obj->efile.reloc) * nr_reloc);
From: "J. Bruce Fields" bfields@redhat.com
[ Upstream commit 0078117c6d9160031b866cfa1853514d4f6865d2 ]
A client that sends more than a hundred ops in a single compound currently gets an rpc-level GARBAGE_ARGS error.
It would be more helpful to return NFS4ERR_RESOURCE, since that gives the client a better idea how to recover (for example by splitting up the compound into smaller compounds).
This is all a bit academic since we've never actually seen a reason for clients to send such long compounds, but we may as well fix it.
While we're there, just use NFSD_MAX_OPS_PER_COMPOUND == 16, the constant we already use in the 4.1 case, instead of hard-coding 100. Chances anyone actually uses even 16 ops per compound are small enough that I think there's a negligible risk of any regression.
This fixes pynfs test COMP6.
Reported-by: "Lu, Xinyu" luxy.fnst@cn.fujitsu.com Signed-off-by: J. Bruce Fields bfields@redhat.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- fs/nfsd/nfs4proc.c | 3 +++ fs/nfsd/nfs4xdr.c | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index bfbee8ddf978..8f35f0a15bfa 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1627,6 +1627,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, status = nfserr_minor_vers_mismatch; if (nfsd_minorversion(args->minorversion, NFSD_TEST) <= 0) goto out; + status = nfserr_resource; + if (args->opcnt > NFSD_MAX_OPS_PER_COMPOUND) + goto out;
status = nfs41_check_op_ordering(args); if (status) { diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 544672b440de..5ad02bdb62a3 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -1824,8 +1824,13 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
if (argp->taglen > NFSD4_MAX_TAGLEN) goto xdr_error; - if (argp->opcnt > 100) - goto xdr_error; + /* + * NFS4ERR_RESOURCE is a more helpful error than GARBAGE_ARGS + * here, so we return success at the xdr level so that + * nfsd4_proc can handle this is an NFS-level error. + */ + if (argp->opcnt > NFSD_MAX_OPS_PER_COMPOUND) + return 0;
if (argp->opcnt > ARRAY_SIZE(argp->iops)) { argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
From: Will Deacon will.deacon@arm.com
[ Upstream commit 11dc13224c975efcec96647a4768a6f1bb7a19a8 ]
When queuing on the qspinlock, the count field for the current CPU's head node is incremented. This needn't be atomic because locking in e.g. IRQ context is balanced and so an IRQ will return with node->count as it found it.
However, the compiler could in theory reorder the initialisation of node[idx] before the increment of the head node->count, causing an IRQ to overwrite the initialised node and potentially corrupt the lock state.
Avoid the potential for this harmful compiler reordering by placing a barrier() between the increment of the head node->count and the subsequent node initialisation.
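barrier() is a compiler-only fence, so it costs nothing at runtime while forbidding exactly this reordering. A condensed sketch of the surrounding slowpath logic, paraphrased and slightly simplified from kernel/locking/qspinlock.c of this era:

	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;			/* claim a per-CPU node slot */
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;

	/* keep the store from "count++" above from being deferred past the
	 * initialisation below: if it were, an IRQ taken in between would
	 * reuse the same slot and overwrite the just-initialised node */
	barrier();

	node->locked = 0;
	node->next = NULL;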
Signed-off-by: Will Deacon will.deacon@arm.com Acked-by: Peter Zijlstra (Intel) peterz@infradead.org Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Thomas Gleixner tglx@linutronix.de Link: http://lkml.kernel.org/r/1518528177-19169-3-git-send-email-will.deacon@arm.c... Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- kernel/locking/qspinlock.c | 8 ++++++++ 1 file changed, 8 insertions(+)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 8173bc7fec92..3b40c8809e52 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -423,6 +423,14 @@ queue: tail = encode_tail(smp_processor_id(), idx);
node += idx; + + /* + * Ensure that we increment the head node->count before initialising + * the actual node. If the compiler is kind enough to reorder these + * stores, then an IRQ could overwrite our assignments. + */ + barrier(); + node->locked = 0; node->next = NULL; pv_init_node(node);
From: Mark Salter msalter@redhat.com
[ Upstream commit b6dd4d83dc2f78cebc9a7e6e7e4bc2be4d29b94d ]
The pr_debug() in gic-v3 gic_send_sgi() can trigger a circular locking warning:
GICv3: CPU10: ICC_SGI1R_EL1 5000400
======================================================
WARNING: possible circular locking dependency detected
4.15.0+ #1 Tainted: G        W
------------------------------------------------------
dynamic_debug01/1873 is trying to acquire lock:
 ((console_sem).lock){-...}, at: [<0000000099c891ec>] down_trylock+0x20/0x4c
but task is already holding lock:
 (&rq->lock){-.-.}, at: [<00000000842e1587>] __task_rq_lock+0x54/0xdc
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #2 (&rq->lock){-.-.}:
       __lock_acquire+0x3b4/0x6e0
       lock_acquire+0xf4/0x2a8
       _raw_spin_lock+0x4c/0x60
       task_fork_fair+0x3c/0x148
       sched_fork+0x10c/0x214
       copy_process.isra.32.part.33+0x4e8/0x14f0
       _do_fork+0xe8/0x78c
       kernel_thread+0x48/0x54
       rest_init+0x34/0x2a4
       start_kernel+0x45c/0x488
-> #1 (&p->pi_lock){-.-.}:
       __lock_acquire+0x3b4/0x6e0
       lock_acquire+0xf4/0x2a8
       _raw_spin_lock_irqsave+0x58/0x70
       try_to_wake_up+0x48/0x600
       wake_up_process+0x28/0x34
       __up.isra.0+0x60/0x6c
       up+0x60/0x68
       __up_console_sem+0x4c/0x7c
       console_unlock+0x328/0x634
       vprintk_emit+0x25c/0x390
       dev_vprintk_emit+0xc4/0x1fc
       dev_printk_emit+0x88/0xa8
       __dev_printk+0x58/0x9c
       _dev_info+0x84/0xa8
       usb_new_device+0x100/0x474
       hub_port_connect+0x280/0x92c
       hub_event+0x740/0xa84
       process_one_work+0x240/0x70c
       worker_thread+0x60/0x400
       kthread+0x110/0x13c
       ret_from_fork+0x10/0x18
-> #0 ((console_sem).lock){-...}:
       validate_chain.isra.34+0x6e4/0xa20
       __lock_acquire+0x3b4/0x6e0
       lock_acquire+0xf4/0x2a8
       _raw_spin_lock_irqsave+0x58/0x70
       down_trylock+0x20/0x4c
       __down_trylock_console_sem+0x3c/0x9c
       console_trylock+0x20/0xb0
       vprintk_emit+0x254/0x390
       vprintk_default+0x58/0x90
       vprintk_func+0xbc/0x164
       printk+0x80/0xa0
       __dynamic_pr_debug+0x84/0xac
       gic_raise_softirq+0x184/0x18c
       smp_cross_call+0xac/0x218
       smp_send_reschedule+0x3c/0x48
       resched_curr+0x60/0x9c
       check_preempt_curr+0x70/0xdc
       wake_up_new_task+0x310/0x470
       _do_fork+0x188/0x78c
       SyS_clone+0x44/0x50
       __sys_trace_return+0x0/0x4
other info that might help us debug this:
Chain exists of: (console_sem).lock --> &p->pi_lock --> &rq->lock
Possible unsafe locking scenario:
       CPU0                    CPU1
       ----                    ----
  lock(&rq->lock);
                               lock(&p->pi_lock);
                               lock(&rq->lock);
  lock((console_sem).lock);
*** DEADLOCK ***
2 locks held by dynamic_debug01/1873:
 #0:  (&p->pi_lock){-.-.}, at: [<000000001366df53>] wake_up_new_task+0x40/0x470
 #1:  (&rq->lock){-.-.}, at: [<00000000842e1587>] __task_rq_lock+0x54/0xdc
stack backtrace:
CPU: 10 PID: 1873 Comm: dynamic_debug01 Tainted: G        W        4.15.0+ #1
Hardware name: GIGABYTE R120-T34-00/MT30-GS2-00, BIOS T48 10/02/2017
Call trace:
 dump_backtrace+0x0/0x188
 show_stack+0x24/0x2c
 dump_stack+0xa4/0xe0
 print_circular_bug.isra.31+0x29c/0x2b8
 check_prev_add.constprop.39+0x6c8/0x6dc
 validate_chain.isra.34+0x6e4/0xa20
 __lock_acquire+0x3b4/0x6e0
 lock_acquire+0xf4/0x2a8
 _raw_spin_lock_irqsave+0x58/0x70
 down_trylock+0x20/0x4c
 __down_trylock_console_sem+0x3c/0x9c
 console_trylock+0x20/0xb0
 vprintk_emit+0x254/0x390
 vprintk_default+0x58/0x90
 vprintk_func+0xbc/0x164
 printk+0x80/0xa0
 __dynamic_pr_debug+0x84/0xac
 gic_raise_softirq+0x184/0x18c
 smp_cross_call+0xac/0x218
 smp_send_reschedule+0x3c/0x48
 resched_curr+0x60/0x9c
 check_preempt_curr+0x70/0xdc
 wake_up_new_task+0x310/0x470
 _do_fork+0x188/0x78c
 SyS_clone+0x44/0x50
 __sys_trace_return+0x0/0x4
GICv3: CPU0: ICC_SGI1R_EL1 12000
This could be fixed with printk_deferred() but that might lessen its usefulness for debugging. So change it to pr_devel to keep it out of production kernels. Developers working on gic-v3 can enable it as needed in their kernels.
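For reference, the practical difference (paraphrased from include/linux/printk.h): pr_debug() can be flipped on at runtime via dynamic debug, which is exactly how this splat was triggered, whereas pr_devel() compiles away entirely unless DEBUG is defined for the file, e.g. by adding "#define DEBUG" near the top of irq-gic-v3.c in a private developer build:

	#ifdef DEBUG
	#define pr_devel(fmt, ...) \
		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#else
	#define pr_devel(fmt, ...) \
		no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#endif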
Signed-off-by: Mark Salter msalter@redhat.com Signed-off-by: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Sasha Levin alexander.levin@microsoft.com --- drivers/irqchip/irq-gic-v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index eed31f9bee05..cb0d0caadc3f 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -589,7 +589,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
- pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); gic_write_sgi1r(val); }