From: Marko Kohtala <marko.kohtala@okoko.fi>
[ Upstream commit dd9782834dd9dde3624ff1acea8859f3d3e792d4 ]
The page_offset was only applied to the end of the page range. As a result, display updates produced a scrolling effect because the amount of data written to the display did not match the address range the display expected.
Fixes: 301bc0675b67 ("video: ssd1307fb: Make use of horizontal addressing mode")
Signed-off-by: Marko Kohtala <marko.kohtala@okoko.fi>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Michal Vokáč <michal.vokac@ysoft.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190618074111.9309-4-marko.ko...
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/video/fbdev/ssd1307fb.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index fa3480815cdb6..88e0763edcc72 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -421,7 +421,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
 	if (ret < 0)
 		return ret;

-	ret = ssd1307fb_write_cmd(par->client, 0x0);
+	ret = ssd1307fb_write_cmd(par->client, par->page_offset);
 	if (ret < 0)
 		return ret;
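For context, a hedged sketch of the resulting page-window setup follows. This is not the driver's literal code; it assumes 0x22 is the SSD130x "set page address" command and that the panel uses height/8 pages, neither of which is stated in this patch.

  /*
   * Sketch only (assumptions noted above): program the page window so that
   * both the start and the end page carry page_offset. The fix above changes
   * the start page, which was hard-coded to 0x0 while the end page already
   * included the offset.
   */
  static int ssd1307fb_set_page_window(struct ssd1307fb_par *par)
  {
  	int ret;

  	ret = ssd1307fb_write_cmd(par->client, 0x22);	/* set page address (assumed) */
  	if (ret < 0)
  		return ret;

  	ret = ssd1307fb_write_cmd(par->client, par->page_offset); /* start page */
  	if (ret < 0)
  		return ret;

  	/* the end page must include the same offset */
  	return ssd1307fb_write_cmd(par->client,
  				   par->page_offset + (par->height / 8) - 1);
  }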
From: Jia-Ju Bai <baijiaju1990@gmail.com>
[ Upstream commit f3eb9b8f67bc28783eddc142ad805ebdc53d6339 ]
In radeon_connector_set_property(), the if statement on line 743 checks whether connector->encoder is NULL:

    if (connector->encoder)

However, on the path where connector->encoder is NULL, it is still dereferenced on line 755:

    if (connector->encoder->crtc)

Thus, a NULL pointer dereference may occur.
To fix this bug, connector->encoder is checked before being used.
This bug was found by STCheck, a static analysis tool written by us.
Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/radeon/radeon_connectors.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c6bf378534f83..bebcef2ce6b88 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -758,7 +758,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct

 		radeon_encoder->output_csc = val;

-		if (connector->encoder->crtc) {
+		if (connector->encoder && connector->encoder->crtc) {
 			struct drm_crtc *crtc = connector->encoder->crtc;
 			const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 			struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
From: Corey Minyard <cminyard@mvista.com>
[ Upstream commit 340ff31ab00bca5c15915e70ad9ada3030c98cf8 ]
ipmi_thread() calls schedule() back-to-back to poll for command completion, which, on some machines, can push up CPU consumption and heavily tax the scheduler locks, leading to noticeable overall performance degradation.
This was originally added so firmware updates through IPMI would complete in a timely manner. But we can't kill the scheduler locks for that one use case.
Instead, only run schedule() continuously in maintenance mode, where firmware updates should run.
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/char/ipmi/ipmi_si_intf.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 2f9abe0d04dcb..2f8ff63bbbe43 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -281,6 +281,9 @@ struct smi_info {
 	 */
 	bool irq_enable_broken;

+	/* Is the driver in maintenance mode? */
+	bool in_maintenance_mode;
+
 	/*
 	 * Did we get an attention that we did not handle?
 	 */
@@ -1091,11 +1094,20 @@ static int ipmi_thread(void *data)
 		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
-		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
 			; /* do nothing */
-		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
-			schedule();
-		else if (smi_result == SI_SM_IDLE) {
+		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+			/*
+			 * In maintenance mode we run as fast as
+			 * possible to allow firmware updates to
+			 * complete as fast as possible, but normally
+			 * don't bang on the scheduler.
+			 */
+			if (smi_info->in_maintenance_mode)
+				schedule();
+			else
+				usleep_range(100, 200);
+		} else if (smi_result == SI_SM_IDLE) {
 			if (atomic_read(&smi_info->need_watch)) {
 				schedule_timeout_interruptible(100);
 			} else {
@@ -1103,8 +1115,9 @@ static int ipmi_thread(void *data)
 				__set_current_state(TASK_INTERRUPTIBLE);
 				schedule();
 			}
-		} else
+		} else {
 			schedule_timeout_interruptible(1);
+		}
 	}
 	return 0;
 }
@@ -1283,6 +1296,7 @@ static void set_maintenance_mode(void *send_info, bool enable)

 	if (!enable)
 		atomic_set(&smi_info->req_events, 0);
+	smi_info->in_maintenance_mode = enable;
 }

 static const struct ipmi_smi_handlers handlers = {
From: Nathan Huckleberry <nhuck@google.com>
[ Upstream commit a95fb581b144b5e73da382eaedb2e32027610597 ]
  drivers/clk/clk-qoriq.c:138:38: warning: unused variable 'p5020_cmux_grp1' [-Wunused-const-variable]
  static const struct clockgen_muxinfo p5020_cmux_grp1

  drivers/clk/clk-qoriq.c:146:38: warning: unused variable 'p5020_cmux_grp2' [-Wunused-const-variable]
  static const struct clockgen_muxinfo p5020_cmux_grp2
In the definition of the p5020 chip, the p2041 chip's info was used instead. The p5020 and p2041 chips have different info, so this was most likely a typo.
Link: https://github.com/ClangBuiltLinux/linux/issues/525
Cc: clang-built-linux@googlegroups.com
Signed-off-by: Nathan Huckleberry <nhuck@google.com>
Link: https://lkml.kernel.org/r/20190627220642.78575-1-nhuck@google.com
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Scott Wood <oss@buserror.net>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/clk/clk-qoriq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index a5070f9cb0d4a..7244a621c61b9 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -540,7 +540,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
 		.guts_compat = "fsl,qoriq-device-config-1.0",
 		.init_periph = p5020_init_periph,
 		.cmux_groups = {
-			&p2041_cmux_grp1, &p2041_cmux_grp2
+			&p5020_cmux_grp1, &p5020_cmux_grp2
 		},
 		.cmux_to_group = {
 			0, 1, -1
From: Stephen Boyd <sboyd@kernel.org>
[ Upstream commit af55dadfbce35b4f4c6247244ce3e44b2e242b84 ]
A future patch is going to change the semantics of clk_register() so that clk_hw::init is guaranteed to be NULL after a clk is registered. Avoid referencing this member here so that we don't run into NULL pointer dereferences.
Cc: Guo Zeng <Guo.Zeng@csr.com>
Cc: Barry Song <Baohua.Song@csr.com>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
Link: https://lkml.kernel.org/r/20190731193517.237136-6-sboyd@kernel.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/clk/sirf/clk-common.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index 77e1e2491689b..edb7197cc4b4d 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
 {
 	struct clk_dmn *clk = to_dmnclk(hw);
 	u32 cfg = clkc_readl(clk->regofs);
+	const char *name = clk_hw_get_name(hw);

 	/* parent of io domain can only be pll3 */
-	if (strcmp(hw->init->name, "io") == 0)
+	if (strcmp(name, "io") == 0)
 		return 4;

 	WARN_ON((cfg & (BIT(3) - 1)) > 4);
@@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
 {
 	struct clk_dmn *clk = to_dmnclk(hw);
 	u32 cfg = clkc_readl(clk->regofs);
+	const char *name = clk_hw_get_name(hw);

 	/* parent of io domain can only be pll3 */
-	if (strcmp(hw->init->name, "io") == 0)
+	if (strcmp(name, "io") == 0)
 		return -EINVAL;

 	cfg &= ~(BIT(3) - 1);
@@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 {
 	unsigned long fin;
 	unsigned ratio, wait, hold;
-	unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+	const char *name = clk_hw_get_name(hw);
+	unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;

 	fin = *parent_rate;
 	ratio = fin / rate;
@@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	struct clk_dmn *clk = to_dmnclk(hw);
 	unsigned long fin;
 	unsigned ratio, wait, hold, reg;
-	unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+	const char *name = clk_hw_get_name(hw);
+	unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;

 	fin = parent_rate;
 	ratio = fin / rate;
From: Nathan Lynch <nathanl@linux.ibm.com>
[ Upstream commit a6717c01ddc259f6f73364779df058e2c67309f8 ]
The LPAR migration implementation and userspace-initiated cpu hotplug can interleave their executions like so:
1. Set cpu 7 offline via sysfs.
2. Begin a partition migration, whose implementation requires the OS to ensure all present cpus are online; cpu 7 is onlined:
rtas_ibm_suspend_me -> rtas_online_cpus_mask -> cpu_up
This sets cpu 7 online in all respects except for the cpu's corresponding struct device; dev->offline remains true.
3. Set cpu 7 online via sysfs. _cpu_up() determines that cpu 7 is already online and returns success. The driver core (device_online) sets dev->offline = false.
4. The migration completes and restores cpu 7 to offline state:
rtas_ibm_suspend_me -> rtas_offline_cpus_mask -> cpu_down
This leaves cpu 7 in a state where the driver core considers the cpu device online, but in all other respects it is offline and unused. Attempts to online the cpu via sysfs appear to succeed, but the driver core does not actually pass the request down to the lower-level cpuhp support code. This makes the cpu unusable until the cpu device is manually set offline and then online again via sysfs.
Instead of directly calling cpu_up/cpu_down, the migration code should use the higher-level device core APIs to maintain consistent state and serialize operations.
Fixes: 120496ac2d2d ("powerpc: Bring all threads online prior to migration/hibernation")
Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190802192926.19277-2-nathanl@linux.ibm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/kernel/rtas.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 5a753fae8265a..0c42e872d548b 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -857,15 +857,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
 		return 0;

 	for_each_cpu(cpu, cpus) {
+		struct device *dev = get_cpu_device(cpu);
+
 		switch (state) {
 		case DOWN:
-			cpuret = cpu_down(cpu);
+			cpuret = device_offline(dev);
 			break;
 		case UP:
-			cpuret = cpu_up(cpu);
+			cpuret = device_online(dev);
 			break;
 		}
-		if (cpuret) {
+		if (cpuret < 0) {
 			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
 					__func__,
 					((state == UP) ? "up" : "down"),
@@ -954,6 +956,8 @@ int rtas_ibm_suspend_me(u64 handle)
 	data.token = rtas_token("ibm,suspend-me");
 	data.complete = &done;

+	lock_device_hotplug();
+
 	/* All present CPUs must be online */
 	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
 	cpuret = rtas_online_cpus_mask(offline_mask);
@@ -985,6 +989,7 @@ int rtas_ibm_suspend_me(u64 handle)
 			__func__);

 out:
+	unlock_device_hotplug();
 	free_cpumask_var(offline_mask);
 	return atomic_read(&data.error);
 }
From: Christophe Leroy <christophe.leroy@c-s.fr>
[ Upstream commit 38a0d0cdb46d3f91534e5b9839ec2d67be14c59d ]
We see warnings such as:

  kernel/futex.c: In function 'do_futex':
  kernel/futex.c:1676:17: warning: 'oldval' may be used uninitialized in this function [-Wmaybe-uninitialized]
     return oldval == cmparg;
                   ^
  kernel/futex.c:1651:6: note: 'oldval' was declared here
    int oldval, ret;
        ^
This is because arch_futex_atomic_op_inuser() only sets *oval if ret is 0, and GCC doesn't see that the caller only uses *oval when ret is 0.
Anyway, the non-zero ret path is an error path that won't suffer from setting *oval, and as *oval is a local var in futex_atomic_op_inuser() it will have no impact.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: reword change log slightly]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/86b72f0c134367b214910b27b9a6dd3321af93bb.156577465...
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/include/asm/futex.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index f4c7467f74655..b73ab8a7ebc3f 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,

 	pagefault_enable();

-	if (!ret)
-		*oval = oldval;
+	*oval = oldval;

 	return ret;
 }
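For readers who want to see the warning pattern outside the kernel, here is a small standalone sketch of the same shape. The names are illustrative, and depending on GCC version, inlining, and optimization level the diagnostic may or may not fire on this reduced case.

  /*
   * Reduced illustration: the helper writes *oval only when it returns 0,
   * so the compiler cannot prove "oldval" is initialized at the point of
   * use, even though the caller reads it only when ret == 0.
   */
  #include <stdio.h>

  static int atomic_op_inuser(int oparg, int *oval)
  {
  	int ret = (oparg < 0) ? -1 : 0;	/* stand-in for the fault path */

  	if (!ret)
  		*oval = oparg + 1;	/* only written on success */
  	return ret;
  }

  static int do_op(int oparg, int cmparg)
  {
  	int oldval, ret;

  	ret = atomic_op_inuser(oparg, &oldval);
  	if (ret)
  		return ret;
  	/* -Wmaybe-uninitialized points here when the store above is conditional */
  	return oldval == cmparg;
  }

  int main(void)
  {
  	printf("%d\n", do_op(1, 2));
  	return 0;
  }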
From: Nathan Lynch <nathanl@linux.ibm.com>
[ Upstream commit ccfb5bd71d3d1228090a8633800ae7cdf42a94ac ]
After a partition migration, pseries_devicetree_update() processes changes to the device tree communicated from the platform to Linux. This is a relatively heavyweight operation, with multiple device tree searches, memory allocations, and conversations with partition firmware.
There are a few levels of nested loops which are bounded only by decisions made by the platform, outside of Linux's control, and indeed we have seen RCU stalls on large systems while executing this call graph. Use cond_resched() in these loops so that the cpu is yielded when needed.
Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190802192926.19277-4-nathanl@linux.ibm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/platforms/pseries/mobility.c | 9 +++++++++
 1 file changed, 9 insertions(+)
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index c773396d0969b..8d30a425a88ab 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -11,6 +11,7 @@

 #include <linux/kernel.h>
 #include <linux/kobject.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/stat.h>
 #include <linux/completion.h>
@@ -206,7 +207,11 @@ static int update_dt_node(__be32 phandle, s32 scope)

 				prop_data += vd;
 			}
+
+			cond_resched();
 		}
+
+		cond_resched();
 	} while (rtas_rc == 1);

 	of_node_put(dn);
@@ -282,8 +287,12 @@ int pseries_devicetree_update(s32 scope)
 					add_dt_node(phandle, drc_index);
 					break;
 				}
+
+				cond_resched();
 			}
 		}
+
+		cond_resched();
 	} while (rc == 1);

 	kfree(rtas_buf);
From: Sowjanya Komatineni <skomatineni@nvidia.com>
[ Upstream commit c2cf351eba2ff6002ce8eb178452219d2521e38e ]
pmx_writel() uses writel(), which inserts a write barrier before the register write.
This patch replaces writel() with writel_relaxed() followed by a read-back of the same register, which acts as a barrier to ensure the write operation has completed and the pinctrl change has taken effect.
Acked-by: Thierry Reding <treding@nvidia.com>
Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Sowjanya Komatineni <skomatineni@nvidia.com>
Link: https://lore.kernel.org/r/1565984527-5272-2-git-send-email-skomatineni@nvidi...
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/pinctrl/pinctrl-tegra.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 0fd7fd2b0f72c..a30e967d75c2a 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -52,7 +52,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)

 static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
 {
-	writel(val, pmx->regs[bank] + reg);
+	writel_relaxed(val, pmx->regs[bank] + reg);
+	/* make sure pinmux register write completed */
+	pmx_readl(pmx, bank, reg);
 }

 static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
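The change is an instance of the generic "write, then read back" flush for posted MMIO writes. A minimal kernel-style sketch follows; the helper name and register argument are illustrative and not taken from pinctrl-tegra.c.

  #include <linux/io.h>

  /*
   * Illustrative helper, not driver code: writel_relaxed() skips the barrier
   * that plain writel() issues before the store, and the read back from the
   * same register forces the posted write to reach the device before return.
   */
  static inline void mmio_write_and_flush(void __iomem *reg, u32 val)
  {
  	writel_relaxed(val, reg);
  	(void)readl_relaxed(reg);	/* read back to flush the posted write */
  }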
From: Sam Bobroff <sbobroff@linux.ibm.com>
[ Upstream commit aa06e3d60e245284d1e55497eb3108828092818d ]
The EEH_DEV_NO_HANDLER flag is used by the EEH system to prevent the use of driver callbacks in drivers that have been bound part way through the recovery process. This is necessary to prevent later stage handlers from being called when the earlier stage handlers haven't, which can be confusing for drivers.
However, the flag is set for all devices that are added after boot time and is only cleared at the end of the EEH recovery process. This results in hot-plugged devices erroneously having the flag set during the first recovery after they are added (causing their drivers' handlers to be incorrectly ignored).
To remedy this, clear the flag at the beginning of recovery processing. The flag is still cleared at the end of recovery processing, although it is no longer really necessary.
Also clear the flag during eeh_handle_special_event(), for the same reasons.
Signed-off-by: Sam Bobroff <sbobroff@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b8ca5629d27de74c957d4f4b250177d1b6fc4bbd.156593077...
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/kernel/eeh_driver.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 9837c98caabe9..045038469295d 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -675,6 +675,10 @@ static bool eeh_handle_normal_event(struct eeh_pe *pe)
 		pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
 			pe->freeze_count);

+	eeh_for_each_pe(pe, tmp_pe)
+		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
+			edev->mode &= ~EEH_DEV_NO_HANDLER;
+
 	/* Walk the various device drivers attached to this slot through
 	 * a reset sequence, giving each an opportunity to do what it needs
 	 * to accomplish the reset. Each child gets a report of the
@@ -840,7 +844,8 @@ static bool eeh_handle_normal_event(struct eeh_pe *pe)

 static void eeh_handle_special_event(void)
 {
-	struct eeh_pe *pe, *phb_pe;
+	struct eeh_pe *pe, *phb_pe, *tmp_pe;
+	struct eeh_dev *edev, *tmp_edev;
 	struct pci_bus *bus;
 	struct pci_controller *hose;
 	unsigned long flags;
@@ -919,6 +924,10 @@ static void eeh_handle_special_event(void)
 				    (phb_pe->state & EEH_PE_RECOVERING))
 					continue;

+				eeh_for_each_pe(pe, tmp_pe)
+					eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+						edev->mode &= ~EEH_DEV_NO_HANDLER;
+
 				/* Notify all devices to be down */
 				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 				bus = eeh_pe_bus_get(phb_pe);
From: hexin <hexin.op@gmail.com>
[ Upstream commit 92c8026854c25093946e0d7fe536fd9eac440f06 ]
vfio_pci_enable() saves the device's initial configuration information with the intent that it is restored in vfio_pci_disable(). However, the commit referenced in Fixes: below replaced the call to __pci_reset_function_locked(), which is not wrapped in a state save and restore, with pci_try_reset_function(), which overwrites the restored device state with the current state before applying it to the device. Reinstate use of __pci_reset_function_locked() to return to the desired behavior.
Fixes: 890ed578df82 ("vfio-pci: Use pci "try" reset interface")
Signed-off-by: hexin <hexin15@baidu.com>
Signed-off-by: Liu Qi <liuqi16@baidu.com>
Signed-off-by: Zhang Yu <zhangyu31@baidu.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/vfio/pci/vfio_pci.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 47b229fa5e8ec..4b62eb3b59233 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -221,11 +221,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
 	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

 	/*
-	 * Try to reset the device.  The success of this is dependent on
-	 * being able to lock the device, which is not always possible.
+	 * Try to get the locks ourselves to prevent a deadlock. The
+	 * success of this is dependent on being able to lock the device,
+	 * which is not always possible.
+	 * We can not use the "try" reset interface here, which will
+	 * overwrite the previously restored configuration information.
 	 */
-	if (vdev->reset_works && !pci_try_reset_function(pdev))
-		vdev->needs_reset = false;
+	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
+		if (device_trylock(&pdev->dev)) {
+			if (!__pci_reset_function_locked(pdev))
+				vdev->needs_reset = false;
+			device_unlock(&pdev->dev);
+		}
+		pci_cfg_access_unlock(pdev);
+	}

 	pci_restore_state(pdev);
 out:
From: Nicholas Piggin <npiggin@gmail.com>
[ Upstream commit 0b66370c61fcf5fcc1d6901013e110284da6e2bb ]
Bare metal machine checks run an "early" handler in real mode before running the main handler which reports the event.
The main handler runs exactly as a normal interrupt handler, after the "windup" which sets registers back as they were at interrupt entry. CFAR does not get restored by the windup code, so that will be wrong when the handler is run.
Restore the CFAR to the saved value before running the late handler.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190802105709.27696-8-npiggin@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/kernel/exceptions-64s.S | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a44f1755dc4bf..536718ed033fc 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1465,6 +1465,10 @@ machine_check_handle_early:
 	RFI_TO_USER_OR_KERNEL
 9:
 	/* Deliver the machine check to host kernel in V mode. */
+BEGIN_FTR_SECTION
+	ld	r10,ORIG_GPR3(r1)
+	mtspr	SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 	MACHINE_CHECK_HANDLER_WINDUP
 	b	machine_check_pSeries
From: Nathan Lynch <nathanl@linux.ibm.com>
[ Upstream commit 92c94dfb69e350471473fd3075c74bc68150879e ]
prep_irq_for_idle() is intended to be called before entering H_CEDE (and it is used by the pseries cpuidle driver). However the default pseries idle routine does not call it, leading to mismanaged lazy irq state when the cpuidle driver isn't in use. Manifestations of this include:
* Dropped IPIs in the time immediately after a cpu comes online (before it has installed the cpuidle handler), making the online operation block indefinitely waiting for the new cpu to respond.
* Hitting this WARN_ON in arch_local_irq_restore():

	/*
	 * We should already be hard disabled here. We had bugs
	 * where that wasn't the case so let's dbl check it and
	 * warn if we are wrong. Only do that when IRQ tracing
	 * is enabled as mfmsr() can be costly.
	 */
	if (WARN_ON_ONCE(mfmsr() & MSR_EE))
		__hard_irq_disable();
Call prep_irq_for_idle() from pseries_lpar_idle() and honor its result.
Fixes: 363edbe2614a ("powerpc: Default arch idle could cede processor on pseries")
Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190910225244.25056-1-nathanl@linux.ibm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/platforms/pseries/setup.c | 3 +++
 1 file changed, 3 insertions(+)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 9cc976ff7fecc..88fcf6a95fa67 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -369,6 +369,9 @@ static void pseries_lpar_idle(void)
 	 * low power mode by cedeing processor to hypervisor
 	 */

+	if (!prep_irq_for_idle())
+		return;
+
 	/* Indicate to hypervisor that we are idle. */
 	get_lppaca()->idle = 1;
From: Bart Van Assche <bvanassche@acm.org>
[ Upstream commit dccc96abfb21dc19d69e707c38c8ba439bba7160 ]
The data structure used for log messages is so large that it can cause a boot failure. Since allocations from that data structure can fail anyway, use kmalloc()/kfree() instead.
See also https://bugzilla.kernel.org/show_bug.cgi?id=204119. See also commit ded85c193a39 ("scsi: Implement per-cpu logging buffer") # v4.0.
Reported-by: Jan Palus <jpalus@fastmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Jan Palus <jpalus@fastmail.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/scsi/scsi_logging.c | 48 +++----------------------------------
 include/scsi/scsi_dbg.h     |  2 --
 2 files changed, 3 insertions(+), 47 deletions(-)
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index bd70339c1242e..03d9855a6afd7 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -16,57 +16,15 @@
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_dbg.h>

-#define SCSI_LOG_SPOOLSIZE 4096
-
-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
-#warning SCSI logging bitmask too large
-#endif
-
-struct scsi_log_buf {
-	char buffer[SCSI_LOG_SPOOLSIZE];
-	unsigned long map;
-};
-
-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
-
 static char *scsi_log_reserve_buffer(size_t *len)
 {
-	struct scsi_log_buf *buf;
-	unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
-	unsigned long idx = 0;
-
-	preempt_disable();
-	buf = this_cpu_ptr(&scsi_format_log);
-	idx = find_first_zero_bit(&buf->map, map_bits);
-	if (likely(idx < map_bits)) {
-		while (test_and_set_bit(idx, &buf->map)) {
-			idx = find_next_zero_bit(&buf->map, map_bits, idx);
-			if (idx >= map_bits)
-				break;
-		}
-	}
-	if (WARN_ON(idx >= map_bits)) {
-		preempt_enable();
-		return NULL;
-	}
-	*len = SCSI_LOG_BUFSIZE;
-	return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+	*len = 128;
+	return kmalloc(*len, GFP_ATOMIC);
 }

 static void scsi_log_release_buffer(char *bufptr)
 {
-	struct scsi_log_buf *buf;
-	unsigned long idx;
-	int ret;
-
-	buf = this_cpu_ptr(&scsi_format_log);
-	if (bufptr >= buf->buffer &&
-	    bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
-		idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
-		ret = test_and_clear_bit(idx, &buf->map);
-		WARN_ON(!ret);
-	}
-	preempt_enable();
+	kfree(bufptr);
 }

 static inline const char *scmd_name(const struct scsi_cmnd *scmd)
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index f8170e90b49d2..bbe71a6361db9 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -5,8 +5,6 @@ struct scsi_cmnd;
 struct scsi_device;
 struct scsi_sense_hdr;

-#define SCSI_LOG_BUFSIZE 128
-
 extern void scsi_print_command(struct scsi_cmnd *);
 extern size_t __scsi_format_command(char *, size_t,
 				    const unsigned char *, size_t);