This is the start of the stable review cycle for the 4.14.286 release.
There are 35 patches in this series, all of which will be posted as a response
to this one. If anyone has any issues with these being applied, please
let me know.
Responses should be made by Sat, 02 Jul 2022 13:32:22 +0000.
Anything received after that time might be too late.
The whole patch series can be found in one patch at:
https://www.kernel.org/pub/linux/kernel/v4.x/stable-review/patch-4.14.286-r…
or in the git tree and branch at:
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git linux-4.14.y
and the diffstat can be found below.
thanks,
greg k-h
-------------
Pseudo-Shortlog of commits:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Linux 4.14.286-rc1
Liu Shixin <liushixin2@huawei.com>
swiotlb: skip swiotlb_bounce when orig_addr is zero
Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
Hsin-Yi Wang <hsinyi@chromium.org>
fdt: Update CRC check for rng-seed
Masahiro Yamada <masahiroy@kernel.org>
xen: unexport __init-annotated xen_xlate_map_ballooned_pages()
Christoph Hellwig <hch@lst.de>
drm: remove drm_fb_helper_modinit
Jason A. Donenfeld <Jason@zx2c4.com>
powerpc/pseries: wire up rng during setup_arch()
Masahiro Yamada <masahiroy@kernel.org>
modpost: fix section mismatch check for exported init/exit sections
Miaoqian Lin <linmq006@gmail.com>
ARM: cns3xxx: Fix refcount leak in cns3xxx_init
Miaoqian Lin <linmq006@gmail.com>
ARM: Fix refcount leak in axxia_boot_secondary
Miaoqian Lin <linmq006@gmail.com>
ARM: exynos: Fix refcount leak in exynos_map_pmu
Lucas Stach <l.stach@pengutronix.de>
ARM: dts: imx6qdl: correct PU regulator ramp delay
Jason A. Donenfeld <Jason@zx2c4.com>
powerpc/powernv: wire up rng during setup_arch
Andrew Donnellan <ajd@linux.ibm.com>
powerpc/rtas: Allow ibm,platform-dump RTAS call with null buffer address
Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
powerpc: Enable execve syscall exit tracepoint
Liang He <windhl@126.com>
xtensa: Fix refcount leak bug in time.c
Liang He <windhl@126.com>
xtensa: xtfpga: Fix refcount leak bug in setup
Hans de Goede <hdegoede@redhat.com>
iio: adc: axp288: Override TS pin bias current for some models
Vincent Whitchurch <vincent.whitchurch@axis.com>
iio: trigger: sysfs: fix use-after-free on remove
Zheyu Ma <zheyuma97@gmail.com>
iio: gyro: mpu3050: Fix the error handling in mpu3050_power_up()
Haibo Chen <haibo.chen@nxp.com>
iio: accel: mma8452: ignore the return value of reset operation
Dmitry Rokosov <DDRokosov@sberdevices.ru>
iio:accel:bma180: rearrange iio trigger get and register
Xu Yang <xu.yang_2@nxp.com>
usb: chipidea: udc: check request status before setting device address
Baruch Siach <baruch@tkos.co.il>
iio: adc: vf610: fix conversion mode sysfs node name
Kai-Heng Feng <kai.heng.feng@canonical.com>
igb: Make DMA faster when CPU is active on the PCIe link
huhai <huhai@kylinos.cn>
MIPS: Remove repetitive increase irq_err_count
Julien Grall <jgrall@amazon.com>
x86/xen: Remove undefined behavior in setup_features()
Jay Vosburgh <jay.vosburgh@canonical.com>
bonding: ARP monitor spams NETDEV_NOTIFY_PEERS notifiers
Macpaul Lin <macpaul.lin@mediatek.com>
USB: serial: option: add Quectel RM500K module support
Yonglin Tan <yonglin.tan@outlook.com>
USB: serial: option: add Quectel EM05-G modem
Carlo Lobrano <c.lobrano@gmail.com>
USB: serial: option: add Telit LE910Cx 0x1250 composition
Jason A. Donenfeld <Jason@zx2c4.com>
random: quiet urandom warning ratelimit suppression message
Nikos Tsironis <ntsironis@arrikto.com>
dm era: commit metadata in postsuspend after worker stops
Edward Wu <edwardwu@realtek.com>
ata: libata: add qc->flags in ata_qc_complete_template tracepoint
Jason A. Donenfeld <Jason@zx2c4.com>
random: schedule mix_interrupt_randomness() less often
Jiri Slaby <jslaby@suse.cz>
vt: drop old FONT ioctls
-------------
Diffstat:
Documentation/ABI/testing/sysfs-bus-iio-vf610 | 2 +-
Makefile | 4 +-
arch/arm/boot/dts/imx6qdl.dtsi | 2 +-
arch/arm/mach-axxia/platsmp.c | 1 +
arch/arm/mach-cns3xxx/core.c | 2 +
arch/arm/mach-exynos/exynos.c | 1 +
arch/mips/vr41xx/common/icu.c | 2 -
arch/powerpc/kernel/process.c | 2 +-
arch/powerpc/kernel/rtas.c | 11 +-
arch/powerpc/platforms/powernv/powernv.h | 2 +
arch/powerpc/platforms/powernv/rng.c | 52 ++++++---
arch/powerpc/platforms/powernv/setup.c | 2 +
arch/powerpc/platforms/pseries/pseries.h | 2 +
arch/powerpc/platforms/pseries/rng.c | 11 +-
arch/powerpc/platforms/pseries/setup.c | 1 +
arch/x86/include/asm/kexec.h | 6 ++
arch/xtensa/kernel/time.c | 1 +
arch/xtensa/platforms/xtfpga/setup.c | 1 +
drivers/char/random.c | 4 +-
drivers/gpio/gpio-vr41xx.c | 2 -
drivers/gpu/drm/drm_crtc_helper_internal.h | 10 --
drivers/gpu/drm/drm_fb_helper.c | 21 ----
drivers/gpu/drm/drm_kms_helper_common.c | 25 +++--
drivers/iio/accel/bma180.c | 3 +-
drivers/iio/accel/mma8452.c | 10 +-
drivers/iio/adc/axp288_adc.c | 8 ++
drivers/iio/gyro/mpu3050-core.c | 1 +
drivers/iio/trigger/iio-trig-sysfs.c | 1 +
drivers/md/dm-era-target.c | 8 +-
drivers/net/bonding/bond_main.c | 4 +-
drivers/net/ethernet/intel/igb/igb_main.c | 12 +--
drivers/of/fdt.c | 8 +-
drivers/tty/vt/vt.c | 34 +-----
drivers/tty/vt/vt_ioctl.c | 149 --------------------------
drivers/usb/chipidea/udc.c | 3 +
drivers/usb/serial/option.c | 6 ++
drivers/xen/features.c | 2 +-
drivers/xen/xlate_mmu.c | 1 -
include/linux/kd.h | 8 --
include/linux/kexec.h | 26 ++++-
include/linux/ratelimit.h | 12 ++-
include/trace/events/libata.h | 1 +
kernel/kexec_file.c | 18 ----
lib/swiotlb.c | 3 +-
scripts/mod/modpost.c | 2 +-
45 files changed, 174 insertions(+), 313 deletions(-)
-------------
commit dbe97cff7dd9f0f75c524afdd55ad46be3d15295 upstream
unmap_grant_pages() currently waits for the pages to no longer be used.
In https://github.com/QubesOS/qubes-issues/issues/7481, this led to a
deadlock against i915: i915 was waiting for gntdev's MMU notifier to
finish, while gntdev was waiting for i915 to free its pages. I also
believe this is responsible for various deadlocks I have experienced in
the past.
Avoid these problems by making unmap_grant_pages async. This requires
making it return void, as any errors will not be available when the
function returns. Fortunately, the only use of the return value is a
WARN_ON(), which can be replaced by a WARN_ON() at the point where the error is
detected. Additionally, a failed call will not prevent further calls
from being made, but this is harmless.
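
In outline, the synchronous gnttab_unmap_refs_sync() call is replaced by
gnttab_unmap_refs_async() plus a completion callback, and the status check
that used to produce the error return becomes a WARN_ON() inside that
callback. Condensed from the hunks further down (same names as in the
patch, details such as the notify handling trimmed):

static void __unmap_grant_pages_done(int result,
                                     struct gntab_unmap_queue_data *data)
{
        struct gntdev_grant_map *map = data->data;
        unsigned int offset = data->unmap_ops - map->unmap_ops;
        unsigned int i;

        for (i = 0; i < data->count; i++) {
                /* The old -EINVAL return value becomes a WARN_ON() here. */
                WARN_ON(map->unmap_ops[offset + i].status);
                map->unmap_ops[offset + i].handle = -1;
        }
        atomic_sub(data->count, &map->live_grants);
        /* Drop the reference taken before the async call was queued. */
        gntdev_put_map(NULL, map);
}

static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
                                int pages)
{
        map->unmap_data.unmap_ops = map->unmap_ops + offset;
        map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
        map->unmap_data.pages = map->pages + offset;
        map->unmap_data.count = pages;
        map->unmap_data.done = __unmap_grant_pages_done;
        map->unmap_data.data = map;
        /* Keep the map alive until the callback has run. */
        refcount_inc(&map->users);

        gnttab_unmap_refs_async(&map->unmap_data);
}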
Because unmap_grant_pages is now async, the grant handle will be set to
INVALID_GRANT_HANDLE too late to prevent multiple unmaps of the same
handle. Instead, a separate bool array is allocated for this purpose.
This wastes memory, but stuffing this information in padding bytes is
too fragile. Furthermore, it is necessary to grab a reference to the
map before making the asynchronous call, and release the reference when
the call returns.
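
Concretely, unmap_grant_pages() now tests and sets that per-grant flag
before queueing work, so each grant is only ever submitted for unmapping
once. Excerpt, condensed from the unmap_grant_pages() hunk below:

        while (pages) {
                /* Skip grants that are already being torn down. */
                while (pages && map->being_removed[offset]) {
                        offset++;
                        pages--;
                }
                /* Claim the next contiguous run before unmapping it. */
                range = 0;
                while (range < pages) {
                        if (map->being_removed[offset + range])
                                break;
                        map->being_removed[offset + range] = true;
                        range++;
                }
                if (range)
                        __unmap_grant_pages(map, offset, range);
                offset += range;
                pages -= range;
        }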
It is also necessary to guard against reentrancy in gntdev_put_map(),
and to handle the case where userspace tries to map a mapping whose
contents have not all been freed yet.
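
Both points correspond to two small pieces of the diff, shown condensed
here: gntdev_put_map() temporarily resets its user count so the recursive
put coming from the unmap path returns early instead of freeing the map,
and gntdev_mmap() refuses with -EAGAIN while grants from a previous
mapping are still live:

void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
        if (!refcount_dec_and_test(&map->users))
                return;

        if (map->pages && !use_ptemod) {
                /* Reset to 1 by hand; refcount_inc() on zero would WARN(). */
                refcount_set(&map->users, 1);
                unmap_grant_pages(map, 0, map->count);
                /* The asynchronous unmap may still hold a reference. */
                if (!refcount_dec_and_test(&map->users))
                        return;
        }
        /* ... notification and gntdev_free_map() follow as before ... */
}

and, in gntdev_mmap():

        if (atomic_read(&map->live_grants)) {
                err = -EAGAIN;  /* previous contents not fully unmapped yet */
                goto unlock_out;
        }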
Fixes: 745282256c75 ("xen/gntdev: safely unmap grants in case they are still in use")
Cc: stable@vger.kernel.org
Signed-off-by: Demi Marie Obenour <demi@invisiblethingslab.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20220622022726.2538-1-demi@invisiblethingslab.com
Signed-off-by: Juergen Gross <jgross@suse.com>
---
drivers/xen/gntdev-common.h | 7 ++
drivers/xen/gntdev.c | 142 +++++++++++++++++++++++++-----------
2 files changed, 106 insertions(+), 43 deletions(-)
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 20d7d059dadb..40ef379c28ab 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -16,6 +16,7 @@
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <xen/interface/event_channel.h>
+#include <xen/grant_table.h>
struct gntdev_dmabuf_priv;
@@ -56,6 +57,7 @@ struct gntdev_grant_map {
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_map_grant_ref *kmap_ops;
struct gnttab_unmap_grant_ref *kunmap_ops;
+ bool *being_removed;
struct page **pages;
unsigned long pages_vm_start;
@@ -73,6 +75,11 @@ struct gntdev_grant_map {
/* Needed to avoid allocation in gnttab_dma_free_pages(). */
xen_pfn_t *frames;
#endif
+
+ /* Number of live grants */
+ atomic_t live_grants;
+ /* Needed to avoid allocation in __unmap_grant_pages */
+ struct gntab_unmap_queue_data unmap_data;
};
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 54778aadf618..a631a453eb57 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -60,10 +61,11 @@ module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
"Maximum number of grants that may be mapped by one mapping request");
+/* True in PV mode, false otherwise */
static int use_ptemod;
-static int unmap_grant_pages(struct gntdev_grant_map *map,
- int offset, int pages);
+static void unmap_grant_pages(struct gntdev_grant_map *map,
+ int offset, int pages);
static struct miscdevice gntdev_miscdev;
@@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
kvfree(map->unmap_ops);
kvfree(map->kmap_ops);
kvfree(map->kunmap_ops);
+ kvfree(map->being_removed);
kfree(map);
}
@@ -140,12 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
add->kunmap_ops = kvcalloc(count,
sizeof(add->kunmap_ops[0]), GFP_KERNEL);
add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+ add->being_removed =
+ kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
if (NULL == add->grants ||
- NULL == add->map_ops ||
- NULL == add->unmap_ops ||
NULL == add->kmap_ops ||
NULL == add->kunmap_ops ||
- NULL == add->pages)
+ NULL == add->pages ||
+ NULL == add->being_removed)
goto err;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
@@ -240,9 +244,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
if (!refcount_dec_and_test(&map->users))
return;
- if (map->pages && !use_ptemod)
+ if (map->pages && !use_ptemod) {
+ /*
+ * Increment the reference count. This ensures that the
+ * subsequent call to unmap_grant_pages() will not wind up
+ * re-entering itself. It *can* wind up calling
+ * gntdev_put_map() recursively, but such calls will be with a
+ * reference count greater than 1, so they will return before
+ * this code is reached. The recursion depth is thus limited to
+ * 1. Do NOT use refcount_inc() here, as it will detect that
+ * the reference count is zero and WARN().
+ */
+ refcount_set(&map->users, 1);
+
+ /*
+ * Unmap the grants. This may or may not be asynchronous, so it
+ * is possible that the reference count is 1 on return, but it
+ * could also be greater than 1.
+ */
unmap_grant_pages(map, 0, map->count);
+ /* Check if the memory now needs to be freed */
+ if (!refcount_dec_and_test(&map->users))
+ return;
+
+ /*
+ * All pages have been returned to the hypervisor, so free the
+ * map.
+ */
+ }
+
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
@@ -288,6 +319,7 @@ static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
+ size_t alloced = 0;
int i, err = 0;
if (!use_ptemod) {
@@ -336,87 +368,109 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
map->pages, map->count);
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status == GNTST_okay)
+ if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
- else if (!err)
+ if (!use_ptemod)
+ alloced++;
+ } else if (!err)
err = -EINVAL;
if (map->flags & GNTMAP_device_map)
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
if (use_ptemod) {
- if (map->kmap_ops[i].status == GNTST_okay)
+ if (map->kmap_ops[i].status == GNTST_okay) {
+ if (map->map_ops[i].status == GNTST_okay)
+ alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
- else if (!err)
+ } else if (!err)
err = -EINVAL;
}
}
+ atomic_add(alloced, &map->live_grants);
return err;
}
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void __unmap_grant_pages_done(int result,
+ struct gntab_unmap_queue_data *data)
{
- int i, err = 0;
- struct gntab_unmap_queue_data unmap_data;
+ unsigned int i;
+ struct gntdev_grant_map *map = data->data;
+ unsigned int offset = data->unmap_ops - map->unmap_ops;
+ for (i = 0; i < data->count; i++) {
+ WARN_ON(map->unmap_ops[offset+i].status);
+ pr_debug("unmap handle=%d st=%d\n",
+ map->unmap_ops[offset+i].handle,
+ map->unmap_ops[offset+i].status);
+ map->unmap_ops[offset+i].handle = -1;
+ }
+ /*
+ * Decrease the live-grant counter. This must happen after the loop to
+ * prevent premature reuse of the grants by gnttab_mmap().
+ */
+ atomic_sub(data->count, &map->live_grants);
+
+ /* Release reference taken by __unmap_grant_pages */
+ gntdev_put_map(NULL, map);
+}
+
+static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
+{
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
+
if (pgno >= offset && pgno < offset + pages) {
/* No need for kmap, pages are in lowmem */
uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
+
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
}
}
- unmap_data.unmap_ops = map->unmap_ops + offset;
- unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
- unmap_data.pages = map->pages + offset;
- unmap_data.count = pages;
+ map->unmap_data.unmap_ops = map->unmap_ops + offset;
+ map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+ map->unmap_data.pages = map->pages + offset;
+ map->unmap_data.count = pages;
+ map->unmap_data.done = __unmap_grant_pages_done;
+ map->unmap_data.data = map;
+ refcount_inc(&map->users); /* to keep map alive during async call below */
- err = gnttab_unmap_refs_sync(&unmap_data);
- if (err)
- return err;
-
- for (i = 0; i < pages; i++) {
- if (map->unmap_ops[offset+i].status)
- err = -EINVAL;
- pr_debug("unmap handle=%d st=%d\n",
- map->unmap_ops[offset+i].handle,
- map->unmap_ops[offset+i].status);
- map->unmap_ops[offset+i].handle = -1;
- }
- return err;
+ gnttab_unmap_refs_async(&map->unmap_data);
}
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
- int range, err = 0;
+ int range;
+
+ if (atomic_read(&map->live_grants) == 0)
+ return; /* Nothing to do */
pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
/* It is possible the requested range will have a "hole" where we
* already unmapped some of the grants. Only unmap valid ranges.
*/
- while (pages && !err) {
- while (pages && map->unmap_ops[offset].handle == -1) {
+ while (pages) {
+ while (pages && map->being_removed[offset]) {
offset++;
pages--;
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset+range].handle == -1)
+ if (map->being_removed[offset + range])
break;
+ map->being_removed[offset + range] = true;
range++;
}
- err = __unmap_grant_pages(map, offset, range);
+ if (range)
+ __unmap_grant_pages(map, offset, range);
offset += range;
pages -= range;
}
-
- return err;
}
/* ------------------------------------------------------------------ */
@@ -468,7 +522,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
struct gntdev_grant_map *map =
container_of(mn, struct gntdev_grant_map, notifier);
unsigned long mstart, mend;
- int err;
if (!mmu_notifier_range_blockable(range))
return false;
@@ -489,10 +542,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
map->index, map->count,
map->vma->vm_start, map->vma->vm_end,
range->start, range->end, mstart, mend);
- err = unmap_grant_pages(map,
+ unmap_grant_pages(map,
(mstart - map->vma->vm_start) >> PAGE_SHIFT,
(mend - mstart) >> PAGE_SHIFT);
- WARN_ON(err);
return true;
}
@@ -980,6 +1032,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
if (use_ptemod && map->vma)
goto unlock_out;
+ if (atomic_read(&map->live_grants)) {
+ err = -EAGAIN;
+ goto unlock_out;
+ }
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
--
Sincerely,
Demi Marie Obenour (she/her/hers)
Invisible Things Lab