The patch titled
Subject: hugetlbfs: extend hugetlb_vma_lock to private VMAs
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
hugetlbfs-extend-hugetlb_vma_lock-to-private-vmas.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: Rik van Riel <riel@surriel.com>
Subject: hugetlbfs: extend hugetlb_vma_lock to private VMAs
Date: Tue, 3 Oct 2023 23:25:03 -0400
Extend the locking scheme used to protect shared hugetlb mappings from
truncate vs page fault races, in order to protect private hugetlb mappings
(with resv_map) against MADV_DONTNEED.
Add a read-write semaphore to the resv_map data structure, and use that
from the hugetlb_vma_(un)lock_* functions, in preparation for closing the
race between MADV_DONTNEED and page faults.
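To illustrate the intended caller pattern, here is a minimal sketch
(illustrative only; example_zap_private_hugetlb() is a hypothetical helper,
not part of this patch -- the real callers are the hugetlb fault and
MADV_DONTNEED paths):

#include <linux/hugetlb.h>
#include <linux/mm.h>

/*
 * Hypothetical helper, for illustration only.  With this patch the
 * hugetlb_vma_lock_*() helpers map onto resv_map->rw_sema for private
 * mappings, so a writer (e.g. an MADV_DONTNEED zap) excludes concurrent
 * hugetlb page faults, which take the lock for read.
 */
static void example_zap_private_hugetlb(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	hugetlb_vma_lock_write(vma);
	/* ... unmapping of [start, end) would go here ... */
	hugetlb_vma_unlock_write(vma);
}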
Link: https://lkml.kernel.org/r/20231004032814.3108383-2-riel@surriel.com
Fixes: 04ada095dcfc ("hugetlb: don't delete vma_lock in hugetlb MADV_DONTNEED processing")
Signed-off-by: Rik van Riel <riel@surriel.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Rik van Riel <riel@surriel.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/hugetlb.h | 6 +++++
mm/hugetlb.c | 41 ++++++++++++++++++++++++++++++++++----
2 files changed, 43 insertions(+), 4 deletions(-)
--- a/include/linux/hugetlb.h~hugetlbfs-extend-hugetlb_vma_lock-to-private-vmas
+++ a/include/linux/hugetlb.h
@@ -60,6 +60,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -1246,6 +1247,11 @@ static inline bool __vma_shareable_lock(
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+ return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
/*
* Safe version of huge_pte_offset() to check the locks. See comments
* above huge_pte_offset().
--- a/mm/hugetlb.c~hugetlbfs-extend-hugetlb_vma_lock-to-private-vmas
+++ a/mm/hugetlb.c
@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struc
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_are
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_read(&resv_map->rw_sema);
}
}
@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_a
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_read(&resv_map->rw_sema);
}
}
@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_ar
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_write(&resv_map->rw_sema);
}
}
@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_write(&resv_map->rw_sema);
}
}
int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- if (!__vma_shareable_lock(vma))
- return 1;
+ if (__vma_shareable_lock(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- return down_write_trylock(&vma_lock->rw_sema);
+ return down_write_trylock(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ return down_write_trylock(&resv_map->rw_sema);
+ }
+
+ return 1;
}
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
lockdep_assert_held(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ lockdep_assert_held(&resv_map->rw_sema);
}
}
@@ -345,6 +372,11 @@ static void __hugetlb_vma_unlock_write_f
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
__hugetlb_vma_unlock_write_put(vma_lock);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ /* no free for anon vmas, but still need to unlock */
+ up_write(&resv_map->rw_sema);
}
}
@@ -1061,6 +1093,7 @@ struct resv_map *resv_map_alloc(void)
kref_init(&resv_map->refs);
spin_lock_init(&resv_map->lock);
INIT_LIST_HEAD(&resv_map->regions);
+ init_rwsem(&resv_map->rw_sema);
resv_map->adds_in_progress = 0;
/*
_
Patches currently in -mm which might be from riel@surriel.com are
hugetlbfs-extend-hugetlb_vma_lock-to-private-vmas.patch
hugetlbfs-close-race-between-madv_dontneed-and-page-fault.patch
Iain reports that USB devices can't be used to wake a Lenovo Z13 from
suspend. This occurs because on some AMD platforms, even though the Root
Ports advertise PME_Support for D3hot and D3cold, they neither handle PME
messages nor generate wakeup interrupts from those states when amd-pmc has
put the platform in a hardware sleep state.
Iain reported this on an AMD Rembrandt platform, but it also affects
Phoenix SoCs. On Iain's system, a USB4 router below the affected Root Port
generates the PME. To avoid this issue, disable D3 for the Root Port above
the USB4 controller at suspend time.
Restore D3 support at resume so that it can be used by runtime suspend.
The amd-pmc driver doesn't put the platform in a hardware sleep state for
runtime suspend, so PMEs work as advertised.
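Condensed, the approach amounts to the pairing below (a sketch only,
restating the quirk added by the diff; the example_* names are
placeholders): only system-wide suspend goes through amd-pmc, so D3cold is
taken away from the Root Port for that case and handed back at resume,
leaving runtime PM untouched.

#include <linux/pci.h>
#include <linux/suspend.h>

static void example_disable_rp_d3cold_suspend(struct pci_dev *dev)
{
	/* PM_SUSPEND_ON here means runtime suspend: amd-pmc is not involved. */
	if (pm_suspend_target_state == PM_SUSPEND_ON)
		return;

	pci_d3cold_disable(pcie_find_root_port(dev));
}

static void example_enable_rp_d3cold_resume(struct pci_dev *dev)
{
	pci_d3cold_enable(pcie_find_root_port(dev));
}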
Cc: stable@vger.kernel.org # 6.1.y: 70b70a4: PCI/sysfs: Protect driver's D3cold preference from user space
Cc: stable@vger.kernel.org # 6.5.y: 70b70a4: PCI/sysfs: Protect driver's D3cold preference from user space
Cc: stable@vger.kernel.org # 6.6.y: 70b70a4: PCI/sysfs: Protect driver's D3cold preference from user space
Link: https://learn.microsoft.com/en-us/windows-hardware/design/device-experience… [1]
Fixes: 9d26d3a8f1b0 ("PCI: Put PCIe ports into D3 during suspend")
Reported-by: Iain Lane <iain@orangesquash.org.uk>
Closes: https://forums.lenovo.com/t5/Ubuntu/Z13-can-t-resume-from-suspend-with-exte…
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
v20-v21:
* Rewrite commit message, lifting most of what Bjorn clipped down to on v20.
* Use pci_d3cold_disable()/pci_d3cold_enable() instead
* Do the quirk on the USB4 controller instead of RP->USB->RP
---
drivers/pci/quirks.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index eeec1d6f9023..5674065011e7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -6188,3 +6188,47 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
+
+#ifdef CONFIG_SUSPEND
+/*
+ * Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
+ * if the SoC is put into a hardware sleep state by the amd-pmc driver, the
+ * Root Ports don't generate wakeup interrupts for USB devices.
+ *
+ * When suspending, disable D3 support for the Root Port so we don't use it.
+ * Restore D3 support when resuming.
+ */
+static void quirk_enable_rp_d3cold(struct pci_dev *dev)
+{
+ pci_d3cold_enable(pcie_find_root_port(dev));
+}
+
+static void quirk_disable_rp_d3cold_suspend(struct pci_dev *dev)
+{
+ struct pci_dev *rp;
+
+ /*
+ * PM_SUSPEND_ON means we're doing runtime suspend, which means
+ * amd-pmc will not be involved so PMEs during D3 work as advertised.
+ *
+ * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
+ * sleep state, but we assume amd-pmc is always present.
+ */
+ if (pm_suspend_target_state == PM_SUSPEND_ON)
+ return;
+
+ rp = pcie_find_root_port(dev);
+ pci_d3cold_disable(rp);
+ dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
+}
+/* Rembrandt (yellow_carp) */
+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, quirk_disable_rp_d3cold_suspend);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, quirk_enable_rp_d3cold);
+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, quirk_disable_rp_d3cold_suspend);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, quirk_enable_rp_d3cold);
+/* Phoenix (pink_sardine) */
+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, quirk_disable_rp_d3cold_suspend);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, quirk_enable_rp_d3cold);
+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, quirk_disable_rp_d3cold_suspend);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, quirk_enable_rp_d3cold);
+#endif /* CONFIG_SUSPEND */
--
2.34.1
There is a potential race condition in amdtee_close_session that may
cause use-after-free in amdtee_open_session. For instance, if a session
has refcount == 1, and one thread tries to free this session via:
kref_put(&sess->refcount, destroy_session);
the reference count will get decremented, and the next step would be to
call destroy_session(). However, if amdtee_open_session() is called in
another thread before destroy_session() has completed execution,
alloc_session() may return the same 'sess', which will later be freed in
destroy_session(), leading to a use-after-free in amdtee_open_session().
To fix this issue, treat the decrement of sess->refcount and the removal
of 'sess' from the session list in destroy_session() as one critical
section, so that they execute atomically.
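The fix relies on kref_put_mutex(), which takes the given mutex only when
the refcount is about to drop to zero and then calls the release function
with that mutex held (the release function is expected to unlock it, as
destroy_session() now does). A minimal sketch of the pattern, with made-up
names rather than the driver's own:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(obj_list_mutex);

struct obj {
	struct kref refcount;
	struct list_head list_node;
};

/* Called with obj_list_mutex held by kref_put_mutex(); must unlock it. */
static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, refcount);

	list_del(&o->list_node);
	mutex_unlock(&obj_list_mutex);
	kfree(o);
}

static void obj_put(struct obj *o)
{
	/*
	 * The mutex is taken only on the final put, so a lookup that holds
	 * obj_list_mutex and kref_get()s an entry can no longer race with
	 * that entry being unlinked and freed.
	 */
	kref_put_mutex(&o->refcount, obj_release, &obj_list_mutex);
}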
Fixes: 757cc3e9ff1d ("tee: add AMD-TEE driver")
Cc: stable@vger.kernel.org
Signed-off-by: Rijo Thomas <Rijo-john.Thomas@amd.com>
---
v2:
* Introduced kref_put_mutex() as suggested by Sumit Garg.
drivers/tee/amdtee/core.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 372d64756ed6..3c15f6a9e91c 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -217,12 +217,12 @@ static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
return rc;
}
+/* mutex must be held by caller */
static void destroy_session(struct kref *ref)
{
struct amdtee_session *sess = container_of(ref, struct amdtee_session,
refcount);
- mutex_lock(&session_list_mutex);
list_del(&sess->list_node);
mutex_unlock(&session_list_mutex);
kfree(sess);
@@ -272,7 +272,8 @@ int amdtee_open_session(struct tee_context *ctx,
if (arg->ret != TEEC_SUCCESS) {
pr_err("open_session failed %d\n", arg->ret);
handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
+ kref_put_mutex(&sess->refcount, destroy_session,
+ &session_list_mutex);
goto out;
}
@@ -290,7 +291,8 @@ int amdtee_open_session(struct tee_context *ctx,
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
+ kref_put_mutex(&sess->refcount, destroy_session,
+ &session_list_mutex);
rc = -ENOMEM;
goto out;
}
@@ -331,7 +333,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
+ kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex);
return 0;
}
--
2.25.1
This suggests a commit (and a follow-up fix for it) from 5.16+ for the
stable trees because it fixes the use of watchdog.handle_boot_enabled=0
with iTCO, closing a monitoring gap in OTA update scenarios.
These patches apply to, and have been tested against, the 5.15, 5.10 and
5.4 stable heads.
Jan
Cc: Malin Jonsson <malin.jonsson@ericsson.com>
Mika Westerberg (2):
watchdog: iTCO_wdt: No need to stop the timer in probe
watchdog: iTCO_wdt: Set NO_REBOOT if the watchdog is not already
running
drivers/watchdog/iTCO_wdt.c | 26 +++++++++++++++++++++-----
1 file changed, 21 insertions(+), 5 deletions(-)
--
2.35.3
Hi,
Please backport commit dad651b2a44e ("nvme-pci: do not set the NUMA node
of device if it has none") to stable branches 6.1, 5.15, 5.10, 5.4,
4.19, and 4.14. The commit fixes a4aea5623d4a ("NVMe: Convert to
blk-mq") which was introduced in v3.19 but I forgot to add this when
sending the patch.
--
Regards,
Pratyush Yadav