From: Maíra Canal <mcanal(a)igalia.com>
[ Upstream commit a0e6a017ab56936c0405fe914a793b241ed25ee0 ]
Currently, it is possible for the composer to be set as enabled and then
as disabled without an intervening call to vkms_vblank_simulate(). This
is problematic, because the driver would skip one CRC output, causing CRC
tests to fail. Therefore, we need to make sure that, each time the
composer is set as enabled, a composer job is added to the queue.
To provide this guarantee, add a mutex that is locked before the
composer is set as enabled and unlocked only after the composer job is
added to the queue. This ensures that the driver won't skip a CRC entry.
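To make the intent easier to follow, here is a rough sketch of the
resulting lock lifecycle (paraphrased from the diff below, not the
literal driver code):

  /* vkms_set_composer(out, true) -- atomic commit path */
  mutex_lock(&out->enabled_lock);
  out->composer_enabled = true;
  /* intentionally left locked: a later disable must wait for the vblank */

  /* vkms_vblank_simulate() -- hrtimer path */
  composer_enabled = output->composer_enabled;
  mutex_unlock(&output->enabled_lock);
  if (state && composer_enabled)
          queue_work(output->composer_workq, &state->composer_work);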
This race condition affects the IGT test "writeback-check-output",
making the test fail and also leaking writeback framebuffers, as the
writeback job is queued but never signaled. This patch avoids both
problems.
[v2]:
* Create a new mutex and keep the spinlock across the atomic commit in
order to avoid interrupts that could result in deadlocks.
[ Backport to 5.15: context cleanly applied with no semantic changes.
Build-tested. ]
Signed-off-by: Maíra Canal <mcanal(a)igalia.com>
Reviewed-by: Arthur Grillo <arthurgrillo(a)riseup.net>
Signed-off-by: Maíra Canal <mairacanal(a)riseup.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20230523123207.173976-1-mcana…
Signed-off-by: Pranav Tyagi <pranav.tyagi03(a)gmail.com>
---
drivers/gpu/drm/vkms/vkms_composer.c | 9 +++++++--
drivers/gpu/drm/vkms/vkms_crtc.c | 9 +++++----
drivers/gpu/drm/vkms/vkms_drv.h | 4 +++-
3 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 9e8204be9a14..77fced36af55 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -332,10 +332,15 @@ void vkms_set_composer(struct vkms_output *out, bool enabled)
if (enabled)
drm_crtc_vblank_get(&out->crtc);
- spin_lock_irq(&out->lock);
+ mutex_lock(&out->enabled_lock);
old_enabled = out->composer_enabled;
out->composer_enabled = enabled;
- spin_unlock_irq(&out->lock);
+
+ /* the composition wasn't enabled, so unlock the lock to make sure the lock
+ * will be balanced even if we have a failed commit
+ */
+ if (!out->composer_enabled)
+ mutex_unlock(&out->enabled_lock);
if (old_enabled)
drm_crtc_vblank_put(&out->crtc);
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 57bbd32e9beb..1b02dee8587a 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -16,7 +16,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
struct drm_crtc *crtc = &output->crtc;
struct vkms_crtc_state *state;
u64 ret_overrun;
- bool ret, fence_cookie;
+ bool ret, fence_cookie, composer_enabled;
fence_cookie = dma_fence_begin_signalling();
@@ -25,15 +25,15 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
if (ret_overrun != 1)
pr_warn("%s: vblank timer overrun\n", __func__);
- spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
state = output->composer_state;
- spin_unlock(&output->lock);
+ composer_enabled = output->composer_enabled;
+ mutex_unlock(&output->enabled_lock);
- if (state && output->composer_enabled) {
+ if (state && composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
/* update frame_start only if a queued vkms_composer_worker()
@@ -293,6 +293,7 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
spin_lock_init(&vkms_out->lock);
spin_lock_init(&vkms_out->composer_lock);
+ mutex_init(&vkms_out->enabled_lock);
vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
if (!vkms_out->composer_workq)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index d48c23d40ce5..666997e2bcab 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -83,8 +83,10 @@ struct vkms_output {
struct workqueue_struct *composer_workq;
/* protects concurrent access to composer */
spinlock_t lock;
+ /* guarantees that if the composer is enabled, a job will be queued */
+ struct mutex enabled_lock;
- /* protected by @lock */
+ /* protected by @enabled_lock */
bool composer_enabled;
struct vkms_crtc_state *composer_state;
--
2.49.0
USB3 devices connected behind several external suspended hubs may not
be detected when plugged in, due to aggressive hub runtime PM suspend.
The hub driver immediately runtime-suspends hubs if there are no
active children or port activity.
There is a delay between the wake signal causing hub resume and
driver-visible port activity on the hub's downstream-facing ports.
Most of the LFPS handshake, resume signaling and link training done
on the downstream ports is not visible to the hub driver until completed,
at which point the device appears fully enabled and running on the port.
This delay between the wake signal and a detectable port change is even
more significant with chained suspended hubs, where the wake signal will
propagate upstream first. Suspended hubs will only start resuming their
downstream ports after the upstream-facing port resumes.
The hub driver may resume a USB3 hub, read the status of all ports, not
yet see any activity, and runtime-suspend the hub again before any
port activity is visible.
This exact case was seen when connecting USB3 devices to a suspended
Thunderbolt dock.
The USB3 specification defines a 100ms tU3WakeupRetryDelay, indicating
that USB3 devices expect to be resumed within 100ms after signaling wake.
If not, the device will resend the wake signal.
Give USB3 hubs twice this time (200ms) to detect any port changes after
resume, before allowing the hub to runtime suspend again.
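As a quick illustration, the new define simply doubles the spec value
(this is the same constant added in the diff below):

  /* tU3WakeupRetryDelay (USB3 spec) = 100 ms; give hubs twice that */
  #define USB_SS_PORT_U0_WAKE_TIME  200  /* ms */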
Cc: stable(a)vger.kernel.org
Fixes: 2839f5bcfcfc ("USB: Turn on auto-suspend for USB 3.0 hubs.")
Acked-by: Alan Stern <stern(a)rowland.harvard.edu>
Signed-off-by: Mathias Nyman <mathias.nyman(a)linux.intel.com>
---
v2 changes
- Update commit in Fixes tag
- Add Ack from Alan Stern
drivers/usb/core/hub.c | 33 ++++++++++++++++++++++++++++++++-
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 770d1e91183c..5c12dfdef569 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -68,6 +68,12 @@
*/
#define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT 500 /* ms */
+/*
+ * Give SS hubs 200ms time after wake to train downstream links before
+ * assuming no port activity and allowing hub to runtime suspend back.
+ */
+#define USB_SS_PORT_U0_WAKE_TIME 200 /* ms */
+
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -1068,11 +1074,12 @@ int usb_remove_device(struct usb_device *udev)
enum hub_activation_type {
HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */
- HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME,
+ HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME, HUB_POST_RESUME,
};
static void hub_init_func2(struct work_struct *ws);
static void hub_init_func3(struct work_struct *ws);
+static void hub_post_resume(struct work_struct *ws);
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
@@ -1095,6 +1102,13 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
goto init2;
goto init3;
}
+
+ if (type == HUB_POST_RESUME) {
+ usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+ hub_put(hub);
+ return;
+ }
+
hub_get(hub);
/* The superspeed hub except for root hub has to use Hub Depth
@@ -1343,6 +1357,16 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
device_unlock(&hdev->dev);
}
+ if (type == HUB_RESUME && hub_is_superspeed(hub->hdev)) {
+ /* give usb3 downstream links training time after hub resume */
+ INIT_DELAYED_WORK(&hub->init_work, hub_post_resume);
+ queue_delayed_work(system_power_efficient_wq, &hub->init_work,
+ msecs_to_jiffies(USB_SS_PORT_U0_WAKE_TIME));
+ usb_autopm_get_interface_no_resume(
+ to_usb_interface(hub->intfdev));
+ return;
+ }
+
hub_put(hub);
}
@@ -1361,6 +1385,13 @@ static void hub_init_func3(struct work_struct *ws)
hub_activate(hub, HUB_INIT3);
}
+static void hub_post_resume(struct work_struct *ws)
+{
+ struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
+
+ hub_activate(hub, HUB_POST_RESUME);
+}
+
enum hub_quiescing_type {
HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND
};
--
2.43.0
Changes from v1:
* Fix minor typos
* Use the more generic and standard ex_handler_default(). Had the
original code used this helper, the bug would not have been there
in the first place.
--
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Right now, if XRSTOR fails, a console message like this is printed:
Bad FPU state detected at restore_fpregs_from_fpstate+0x9a/0x170, reinitializing FPU registers.
However, the text location (...+0x9a in this case) is the instruction
*AFTER* the XRSTOR. The highlighted instruction in the "Code:" dump
also points one instruction late.
The reason is that the "fixup" moves RIP past the bad XRSTOR so
execution can keep running after returning from the #GP handler. But it
does this fixup before the warning is printed.
The resulting warning output is nonsensical because it looks like the
non-FPU-related instruction is #GP'ing.
Do not fix up RIP until after printing the warning. Do this by using
the more generic and standard ex_handler_default().
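In other words, the ordering changes roughly like this (simplified
sketch of the handler; see the diff below for the real change):

  /* before: RIP already points past the XRSTOR when the warning fires */
  regs->ip = ex_fixup_addr(fixup);
  WARN_ONCE(1, "Bad FPU state detected at %pB, ...", (void *)instruction_pointer(regs));

  /* after: warn with the original RIP, then let ex_handler_default() fix it up */
  WARN_ONCE(1, "Bad FPU state detected at %pB, ...", (void *)instruction_pointer(regs));
  fpu_reset_from_exception_fixup();
  return ex_handler_default(fixup, regs);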
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Fixes: d5c8028b4788 ("x86/fpu: Reinitialize FPU registers if restoring FPU state fails")
Acked-by: Alison Schofield <alison.schofield(a)intel.com>
Cc: stable(a)vger.kernel.org
Cc: Eric Biggers <ebiggers(a)google.com>
Cc: Rik van Riel <riel(a)redhat.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Chang S. Bae <chang.seok.bae(a)intel.com>
---
b/arch/x86/mm/extable.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff -puN arch/x86/mm/extable.c~fixup-fpu-gp-ip-later arch/x86/mm/extable.c
--- a/arch/x86/mm/extable.c~fixup-fpu-gp-ip-later 2025-06-24 13:58:09.722855233 -0700
+++ b/arch/x86/mm/extable.c 2025-06-24 13:58:09.736856435 -0700
@@ -122,13 +122,12 @@ static bool ex_handler_sgx(const struct
static bool ex_handler_fprestore(const struct exception_table_entry *fixup,
struct pt_regs *regs)
{
- regs->ip = ex_fixup_addr(fixup);
-
WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
(void *)instruction_pointer(regs));
fpu_reset_from_exception_fixup();
- return true;
+
+ return ex_handler_default(fixup, regs);
}
/*
_
'rx_ring->size' is the count of ring descriptors multiplied by the
size of one descriptor. When the count of ring descriptors is increased,
it may exceed the maximum pool size allowed by page_pool.
[ 864.209610] page_pool_create_percpu() gave up with errno -7
[ 864.209613] txgbe 0000:11:00.0: Page pool creation failed: -7
Fix to set the pool_size to the count of ring descriptors.
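A rough worked example (the descriptor count and 16-byte descriptor
size are hypothetical numbers; the 32768 cap is the page_pool ring-size
limit checked in page_pool_init()):

  rx_ring->count = 8192 descriptors
  rx_ring->size  = 8192 * 16 bytes = 131072  -> exceeds 32768, pool creation fails with -E2BIG (-7)
  pool_size = rx_ring->count = 8192           -> within the limit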
Fixes: 850b971110b2 ("net: libwx: Allocate Rx and Tx resources")
Cc: stable(a)vger.kernel.org
Signed-off-by: Jiawen Wu <jiawenwu(a)trustnetic.com>
---
drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 7f2e6cddfeb1..c57cc4f27249 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -2623,7 +2623,7 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring)
struct page_pool_params pp_params = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
- .pool_size = rx_ring->size,
+ .pool_size = rx_ring->count,
.nid = dev_to_node(rx_ring->dev),
.dev = rx_ring->dev,
.dma_dir = DMA_FROM_DEVICE,
--
2.48.1
The following commit has been merged into the x86/fpu branch of tip:
Commit-ID: 1cec9ac2d071cfd2da562241aab0ef701355762a
Gitweb: https://git.kernel.org/tip/1cec9ac2d071cfd2da562241aab0ef701355762a
Author: Dave Hansen <dave.hansen(a)linux.intel.com>
AuthorDate: Tue, 24 Jun 2025 14:01:48 -07:00
Committer: Dave Hansen <dave.hansen(a)linux.intel.com>
CommitterDate: Wed, 25 Jun 2025 16:28:06 -07:00
x86/fpu: Delay instruction pointer fixup until after warning
Right now, if XRSTOR fails, a console message like this is printed:
Bad FPU state detected at restore_fpregs_from_fpstate+0x9a/0x170, reinitializing FPU registers.
However, the text location (...+0x9a in this case) is the instruction
*AFTER* the XRSTOR. The highlighted instruction in the "Code:" dump
also points one instruction late.
The reason is that the "fixup" moves RIP past the bad XRSTOR so
execution can keep running after returning from the #GP handler. But it
does this fixup before the warning is printed.
The resulting warning output is nonsensical because it looks like the
non-FPU-related instruction is #GP'ing.
Do not fix up RIP until after printing the warning. Do this by using
the more generic and standard ex_handler_default().
Fixes: d5c8028b4788 ("x86/fpu: Reinitialize FPU registers if restoring FPU state fails")
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Reviewed-by: Chao Gao <chao.gao(a)intel.com>
Acked-by: Alison Schofield <alison.schofield(a)intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz(a)infradead.org>
Cc: stable(a)vger.kernel.org
Link: https://lore.kernel.org/all/20250624210148.97126F9E%40davehans-spike.ostc.i…
---
arch/x86/mm/extable.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index bf8dab1..2fdc1f1 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -122,13 +122,12 @@ static bool ex_handler_sgx(const struct exception_table_entry *fixup,
static bool ex_handler_fprestore(const struct exception_table_entry *fixup,
struct pt_regs *regs)
{
- regs->ip = ex_fixup_addr(fixup);
-
WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
(void *)instruction_pointer(regs));
fpu_reset_from_exception_fixup();
- return true;
+
+ return ex_handler_default(fixup, regs);
}
/*
Hello Andy,
On Thu, Jun 26, 2025 at 12:39:26AM +0000, Andy Yang wrote:
> Thank you once again for your efforts on this patch. I also want to
> sincerely apologize for any inconvenience caused by the email reply
> incident.
Don't even think about it :)
We might have been able to avoid this extra fix,
but that's it. No biggie regardless.
Thank you for your help debugging this.
Kind regards,
Niklas
Hi Igor,
[For context, there was a regression report in Debian at
https://bugs.debian.org/1108069]
On Thu, Jun 19, 2025 at 09:36:13PM -0500, Igor Tamara wrote:
> Package: src:linux
> Version: 6.12.32-1
> Severity: normal
> Tags: a11y
>
> Dear Maintainer,
>
> The builtin microphone on my Asus X507UA does not record, is
> recognized and some time ago it worked on Bookworm with image-6.1.0-31,
> newer images are able to record when appending snd_hda_intel.model=1043:1271
> to the boot as a workaround.
>
> The images that work with the boot option appended, but not without
> it, are:
>
> linux-image-6.15-amd64
> linux-image-6.12.32-amd64
> linux-image-6.1.0-37-amd64
> linux-image-6.1.0-0.a.test-amd64-unsigned_6.1.129-1a~test_amd64.deb
> referenced by https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1100928
> Also compiled from upstream 6.12.22 and 6.1.133 with the same result
>
> The image linux-image-6.1.0-31-amd64 worked properly, the problem was
> introduced in 129 and the result of the bisect was
>
> d26408df0e25f2bd2808d235232ab776e4dd08b9 is the first bad commit
> commit d26408df0e25f2bd2808d235232ab776e4dd08b9
> Author: Kuan-Wei Chiu <visitorckw(a)gmail.com>
> Date: Wed Jan 29 00:54:15 2025 +0800
>
> ALSA: hda: Fix headset detection failure due to unstable sort
>
> commit 3b4309546b48fc167aa615a2d881a09c0a97971f upstream.
>
> The auto_parser assumed sort() was stable, but the kernel's sort() uses
> heapsort, which has never been stable. After commit 0e02ca29a563
> ("lib/sort: optimize heapsort with double-pop variation"), the order of
> equal elements changed, causing the headset to fail to work.
>
> Fix the issue by recording the original order of elements before
> sorting and using it as a tiebreaker for equal elements in the
> comparison function.
>
> Fixes: b9030a005d58 ("ALSA: hda - Use standard sort function in hda_auto_parser.c")
> Reported-by: Austrum <austrum.lab(a)gmail.com>
> Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219158
> Tested-by: Austrum <austrum.lab(a)gmail.com>
> Cc: stable(a)vger.kernel.org
> Signed-off-by: Kuan-Wei Chiu <visitorckw(a)gmail.com>
> Link: https://patch.msgid.link/20250128165415.643223-1-visitorckw@gmail.com
> Signed-off-by: Takashi Iwai <tiwai(a)suse.de>
> Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
>
> sound/pci/hda/hda_auto_parser.c | 8 +++++++-
> sound/pci/hda/hda_auto_parser.h | 1 +
> 2 files changed, 8 insertions(+), 1 deletion(-)
>
> I'm attaching the output of alsa-info_alsa-info.sh script
>
> Please let me know if I can provide more information.
Could you please try the attached patch to see if it fixes the
issue?
Regards,
Salvatore
The quilt patch titled
Subject: scripts/gdb: fix dentry_name() lookup
has been removed from the -mm tree. Its filename was
scripts-gdb-fix-dentry_name-lookup.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Florian Fainelli <florian.fainelli(a)broadcom.com>
Subject: scripts/gdb: fix dentry_name() lookup
Date: Thu, 19 Jun 2025 15:51:05 -0700
The "d_iname" member was replaced with "d_shortname.string" in the commit
referenced in the Fixes tag. This prevented the GDB script's "lx-mounts"
command from functioning properly:
(gdb) lx-mounts
mount super_block devname pathname fstype options
0xff11000002d21180 0xff11000002d24800 rootfs / rootfs rw 0 0
0xff11000002e18a80 0xff11000003713000 /dev/root / ext4 rw,relatime 0 0
Python Exception <class 'gdb.error'>: There is no member named d_iname.
Error occurred in Python: There is no member named d_iname.
Link: https://lkml.kernel.org/r/20250619225105.320729-1-florian.fainelli@broadcom…
Fixes: 58cf9c383c5c ("dcache: back inline names with a struct-wrapped array of unsigned long")
Signed-off-by: Florian Fainelli <florian.fainelli(a)broadcom.com>
Cc: Al Viro <viro(a)zeniv.linux.org.uk>
Cc: Jan Kara <jack(a)suse.cz>
Cc: Jan Kiszka <jan.kiszka(a)siemens.com>
Cc: Jeff Layton <jlayton(a)kernel.org>
Cc: Kieran Bingham <kbingham(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
scripts/gdb/linux/vfs.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/scripts/gdb/linux/vfs.py~scripts-gdb-fix-dentry_name-lookup
+++ a/scripts/gdb/linux/vfs.py
@@ -22,7 +22,7 @@ def dentry_name(d):
if parent == d or parent == 0:
return ""
p = dentry_name(d['d_parent']) + "/"
- return p + d['d_iname'].string()
+ return p + d['d_shortname']['string'].string()
class DentryName(gdb.Function):
"""Return string of the full path of a dentry.
_
Patches currently in -mm which might be from florian.fainelli(a)broadcom.com are
scripts-gdb-fix-interrupts-display-after-mcp-on-x86.patch
scripts-gdb-fix-interruptspy-after-maple-tree-conversion.patch
scripts-gdb-de-reference-per-cpu-mce-interrupts.patch
The quilt patch titled
Subject: mm/damon/sysfs-schemes: free old damon_sysfs_scheme_filter->memcg_path on write
has been removed from the -mm tree. Its filename was
mm-damon-sysfs-schemes-free-old-damon_sysfs_scheme_filter-memcg_path-on-write.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: SeongJae Park <sj(a)kernel.org>
Subject: mm/damon/sysfs-schemes: free old damon_sysfs_scheme_filter->memcg_path on write
Date: Thu, 19 Jun 2025 11:36:07 -0700
memcg_path_store() assigns a newly allocated memory buffer to
filter->memcg_path without deallocating the previously allocated and
assigned buffer. As a result, users can leak kernel memory by
continuously writing data to the memcg_path DAMOS sysfs file. Fix the
leak by deallocating the previously set memory buffer.
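The leak pattern in minimal form (sketch only; the real function also
allocates and copies the written string first):

  /* before: each write drops the previous allocation */
  filter->memcg_path = path;

  /* after: free the old buffer before installing the new one */
  kfree(filter->memcg_path);
  filter->memcg_path = path;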
Link: https://lkml.kernel.org/r/20250619183608.6647-2-sj@kernel.org
Fixes: 7ee161f18b5d ("mm/damon/sysfs-schemes: implement filter directory")
Signed-off-by: SeongJae Park <sj(a)kernel.org>
Cc: Shuah Khan <shuah(a)kernel.org>
Cc: <stable(a)vger.kernel.org> [6.3.x]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/damon/sysfs-schemes.c | 1 +
1 file changed, 1 insertion(+)
--- a/mm/damon/sysfs-schemes.c~mm-damon-sysfs-schemes-free-old-damon_sysfs_scheme_filter-memcg_path-on-write
+++ a/mm/damon/sysfs-schemes.c
@@ -472,6 +472,7 @@ static ssize_t memcg_path_store(struct k
return -ENOMEM;
strscpy(path, buf, count + 1);
+ kfree(filter->memcg_path);
filter->memcg_path = path;
return count;
}
_
Patches currently in -mm which might be from sj(a)kernel.org are
mm-damon-introduce-damon_stat-module.patch
mm-damon-introduce-damon_stat-module-fix.patch
mm-damon-stat-calculate-and-expose-estimated-memory-bandwidth.patch
mm-damon-stat-calculate-and-expose-idle-time-percentiles.patch
docs-admin-guide-mm-damon-add-damon_stat-usage-document.patch
mm-damon-paddr-use-alloc_migartion_target-with-no-migration-fallback-nodemask.patch
revert-mm-rename-alloc_demote_folio-to-alloc_migrate_folio.patch
revert-mm-make-alloc_demote_folio-externally-invokable-for-migration.patch
selftets-damon-add-a-test-for-memcg_path-leak.patch
mm-damon-sysfs-schemes-decouple-from-damos_quota_goal_metric.patch
mm-damon-sysfs-schemes-decouple-from-damos_action.patch
mm-damon-sysfs-schemes-decouple-from-damos_wmark_metric.patch
mm-damon-sysfs-schemes-decouple-from-damos_filter_type.patch
mm-damon-sysfs-decouple-from-damon_ops_id.patch
The quilt patch titled
Subject: lib/group_cpus: fix NULL pointer dereference from group_cpus_evenly()
has been removed from the -mm tree. Its filename was
lib-group_cpus-fix-null-pointer-dereference-from-group_cpus_evenly.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Yu Kuai <yukuai3(a)huawei.com>
Subject: lib/group_cpus: fix NULL pointer dereference from group_cpus_evenly()
Date: Thu, 19 Jun 2025 21:26:55 +0800
While testing null_blk with configfs, echo 0 > poll_queues will trigger
following panic:
BUG: kernel NULL pointer dereference, address: 0000000000000010
Oops: Oops: 0000 [#1] SMP NOPTI
CPU: 27 UID: 0 PID: 920 Comm: bash Not tainted 6.15.0-02023-gadbdb95c8696-dirty #1238 PREEMPT(undef)
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.1-2.fc37 04/01/2014
RIP: 0010:__bitmap_or+0x48/0x70
Call Trace:
<TASK>
__group_cpus_evenly+0x822/0x8c0
group_cpus_evenly+0x2d9/0x490
blk_mq_map_queues+0x1e/0x110
null_map_queues+0xc9/0x170 [null_blk]
blk_mq_update_queue_map+0xdb/0x160
blk_mq_update_nr_hw_queues+0x22b/0x560
nullb_update_nr_hw_queues+0x71/0xf0 [null_blk]
nullb_device_poll_queues_store+0xa4/0x130 [null_blk]
configfs_write_iter+0x109/0x1d0
vfs_write+0x26e/0x6f0
ksys_write+0x79/0x180
__x64_sys_write+0x1d/0x30
x64_sys_call+0x45c4/0x45f0
do_syscall_64+0xa5/0x240
entry_SYSCALL_64_after_hwframe+0x76/0x7e
The root cause is that numgrps is set to 0, so kcalloc() returns
ZERO_SIZE_PTR, which is later dereferenced.
Fix the problem by checking numgrps first in group_cpus_evenly(), and
returning NULL directly if numgrps is zero.
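For reference, kcalloc(0, ...) returns ZERO_SIZE_PTR ((void *)16)
rather than NULL, so a later member access faults around address 0x10,
matching the faulting address in the oops above. Minimal sketch of the
added guard (same idea as the diff below):

  if (numgrps == 0)
          return NULL;  /* avoid handing out ZERO_SIZE_PTR */

  masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);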
[yukuai3(a)huawei.com: also fix the non-SMP version]
Link: https://lkml.kernel.org/r/20250620010958.1265984-1-yukuai1@huaweicloud.com
Link: https://lkml.kernel.org/r/20250619132655.3318883-1-yukuai1@huaweicloud.com
Fixes: 6a6dcae8f486 ("blk-mq: Build default queue map via group_cpus_evenly()")
Signed-off-by: Yu Kuai <yukuai3(a)huawei.com>
Reviewed-by: Ming Lei <ming.lei(a)redhat.com>
Reviewed-by: Jens Axboe <axboe(a)kernel.dk>
Cc: ErKun Yang <yangerkun(a)huawei.com>
Cc: John Garry <john.g.garry(a)oracle.com>
Cc: Thomas Gleinxer <tglx(a)linutronix.de>
Cc: "zhangyi (F)" <yi.zhang(a)huawei.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
lib/group_cpus.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
--- a/lib/group_cpus.c~lib-group_cpus-fix-null-pointer-dereference-from-group_cpus_evenly
+++ a/lib/group_cpus.c
@@ -352,6 +352,9 @@ struct cpumask *group_cpus_evenly(unsign
int ret = -ENOMEM;
struct cpumask *masks = NULL;
+ if (numgrps == 0)
+ return NULL;
+
if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
return NULL;
@@ -426,8 +429,12 @@ struct cpumask *group_cpus_evenly(unsign
#else /* CONFIG_SMP */
struct cpumask *group_cpus_evenly(unsigned int numgrps)
{
- struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ struct cpumask *masks;
+ if (numgrps == 0)
+ return NULL;
+
+ masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
if (!masks)
return NULL;
_
Patches currently in -mm which might be from yukuai3(a)huawei.com are
The quilt patch titled
Subject: mm/hugetlb: remove unnecessary holding of hugetlb_lock
has been removed from the -mm tree. Its filename was
mm-hugetlb-remove-unnecessary-holding-of-hugetlb_lock.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Ge Yang <yangge1116(a)126.com>
Subject: mm/hugetlb: remove unnecessary holding of hugetlb_lock
Date: Tue, 27 May 2025 11:36:50 +0800
In isolate_or_dissolve_huge_folio(), the hugetlb_lock is acquired only
to obtain the correct hstate, which is then passed to
alloc_and_dissolve_hugetlb_folio().
alloc_and_dissolve_hugetlb_folio() itself also acquires the hugetlb_lock,
so it can obtain the hstate by itself, and isolate_or_dissolve_huge_folio()
no longer needs to acquire the lock. In addition, keep the
folio_test_hugetlb() check within isolate_or_dissolve_huge_folio(). By
doing so, we avoid needlessly disrupting the normal path by holding the
hugetlb_lock.
replace_free_hugepage_folios() has the same issue, and we should address
it as well.
Addresses a possible performance problem which was added by the hotfix
113ed54ad276 ("mm/hugetlb: fix kernel NULL pointer dereference when
replacing free hugetlb folios").
Link: https://lkml.kernel.org/r/1748317010-16272-1-git-send-email-yangge1116@126.…
Fixes: 113ed54ad276 ("mm/hugetlb: fix kernel NULL pointer dereference when replacing free hugetlb folios")
Signed-off-by: Ge Yang <yangge1116(a)126.com>
Suggested-by: Oscar Salvador <osalvador(a)suse.de>
Reviewed-by: Muchun Song <muchun.song(a)linux.dev>
Cc: Baolin Wang <baolin.wang(a)linux.alibaba.com>
Cc: Barry Song <21cnbao(a)gmail.com>
Cc: David Hildenbrand <david(a)redhat.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/hugetlb.c | 54 +++++++++++++++----------------------------------
1 file changed, 17 insertions(+), 37 deletions(-)
--- a/mm/hugetlb.c~mm-hugetlb-remove-unnecessary-holding-of-hugetlb_lock
+++ a/mm/hugetlb.c
@@ -2787,20 +2787,24 @@ void restore_reserve_on_error(struct hst
/*
* alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
* the old one
- * @h: struct hstate old page belongs to
* @old_folio: Old folio to dissolve
* @list: List to isolate the page in case we need to
* Returns 0 on success, otherwise negated error.
*/
-static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
- struct folio *old_folio, struct list_head *list)
+static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
+ struct list_head *list)
{
- gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+ gfp_t gfp_mask;
+ struct hstate *h;
int nid = folio_nid(old_folio);
struct folio *new_folio = NULL;
int ret = 0;
retry:
+ /*
+ * The old_folio might have been dissolved from under our feet, so make sure
+ * to carefully check the state under the lock.
+ */
spin_lock_irq(&hugetlb_lock);
if (!folio_test_hugetlb(old_folio)) {
/*
@@ -2829,8 +2833,10 @@ retry:
cond_resched();
goto retry;
} else {
+ h = folio_hstate(old_folio);
if (!new_folio) {
spin_unlock_irq(&hugetlb_lock);
+ gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
NULL, NULL);
if (!new_folio)
@@ -2874,35 +2880,24 @@ free_new:
int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
{
- struct hstate *h;
int ret = -EBUSY;
- /*
- * The page might have been dissolved from under our feet, so make sure
- * to carefully check the state under the lock.
- * Return success when racing as if we dissolved the page ourselves.
- */
- spin_lock_irq(&hugetlb_lock);
- if (folio_test_hugetlb(folio)) {
- h = folio_hstate(folio);
- } else {
- spin_unlock_irq(&hugetlb_lock);
+ /* Not to disrupt normal path by vainly holding hugetlb_lock */
+ if (!folio_test_hugetlb(folio))
return 0;
- }
- spin_unlock_irq(&hugetlb_lock);
/*
* Fence off gigantic pages as there is a cyclic dependency between
* alloc_contig_range and them. Return -ENOMEM as this has the effect
* of bailing out right away without further retrying.
*/
- if (hstate_is_gigantic(h))
+ if (folio_order(folio) > MAX_PAGE_ORDER)
return -ENOMEM;
if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
ret = 0;
else if (!folio_ref_count(folio))
- ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
+ ret = alloc_and_dissolve_hugetlb_folio(folio, list);
return ret;
}
@@ -2916,7 +2911,6 @@ int isolate_or_dissolve_huge_folio(struc
*/
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
{
- struct hstate *h;
struct folio *folio;
int ret = 0;
@@ -2925,23 +2919,9 @@ int replace_free_hugepage_folios(unsigne
while (start_pfn < end_pfn) {
folio = pfn_folio(start_pfn);
- /*
- * The folio might have been dissolved from under our feet, so make sure
- * to carefully check the state under the lock.
- */
- spin_lock_irq(&hugetlb_lock);
- if (folio_test_hugetlb(folio)) {
- h = folio_hstate(folio);
- } else {
- spin_unlock_irq(&hugetlb_lock);
- start_pfn++;
- continue;
- }
- spin_unlock_irq(&hugetlb_lock);
-
- if (!folio_ref_count(folio)) {
- ret = alloc_and_dissolve_hugetlb_folio(h, folio,
- &isolate_list);
+ /* Not to disrupt normal path by vainly holding hugetlb_lock */
+ if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
+ ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
if (ret)
break;
_
Patches currently in -mm which might be from yangge1116(a)126.com are
The quilt patch titled
Subject: fs/proc/task_mmu: fix PAGE_IS_PFNZERO detection for the huge zero folio
has been removed from the -mm tree. Its filename was
fs-proc-task_mmu-fix-page_is_pfnzero-detection-for-the-huge-zero-folio.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: David Hildenbrand <david(a)redhat.com>
Subject: fs/proc/task_mmu: fix PAGE_IS_PFNZERO detection for the huge zero folio
Date: Tue, 17 Jun 2025 16:35:32 +0200
is_zero_pfn() does not work for the huge zero folio, which can cause
the PAGEMAP_SCAN ioctl against /proc/pid/pagemap to present such pages
as PAGE_IS_PRESENT rather than as PAGE_IS_PFNZERO. Fix it by using
is_huge_zero_pmd().
Found by code inspection.
Link: https://lkml.kernel.org/r/20250617143532.2375383-1-david@redhat.com
Fixes: 52526ca7fdb9 ("fs/proc/task_mmu: implement IOCTL to get and optionally clear info about PTEs")
Signed-off-by: David Hildenbrand <david(a)redhat.com>
Cc: Muhammad Usama Anjum <usama.anjum(a)collabora.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
fs/proc/task_mmu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/fs/proc/task_mmu.c~fs-proc-task_mmu-fix-page_is_pfnzero-detection-for-the-huge-zero-folio
+++ a/fs/proc/task_mmu.c
@@ -2182,7 +2182,7 @@ static unsigned long pagemap_thp_categor
categories |= PAGE_IS_FILE;
}
- if (is_zero_pfn(pmd_pfn(pmd)))
+ if (is_huge_zero_pmd(pmd))
categories |= PAGE_IS_PFNZERO;
if (pmd_soft_dirty(pmd))
categories |= PAGE_IS_SOFT_DIRTY;
_
Patches currently in -mm which might be from david(a)redhat.com are
mm-gup-remove-vm_bug_ons.patch
mm-gup-remove-vm_bug_ons-fix.patch
mm-huge_memory-dont-ignore-queried-cachemode-in-vmf_insert_pfn_pud.patch
mm-huge_memory-dont-mark-refcounted-folios-special-in-vmf_insert_folio_pmd.patch
mm-huge_memory-dont-mark-refcounted-folios-special-in-vmf_insert_folio_pud.patch
This reverts minor parts of the changes made in commit b338d91703fa
("Bluetooth: Implement support for Mesh"). It looks like these changes
were only made for development purposes but shouldn't have been part of
the commit.
Fixes: b338d91703fa ("Bluetooth: Implement support for Mesh")
Cc: stable(a)vger.kernel.org
Signed-off-by: Christian Eggers <ceggers(a)arri.de>
---
net/bluetooth/hci_sync.c | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 6687f2a4d1eb..1f8806dfa556 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -1970,13 +1970,10 @@ static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
{
struct adv_info *adv, *n;
- int err = 0;
if (ext_adv_capable(hdev))
/* Remove all existing sets */
- err = hci_clear_adv_sets_sync(hdev, sk);
- if (ext_adv_capable(hdev))
- return err;
+ return hci_clear_adv_sets_sync(hdev, sk);
/* This is safe as long as there is no command send while the lock is
* held.
@@ -2004,13 +2001,11 @@ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
struct sock *sk)
{
- int err = 0;
+ int err;
/* If we use extended advertising, instance has to be removed first. */
if (ext_adv_capable(hdev))
- err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
- if (ext_adv_capable(hdev))
- return err;
+ return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
/* This is safe as long as there is no command send while the lock is
* held.
@@ -2109,16 +2104,13 @@ int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
int hci_disable_advertising_sync(struct hci_dev *hdev)
{
u8 enable = 0x00;
- int err = 0;
/* If controller is not advertising we are done. */
if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
return 0;
if (ext_adv_capable(hdev))
- err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
- if (ext_adv_capable(hdev))
- return err;
+ return hci_disable_ext_adv_instance_sync(hdev, 0x00);
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
sizeof(enable), &enable, HCI_CMD_TIMEOUT);
--
2.43.0