Page cache folios from a file system that supports large block size (LBS)
can have a minimum folio order greater than 0, so a high-order folio might
not be splittable down to order-0. Commit e220917fa507 ("mm: split a
folio in minimum folio order chunks") bumps the target order of
split_huge_page*() to the minimum allowed order when splitting an LBS folio.
This confuses some split_huge_page*() callers, such as the memory failure
handling code, which expect all after-split folios to be order-0 when a
split succeeds but instead get folios of min_order_for_split() order and
emit warnings.
Fix it by failing a split if the folio cannot be split to the target order.
Rename try_folio_split() to try_folio_split_to_order() to reflect the added
new_order parameter. Remove its unused list parameter.
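As a rough sketch of the resulting caller contract (illustration only, not
part of this patch): a caller that can tolerate large after-split folios
asks for the mapping's minimum order explicitly, and a request for an order
below that minimum now simply fails instead of being silently bumped up:

	/* Illustrative sketch; not from the patch itself. */
	static int example_split(struct folio *folio, struct page *split_at)
	{
		int min_order = min_order_for_split(folio);

		if (min_order < 0)
			return min_order;

		/* Asking for an order below min_order now returns -EINVAL. */
		return try_folio_split_to_order(folio, split_at, min_order);
	}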
Fixes: e220917fa507 ("mm: split a folio in minimum folio order chunks")
[The test poisons LBS folios, which cannot be split to order-0 folios, and
also tries to poison all memory. The non-split LBS folios take more memory
than the test anticipated, leading to OOM. This patch fixes the kernel
warning, and the test needs changes to avoid the OOM.]
Reported-by: syzbot+e6367ea2fdab6ed46056(a)syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/68d2c943.a70a0220.1b52b.02b3.GAE@google.com/
Cc: stable(a)vger.kernel.org
Signed-off-by: Zi Yan <ziy(a)nvidia.com>
Reviewed-by: Luis Chamberlain <mcgrof(a)kernel.org>
Reviewed-by: Pankaj Raghav <p.raghav(a)samsung.com>
Reviewed-by: Wei Yang <richard.weiyang(a)gmail.com>
---
From V2[1]:
1. Removed a typo in try_folio_split_to_order() comment.
2. Sent the Fixes patch separately.
[1] https://lore.kernel.org/linux-mm/20251016033452.125479-1-ziy@nvidia.com/
include/linux/huge_mm.h | 55 +++++++++++++++++------------------------
mm/huge_memory.c | 9 +------
mm/truncate.c | 6 +++--
3 files changed, 28 insertions(+), 42 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c4a811958cda..7698b3542c4f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -383,45 +383,30 @@ static inline int split_huge_page_to_list_to_order(struct page *page, struct lis
}
/*
- * try_folio_split - try to split a @folio at @page using non uniform split.
+ * try_folio_split_to_order - try to split a @folio at @page to @new_order using
+ * non uniform split.
* @folio: folio to be split
- * @page: split to order-0 at the given page
- * @list: store the after-split folios
+ * @page: split to @new_order at the given page
+ * @new_order: the target split order
*
- * Try to split a @folio at @page using non uniform split to order-0, if
- * non uniform split is not supported, fall back to uniform split.
+ * Try to split a @folio at @page using non uniform split to @new_order, if
+ * non uniform split is not supported, fall back to uniform split. After-split
+ * folios are put back to LRU list. Use min_order_for_split() to get the lower
+ * bound of @new_order.
*
* Return: 0: split is successful, otherwise split failed.
*/
-static inline int try_folio_split(struct folio *folio, struct page *page,
- struct list_head *list)
+static inline int try_folio_split_to_order(struct folio *folio,
+ struct page *page, unsigned int new_order)
{
- int ret = min_order_for_split(folio);
-
- if (ret < 0)
- return ret;
-
- if (!non_uniform_split_supported(folio, 0, false))
- return split_huge_page_to_list_to_order(&folio->page, list,
- ret);
- return folio_split(folio, ret, page, list);
+ if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
+ return split_huge_page_to_list_to_order(&folio->page, NULL,
+ new_order);
+ return folio_split(folio, new_order, page, NULL);
}
static inline int split_huge_page(struct page *page)
{
- struct folio *folio = page_folio(page);
- int ret = min_order_for_split(folio);
-
- if (ret < 0)
- return ret;
-
- /*
- * split_huge_page() locks the page before splitting and
- * expects the same page that has been split to be locked when
- * returned. split_folio(page_folio(page)) cannot be used here
- * because it converts the page to folio and passes the head
- * page to be split.
- */
- return split_huge_page_to_list_to_order(page, NULL, ret);
+ return split_huge_page_to_list_to_order(page, NULL, 0);
}
void deferred_split_folio(struct folio *folio, bool partially_mapped);
#ifdef CONFIG_MEMCG
@@ -611,14 +596,20 @@ static inline int split_huge_page(struct page *page)
return -EINVAL;
}
+static inline int min_order_for_split(struct folio *folio)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
+}
+
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
VM_WARN_ON_ONCE_FOLIO(1, folio);
return -EINVAL;
}
-static inline int try_folio_split(struct folio *folio, struct page *page,
- struct list_head *list)
+static inline int try_folio_split_to_order(struct folio *folio,
+ struct page *page, unsigned int new_order)
{
VM_WARN_ON_ONCE_FOLIO(1, folio);
return -EINVAL;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f14fbef1eefd..fc65ec3393d2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3812,8 +3812,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
min_order = mapping_min_folio_order(folio->mapping);
if (new_order < min_order) {
- VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
- min_order);
ret = -EINVAL;
goto out;
}
@@ -4165,12 +4163,7 @@ int min_order_for_split(struct folio *folio)
int split_folio_to_list(struct folio *folio, struct list_head *list)
{
- int ret = min_order_for_split(folio);
-
- if (ret < 0)
- return ret;
-
- return split_huge_page_to_list_to_order(&folio->page, list, ret);
+ return split_huge_page_to_list_to_order(&folio->page, list, 0);
}
/*
diff --git a/mm/truncate.c b/mm/truncate.c
index 91eb92a5ce4f..9210cf808f5c 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -194,6 +194,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
size_t size = folio_size(folio);
unsigned int offset, length;
struct page *split_at, *split_at2;
+ unsigned int min_order;
if (pos < start)
offset = start - pos;
@@ -223,8 +224,9 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
if (!folio_test_large(folio))
return true;
+ min_order = mapping_min_folio_order(folio->mapping);
split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
- if (!try_folio_split(folio, split_at, NULL)) {
+ if (!try_folio_split_to_order(folio, split_at, min_order)) {
/*
* try to split at offset + length to make sure folios within
* the range can be dropped, especially to avoid memory waste
@@ -254,7 +256,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
*/
if (folio_test_large(folio2) &&
folio2->mapping == folio->mapping)
- try_folio_split(folio2, split_at2, NULL);
+ try_folio_split_to_order(folio2, split_at2, min_order);
folio_unlock(folio2);
out:
--
2.51.0
The migration of IDIO-16 GPIO drivers to the regmap API resulted in some
regressions to the gpio-104-idio-16, gpio-pci-idio-16, and gpio-idio-16
modules. Specifically, the 104-idio-16 and pci-idio-16 GPIO drivers
utilize regmap caching and thus must set max_register for their
regmap_config, while gpio-idio-16 requires fixed_direction_output to
represent the fixed direction of the IDIO-16 GPIO lines. Fixes for these
regressions are provided by this series.
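As an illustration of the max_register point (placeholder names and values,
not the actual driver code): a regmap that enables a register cache needs
max_register set so the cache can size and validate its backing store, e.g.:

	static const struct regmap_config example_idio_16_regmap_config = {
		.reg_bits = 8,
		.reg_stride = 1,
		.val_bits = 8,
		.max_register = 0x7,	/* placeholder: highest valid offset */
		.cache_type = REGCACHE_FLAT,
	};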
Link: https://lore.kernel.org/r/cover.1680618405.git.william.gray@linaro.org
Closes: https://lore.kernel.org/r/9b0375fd-235f-4ee1-a7fa-daca296ef6bf@nutanix.com
Signed-off-by: William Breathitt Gray <wbg(a)kernel.org>
---
Changes in v2:
- Pick up Reviewed-by tags
- Replace Link tags with Closes tags for fixes addressing bug reports
- Link to v1: https://lore.kernel.org/r/20251017-fix-gpio-idio-16-regmap-v1-0-a7c71080f74…
---
William Breathitt Gray (3):
gpio: 104-idio-16: Define maximum valid register address offset
gpio: pci-idio-16: Define maximum valid register address offset
gpio: idio-16: Define fixed direction of the GPIO lines
drivers/gpio/gpio-104-idio-16.c | 1 +
drivers/gpio/gpio-idio-16.c | 5 +++++
drivers/gpio/gpio-pci-idio-16.c | 1 +
3 files changed, 7 insertions(+)
---
base-commit: eba11116f39533d2e38cc5898014f2c95f32d23a
change-id: 20251017-fix-gpio-idio-16-regmap-1282cdc56a19
Best regards,
--
William Breathitt Gray <wbg(a)kernel.org>
[2025-10-20 10:36] gregkh(a)linuxfoundation.org:
> This is a note to let you know that I've just added the patch titled
>
> PM: hibernate: Add pm_hibernation_mode_is_suspend()
>
> to the 6.17-stable tree which can be found at:
> http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
>
> The filename of the patch is:
> pm-hibernate-add-pm_hibernation_mode_is_suspend.patch
> and it can be found in the queue-6.17 subdirectory.
>
> If you, or anyone else, feels it should not be added to the stable tree,
> please let <stable(a)vger.kernel.org> know about it.
>
>
> From stable+bounces-187839-greg=kroah.com(a)vger.kernel.org Sat Oct 18 15:51:25 2025
> From: Sasha Levin <sashal(a)kernel.org>
> Date: Sat, 18 Oct 2025 09:51:01 -0400
> Subject: PM: hibernate: Add pm_hibernation_mode_is_suspend()
> To: stable(a)vger.kernel.org
> Cc: "Mario Limonciello (AMD)" <superm1(a)kernel.org>, Ionut Nechita <ionut_n2001(a)yahoo.com>, Kenneth Crudup <kenny(a)panix.com>, Alex Deucher <alexander.deucher(a)amd.com>, "Rafael J. Wysocki" <rafael.j.wysocki(a)intel.com>, Sasha Levin <sashal(a)kernel.org>
> Message-ID: <20251018135102.711457-1-sashal(a)kernel.org>
>
> From: "Mario Limonciello (AMD)" <superm1(a)kernel.org>
>
> [ Upstream commit 495c8d35035edb66e3284113bef01f3b1b843832 ]
>
> Some drivers have different flows for hibernation and suspend. If
> the driver opportunistically will skip thaw() then it needs a hint
> to know what is happening after the hibernate.
>
> Introduce a new symbol pm_hibernation_mode_is_suspend() that drivers
> can call to determine if suspending the system for this purpose.
>
> Tested-by: Ionut Nechita <ionut_n2001(a)yahoo.com>
> Tested-by: Kenneth Crudup <kenny(a)panix.com>
> Acked-by: Alex Deucher <alexander.deucher(a)amd.com>
> Signed-off-by: Mario Limonciello (AMD) <superm1(a)kernel.org>
> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki(a)intel.com>
> Stable-dep-of: 0a6e9e098fcc ("drm/amd: Fix hybrid sleep")
> Signed-off-by: Sasha Levin <sashal(a)kernel.org>
> Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
> ---
> include/linux/suspend.h | 2 ++
> kernel/power/hibernate.c | 11 +++++++++++
> 2 files changed, 13 insertions(+)
>
> --- a/include/linux/suspend.h
> +++ b/include/linux/suspend.h
> @@ -276,6 +276,7 @@ extern void arch_suspend_enable_irqs(voi
>
> extern int pm_suspend(suspend_state_t state);
> extern bool sync_on_suspend_enabled;
> +bool pm_hibernation_mode_is_suspend(void);
> #else /* !CONFIG_SUSPEND */
> #define suspend_valid_only_mem NULL
>
> @@ -288,6 +289,7 @@ static inline bool pm_suspend_via_firmwa
> static inline bool pm_resume_via_firmware(void) { return false; }
> static inline bool pm_suspend_no_platform(void) { return false; }
> static inline bool pm_suspend_default_s2idle(void) { return false; }
> +static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
>
> static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
> static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
> --- a/kernel/power/hibernate.c
> +++ b/kernel/power/hibernate.c
> @@ -80,6 +80,17 @@ static const struct platform_hibernation
>
> static atomic_t hibernate_atomic = ATOMIC_INIT(1);
>
> +#ifdef CONFIG_SUSPEND
> +/**
> + * pm_hibernation_mode_is_suspend - Check if hibernation has been set to suspend
> + */
> +bool pm_hibernation_mode_is_suspend(void)
> +{
> + return hibernation_mode == HIBERNATION_SUSPEND;
> +}
> +EXPORT_SYMBOL_GPL(pm_hibernation_mode_is_suspend);
> +#endif
> +
> bool hibernate_acquire(void)
> {
> return atomic_add_unless(&hibernate_atomic, -1, 0);
>
>
> Patches currently in stable-queue which might be from sashal(a)kernel.org are
>
> queue-6.17/drm-amd-fix-hybrid-sleep.patch
> queue-6.17/usb-gadget-introduce-free_usb_request-helper.patch
> queue-6.17/pm-hibernate-add-pm_hibernation_mode_is_suspend.patch
> queue-6.17/usb-gadget-store-endpoint-pointer-in-usb_request.patch
> queue-6.17/media-nxp-imx8-isi-m2m-fix-streaming-cleanup-on-release.patch
> queue-6.17/usb-gadget-f_rndis-refactor-bind-path-to-use-__free.patch
Hi,
I kept getting "ERROR: modpost: "pm_hibernation_mode_is_suspend" [drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!" when trying to build a 6.17.4 kernel with all the patches from queue-6.17 applied on top (from the stable-queue git repo at commit id 6aceec507fd0d3cefa7cac227eaf897edf09bf32). That build-time error is gone and the resulting kernel boots/runs fine on various x86_64 machines and VMs since I've removed/omitted both this patch and queue-6.17/drm-amd-fix-hybrid-sleep.patch.
I'm not sure if omitting queue-6.17/drm-amd-fix-hybrid-sleep.patch would have been sufficient, but both patches are part of the same patch set anyway.
Sadly, I haven't been able to figure out what about the changes actually causes the issue. My first guess was that if CONFIG_SUSPEND is not selected/enabled, then the whole pm_hibernation_mode_is_suspend() function and the corresponding export symbol would be missing. However, the kernel that I was trying to build *does* have CONFIG_SUSPEND=y in its config, so this shouldn't be the cause…
Regards
Pascal
The code contains a use-after-free vulnerability due to missing
cancellation of delayed work during device removal. Specifically,
in acpi_video_bus_remove(), the function acpi_video_bus_put_devices()
is called, which frees all acpi_video_device structures without
cancelling the associated delayed work (switch_brightness_work).
This work is scheduled via brightness_switch_event() in response to
ACPI events (e.g., brightness key presses) with a 100ms delay. If
the work is pending when the device is removed, it may execute after
the memory is freed, leading to use-after-free when the work function
acpi_video_switch_brightness() accesses the device structure.
Fix this by calling cancel_delayed_work_sync() before freeing each
acpi_video_device to ensure the work is fully completed before the
memory is released.
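The generic pattern being applied here looks roughly like this (sketch
only, not the driver code):

	struct example_dev {
		struct delayed_work work;
	};

	static void example_remove(struct example_dev *d)
	{
		/* Waits for a pending or already-running work item to finish. */
		cancel_delayed_work_sync(&d->work);
		kfree(d);	/* now safe: the work can no longer touch *d */
	}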
Fixes: 67b662e189f46 ("ACPI / video: seperate backlight control and event interface")
Cc: stable(a)vger.kernel.org
Signed-off-by: Yuhao Jiang <danisjiang(a)gmail.com>
---
drivers/acpi/acpi_video.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 103f29661576..5b80f87e078f 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1974,6 +1974,7 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
mutex_lock(&video->device_list_lock);
list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
+ cancel_delayed_work_sync(&dev->switch_brightness_work);
list_del(&dev->entry);
kfree(dev);
}
--
2.34.1
When GSO tunneling is negotiated, virtio_net_hdr_tnl_from_skb() tries to
initialize the tunnel metadata but forgets to zero the unused rxhash
fields. This may leak information to the other side. Fix this by
zeroing the unused hash fields.
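For reference, the fields being cleared live in the hash header embedded in
the tunnel header; a sketch of the relevant uapi layout (from
include/uapi/linux/virtio_net.h, shown here only for context):

	struct virtio_net_hdr_v1_hash {
		struct virtio_net_hdr_v1 hdr;
		__le32 hash_value;
		__le16 hash_report;
		__le16 padding;
	};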
Fixes: a2fb4bc4e2a6a ("net: implement virtio helpers to handle UDP GSO tunneling")
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Jason Wang <jasowang(a)redhat.com>
---
include/linux/virtio_net.h | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 20e0584db1dd..4d1780848d0e 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -401,6 +401,10 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
if (!tnl_hdr_negotiated)
return -EINVAL;
+ vhdr->hash_hdr.hash_value = 0;
+ vhdr->hash_hdr.hash_report = 0;
+ vhdr->hash_hdr.padding = 0;
+
/* Let the basic parsing deal with plain GSO features. */
skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
--
2.42.0
From: Srinivasan Shanmugam <srinivasan.shanmugam(a)amd.com>
commit cdb637d339572398821204a1142d8d615668f1e9 upstream.
The issue arises when the array 'adev->vcn.vcn_config' is accessed
before checking if the index 'adev->vcn.num_vcn_inst' is within the
bounds of the array.
The fix involves moving the bounds check before the array access. This
ensures that 'adev->vcn.num_vcn_inst' is within the bounds of the array
before it is used as an index.
Fixes the below:
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c:1289 amdgpu_discovery_reg_base_init() error: testing array offset 'adev->vcn.num_vcn_inst' after use.
Fixes: a0ccc717c4ab ("drm/amdgpu/discovery: validate VCN and SDMA instances")
Cc: Christian König <christian.koenig(a)amd.com>
Cc: Alex Deucher <alexander.deucher(a)amd.com>
Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam(a)amd.com>
Reviewed-by: Alex Deucher <alexander.deucher(a)amd.com>
Signed-off-by: Alex Deucher <alexander.deucher(a)amd.com>
[ kovalev: bp to fix CVE-2024-27042 ]
Signed-off-by: Vasiliy Kovalev <kovalev(a)altlinux.org>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index d8441e273..8bc3a4bf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1128,15 +1128,16 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
* 0b10 : encode is disabled
* 0b01 : decode is disabled
*/
- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
- ip->revision & 0xc0;
- ip->revision &= ~0xc0;
- if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
+ if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES) {
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
adev->vcn.num_vcn_inst++;
+ }
else
dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
adev->vcn.num_vcn_inst + 1,
AMDGPU_MAX_VCN_INSTANCES);
+ ip->revision &= ~0xc0;
}
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
--
2.50.1