This is a note to let you know that I've just added the patch titled
iio: adc: ad7173: prevent scan if too many setups requested
to my char-misc git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
in the char-misc-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From 1cfb22c277c7274f54babaa5b416dfbc00181e16 Mon Sep 17 00:00:00 2001
From: David Lechner <dlechner(a)baylibre.com>
Date: Tue, 22 Jul 2025 14:20:07 -0500
Subject: iio: adc: ad7173: prevent scan if too many setups requested
Add a check to ad7173_update_scan_mode() to ensure that we didn't exceed
the maximum number of unique channel configurations.
In the AD7173 family of chips, there are some chips that have 16
CHANNELx registers but only 8 setups (combination of CONFIGx, FILTERx,
GAINx and OFFSETx registers). Since commit 92c247216918 ("iio: adc:
ad7173: fix num_slots"), it is possible to have more than 8 channels
enabled in a scan at the same time, so it is possible to get a bad
configuration when more than 8 channels are using unique configurations.
This happens because the algorithm to allocate the setup slots only
takes into account which slot has been least recently used and doesn't
know about the maximum number of slots available.
Since the algorithm to allocate the setup slots is quite complex, it is
simpler to check after the fact if the current state is valid or not.
So this patch adds a check in ad7173_update_scan_mode() after setting up
all of the configurations to make sure that the actual setup still
matches the requested setup for each enabled channel. If not, we prevent
the scan from being enabled and return an error.
The setup comparison is refactored into a separate function,
ad7173_is_setup_equal(), since we need to call it in two places now.
Fixes: 92c247216918 ("iio: adc: ad7173: fix num_slots")
Signed-off-by: David Lechner <dlechner(a)baylibre.com>
Link: https://patch.msgid.link/20250722-iio-adc-ad7173-fix-setup-use-limits-v2-1-…
Cc: <Stable(a)vger.kernel.org>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
---
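For reference, a small userspace sketch of the check being added (not
driver code; the struct, fields and values are illustrative only): if two
enabled channels share a cfg_slot but request different setups, the slots
were oversubscribed and at least one setup was overwritten.

#include <stdbool.h>
#include <stdio.h>

struct cfg {
	int slot;	/* SETUP_SEL slot assigned to the channel */
	int odr;	/* part of the requested setup */
	bool bipolar;	/* part of the requested setup */
};

static bool setup_equal(const struct cfg *a, const struct cfg *b)
{
	return a->odr == b->odr && a->bipolar == b->bipolar;
}

int main(void)
{
	/* three enabled channels squeezed into two slots: the third channel
	 * has overwritten slot 0 with a different setup */
	struct cfg ch[] = {
		{ .slot = 0, .odr = 10, .bipolar = true  },
		{ .slot = 1, .odr = 20, .bipolar = true  },
		{ .slot = 0, .odr = 50, .bipolar = false },
	};
	int n = sizeof(ch) / sizeof(ch[0]);

	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			if (i == j || ch[i].slot != ch[j].slot)
				continue;
			if (!setup_equal(&ch[i], &ch[j])) {
				printf("too many unique setups requested\n");
				return 1;
			}
		}
	}
	printf("scan configuration is consistent\n");
	return 0;
}
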
drivers/iio/adc/ad7173.c | 87 ++++++++++++++++++++++++++++++++++------
1 file changed, 75 insertions(+), 12 deletions(-)
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 4413207be28f..683146e83ab2 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -200,7 +200,7 @@ struct ad7173_channel_config {
/*
* Following fields are used to compare equality. If you
* make adaptations in it, you most likely also have to adapt
- * ad7173_find_live_config(), too.
+ * ad7173_is_setup_equal(), too.
*/
struct_group(config_props,
bool bipolar;
@@ -561,12 +561,19 @@ static void ad7173_reset_usage_cnts(struct ad7173_state *st)
st->config_usage_counter = 0;
}
-static struct ad7173_channel_config *
-ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
+/**
+ * ad7173_is_setup_equal - Compare two channel setups
+ * @cfg1: First channel configuration
+ * @cfg2: Second channel configuration
+ *
+ * Compares all configuration options that affect the registers connected to
+ * SETUP_SEL, namely CONFIGx, FILTERx, GAINx and OFFSETx.
+ *
+ * Returns: true if the setups are identical, false otherwise
+ */
+static bool ad7173_is_setup_equal(const struct ad7173_channel_config *cfg1,
+ const struct ad7173_channel_config *cfg2)
{
- struct ad7173_channel_config *cfg_aux;
- int i;
-
/*
* This is just to make sure that the comparison is adapted after
* struct ad7173_channel_config was changed.
@@ -579,14 +586,22 @@ ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *c
u8 ref_sel;
}));
+ return cfg1->bipolar == cfg2->bipolar &&
+ cfg1->input_buf == cfg2->input_buf &&
+ cfg1->odr == cfg2->odr &&
+ cfg1->ref_sel == cfg2->ref_sel;
+}
+
+static struct ad7173_channel_config *
+ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
+{
+ struct ad7173_channel_config *cfg_aux;
+ int i;
+
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
- if (cfg_aux->live &&
- cfg->bipolar == cfg_aux->bipolar &&
- cfg->input_buf == cfg_aux->input_buf &&
- cfg->odr == cfg_aux->odr &&
- cfg->ref_sel == cfg_aux->ref_sel)
+ if (cfg_aux->live && ad7173_is_setup_equal(cfg, cfg_aux))
return cfg_aux;
}
return NULL;
@@ -1228,7 +1243,7 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct ad7173_state *st = iio_priv(indio_dev);
- int i, ret;
+ int i, j, k, ret;
for (i = 0; i < indio_dev->num_channels; i++) {
if (test_bit(i, scan_mask))
@@ -1239,6 +1254,54 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev,
return ret;
}
+ /*
+ * On some chips, there are more channels than setups, so if there were
+ * more unique setups requested than the number of available slots,
+ * ad7173_set_channel() will have written over some of the slots. We
+ * can detect this by making sure each assigned cfg_slot matches the
+ * requested configuration. If it doesn't, we know that the slot was
+ * overwritten by a different channel.
+ */
+ for_each_set_bit(i, scan_mask, indio_dev->num_channels) {
+ const struct ad7173_channel_config *cfg1, *cfg2;
+
+ cfg1 = &st->channels[i].cfg;
+
+ for_each_set_bit(j, scan_mask, indio_dev->num_channels) {
+ cfg2 = &st->channels[j].cfg;
+
+ /*
+ * Only compare configs that are assigned to the same
+ * SETUP_SEL slot and don't compare channel to itself.
+ */
+ if (i == j || cfg1->cfg_slot != cfg2->cfg_slot)
+ continue;
+
+ /*
+ * If we find two different configs trying to use the
+ * same SETUP_SEL slot, then we know that we have too
+ * many unique configurations requested for
+ * the available slots and at least one was overwritten.
+ */
+ if (!ad7173_is_setup_equal(cfg1, cfg2)) {
+ /*
+ * At this point, there isn't a way to tell
+ * which setups are actually programmed in the
+ * ADC anymore, so we could read them back to
+ * see, but it is simpler to just turn off all
+ * of the live flags so that everything gets
+ * reprogrammed on the next attempt to read a sample.
+ */
+ for (k = 0; k < st->num_channels; k++)
+ st->channels[k].cfg.live = false;
+
+ dev_err(&st->sd.spi->dev,
+ "Too many unique channel configurations requested for scan\n");
+ return -EINVAL;
+ }
+ }
+ }
+
return 0;
}
--
2.50.1
This is a note to let you know that I've just added the patch titled
iio: proximity: isl29501: fix buffered read on big-endian systems
to my char-misc git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
in the char-misc-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From de18e978d0cda23e4c102e18092b63a5b0b3a800 Mon Sep 17 00:00:00 2001
From: David Lechner <dlechner(a)baylibre.com>
Date: Tue, 22 Jul 2025 15:54:21 -0500
Subject: iio: proximity: isl29501: fix buffered read on big-endian systems
Fix passing a u32 value as a u16 buffer scan item. This works on little-
endian systems, but not on big-endian systems.
A new local variable is introduced for getting the register value and
the array is changed to a struct to make the data layout more explicit
rather than just changing the type and having to recalculate the proper
length needed for the timestamp.
Fixes: 1c28799257bc ("iio: light: isl29501: Add support for the ISL29501 ToF sensor.")
Signed-off-by: David Lechner <dlechner(a)baylibre.com>
Link: https://patch.msgid.link/20250722-iio-use-more-iio_declare_buffer_with_ts-7…
Cc: <Stable(a)vger.kernel.org>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
---
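For reference, a small userspace sketch (not driver code) of why handing
the u32 register value to code that expects a 16-bit scan item only works
on little-endian hosts:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t value = 0x1234;	/* e.g. a raw distance reading */
	uint16_t scan_item;

	/* what the old code effectively did: treat the u32 buffer as the
	 * 16-bit scan item, i.e. take its first two bytes */
	memcpy(&scan_item, &value, sizeof(scan_item));
	printf("raw copy:   0x%04x\n", scan_item);	/* 0x1234 on LE, 0x0000 on BE */

	/* what the fix does: an integer assignment into a u16 field, which
	 * keeps the low 16 bits regardless of host byte order */
	scan_item = (uint16_t)value;
	printf("assignment: 0x%04x\n", scan_item);	/* 0x1234 everywhere */
	return 0;
}
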
drivers/iio/proximity/isl29501.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index d1510fe24050..f69db6f2f380 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -938,12 +938,18 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct isl29501_private *isl29501 = iio_priv(indio_dev);
const unsigned long *active_mask = indio_dev->active_scan_mask;
- u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */
+ u32 value;
+ struct {
+ u16 data;
+ aligned_s64 ts;
+ } scan = { };
- if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
- isl29501_register_read(isl29501, REG_DISTANCE, buffer);
+ if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) {
+ isl29501_register_read(isl29501, REG_DISTANCE, &value);
+ scan.data = value;
+ }
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
--
2.50.1
The Cadence PCIe Controller integrated in the TI K3 SoCs supports both
Root-Complex and Endpoint modes of operation. The Glue Layer allows
"strapping" the mode of operation of the Controller, the Link Speed
and the Link Width. This is enabled by programming the "PCIEn_CTRL"
register (n corresponds to the PCIe instance) within the CTRL_MMR
memory-mapped register space.
In the PCIe Controller's register space, the same set of registers
that correspond to the Root-Port configuration space when the
Controller is configured for Root-Complex mode of operation, also
correspond to the Physical Function configuration space when the
Controller is configured for Endpoint mode of operation. As a result,
the "reset-value" of these set of registers _should_ vary depending
on the selected mode of operation. This is the expected behavior
according to the description of the registers and their reset values
in the Technical Reference Manual for the SoCs.
However, it is observed that the "reset-value" seen in practice
does not match the description. To be precise, when the Controller
is configured for Root-Complex mode of operation, the "reset-value"
of the Root-Port configuration space reflects the "reset-value"
corresponding to the Physical Function configuration space.
This can be attributed to the fact that the "strap" settings play
a role in "switching" the "reset-value" of the registers to match
the expected values as determined by the selected mode of operation.
Since the "strap" settings are sampled the moment the PCIe Controller
is powered ON, the "reset-value" of the registers are setup at that
point in time. As a result, if the "strap" settings are programmed
at a later point in time, they _will not_ update the "reset-value" of
the registers. This will cause the Physical Function configuration
space to be seen when the Root-Port configuration space is accessed
after programming the PCIe Controller for Root-Complex mode of
operation.
Fix this by powering off the PCIe Controller before programming the
"strap" settings and powering it on after that. This will ensure
that the "strap" settings that have been sampled convey the intended
mode of operation, thereby resulting in the "reset-value" of the
registers being accurate.
Fixes: f3e25911a430 ("PCI: j721e: Add TI J721E PCIe driver")
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Siddharth Vadapalli <s-vadapalli(a)ti.com>
---
Hello,
This patch is based on commit
be48bcf004f9 Merge tag 'for-6.17-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
of Mainline Linux.
v1 of this patch is at:
https://lore.kernel.org/r/20250716102851.121742-1-s-vadapalli@ti.com/
Changes since v1:
- Rebased patch on latest Mainline Linux.
Regards,
Siddharth.
drivers/pci/controller/cadence/pci-j721e.c | 82 ++++++++++++++--------
1 file changed, 53 insertions(+), 29 deletions(-)
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 6c93f39d0288..d5e7cb7277dc 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -173,10 +174,9 @@ static const struct cdns_pcie_ops j721e_pcie_ops = {
.link_up = j721e_pcie_link_up,
};
-static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
- unsigned int offset)
+static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct device *dev,
+ struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->cdns_pcie->dev;
u32 mask = J721E_MODE_RC;
u32 mode = pcie->mode;
u32 val = 0;
@@ -193,9 +193,9 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
}
static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
+ struct device *dev,
struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->cdns_pcie->dev;
struct device_node *np = dev->of_node;
int link_speed;
u32 val = 0;
@@ -214,9 +214,9 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
}
static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
+ struct device *dev,
struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->cdns_pcie->dev;
u32 lanes = pcie->num_lanes;
u32 mask = BIT(8);
u32 val = 0;
@@ -234,9 +234,9 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
}
static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct device *dev,
struct regmap *syscon)
{
- struct device *dev = pcie->cdns_pcie->dev;
struct device_node *node = dev->of_node;
u32 mask = ACSPCIE_PAD_DISABLE_MASK;
struct of_phandle_args args;
@@ -263,9 +263,8 @@ static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
return 0;
}
-static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
+static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie, struct device *dev)
{
- struct device *dev = pcie->cdns_pcie->dev;
struct device_node *node = dev->of_node;
struct of_phandle_args args;
unsigned int offset = 0;
@@ -284,19 +283,19 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!ret)
offset = args.args[0];
- ret = j721e_pcie_set_mode(pcie, syscon, offset);
+ ret = j721e_pcie_set_mode(pcie, dev, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
return ret;
}
- ret = j721e_pcie_set_link_speed(pcie, syscon, offset);
+ ret = j721e_pcie_set_link_speed(pcie, dev, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set link speed\n");
return ret;
}
- ret = j721e_pcie_set_lane_count(pcie, syscon, offset);
+ ret = j721e_pcie_set_lane_count(pcie, dev, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set num-lanes\n");
return ret;
@@ -308,7 +307,7 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!syscon)
return 0;
- return j721e_enable_acspcie_refclk(pcie, syscon);
+ return j721e_enable_acspcie_refclk(pcie, dev, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -469,6 +468,47 @@ static int j721e_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
+ pcie->mode = mode;
+
+ ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+ if (ret || num_lanes > data->max_lanes) {
+ dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n");
+ num_lanes = 1;
+ }
+
+ pcie->num_lanes = num_lanes;
+ pcie->max_lanes = data->max_lanes;
+
+ /*
+ * The PCIe Controller's registers have different "reset-values" depending
+ * on the "strap" settings programmed into the Controller's Glue Layer.
+ * This is because the same set of registers are used for representing the
+ * Physical Function configuration space in Endpoint mode and for
+ * representing the Root-Port configuration space in Root-Complex mode.
+ *
+ * The registers latch onto a "reset-value" based on the "strap" settings
+ * sampled after the Controller is powered on. Therefore, for the
+ * "reset-value" to be accurate, it is necessary to program the "strap"
+ * settings when the Controller is powered off, and power on the Controller
+ * after the "strap" settings have been programmed.
+ *
+ * The "strap" settings are programmed by "j721e_pcie_ctrl_init()".
+ * Therefore, power off the Controller before invoking "j721e_pcie_ctrl_init()",
+ * program the "strap" settings, and then power on the Controller. This ensures
+ * that the reset values are accurate and reflect the "strap" settings.
+ */
+ dev_pm_domain_detach(dev, true);
+
+ ret = j721e_pcie_ctrl_init(pcie, dev);
+ if (ret < 0)
+ return ret;
+
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to power on device\n");
+ return ret;
+ }
+
switch (mode) {
case PCI_MODE_RC:
if (!IS_ENABLED(CONFIG_PCI_J721E_HOST))
@@ -510,7 +550,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
return 0;
}
- pcie->mode = mode;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
@@ -523,15 +562,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
return PTR_ERR(base);
pcie->user_cfg_base = base;
- ret = of_property_read_u32(node, "num-lanes", &num_lanes);
- if (ret || num_lanes > data->max_lanes) {
- dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n");
- num_lanes = 1;
- }
-
- pcie->num_lanes = num_lanes;
- pcie->max_lanes = data->max_lanes;
-
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
return -EINVAL;
@@ -547,12 +577,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = j721e_pcie_ctrl_init(pcie);
- if (ret < 0) {
- dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
- goto err_get_sync;
- }
-
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
@@ -680,7 +704,7 @@ static int j721e_pcie_resume_noirq(struct device *dev)
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
int ret;
- ret = j721e_pcie_ctrl_init(pcie);
+ ret = j721e_pcie_ctrl_init(pcie, dev);
if (ret < 0)
return ret;
--
2.43.0
From: Chen Junlin <chen.junlin(a)zte.com.cn>
Although the upstream commit 2b0f2fc9ed62 ("Bluetooth: hci_conn:
Use disable_delayed_work_sync") fixed CVE-2024-56591, that patch
depends on the disable_work()/enable_work() implementation in the
workqueue code [1], which was merged in 6.9/6.10 and later. The
linux-6.6 branch does not have these workqueue features.
To solve CVE-2024-56591 without backporting too many workqueue feature
patches, we can set a new flag, HCI_CONN_DELETE, when hci_conn_del() is
called, so that subsequent queuing of the work is ignored.
[1] https://lore.kernel.org/all/20240216180559.208276-1-tj@kernel.org/
Signed-off-by: Chen Junlin <chen.junlin(a)zte.com.cn>
Signed-off-by: xu xin <xu.xin16(a)zte.com.cn>
---
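For reference, a minimal kernel-style sketch of the general pattern used
here on kernels that lack disable_delayed_work_sync(); the struct and
function names below are illustrative only, not taken from hci_core:

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#define FOO_DELETING	0

struct foo {
	unsigned long flags;
	struct delayed_work dwork;
	struct workqueue_struct *wq;
};

static void foo_rearm(struct foo *f, unsigned long delay)
{
	/* every path that (re)queues the work checks the flag first */
	if (!test_bit(FOO_DELETING, &f->flags))
		queue_delayed_work(f->wq, &f->dwork, delay);
}

static void foo_destroy(struct foo *f)
{
	/* teardown sets the flag before the final synchronous cancel, so a
	 * concurrent foo_rearm() can no longer requeue the work after the
	 * object is freed */
	set_bit(FOO_DELETING, &f->flags);
	cancel_delayed_work_sync(&f->dwork);
	kfree(f);
}
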
include/net/bluetooth/hci_core.h | 8 +++++++-
net/bluetooth/hci_conn.c | 1 +
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 4f067599e6e9..9a3ec55079a1 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -954,6 +954,7 @@ enum {
HCI_CONN_BIG_SYNC_FAILED,
HCI_CONN_PA_SYNC,
HCI_CONN_PA_SYNC_FAILED,
+ HCI_CONN_DELETE,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -1575,7 +1576,12 @@ static inline void hci_conn_drop(struct hci_conn *conn)
}
cancel_delayed_work(&conn->disc_work);
- queue_delayed_work(conn->hdev->workqueue,
+ /*
+ * When HCI_CONN_DELETE is set, the conn is going to be freed.
+ * Don't queue the work to avoid noisy WARNing about refcnt < 0.
+ */
+ if (!test_bit(HCI_CONN_DELETE, &conn->flags))
+ queue_delayed_work(conn->hdev->workqueue,
&conn->disc_work, timeo);
}
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 549ee9e87d63..67a6513bb01c 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1112,6 +1112,7 @@ void hci_conn_del(struct hci_conn *conn)
hci_conn_unlink(conn);
+ set_bit(HCI_CONN_DELETE, &conn->flags);
cancel_delayed_work_sync(&conn->disc_work);
cancel_delayed_work_sync(&conn->auto_accept_work);
cancel_delayed_work_sync(&conn->idle_work);
--
2.15.2
The quilt patch titled
Subject: mm/mremap: fix WARN with uffd that has remap events disabled
has been removed from the -mm tree. Its filename was
mm-mremap-fix-warn-with-uffd-that-has-remap-events-disabled.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: David Hildenbrand <david(a)redhat.com>
Subject: mm/mremap: fix WARN with uffd that has remap events disabled
Date: Mon, 18 Aug 2025 19:53:58 +0200
Registering userfaultfd on a VMA that spans at least one PMD and then
mremap()'ing that VMA can trigger a WARN when recovering from a failed
page table move due to a page table allocation error.
The code ends up doing the right thing (recursing, avoiding moving actual
page tables), but triggering that WARN is unpleasant:
WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_normal_pmd mm/mremap.c:357 [inline]
WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_pgt_entry mm/mremap.c:595 [inline]
WARNING: CPU: 2 PID: 6133 at mm/mremap.c:357 move_page_tables+0x3832/0x44a0 mm/mremap.c:852
Modules linked in:
CPU: 2 UID: 0 PID: 6133 Comm: syz.0.19 Not tainted 6.17.0-rc1-syzkaller-00004-g53e760d89498 #0 PREEMPT(full)
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014
RIP: 0010:move_normal_pmd mm/mremap.c:357 [inline]
RIP: 0010:move_pgt_entry mm/mremap.c:595 [inline]
RIP: 0010:move_page_tables+0x3832/0x44a0 mm/mremap.c:852
Code: ...
RSP: 0018:ffffc900037a76d8 EFLAGS: 00010293
RAX: 0000000000000000 RBX: 0000000032930007 RCX: ffffffff820c6645
RDX: ffff88802e56a440 RSI: ffffffff820c7201 RDI: 0000000000000007
RBP: ffff888037728fc0 R08: 0000000000000007 R09: 0000000000000000
R10: 0000000032930007 R11: 0000000000000000 R12: 0000000000000000
R13: ffffc900037a79a8 R14: 0000000000000001 R15: dffffc0000000000
FS: 000055556316a500(0000) GS:ffff8880d68bc000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000001b30863fff CR3: 0000000050171000 CR4: 0000000000352ef0
Call Trace:
<TASK>
copy_vma_and_data+0x468/0x790 mm/mremap.c:1215
move_vma+0x548/0x1780 mm/mremap.c:1282
mremap_to+0x1b7/0x450 mm/mremap.c:1406
do_mremap+0xfad/0x1f80 mm/mremap.c:1921
__do_sys_mremap+0x119/0x170 mm/mremap.c:1977
do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
do_syscall_64+0xcd/0x4c0 arch/x86/entry/syscall_64.c:94
entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f00d0b8ebe9
Code: ...
RSP: 002b:00007ffe5ea5ee98 EFLAGS: 00000246 ORIG_RAX: 0000000000000019
RAX: ffffffffffffffda RBX: 00007f00d0db5fa0 RCX: 00007f00d0b8ebe9
RDX: 0000000000400000 RSI: 0000000000c00000 RDI: 0000200000000000
RBP: 00007ffe5ea5eef0 R08: 0000200000c00000 R09: 0000000000000000
R10: 0000000000000003 R11: 0000000000000246 R12: 0000000000000002
R13: 00007f00d0db5fa0 R14: 00007f00d0db5fa0 R15: 0000000000000005
</TASK>
The underlying issue is that we recurse during the original page table
move, but not during the recovery move.
Fix it by checking for both VMAs and performing the check before the
pmd_none() sanity check.
Add a new helper where we perform+document that check for the PMD and PUD
level.
Thanks to Harry for bisecting.
Link: https://lkml.kernel.org/r/20250818175358.1184757-1-david@redhat.com
Fixes: 0cef0bb836e3 ("mm: clear uffd-wp PTE/PMD state on mremap()")
Signed-off-by: David Hildenbrand <david(a)redhat.com>
Reported-by: syzbot+4d9a13f0797c46a29e42(a)syzkaller.appspotmail.com
Closes: https://lkml.kernel.org/r/689bb893.050a0220.7f033.013a.GAE@google.com
Tested-by: Harry Yoo <harry.yoo(a)oracle.com>
Cc: "Liam R. Howlett" <Liam.Howlett(a)oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes(a)oracle.com>
Cc: Vlastimil Babka <vbabka(a)suse.cz>
Cc: Jann Horn <jannh(a)google.com>
Cc: Pedro Falcato <pfalcato(a)suse.de>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/mremap.c | 41 +++++++++++++++++++++++------------------
1 file changed, 23 insertions(+), 18 deletions(-)
--- a/mm/mremap.c~mm-mremap-fix-warn-with-uffd-that-has-remap-events-disabled
+++ a/mm/mremap.c
@@ -323,6 +323,25 @@ static inline bool arch_supports_page_ta
}
#endif
+static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
+{
+ /*
+ * If we are moving a VMA that has uffd-wp registered but with
+ * remap events disabled (new VMA will not be registered with uffd), we
+ * need to ensure that the uffd-wp state is cleared from all pgtables.
+ * This means recursing into lower page tables in move_page_tables().
+ *
+ * We might get called with VMAs reversed when recovering from a
+ * failed page table move. In that case, the
+ * "old"-but-actually-"originally new" VMA during recovery will not have
+ * a uffd context. Recursing into lower page tables during the original
+ * move but not during the recovery move will cause trouble, because we
+ * run into already-existing page tables. So check both VMAs.
+ */
+ return !vma_has_uffd_without_event_remap(pmc->old) &&
+ !vma_has_uffd_without_event_remap(pmc->new);
+}
+
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct pagetable_move_control *pmc,
pmd_t *old_pmd, pmd_t *new_pmd)
@@ -335,6 +354,8 @@ static bool move_normal_pmd(struct paget
if (!arch_supports_page_table_move())
return false;
+ if (!uffd_supports_page_table_move(pmc))
+ return false;
/*
* The destination pmd shouldn't be established, free_pgtables()
* should have released it.
@@ -361,15 +382,6 @@ static bool move_normal_pmd(struct paget
if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
return false;
- /* If this pmd belongs to a uffd vma with remap events disabled, we need
- * to ensure that the uffd-wp state is cleared from all pgtables. This
- * means recursing into lower page tables in move_page_tables(), and we
- * can reuse the existing code if we simply treat the entry as "not
- * moved".
- */
- if (vma_has_uffd_without_event_remap(vma))
- return false;
-
/*
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
@@ -418,6 +430,8 @@ static bool move_normal_pud(struct paget
if (!arch_supports_page_table_move())
return false;
+ if (!uffd_supports_page_table_move(pmc))
+ return false;
/*
* The destination pud shouldn't be established, free_pgtables()
* should have released it.
@@ -425,15 +439,6 @@ static bool move_normal_pud(struct paget
if (WARN_ON_ONCE(!pud_none(*new_pud)))
return false;
- /* If this pud belongs to a uffd vma with remap events disabled, we need
- * to ensure that the uffd-wp state is cleared from all pgtables. This
- * means recursing into lower page tables in move_page_tables(), and we
- * can reuse the existing code if we simply treat the entry as "not
- * moved".
- */
- if (vma_has_uffd_without_event_remap(vma))
- return false;
-
/*
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
_
Patches currently in -mm which might be from david(a)redhat.com are
mm-migrate-remove-migratepage_unmap.patch
mm-migrate-remove-migratepage_unmap-fix.patch
treewide-remove-migratepage_success.patch
mm-huge_memory-move-more-common-code-into-insert_pmd.patch
mm-huge_memory-move-more-common-code-into-insert_pud.patch
mm-huge_memory-support-huge-zero-folio-in-vmf_insert_folio_pmd.patch
fs-dax-use-vmf_insert_folio_pmd-to-insert-the-huge-zero-folio.patch
mm-huge_memory-mark-pmd-mappings-of-the-huge-zero-folio-special.patch
powerpc-ptdump-rename-struct-pgtable_level-to-struct-ptdump_pglevel.patch
mm-rmap-convert-enum-rmap_level-to-enum-pgtable_level.patch
mm-memory-convert-print_bad_pte-to-print_bad_page_map.patch
mm-memory-factor-out-common-code-from-vm_normal_page_.patch
mm-introduce-and-use-vm_normal_page_pud.patch
mm-rename-vm_ops-find_special_page-to-vm_ops-find_normal_page.patch
prctl-extend-pr_set_thp_disable-to-optionally-exclude-vm_hugepage.patch
mm-huge_memory-convert-tva_flags-to-enum-tva_type.patch
mm-huge_memory-respect-madv_collapse-with-pr_thp_disable_except_advised.patch
The quilt patch titled
Subject: mm/damon/core: fix damos_commit_filter not changing allow
has been removed from the -mm tree. Its filename was
mm-damon-core-fix-damos_commit_filter-not-changing-allow.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Sang-Heon Jeon <ekffu200098(a)gmail.com>
Subject: mm/damon/core: fix damos_commit_filter not changing allow
Date: Sat, 16 Aug 2025 10:51:16 +0900
Current damos_commit_filter() does not persist the `allow' value of the
filter. As a result, changing the `allow' value of a filter and
committing doesn't change the `allow' value.
Add the missing `allow' value update, so that committing the filter
updates the `allow' value as expected.
Link: https://lkml.kernel.org/r/20250816015116.194589-1-ekffu200098@gmail.com
Fixes: fe6d7fdd6249 ("mm/damon/core: add damos_filter->allow field")
Signed-off-by: Sang-Heon Jeon <ekffu200098(a)gmail.com>
Reviewed-by: SeongJae Park <sj(a)kernel.org>
Cc: <stable(a)vger.kernel.org> [6.14.x]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/damon/core.c | 1 +
1 file changed, 1 insertion(+)
--- a/mm/damon/core.c~mm-damon-core-fix-damos_commit_filter-not-changing-allow
+++ a/mm/damon/core.c
@@ -883,6 +883,7 @@ static void damos_commit_filter(
{
dst->type = src->type;
dst->matching = src->matching;
+ dst->allow = src->allow;
damos_commit_filter_arg(dst, src);
}
_
Patches currently in -mm which might be from ekffu200098(a)gmail.com are
mm-damon-core-set-quota-charged_from-to-jiffies-at-first-charge-window.patch
mm-damon-update-expired-description-of-damos_action.patch
docs-mm-damon-design-fix-typo-s-sz_trtied-sz_tried.patch
selftests-damon-test-no-op-commit-broke-damon-status.patch
selftests-damon-test-no-op-commit-broke-damon-status-fix.patch
mm-damon-tests-core-kunit-add-damos_commit_filter-test.patch
The quilt patch titled
Subject: mm/memory-failure: fix infinite UCE for VM_PFNMAP pfn
has been removed from the -mm tree. Its filename was
mm-memory-failure-fix-infinite-uce-for-vm_pfnmap-pfn.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Jinjiang Tu <tujinjiang(a)huawei.com>
Subject: mm/memory-failure: fix infinite UCE for VM_PFNMAP pfn
Date: Fri, 15 Aug 2025 15:32:09 +0800
When memory_failure() is called for an already hwpoisoned pfn,
kill_accessing_process() will be called to kill current task. However, if
the vma of the accessing vaddr is VM_PFNMAP, walk_page_range() will skip
the vma in walk_page_test() and return 0.
Before commit aaf99ac2ceb7 ("mm/hwpoison: do not send SIGBUS to processes
with recovered clean pages"), kill_accessing_process() will return EFAULT.
For x86, the current task will be killed in kill_me_maybe().
However, after this commit, kill_accessing_process() simply returns 0,
which claims the UCE was handled properly, but it actually wasn't. In
such a case, the user task will trigger the UCE infinitely.
To fix it, add .test_walk callback for hwpoison_walk_ops to scan all vmas.
Link: https://lkml.kernel.org/r/20250815073209.1984582-1-tujinjiang@huawei.com
Fixes: aaf99ac2ceb7 ("mm/hwpoison: do not send SIGBUS to processes with recovered clean pages")
Signed-off-by: Jinjiang Tu <tujinjiang(a)huawei.com>
Acked-by: David Hildenbrand <david(a)redhat.com>
Acked-by: Miaohe Lin <linmiaohe(a)huawei.com>
Reviewed-by: Jane Chu <jane.chu(a)oracle.com>
Cc: Kefeng Wang <wangkefeng.wang(a)huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi(a)gmail.com>
Cc: Oscar Salvador <osalvador(a)suse.de>
Cc: Shuai Xue <xueshuai(a)linux.alibaba.com>
Cc: Zi Yan <ziy(a)nvidia.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/memory-failure.c | 8 ++++++++
1 file changed, 8 insertions(+)
--- a/mm/memory-failure.c~mm-memory-failure-fix-infinite-uce-for-vm_pfnmap-pfn
+++ a/mm/memory-failure.c
@@ -853,9 +853,17 @@ static int hwpoison_hugetlb_range(pte_t
#define hwpoison_hugetlb_range NULL
#endif
+static int hwpoison_test_walk(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
+{
+ /* We also want to consider pages mapped into VM_PFNMAP. */
+ return 0;
+}
+
static const struct mm_walk_ops hwpoison_walk_ops = {
.pmd_entry = hwpoison_pte_range,
.hugetlb_entry = hwpoison_hugetlb_range,
+ .test_walk = hwpoison_test_walk,
.walk_lock = PGWALK_RDLOCK,
};
_
Patches currently in -mm which might be from tujinjiang(a)huawei.com are
mm-memory_hotplug-fix-hwpoisoned-large-folio-handling-in-do_migrate_range.patch
The quilt patch titled
Subject: iov_iter: iterate_folioq: fix handling of offset >= folio size
has been removed from the -mm tree. Its filename was
iov_iter-iterate_folioq-fix-handling-of-offset-=-folio-size.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Dominique Martinet <asmadeus(a)codewreck.org>
Subject: iov_iter: iterate_folioq: fix handling of offset >= folio size
Date: Wed, 13 Aug 2025 15:04:55 +0900
It's apparently possible to get an iov advanced all the way up to the end
of the current page we're looking at, e.g.
(gdb) p *iter
$24 = {iter_type = 4 '\004', nofault = false, data_source = false, iov_offset = 4096, {__ubuf_iovec = {
iov_base = 0xffff88800f5bc000, iov_len = 655}, {{__iov = 0xffff88800f5bc000, kvec = 0xffff88800f5bc000,
bvec = 0xffff88800f5bc000, folioq = 0xffff88800f5bc000, xarray = 0xffff88800f5bc000,
ubuf = 0xffff88800f5bc000}, count = 655}}, {nr_segs = 2, folioq_slot = 2 '\002', xarray_start = 2}}
Where iov_offset is 4k with 4k-sized folios.
This should have been fine because we're only in the 2nd slot and there's
another one after this, but iterate_folioq should not try to map a folio
when the offset skips its whole size. More importantly, 'part' here does
not end up zero (because 'PAGE_SIZE - skip % PAGE_SIZE' ends up being
PAGE_SIZE rather than zero), so skip forward to the "advance to next
folio" code.
Link: https://lkml.kernel.org/r/20250813-iot_iter_folio-v3-0-a0ffad2b665a@codewre…
Link: https://lkml.kernel.org/r/20250813-iot_iter_folio-v3-1-a0ffad2b665a@codewre…
Signed-off-by: Dominique Martinet <asmadeus(a)codewreck.org>
Fixes: db0aa2e9566f ("mm: Define struct folio_queue and ITER_FOLIOQ to handle a sequence of folios")
Reported-by: Maximilian Bosch <maximilian(a)mbosch.me>
Reported-by: Ryan Lahfa <ryan(a)lahfa.xyz>
Reported-by: Christian Theune <ct(a)flyingcircus.io>
Reported-by: Arnout Engelen <arnout(a)bzzt.net>
Link: https://lkml.kernel.org/r/D4LHHUNLG79Y.12PI0X6BEHRHW@mbosch.me/
Acked-by: David Howells <dhowells(a)redhat.com>
Cc: Al Viro <viro(a)zeniv.linux.org.uk>
Cc: Christian Brauner <brauner(a)kernel.org>
Cc: Matthew Wilcox (Oracle) <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org> [6.12+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
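For reference, a small userspace sketch (illustrative only, not the kernel
iterator macro) of the arithmetic described above, with skip == 4096 and a
single-page 4k folio:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

int main(void)
{
	size_t skip = 4096;	/* iov_offset, already at the end of the folio */
	size_t len = 655;	/* bytes still to process */
	size_t fsize = 4096;	/* size of a single-page folio */

	/* old computation: skip % PAGE_SIZE is 0, so the subtraction yields
	 * PAGE_SIZE and part becomes 655 instead of 0 -- the code would then
	 * map and step over bytes that lie past this folio */
	size_t part = len < PAGE_SIZE - skip % PAGE_SIZE ?
		      len : PAGE_SIZE - skip % PAGE_SIZE;
	printf("part = %zu\n", part);

	/* fixed flow: only map/step when skip < fsize, otherwise fall through
	 * to the "advance to next folio" branch */
	if (skip < fsize)
		printf("map folio at offset %zu\n", skip);
	else
		printf("advance to the next folio slot\n");
	return 0;
}
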
include/linux/iov_iter.h | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
--- a/include/linux/iov_iter.h~iov_iter-iterate_folioq-fix-handling-of-offset-=-folio-size
+++ a/include/linux/iov_iter.h
@@ -160,7 +160,7 @@ size_t iterate_folioq(struct iov_iter *i
do {
struct folio *folio = folioq_folio(folioq, slot);
- size_t part, remain, consumed;
+ size_t part, remain = 0, consumed;
size_t fsize;
void *base;
@@ -168,14 +168,16 @@ size_t iterate_folioq(struct iov_iter *i
break;
fsize = folioq_folio_size(folioq, slot);
- base = kmap_local_folio(folio, skip);
- part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
- remain = step(base, progress, part, priv, priv2);
- kunmap_local(base);
- consumed = part - remain;
- len -= consumed;
- progress += consumed;
- skip += consumed;
+ if (skip < fsize) {
+ base = kmap_local_folio(folio, skip);
+ part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ }
if (skip >= fsize) {
skip = 0;
slot++;
_
Patches currently in -mm which might be from asmadeus(a)codewreck.org are
The quilt patch titled
Subject: mm/damon/core: fix commit_ops_filters by using correct nth function
has been removed from the -mm tree. Its filename was
mm-damon-core-fix-commit_ops_filters-by-using-correct-nth-function.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Sang-Heon Jeon <ekffu200098(a)gmail.com>
Subject: mm/damon/core: fix commit_ops_filters by using correct nth function
Date: Sun, 10 Aug 2025 21:42:01 +0900
damos_commit_ops_filters() incorrectly uses damos_nth_filter() which
iterates core_filters. As a result, performing a commit unintentionally
corrupts ops_filters.
Add damos_nth_ops_filter() which iterates ops_filters. Use this function
to fix issues caused by wrong iteration.
Link: https://lkml.kernel.org/r/20250810124201.15743-1-ekffu200098@gmail.com
Fixes: 3607cc590f18 ("mm/damon/core: support committing ops_filters") # 6.15.x
Signed-off-by: Sang-Heon Jeon <ekffu200098(a)gmail.com>
Reviewed-by: SeongJae Park <sj(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/damon/core.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
--- a/mm/damon/core.c~mm-damon-core-fix-commit_ops_filters-by-using-correct-nth-function
+++ a/mm/damon/core.c
@@ -845,6 +845,18 @@ static struct damos_filter *damos_nth_fi
return NULL;
}
+static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
+{
+ struct damos_filter *filter;
+ int i = 0;
+
+ damos_for_each_ops_filter(filter, s) {
+ if (i++ == n)
+ return filter;
+ }
+ return NULL;
+}
+
static void damos_commit_filter_arg(
struct damos_filter *dst, struct damos_filter *src)
{
@@ -908,7 +920,7 @@ static int damos_commit_ops_filters(stru
int i = 0, j = 0;
damos_for_each_ops_filter_safe(dst_filter, next, dst) {
- src_filter = damos_nth_filter(i++, src);
+ src_filter = damos_nth_ops_filter(i++, src);
if (src_filter)
damos_commit_filter(dst_filter, src_filter);
else
_
Patches currently in -mm which might be from ekffu200098(a)gmail.com are
mm-damon-core-set-quota-charged_from-to-jiffies-at-first-charge-window.patch
mm-damon-update-expired-description-of-damos_action.patch
docs-mm-damon-design-fix-typo-s-sz_trtied-sz_tried.patch
selftests-damon-test-no-op-commit-broke-damon-status.patch
selftests-damon-test-no-op-commit-broke-damon-status-fix.patch
mm-damon-tests-core-kunit-add-damos_commit_filter-test.patch
The quilt patch titled
Subject: mm/debug_vm_pgtable: clear page table entries at destroy_args()
has been removed from the -mm tree. Its filename was
mm-debug_vm_pgtable-clear-page-table-entries-at-destroy_args.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: "Herton R. Krzesinski" <herton(a)redhat.com>
Subject: mm/debug_vm_pgtable: clear page table entries at destroy_args()
Date: Thu, 31 Jul 2025 18:40:51 -0300
The mm/debug_vm_pgtable test manually allocates page table entries for
the tests it runs, also using its own manually allocated mm_struct. That
in itself is ok, but when it exits, at destroy_args(), it fails to clear
those entries with the *_clear functions.
The problem is that this leaves stale entries. If another process allocates an
mm_struct with a pgd at the same address, it may end up running into the
stale entry. This is happening in practice on a debug kernel with
CONFIG_DEBUG_VM_PGTABLE=y, for example this is the output with some extra
debugging I added (it prints a warning trace if pgtables_bytes goes
negative, in addition to the warning at check_mm() function):
[ 2.539353] debug_vm_pgtable: [get_random_vaddr ]: random_vaddr is 0x7ea247140000
[ 2.539366] kmem_cache info
[ 2.539374] kmem_cachep 0x000000002ce82385 - freelist 0x0000000000000000 - offset 0x508
[ 2.539447] debug_vm_pgtable: [init_args ]: args->mm is 0x000000002267cc9e
(...)
[ 2.552800] WARNING: CPU: 5 PID: 116 at include/linux/mm.h:2841 free_pud_range+0x8bc/0x8d0
[ 2.552816] Modules linked in:
[ 2.552843] CPU: 5 UID: 0 PID: 116 Comm: modprobe Not tainted 6.12.0-105.debug_vm2.el10.ppc64le+debug #1 VOLUNTARY
[ 2.552859] Hardware name: IBM,9009-41A POWER9 (architected) 0x4e0202 0xf000005 of:IBM,FW910.00 (VL910_062) hv:phyp pSeries
[ 2.552872] NIP: c0000000007eef3c LR: c0000000007eef30 CTR: c0000000003d8c90
[ 2.552885] REGS: c0000000622e73b0 TRAP: 0700 Not tainted (6.12.0-105.debug_vm2.el10.ppc64le+debug)
[ 2.552899] MSR: 800000000282b033 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI,LE> CR: 24002822 XER: 0000000a
[ 2.552954] CFAR: c0000000008f03f0 IRQMASK: 0
[ 2.552954] GPR00: c0000000007eef30 c0000000622e7650 c000000002b1ac00 0000000000000001
[ 2.552954] GPR04: 0000000000000008 0000000000000000 c0000000007eef30 ffffffffffffffff
[ 2.552954] GPR08: 00000000ffff00f5 0000000000000001 0000000000000048 0000000000004000
[ 2.552954] GPR12: 00000003fa440000 c000000017ffa300 c0000000051d9f80 ffffffffffffffdb
[ 2.552954] GPR16: 0000000000000000 0000000000000008 000000000000000a 60000000000000e0
[ 2.552954] GPR20: 4080000000000000 c0000000113af038 00007fffcf130000 0000700000000000
[ 2.552954] GPR24: c000000062a6a000 0000000000000001 8000000062a68000 0000000000000001
[ 2.552954] GPR28: 000000000000000a c000000062ebc600 0000000000002000 c000000062ebc760
[ 2.553170] NIP [c0000000007eef3c] free_pud_range+0x8bc/0x8d0
[ 2.553185] LR [c0000000007eef30] free_pud_range+0x8b0/0x8d0
[ 2.553199] Call Trace:
[ 2.553207] [c0000000622e7650] [c0000000007eef30] free_pud_range+0x8b0/0x8d0 (unreliable)
[ 2.553229] [c0000000622e7750] [c0000000007f40b4] free_pgd_range+0x284/0x3b0
[ 2.553248] [c0000000622e7800] [c0000000007f4630] free_pgtables+0x450/0x570
[ 2.553274] [c0000000622e78e0] [c0000000008161c0] exit_mmap+0x250/0x650
[ 2.553292] [c0000000622e7a30] [c0000000001b95b8] __mmput+0x98/0x290
[ 2.558344] [c0000000622e7a80] [c0000000001d1018] exit_mm+0x118/0x1b0
[ 2.558361] [c0000000622e7ac0] [c0000000001d141c] do_exit+0x2ec/0x870
[ 2.558376] [c0000000622e7b60] [c0000000001d1ca8] do_group_exit+0x88/0x150
[ 2.558391] [c0000000622e7bb0] [c0000000001d1db8] sys_exit_group+0x48/0x50
[ 2.558407] [c0000000622e7be0] [c00000000003d810] system_call_exception+0x1e0/0x4c0
[ 2.558423] [c0000000622e7e50] [c00000000000d05c] system_call_vectored_common+0x15c/0x2ec
(...)
[ 2.558892] ---[ end trace 0000000000000000 ]---
[ 2.559022] BUG: Bad rss-counter state mm:000000002267cc9e type:MM_ANONPAGES val:1
[ 2.559037] BUG: non-zero pgtables_bytes on freeing mm: -6144
Here the modprobe process ended up with an mm_struct allocated from the
mm_struct slab that was previously used by the debug_vm_pgtable test. That
in itself is not a problem, since the mm_struct is initialized again etc.
However, if it ends up using the same pgd table, it bumps into the old
stale entry when clearing/freeing the page table entries, and tries to
free an entry that is already gone (the one which was allocated by the
debug_vm_pgtable test). This also explains the negative pgtables_bytes,
since it is accounting for entries that were not allocated by the current
process.
As far as I looked, pgd_{alloc,free} etc. do not clear entries; clearing
of the entries is explicitly done in the free_pgtables->
free_pgd_range->free_p4d_range->free_pud_range->free_pmd_range->
free_pte_range path. However, the debug_vm_pgtable test does not call
free_pgtables, since it allocates the mm_struct and entries manually for
its test and, e.g., does not go through page faults. So it should also
clear the entries manually before exiting, at destroy_args().
This problem was noticed in a repeated-reboot test on a powerpc host,
with a debug kernel with CONFIG_DEBUG_VM_PGTABLE enabled. It depends on
the system, but in a loop of 100 reboots the problem could manifest once
or twice, if a process ends up getting the right mm->pgd entry with the
stale entries left by mm/debug_vm_pgtable. With this patch applied, I
couldn't reproduce the problems anymore. I was also able to reproduce
the problem on the latest upstream kernel (6.16).
I also modified destroy_args() to use mmput() instead of mmdrop(): there
is no reason to hold an mm_users reference and not release the mm_struct
entirely. In the output above with my debugging prints, I had already
patched it to use mmput(); that did not fix the problem, but it helped
in the debugging as well.
Link: https://lkml.kernel.org/r/20250731214051.4115182-1-herton@redhat.com
Fixes: 3c9b84f044a9 ("mm/debug_vm_pgtable: introduce struct pgtable_debug_args")
Signed-off-by: Herton R. Krzesinski <herton(a)redhat.com>
Cc: Anshuman Khandual <anshuman.khandual(a)arm.com>
Cc: Christophe Leroy <christophe.leroy(a)csgroup.eu>
Cc: Gavin Shan <gshan(a)redhat.com>
Cc: Gerald Schaefer <gerald.schaefer(a)linux.ibm.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/debug_vm_pgtable.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
--- a/mm/debug_vm_pgtable.c~mm-debug_vm_pgtable-clear-page-table-entries-at-destroy_args
+++ a/mm/debug_vm_pgtable.c
@@ -990,29 +990,34 @@ static void __init destroy_args(struct p
/* Free page table entries */
if (args->start_ptep) {
+ pmd_clear(args->pmdp);
pte_free(args->mm, args->start_ptep);
mm_dec_nr_ptes(args->mm);
}
if (args->start_pmdp) {
+ pud_clear(args->pudp);
pmd_free(args->mm, args->start_pmdp);
mm_dec_nr_pmds(args->mm);
}
if (args->start_pudp) {
+ p4d_clear(args->p4dp);
pud_free(args->mm, args->start_pudp);
mm_dec_nr_puds(args->mm);
}
- if (args->start_p4dp)
+ if (args->start_p4dp) {
+ pgd_clear(args->pgdp);
p4d_free(args->mm, args->start_p4dp);
+ }
/* Free vma and mm struct */
if (args->vma)
vm_area_free(args->vma);
if (args->mm)
- mmdrop(args->mm);
+ mmput(args->mm);
}
static struct page * __init
_
Patches currently in -mm which might be from herton(a)redhat.com are