When the host actively triggers SSR and collects coredump data,
the Bluetooth stack sends a reset command to the controller. However,
because the QCA_SSR_TRIGGERED and QCA_IBS_DISABLED bits cannot be
cleared, the reset command times out.
To address this, clear the QCA_SSR_TRIGGERED and QCA_IBS_DISABLED
flags and add a 50ms delay after SSR, but only when
HCI_QUIRK_NON_PERSISTENT_SETUP is not set. This ensures the controller
completes the SSR process when BT_EN is kept permanently high by the
hardware.
For the meaning of HCI_QUIRK_NON_PERSISTENT_SETUP, please refer to
the comment in `include/net/bluetooth/hci.h`.
The HCI_QUIRK_NON_PERSISTENT_SETUP quirk is associated with BT_EN,
and its presence can be used to determine whether BT_EN is defined in DTS.
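As a rough illustration of that relationship (paraphrased and
simplified, not verbatim hci_qca.c code; variable names are
illustrative):

  /* Probe-time sketch: the quirk follows the DTS "enable" (BT_EN) GPIO. */
  qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
                                          GPIOD_OUT_LOW);
  if (!qcadev->bt_en)
          power_ctrl_enabled = false;   /* BT_EN not described in DTS */
  if (power_ctrl_enabled)
          /* The driver can toggle BT_EN, so a full re-setup is expected. */
          set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);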
After SSR, the host does not download the firmware, so the controller
remains in the IBS_WAKE state. The host needs to synchronize with the
controller to maintain proper operation.
When SSR is triggered multiple times, only the first trigger generates
a coredump file, because the coredump flag is never cleared.
Clear the coredump flag once SSR completes.
When the SSR duration exceeds 2 seconds, the host's tx_idle_timeout
fires and puts the host TX state to sleep. Because the hardware keeps
bt_en pulled high, the firmware is not downloaded after SSR, and as a
result the controller does not enter sleep mode. Consequently, when the
host later sends a command, it first sends 0xFD (the IBS wake
indication) to the controller, but the controller does not respond,
leading to a command timeout.
So reset tx_idle_timer after SSR to prevent the host from entering the
TX IBS_Sleep state.
---
Changes since v6-7:
- Merge the changes into a single patch.
- Update commit message.
Changes since v1-5:
- Add an explanation for HCI_QUIRK_NON_PERSISTENT_SETUP.
- Add comments for msleep(50).
- Update format and commit message.
Signed-off-by: Shuai Zhang <quic_shuaz@quicinc.com>
---
drivers/bluetooth/hci_qca.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 4cff4d9be..97aaf4985 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1653,6 +1653,39 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
skb_queue_purge(&qca->rx_memdump_q);
}
+ /*
+ * If the BT chip's bt_en pin is connected to a 3.3V power supply in
+ * hardware and always stays high, the driver cannot control the bt_en
+ * pin. As a result, during SSR (SubSystem Restart), the QCA_SSR_TRIGGERED
+ * and QCA_IBS_DISABLED flags cannot be cleared, which leads to a reset
+ * command timeout.
+ * Add an msleep delay to ensure the controller completes the SSR process.
+ *
+ * The host will not download the firmware after SSR, so the controller
+ * remains in the IBS_WAKE state and the host needs to synchronize with it.
+ *
+ * Since the bluetooth chip has been reset, clear the memdump state.
+ */
+ if (!test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ /*
+ * When the SSR (SubSystem Restart) duration exceeds 2 seconds,
+ * the host's tx_idle_timer expires and puts the host TX state
+ * to sleep. Reset tx_idle_timer after SSR to prevent the
+ * host from entering the TX IBS_Sleep state.
+ */
+ mod_timer(&qca->tx_idle_timer, jiffies +
+ msecs_to_jiffies(qca->tx_idle_delay));
+
+ /* Controller reset completion time is 50ms */
+ msleep(50);
+
+ clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
+ clear_bit(QCA_IBS_DISABLED, &qca->flags);
+
+ qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+ qca->memdump_state = QCA_MEMDUMP_IDLE;
+ }
+
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
--
2.34.1
`dma_free_coherent()` must only be called if the corresponding
`dma_alloc_coherent()` call has succeeded. Calling it when the allocation
fails leads to undefined behavior.
Drop the erroneous dma_free_coherent() call from the allocation-failure
path, where there is nothing to free.
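For reference, a minimal sketch of the intended pattern (illustrative
only, not quoting the driver verbatim):

  ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
  if (!ptemp) {
          edac_printk(KERN_ERR, EDAC_MC, "Inject: Buffer Allocation error\n");
          return -ENOMEM;       /* nothing was allocated, so nothing to free */
  }
  /* ... use the buffer ... */
  dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);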
Signed-off-by: Salah Triki <salah.triki@gmail.com>
Fixes: 71bcada88b0f3 ("edac: altera: Add Altera SDRAM EDAC support")
Cc: Markus Elfring <Markus.Elfring@web.de>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: James Morse <james.morse@arm.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Robert Richter <rric@kernel.org>
Cc: linux-edac@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: stable@vger.kernel.org
---
drivers/edac/altera_edac.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index cae52c654a15..7685a8550d4b 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -128,7 +128,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
if (!ptemp) {
- dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
edac_printk(KERN_ERR, EDAC_MC,
"Inject: Buffer Allocation error\n");
return -ENOMEM;
--
2.43.0
Some recent commits incorrectly assumed that locks are naturally
aligned. That assumption fails on Linux/m68k (and, interestingly, would
have failed on Linux/cris too), which leads to spurious warnings from
the hang-check code. Fix this bug by adding the necessary 'aligned'
attribute to the atomic_t counter.
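For context, the hang-check code packs a small type tag into the low
bits of the lock address, which is only sound if the lock is at least
word-aligned. A rough sketch of the scheme (identifiers simplified for
illustration, not verbatim kernel code):

  #define BLOCKER_TYPE_MASK 3UL   /* illustrative tag width */

  static inline unsigned long encode_blocker(const void *lock,
                                             unsigned long type)
  {
          /*
           * Valid only when the low bits of the lock address are zero,
           * i.e. the lock is at least 4-byte aligned. m68k aligns int
           * to 2 bytes by default, so this warning can fire spuriously.
           */
          WARN_ON_ONCE((unsigned long)lock & BLOCKER_TYPE_MASK);
          return (unsigned long)lock | type;
  }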
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Eero Tamminen <oak@helsinkinet.fi>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Cc: stable@vger.kernel.org
Reported-by: Eero Tamminen <oak@helsinkinet.fi>
Closes: https://lore.kernel.org/lkml/CAMuHMdW7Ab13DdGs2acMQcix5ObJK0O2dG_Fxzr8_g58R…
Fixes: e711faaafbe5 ("hung_task: replace blocker_mutex with encoded blocker")
Signed-off-by: Finn Thain <fthain@linux-m68k.org>
---
I tested this on m68k using GCC and it fixed the problem for me. AFAIK,
the other architectures naturally align ints already so I'm expecting to
see no effect there.
---
include/linux/types.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/types.h b/include/linux/types.h
index 6dfdb8e8e4c3..cd5b2b0f4b02 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -179,7 +179,7 @@ typedef phys_addr_t resource_size_t;
typedef unsigned long irq_hw_number_t;
typedef struct {
- int counter;
+ int counter __aligned(sizeof(int));
} atomic_t;
#define ATOMIC_INIT(i) { (i) }
--
2.49.1
[BUG]
When running test case generic/457, there is a chance to hit the
following error, with 64K page size and 4K btrfs block size, and
"compress=zstd" mount option:
FSTYP -- btrfs
PLATFORM -- Linux/aarch64 btrfs-aarch64 6.17.0-rc2-custom+ #129 SMP PREEMPT_DYNAMIC Wed Aug 20 18:52:51 ACST 2025
MKFS_OPTIONS -- -s 4k /dev/mapper/test-scratch1
MOUNT_OPTIONS -- -o compress=zstd /dev/mapper/test-scratch1 /mnt/scratch
generic/457 2s ... [failed, exit status 1]- output mismatch (see /home/adam/xfstests-dev/results//generic/457.out.bad)
--- tests/generic/457.out 2024-04-25 18:13:45.160550980 +0930
+++ /home/adam/xfstests-dev/results//generic/457.out.bad 2025-08-22 16:09:41.039352391 +0930
@@ -1,2 +1,3 @@
QA output created by 457
-Silence is golden
+testfile6 end md5sum mismatched
+(see /home/adam/xfstests-dev/results//generic/457.full for details)
...
(Run 'diff -u /home/adam/xfstests-dev/tests/generic/457.out /home/adam/xfstests-dev/results//generic/457.out.bad' to see the entire diff)
The root problem is that, after certain fsx operations, the file
contents change across a mount cycle.
There is a much smaller reproducer based on that test case, which I
mainly used to debug the bug:
workload() {
mkfs.btrfs -f $dev > /dev/null
dmesg -C
trace-cmd clear
mount -o compress=zstd $dev $mnt
xfs_io -f -c "pwrite -S 0xff 0 256K" -c "sync" $mnt/base > /dev/null
cp --reflink=always -p -f $mnt/base $mnt/file
$fsx -N 4 -d -k -S 3746842 $mnt/file
if [ $? -ne 0 ]; then
echo "!!! FSX FAILURE !!!"
fail
fi
csum_before=$(_md5_checksum $mnt/file)
stop_trace
umount $mnt
mount $dev $mnt
csum_after=$(_md5_checksum $mnt/file)
umount $mnt
if [ "$csum_before" != "$csum_after" ]; then
echo "!!! CSUM MISMATCH !!!"
fail
fi
}
This seed value causes a 100% reproducible csum mismatch after a mount
cycle.
The seed value results in only 2 real operations:
Seed set to 3746842
main: filesystem does not support fallocate mode FALLOC_FL_UNSHARE_RANGE, disabling!
main: filesystem does not support fallocate mode FALLOC_FL_COLLAPSE_RANGE, disabling!
main: filesystem does not support fallocate mode FALLOC_FL_INSERT_RANGE, disabling!
main: filesystem does not support exchange range, disabling!
main: filesystem does not support dontcache IO, disabling!
2 clone from 0x3b000 to 0x3f000, (0x4000 bytes) at 0x1f000
3 write 0x2975b thru 0x2ba20 (0x22c6 bytes) dontcache=0
All 4 operations completed A-OK!
[CAUSE]
With extra debug trace_printk(), the following sequence can explain the
root cause:
fsx-3900290 [002] ..... 161696.160966: btrfs_submit_compressed_read: r/i=5/258 file_off=131072 em start=126976 len=16384
The "r/i" is showing the root id and the ino number.
In this case, my minimal reproducer is indeed using inode 258 of
subvolume 5, and that's the inode with changing contents.
The above trace is from the function btrfs_submit_compressed_read(),
triggered by fsx to read the folio at file offset 128K.
Notice that the extent map, it's at offset 124K, with a length of 16K.
This means the extent map only covers the first 12K (3 blocks) of the
folio 128K.
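Worked out from those numbers:

  folio:       [128K, 192K)   64K folio at file offset 128K
  extent map:  [124K, 140K)   start=126976 (124K), len=16384 (16K)
  overlap:     [128K, 140K) = 12K = 3 blocks of 4K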
fsx-3900290 [002] ..... 161696.160969: trace_dump_cb: btrfs_submit_compressed_read, r/i=5/258 file off start=131072 len=65536 bi_size=65536
This is the line I used to dump the basic info of a bbio; it shows that
bi_size is 64K, i.e. covering the whole 64K folio at file offset 128K.
But remember, the extent map only covers 3 blocks, which is definitely
not enough to cover the whole 64K folio at file offset 128K.
kworker/u19:1-3748349 [002] ..... 161696.161154: btrfs_decompress_buf2page: r/i=5/258 file_off=131072 copy_len=4096 content=ffff
kworker/u19:1-3748349 [002] ..... 161696.161155: btrfs_decompress_buf2page: r/i=5/258 file_off=135168 copy_len=4096 content=ffff
kworker/u19:1-3748349 [002] ..... 161696.161156: btrfs_decompress_buf2page: r/i=5/258 file_off=139264 copy_len=4096 content=ffff
kworker/u19:1-3748349 [002] ..... 161696.161157: btrfs_decompress_buf2page: r/i=5/258 file_off=143360 copy_len=4096 content=ffff
The above lines show btrfs_decompress_buf2page(), called by the zstd
decompression code, copying the decompressed content into the filemap.
But notice that the last line is already beyond the extent map range.
Furthermore, there are no further compressed-content copies, as the
compressed bio only has an extent map covering the first 3 blocks (so
the 4th block copy is already incorrect).
kworker/u19:1-3748349 [002] ..... 161696.161161: trace_dump_cb: r/i=5/258 file_pos=131072 content=ffff
kworker/u19:1-3748349 [002] ..... 161696.161161: trace_dump_cb: r/i=5/258 file_pos=135168 content=ffff
kworker/u19:1-3748349 [002] ..... 161696.161162: trace_dump_cb: r/i=5/258 file_pos=139264 content=ffff
kworker/u19:1-3748349 [002] ..... 161696.161162: trace_dump_cb: r/i=5/258 file_pos=143360 content=ffff
kworker/u19:1-3748349 [002] ..... 161696.161162: trace_dump_cb: r/i=5/258 file_pos=147456 content=0000
This is the extra dump of the compressed bio: after file offset
140K (143360), the content is all zero, which is incorrect.
The zeros are there because we didn't copy anything into the folio.
The root cause of the corruption is that we are submitting a compressed
read for the whole folio, but the extent map we get only covers the
first 3 blocks, meaning the compressed read path is merging reads that
should not be merged.
The involved file extents are:
item 19 key (258 EXTENT_DATA 126976) itemoff 15143 itemsize 53
generation 9 type 1 (regular)
extent data disk byte 13635584 nr 4096
extent data offset 110592 nr 16384 ram 131072
extent compression 3 (zstd)
item 20 key (258 EXTENT_DATA 143360) itemoff 15090 itemsize 53
generation 9 type 1 (regular)
extent data disk byte 13635584 nr 4096
extent data offset 12288 nr 24576 ram 131072
extent compression 3 (zstd)
Note that both file extents, at 124K and 140K, point to the same
compressed extent, but with different offsets.
This means that reads of the ranges [124K, 140K) and [140K, 164K) must
not be merged.
But the read merge check function, btrfs_bio_is_contig(), only checks
the disk_bytenr of the two compressed reads; it does not have enough
information (such as the involved extent maps) to do a more
comprehensive check, resulting in an incorrectly merged compressed read.
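For reference, the compressed branch of that check is roughly the
following (paraphrased, not quoted verbatim):

  /* Paraphrased sketch of the compressed branch of btrfs_bio_is_contig(). */
  if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
          /*
           * Every compressed bio starts at the compressed extent's disk
           * bytenr, so only the starting sector is compared; nothing
           * ties the check to a specific extent map.
           */
          return bio->bi_iter.bi_sector == (disk_bytenr >> SECTOR_SHIFT);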
Unfortunately this is a long-standing bug, predating subpage block size
support.
But subpage block size support (and the experimental large folio
support) makes it much easier to detect.
If block size equals page size, a regular page read only reads one
block at a time, so there is no extent map sharing nor merging.
(This means that for bs == ps it is still possible to hit the bug with
readahead; we just don't have test coverage with content verification
for readahead.)
[FIX]
Save the start/len of the last hit compressed extent map into
btrfs_bio_ctrl, and check whether the current extent map is the same as
the saved one.
Only em::start/len are saved, to keep btrfs_bio_ctrl small, since it
lives on the stack, which is a very limited resource inside the kernel.
Since compressed extent maps are never merged, their start/len are
unique inside the same inode, so checking start/len is enough to make
sure they are the same extent map.
If the extent maps do not match, force submission of the current bio,
so that the reads are never merged.
CC: stable@vger.kernel.org
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
v2:
- Only save extent_map::start/len to save memory for btrfs_bio_ctrl
It's using on-stack memory which is very limited inside the kernel.
- Remove the commit message mentioning of clearing last saved em
Since we're using em::start/len, there is no need to clear them.
Either we hit the same em::start/len, meaning hitting the same extent
map, or we hit a different em, which will have a different start/len.
---
fs/btrfs/extent_io.c | 52 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0c12fd64a1f3..418e3bf40f94 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -131,6 +131,22 @@ struct btrfs_bio_ctrl {
*/
unsigned long submit_bitmap;
struct readahead_control *ractl;
+
+ /*
+ * The start/len of the last hit compressed extent map.
+ *
+ * btrfs_bio_is_contig() currently only uses disk_bytenr as the
+ * condition to check whether the read can be merged with the
+ * previous bio, which is not sufficient, e.g. when two file
+ * extents point to the same compressed extent.
+ *
+ * So an extra check is needed so that only reads covered by the
+ * same extent map are merged.
+ * Just extent_map::start/len is enough, as they are unique
+ * inside the same inode.
+ */
+ u64 last_compress_em_start;
+ u64 last_compress_em_len;
};
/*
@@ -957,6 +973,32 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
readahead_expand(ractl, ra_pos, em_end - ra_pos);
}
+static void save_compressed_em(struct btrfs_bio_ctrl *bio_ctrl,
+ const struct extent_map *em)
+{
+ if (btrfs_extent_map_compression(em) == BTRFS_COMPRESS_NONE)
+ return;
+ bio_ctrl->last_compress_em_start = em->start;
+ bio_ctrl->last_compress_em_len = em->len;
+}
+
+static bool is_same_compressed_em(struct btrfs_bio_ctrl *bio_ctrl,
+ const struct extent_map *em)
+{
+ /*
+ * Only if the em is exactly the same as the previous one can we merge
+ * the current folio into the read bio.
+ *
+ * Here we only need to compare the em->start/len against saved
+ * last_compress_em_start/len, as start/len inside an inode are unique,
+ * and compressed extent maps are never merged.
+ */
+ if (em->start != bio_ctrl->last_compress_em_start ||
+ em->len != bio_ctrl->last_compress_em_len)
+ return false;
+ return true;
+}
+
/*
* basic readpage implementation. Locked extent state structs are inserted
* into the tree that are removed when the IO is done (by the end_io
@@ -1080,9 +1122,19 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
*prev_em_start != em->start)
force_bio_submit = true;
+ /*
+ * We must ensure we only merge a compressed read when the current
+ * extent map matches the previous one exactly.
+ */
+ if (compress_type != BTRFS_COMPRESS_NONE) {
+ if (!is_same_compressed_em(bio_ctrl, em))
+ force_bio_submit = true;
+ }
+
if (prev_em_start)
*prev_em_start = em->start;
+ save_compressed_em(bio_ctrl, em);
em_gen = em->generation;
btrfs_free_extent_map(em);
em = NULL;
--
2.50.1
It is possible that the topology parsing function
audioreach_parse_common_tokens() could return NULL or an error
pointer. Add the missing NULL check in
audioreach_widget_load_module_common() so that we do not dereference it.
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Cc: <stable@vger.kernel.org>
Fixes: 36ad9bf1d93d ("ASoC: qdsp6: audioreach: add topology support")
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
---
sound/soc/qcom/qdsp6/topology.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/sound/soc/qcom/qdsp6/topology.c b/sound/soc/qcom/qdsp6/topology.c
index ec51fabd98cb..c2226ed5164f 100644
--- a/sound/soc/qcom/qdsp6/topology.c
+++ b/sound/soc/qcom/qdsp6/topology.c
@@ -607,8 +607,8 @@ static int audioreach_widget_load_module_common(struct snd_soc_component *compon
return PTR_ERR(cont);
mod = audioreach_parse_common_tokens(apm, cont, &tplg_w->priv, w);
- if (IS_ERR(mod))
- return PTR_ERR(mod);
+ if (IS_ERR_OR_NULL(mod))
+ return mod ? PTR_ERR(mod) : -ENODEV;
mod->data = audioreach_get_module_priv_data(&tplg_w->priv);
--
2.50.0
Introduce and use {pgd,p4d}_populate_kernel() in core MM code when
populating PGD and P4D entries for the kernel address space.
These helpers ensure proper synchronization of page tables when
updating the kernel portion of top-level page tables.
Until now, the kernel has relied on each architecture to handle
synchronization of top-level page tables in an ad-hoc manner.
For example, see commit 9b861528a801 ("x86-64, mem: Update all PGDs for
direct mapping and vmemmap mapping changes").
However, this approach has proven fragile for the following reasons:
1) It is easy to forget to perform the necessary page table
synchronization when introducing new changes.
For instance, commit 4917f55b4ef9 ("mm/sparse-vmemmap: improve memory
savings for compound devmaps") overlooked the need to synchronize
page tables for the vmemmap area.
2) It is also easy to overlook that the vmemmap and direct mapping areas
must not be accessed before explicit page table synchronization.
For example, commit 8d400913c231 ("x86/vmemmap: handle unpopulated
sub-pmd ranges") caused crashes by accessing the vmemmap area
before calling sync_global_pgds().
To address this, as suggested by Dave Hansen, introduce _kernel() variants
of the page table population helpers, which invoke architecture-specific
hooks to properly synchronize page tables. These are introduced in a new
header file, include/linux/pgalloc.h, so they can be called from common code.
They reuse existing infrastructure for vmalloc and ioremap.
Synchronization requirements are determined by ARCH_PAGE_TABLE_SYNC_MASK,
and the actual synchronization is performed by arch_sync_kernel_mappings().
This change currently targets only x86_64, so only PGD and P4D level
helpers are introduced. Currently, these helpers are no-ops since no
architecture sets PGTBL_{PGD,P4D}_MODIFIED in ARCH_PAGE_TABLE_SYNC_MASK.
In theory, PUD and PMD level helpers can be added later if needed by
other architectures. For now, 32-bit architectures (x86-32 and arm) only
handle PGTBL_PMD_MODIFIED, so p*d_populate_kernel() will never affect
them unless we introduce a PMD level helper.
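For illustration, an architecture that needs the synchronization would
opt in roughly as follows (hedged sketch, not part of this patch;
sync_global_pgds() is x86-64's existing helper, expected to be wired up
separately):

  #define ARCH_PAGE_TABLE_SYNC_MASK \
          (PGTBL_PGD_MODIFIED | PGTBL_P4D_MODIFIED)

  void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
  {
          /* Propagate init_mm's new top-level entries to all page tables. */
          sync_global_pgds(start, end);
  }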
Cc: <stable@vger.kernel.org>
Fixes: 8d400913c231 ("x86/vmemmap: handle unpopulated sub-pmd ranges")
Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Kiryl Shutsemau <kas@kernel.org>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
include/linux/pgalloc.h | 24 ++++++++++++++++++++++++
include/linux/pgtable.h | 13 +++++++------
mm/kasan/init.c | 12 ++++++------
mm/percpu.c | 6 +++---
mm/sparse-vmemmap.c | 6 +++---
5 files changed, 43 insertions(+), 18 deletions(-)
create mode 100644 include/linux/pgalloc.h
diff --git a/include/linux/pgalloc.h b/include/linux/pgalloc.h
new file mode 100644
index 000000000000..290ab864320f
--- /dev/null
+++ b/include/linux/pgalloc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PGALLOC_H
+#define _LINUX_PGALLOC_H
+
+#include <linux/pgtable.h>
+#include <asm/pgalloc.h>
+
+static inline void pgd_populate_kernel(unsigned long addr, pgd_t *pgd,
+ p4d_t *p4d)
+{
+ pgd_populate(&init_mm, pgd, p4d);
+ if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED)
+ arch_sync_kernel_mappings(addr, addr);
+}
+
+static inline void p4d_populate_kernel(unsigned long addr, p4d_t *p4d,
+ pud_t *pud)
+{
+ p4d_populate(&init_mm, p4d, pud);
+ if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED)
+ arch_sync_kernel_mappings(addr, addr);
+}
+
+#endif /* _LINUX_PGALLOC_H */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index ba699df6ef69..2b80fd456c8b 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1469,8 +1469,8 @@ static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned
/*
* Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
- * needs to be called.
+ * and let generic vmalloc, ioremap and page table update code know when
+ * arch_sync_kernel_mappings() needs to be called.
*/
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
@@ -1954,10 +1954,11 @@ static inline bool arch_has_pfn_modify_check(void)
/*
* Page Table Modification bits for pgtbl_mod_mask.
*
- * These are used by the p?d_alloc_track*() set of functions an in the generic
- * vmalloc/ioremap code to track at which page-table levels entries have been
- * modified. Based on that the code can better decide when vmalloc and ioremap
- * mapping changes need to be synchronized to other page-tables in the system.
+ * These are used by the p?d_alloc_track*() and p*d_populate_kernel()
+ * functions in the generic vmalloc, ioremap and page table update code
+ * to track at which page-table levels entries have been modified.
+ * Based on that the code can better decide when page table changes need
+ * to be synchronized to other page-tables in the system.
*/
#define __PGTBL_PGD_MODIFIED 0
#define __PGTBL_P4D_MODIFIED 1
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index ced6b29fcf76..8fce3370c84e 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -13,9 +13,9 @@
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
+#include <linux/pgalloc.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
#include "kasan.h"
@@ -191,7 +191,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
pud_t *pud;
pmd_t *pmd;
- p4d_populate(&init_mm, p4d,
+ p4d_populate_kernel(addr, p4d,
lm_alias(kasan_early_shadow_pud));
pud = pud_offset(p4d, addr);
pud_populate(&init_mm, pud,
@@ -212,7 +212,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
} else {
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
pud_init(p);
- p4d_populate(&init_mm, p4d, p);
+ p4d_populate_kernel(addr, p4d, p);
}
}
zero_pud_populate(p4d, addr, next);
@@ -251,10 +251,10 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
* puds,pmds, so pgd_populate(), pud_populate()
* is noops.
*/
- pgd_populate(&init_mm, pgd,
+ pgd_populate_kernel(addr, pgd,
lm_alias(kasan_early_shadow_p4d));
p4d = p4d_offset(pgd, addr);
- p4d_populate(&init_mm, p4d,
+ p4d_populate_kernel(addr, p4d,
lm_alias(kasan_early_shadow_pud));
pud = pud_offset(p4d, addr);
pud_populate(&init_mm, pud,
@@ -273,7 +273,7 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
if (!p)
return -ENOMEM;
} else {
- pgd_populate(&init_mm, pgd,
+ pgd_populate_kernel(addr, pgd,
early_alloc(PAGE_SIZE, NUMA_NO_NODE));
}
}
diff --git a/mm/percpu.c b/mm/percpu.c
index d9cbaee92b60..a56f35dcc417 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -3108,7 +3108,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
#endif /* BUILD_EMBED_FIRST_CHUNK */
#ifdef BUILD_PAGE_FIRST_CHUNK
-#include <asm/pgalloc.h>
+#include <linux/pgalloc.h>
#ifndef P4D_TABLE_SIZE
#define P4D_TABLE_SIZE PAGE_SIZE
@@ -3134,13 +3134,13 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
if (pgd_none(*pgd)) {
p4d = memblock_alloc_or_panic(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
- pgd_populate(&init_mm, pgd, p4d);
+ pgd_populate_kernel(addr, pgd, p4d);
}
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d)) {
pud = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
- p4d_populate(&init_mm, p4d, pud);
+ p4d_populate_kernel(addr, p4d, pud);
}
pud = pud_offset(p4d, addr);
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 41aa0493eb03..dbd8daccade2 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -27,9 +27,9 @@
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
+#include <linux/pgalloc.h>
#include <asm/dma.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"
@@ -229,7 +229,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
if (!p)
return NULL;
pud_init(p);
- p4d_populate(&init_mm, p4d, p);
+ p4d_populate_kernel(addr, p4d, p);
}
return p4d;
}
@@ -241,7 +241,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
if (!p)
return NULL;
- pgd_populate(&init_mm, pgd, p);
+ pgd_populate_kernel(addr, pgd, p);
}
return pgd;
}
--
2.43.0