When accessing the KASAN shadow region that corresponds to a task stack in vmalloc space, stack recursion occurs if the region's page tables are unpopulated: the shadow access raises a translation fault, and since the fault handlers are themselves KASAN-instrumented, their own shadow accesses fault again, recursing until the stack overflows.
Calltrace:
 ...
 __dabt_svc+0x4c/0x80
 __asan_load4+0x30/0x88
 do_translation_fault+0x2c/0x110
 do_DataAbort+0x4c/0xec
 __dabt_svc+0x4c/0x80
 __asan_load4+0x30/0x88
 do_translation_fault+0x2c/0x110
 do_DataAbort+0x4c/0xec
 __dabt_svc+0x4c/0x80
 sched_setscheduler_nocheck+0x60/0x158
 kthread+0xec/0x198
 ret_from_fork+0x14/0x28
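For reference, generic KASAN maps an address to its shadow with the stock
helper from include/linux/kasan.h:

	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

The shadow of [VMALLOC_START, VMALLOC_END) therefore lies outside the
vmalloc range itself, so the existing memcpy() in __check_vmalloc_seq(),
which only covers [VMALLOC_START, VMALLOC_END), never syncs the PGD
entries of the shadow region into a task's mm.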
Fixes: 565cbaad83d ("ARM: 9202/1: kasan: support CONFIG_KASAN_VMALLOC")
Cc: stable@vger.kernel.org
Signed-off-by: Melon Liu <melon1335@163.org>
---
 arch/arm/mm/ioremap.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 794cfea9f..f952b0b0f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -115,16 +115,31 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
+static inline void sync_pgds(struct mm_struct *mm, unsigned long start,
+			     unsigned long end)
+{
+	end = ALIGN(end, PGDIR_SIZE);
+	memcpy(pgd_offset(mm, start), pgd_offset_k(start),
+	       sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
+}
+
+static inline void sync_vmalloc_pgds(struct mm_struct *mm)
+{
+	sync_pgds(mm, VMALLOC_START, VMALLOC_END);
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		sync_pgds(mm, (unsigned long)kasan_mem_to_shadow(
+				(void *)VMALLOC_START),
+			  (unsigned long)kasan_mem_to_shadow(
+				(void *)VMALLOC_END));
+}
+
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	int seq;
 
 	do {
 		seq = atomic_read(&init_mm.context.vmalloc_seq);
-		memcpy(pgd_offset(mm, VMALLOC_START),
-		       pgd_offset_k(VMALLOC_START),
-		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
-					pgd_index(VMALLOC_START)));
+		sync_vmalloc_pgds(mm);
 		/*
 		 * Use a store-release so that other CPUs that observe the
 		 * counter's new value are guaranteed to see the results of the
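A note on the ALIGN() in sync_pgds(): if 'end' is not PGDIR_SIZE-aligned,
pgd_index(end) - pgd_index(start) rounds down and the last, partially
covered entry would be skipped. A minimal userspace sketch of that
arithmetic (PGDIR_SHIFT hardcoded to 21 here purely for illustration, not
taken from this patch):

	#include <stdio.h>

	#define PGDIR_SHIFT	21	/* illustrative value only */
	#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define pgd_index(addr)	((addr) >> PGDIR_SHIFT)

	int main(void)
	{
		unsigned long start = 0xf0000000UL;
		unsigned long end   = 0xf0100000UL;	/* ends mid-PGD */

		/* Rounds down: the partially covered entry is missed. */
		printf("unaligned: %lu entries\n",
		       pgd_index(end) - pgd_index(start));
		/* Aligning 'end' up includes it. */
		printf("aligned:   %lu entries\n",
		       pgd_index(ALIGN(end, PGDIR_SIZE)) - pgd_index(start));
		return 0;	/* prints 0 entries, then 1 entry */
	}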