On Thu, Sep 17, 2020 at 03:53:33PM +0000, Sasha Levin wrote:
> Hi
>
> [This is an automated email]
>
> This commit has been processed because it contains a "Fixes:" tag fixing commit: 1a42010cdc26 ("s390/mm: convert to the generic get_user_pages_fast code").
>
> The bot has tested the following trees: v5.8.9, v5.4.65.
>
> v5.8.9: Build OK!
> v5.4.65: Failed to apply! Possible dependencies:
>     051a7a94aaa9 ("arm64: hibernate: use get_safe_page directly")
>     13373f0e6580 ("arm64: hibernate: rename dst to page in create_safe_exec_page")
>     48c963e31bc6 ("KVM: arm/arm64: Release kvm->mmu_lock in loop to prevent starvation")
>     68ecabd0e680 ("arm64/mm: Use phys_to_page() to access pgtable memory")
>     8a0af66b35f8 ("arm: mm: add p?d_leaf() definitions")
>     974b9b2c68f3 ("mm: consolidate pte_index() and pte_offset_*() definitions")
>     a2c2e67923ec ("arm64: hibernate: add trans_pgd public functions")
>     a89d7ff933b0 ("arm64: hibernate: remove gotos as they are not needed")
>     d234332c2815 ("arm64: hibernate: pass the allocated pgdp to ttbr0")
>     e9f6376858b9 ("arm64: add support for folded p4d page tables")
>
> NOTE: The patch will not be queued to stable trees until it is upstream.
>
> How should we proceed with this patch?
The patch contains "Cc: stable@vger.kernel.org [5.2+]", so a stable backport to v5.4.y is also desired once it is upstream. The patch is self-contained and has no dependencies.
 arch/s390/include/asm/pgtable.h | 42 +++++++++++++++++++++++----------
 include/asm-generic/pgtable.h   | 10 ++++++++
 mm/gup.c                        | 18 +++++++-------
 3 files changed, 49 insertions(+), 21 deletions(-)
There are two trivial merge conflicts, both in include/asm-generic/pgtable.h:

1. Commit ca5999fde0a1 ("mm: introduce include/linux/pgtable.h") renamed include/asm-generic/pgtable.h to include/linux/pgtable.h in v5.8.

2. Commit 93fab1b22ef7 ("mm: add generic p?d_leaf() macros") introduced the p?d_leaf() macros after mm_pmd_folded in v5.6, so the bottom context of the hunk does not match here:
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e8cbc2e795d5..90654cb63e9e 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1427,6 +1427,16 @@ typedef unsigned int pgtbl_mod_mask;
 #define mm_pmd_folded(mm)        __is_defined(__PAGETABLE_PMD_FOLDED)
 #endif
 
+#ifndef p4d_offset_lockless
+#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
+#endif
+#ifndef pud_offset_lockless
+#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
+#endif
+#ifndef pmd_offset_lockless
+#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
+#endif
+
 /*
  * p?d_leaf() - true if this entry is a final mapping to a physical address.
  * This differs from p?d_huge() by the fact that they are always available (if
A patch for a stable-5.4.y backport is sent as a follow-on.
Thank you,
Vasily
Currently, to make sure that every page table entry is read just once, the gup_fast walks perform READ_ONCE and pass the pXd value down to the next gup_pXd_range function by value, e.g.:
static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
                         unsigned int flags, struct page **pages, int *nr)
...
        pudp = pud_offset(&p4d, addr);
This function passes a pointer to that local value copy to pXd_offset, and might get the very same pointer back in return. This happens when the level is folded (on most architectures), and such a pointer must not be iterated over.
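For reference, a statically folded level boils down to handing back the very pointer it was given. The snippet below is only a sketch modeled on the generic folded helpers (e.g. include/asm-generic/pgtable-nopud.h), simplified and not quoted verbatim:

/*
 * Sketch of a statically folded pud level (modeled on the generic
 * pgtable-nopud.h helper; simplified, not verbatim kernel source).
 * pud_offset() returns the p4d pointer it was given, so the caller's
 * &p4d - a pointer to its local on-stack copy - comes straight back.
 */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
        return (pud_t *)p4d;
}

With static folding this is still harmless in gup_fast, because PUD_SIZE/PUD_MASK match the level above, so pud_addr_end() clamps next to end right away, the loop body runs exactly once, and the pointer to the local copy is never advanced.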
On s390, because each task might use 5-, 4- or 3-level address translation, and hence have different levels folded, the logic is more complex, and iterating over such a pointer to a local copy leads to severe problems.
Here is an example of what happens with gup_fast on s390, for a task with 3-level paging, crossing a 2 GB pud boundary:

// addr = 0x1007ffff000, end = 0x10080001000
static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
                         unsigned int flags, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        // pud_offset returns &p4d itself (a pointer to a value on the stack)
        pudp = pud_offset(&p4d, addr);
        do {
                // on the second iteration this reads a "random" stack value
                pud_t pud = READ_ONCE(*pudp);

                // next = 0x10080000000, due to PUD_SIZE/MASK != PGDIR_SIZE/MASK on s390
                next = pud_addr_end(addr, end);
                ...
        } while (pudp++, addr = next, addr != end); // pudp++ iterating over the stack

        return 1;
}
This has been the case since s390 moved to the common gup code with commit d1874a0c2805 ("s390/mm: make the pxd_offset functions more robust") and commit 1a42010cdc26 ("s390/mm: convert to the generic get_user_pages_fast code"). s390 tried to mimic static level folding by changing its pXd_offset primitives to always calculate the top-level page table offset in pgd_offset and to just return the value passed in when pXd_offset has to act as folded.
What is crucial for gup_fast, and what has been overlooked, is that PxD_SIZE/MASK and thus pXd_addr_end should also change correspondingly. And the latter is not possible with dynamic folding.
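To make the size mismatch concrete, here is a small standalone sketch of the arithmetic, using a simplified form of the generic pud_addr_end() and the 2 GB PUD_SIZE taken from the example above (a userspace demo for illustration only, relying on the GNU statement-expression extension):

#include <stdio.h>

#define PUD_SIZE        (1UL << 31)             /* 2 GB, a build-time constant */
#define PUD_MASK        (~(PUD_SIZE - 1))
/* simplified form of the generic pud_addr_end() */
#define pud_addr_end(addr, end)                                          \
({      unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;       \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);               \
})

int main(void)
{
        unsigned long addr = 0x1007ffff000UL, end = 0x10080001000UL;
        unsigned long next = pud_addr_end(addr, end);

        /* prints next = 0x10080000000, i.e. next != end: the walk takes a
         * second iteration and pudp++ runs off the single on-stack copy */
        printf("next = 0x%lx, end = 0x%lx\n", next, end);
        return 0;
}

If the folded level's size really matched the level above, as it does with static folding, next would be clamped to end on the first pass and the bogus increment would never happen - exactly what dynamic folding cannot express through compile-time constants.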
To fix the issue, in addition to the pXd values, pass the original pXdp pointers down to the gup_pXd_range functions, and introduce pXd_offset_lockless helpers which take an additional pXd entry value parameter. This has already been discussed in https://lkml.kernel.org/r/20190418100218.0a4afd51@mschwideX1
Link: https://lkml.kernel.org/r/patch.git-943f1e5dcff2.your-ad-here.call-015998562...
Fixes: 1a42010cdc26 ("s390/mm: convert to the generic get_user_pages_fast code")
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: <stable@vger.kernel.org> [5.2+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 arch/s390/include/asm/pgtable.h | 42 +++++++++++++++++++++++----------
 include/asm-generic/pgtable.h   | 10 ++++++++
 mm/gup.c                        | 18 +++++++-------
 3 files changed, 49 insertions(+), 21 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 34a655ad7123..a03862579d6b 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1247,25 +1247,43 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
 {
-        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-                return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-        return (p4d_t *) pgd;
+        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+                return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+        return (p4d_t *) pgdp;
 }
+#define p4d_offset_lockless p4d_offset_lockless
 
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
 {
-        if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-                return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-        return (pud_t *) p4d;
+        return p4d_offset_lockless(pgdp, *pgdp, address);
 }
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
 {
-        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-                return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-        return (pmd_t *) pud;
+        if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+                return (pud_t *) p4d_deref(p4d) + pud_index(address);
+        return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+        return pud_offset_lockless(p4dp, *p4dp, address);
+}
+
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
+{
+        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+                return (pmd_t *) pud_deref(pud) + pmd_index(address);
+        return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+{
+        return pmd_offset_lockless(pudp, *pudp, address);
 }
 
 static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 818691846c90..1423f08c6ba9 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1187,4 +1187,14 @@ static inline bool arch_has_pfn_modify_check(void)
 #define mm_pmd_folded(mm)        __is_defined(__PAGETABLE_PMD_FOLDED)
 #endif
 
+#ifndef p4d_offset_lockless
+#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
+#endif
+#ifndef pud_offset_lockless
+#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
+#endif
+#ifndef pmd_offset_lockless
+#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/mm/gup.c b/mm/gup.c
index 4a8e969a6e59..3ef769529548 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2184,13 +2184,13 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
         return 1;
 }
 
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
                          unsigned int flags, struct page **pages, int *nr)
 {
         unsigned long next;
         pmd_t *pmdp;
 
-        pmdp = pmd_offset(&pud, addr);
+        pmdp = pmd_offset_lockless(pudp, pud, addr);
         do {
                 pmd_t pmd = READ_ONCE(*pmdp);
 
@@ -2227,13 +2227,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
         return 1;
 }
 
-static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
+static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
                          unsigned int flags, struct page **pages, int *nr)
 {
         unsigned long next;
         pud_t *pudp;
 
-        pudp = pud_offset(&p4d, addr);
+        pudp = pud_offset_lockless(p4dp, p4d, addr);
         do {
                 pud_t pud = READ_ONCE(*pudp);
 
@@ -2248,20 +2248,20 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
                         if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
                                          PUD_SHIFT, next, flags, pages, nr))
                                 return 0;
-                } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
+                } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
                         return 0;
         } while (pudp++, addr = next, addr != end);
 
         return 1;
 }
 
-static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
+static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
                          unsigned int flags, struct page **pages, int *nr)
 {
         unsigned long next;
         p4d_t *p4dp;
 
-        p4dp = p4d_offset(&pgd, addr);
+        p4dp = p4d_offset_lockless(pgdp, pgd, addr);
         do {
                 p4d_t p4d = READ_ONCE(*p4dp);
 
@@ -2273,7 +2273,7 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
                         if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
                                          P4D_SHIFT, next, flags, pages, nr))
                                 return 0;
-                } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
+                } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
                         return 0;
         } while (p4dp++, addr = next, addr != end);
 
@@ -2301,7 +2301,7 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
                         if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
                                          PGDIR_SHIFT, next, flags, pages, nr))
                                 return;
-                } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
+                } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
                         return;
         } while (pgdp++, addr = next, addr != end);
 }