6.6-stable review patch. If anyone has any objections, please let me know.
------------------
From: David Hildenbrand <david@redhat.com>
[ Upstream commit 188cac58a8bcdf82c7f63275b68f7a46871e45d6 ]
Sharing page tables between processes but falling back to per-MM page table locks cannot possibly work.
So, let's make sure that we do have split PMD locks by adding a new Kconfig option and letting that depend on CONFIG_SPLIT_PMD_PTLOCKS.
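To illustrate the locking argument (a sketch added for this backport, not part of the upstream change; shares_pmd_lock() is a hypothetical helper): with split PMD ptlocks, pmd_lockptr() resolves to a lock embedded in the page backing the PMD table, so every MM mapping a shared table serializes on the same lock, whereas the per-MM fallback resolves to &mm->page_table_lock, a different lock in each process:

	#include <linux/mm.h>

	/* Hypothetical helper, for illustration only. */
	static bool shares_pmd_lock(struct mm_struct *a, struct mm_struct *b,
				    pmd_t *pmd)
	{
		/*
		 * With CONFIG_SPLIT_PMD_PTLOCKS, pmd_lockptr() depends only
		 * on the page backing the PMD table, so both MMs resolve to
		 * the same lock. With the per-MM fallback it returns
		 * &a->page_table_lock vs. &b->page_table_lock -- two
		 * different locks for one shared page table, which cannot
		 * serialize anything.
		 */
		return pmd_lockptr(a, pmd) == pmd_lockptr(b, pmd);
	}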
Link: https://lkml.kernel.org/r/20240726150728.3159964-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: 59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/Kconfig              | 4 ++++
 include/linux/hugetlb.h | 5 ++---
 mm/hugetlb.c            | 8 ++++----
 3 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index 0ad3c7c7e984..02a9237807a7 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -284,6 +284,10 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 	depends on SPARSEMEM_VMEMMAP
 
+config HUGETLB_PMD_PAGE_TABLE_SHARING
+	def_bool HUGETLB_PAGE
+	depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
+
 config ARCH_HAS_GIGANTIC_PAGE
 	bool
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0c50c4fceb95..0ca93c7574ad 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1239,7 +1239,7 @@ static inline __init void hugetlb_cma_reserve(int order)
 }
 #endif
 
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 static inline bool hugetlb_pmd_shared(pte_t *pte)
 {
 	return page_count(virt_to_page(pte)) > 1;
@@ -1275,8 +1275,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
 static inline pte_t *
 hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
 {
-#if defined(CONFIG_HUGETLB_PAGE) && \
-	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
 	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 	/*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 92b955cc5a41..5b8cc558ab6e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6907,7 +6907,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 	return 0;
 }
 
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 static unsigned long page_table_shareable(struct vm_area_struct *svma,
 				struct vm_area_struct *vma,
 				unsigned long addr, pgoff_t idx)
@@ -7069,7 +7069,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	return 1;
 }
 
-#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
 
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, pud_t *pud)
@@ -7092,7 +7092,7 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 {
 	return false;
 }
-#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
 
 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -7190,7 +7190,7 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 /* See description above. Architectures can provide their own version. */
 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 	if (huge_page_size(h) == PMD_SIZE)
 		return PUD_SIZE - PMD_SIZE;
 #endif