Let's add an optimized way to check "page_trans_huge_mapcount() > 1" that is allowed to break out of the loop early, as soon as we know that the page is shared.
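The idea is that page_trans_huge_mapcount() has to visit every subpage to compute a precise mapcount, while a plain "is it shared?" question can be answered as soon as any mapcount exceeds 1. The following is a minimal userspace sketch of that idea only, not kernel code: anon_thp_shared(), head_mapcount and subpage_mapcount[] are hypothetical stand-ins for the compound mapcount and the per-subpage _mapcount values, and the PageDoubleMap() adjustment is omitted for brevity.

  #include <stdbool.h>
  #include <stddef.h>

  /*
   * Simplified model of the check: return as soon as we know the page
   * is mapped more than once, instead of counting all mappings.
   */
  static bool anon_thp_shared(int head_mapcount,
  			      const int *subpage_mapcount, size_t nr_pages)
  {
  	if (head_mapcount > 1)
  		return true;

  	for (size_t i = 0; i < nr_pages; i++) {
  		if (subpage_mapcount[i] + head_mapcount > 1)
  			return true;	/* shared: no need to look further */
  	}
  	return false;
  }

  int main(void)
  {
  	int exclusive[4] = { 0, 0, 0, 0 };	/* only the PMD mapping */
  	int shared[4]    = { 0, 1, 0, 0 };	/* one subpage mapped again */

  	if (anon_thp_shared(1, exclusive, 4))
  		return 1;
  	if (!anon_thp_shared(1, shared, 4))
  		return 1;
  	return 0;
  }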
This commit is based on a prototype patch by Andrea.
Co-developed-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/huge_mm.h |  7 +++++++
 mm/gup.c                |  2 +-
 mm/huge_memory.c        | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 42 insertions(+), 1 deletion(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 44e02d47c65a..3a9d8cf64219 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -361,6 +361,8 @@ static inline void thp_mapcount_unlock(struct page *page,
 	local_irq_restore(irq_flags);
 }
 
+extern bool page_trans_huge_anon_shared(struct page *page);
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -532,6 +534,11 @@ static inline void thp_mapcount_unlock(struct page *page,
 {
 }
 
+static inline bool page_trans_huge_anon_shared(struct page *page)
+{
+	return false;
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /**
diff --git a/mm/gup.c b/mm/gup.c
index 35d1b28e3829..496575ff9ac8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -70,7 +70,7 @@ bool gup_must_unshare(unsigned int flags, struct page *page, bool is_head)
 		return __page_mapcount(page) > 1;
 	if (is_head) {
 		VM_BUG_ON(!PageTransHuge(page));
-		return page_trans_huge_mapcount(page, NULL) > 1;
+		return page_trans_huge_anon_shared(page);
 	}
 	return page_mapcount(page) > 1;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 57842e8b13d4..dced82274f1d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1281,6 +1281,40 @@ void huge_pmd_set_accessed(struct vm_fault *vmf)
 	spin_unlock(vmf->ptl);
 }
 
+
+static bool __page_trans_huge_anon_shared(struct page *page)
+{
+	int i, mapcount;
+
+	mapcount = head_compound_mapcount(page);
+	if (mapcount > 1)
+		return true;
+	if (PageDoubleMap(page))
+		mapcount -= 1;
+	for (i = 0; i < thp_nr_pages(page); i++) {
+		if (atomic_read(&page[i]._mapcount) + mapcount + 1 > 1)
+			return true;
+	}
+	return false;
+}
+
+/* A lightweight check corresponding to "page_trans_huge_mapcount() > 1". */
+bool page_trans_huge_anon_shared(struct page *page)
+{
+	unsigned int seqcount;
+	bool shared;
+
+	VM_BUG_ON_PAGE(PageHuge(page) || PageTail(page), page);
+	VM_BUG_ON_PAGE(!PageAnon(page) || !PageTransHuge(page), page);
+
+	do {
+		seqcount = thp_mapcount_read_begin(page);
+		shared = __page_trans_huge_anon_shared(page);
+	} while (thp_mapcount_read_retry(page, seqcount));
+
+	return shared;
+}
+
 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;