Let's consolidate the mapcount logic to make it easier to understand and to prepare for further changes: instead of incrementing the sub-page mapcounts once while setting up the PTEs and a second time when first setting PageDoubleMap(), do all sub-page mapcount accounting in a single loop after the PTEs have been set up.
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/huge_memory.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
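Not part of the patch itself: as background for the hunk below, here is a minimal, self-contained userspace sketch of the consolidated accounting. Every sub-page mapcount is raised once for the freshly created PTE mappings, and once more by whoever first marks the page as double-mapped. The plain int/bool state, the fixed HPAGE_PMD_NR value and the helper name account_subpage_mappings() are illustrative stand-ins, not kernel APIs; the real code operates on struct page, atomic_t and page flags as shown in the diff.

/*
 * Illustrative sketch only (not kernel code): models the consolidated
 * sub-page mapcount accounting that the hunk below implements.
 */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_NR 512

static int subpage_mapcount[HPAGE_PMD_NR];	/* models page[i]._mapcount */
static bool page_double_map;			/* models PageDoubleMap() */

static void account_subpage_mappings(int compound_mapcount)
{
	/* Each sub-page gains one mapping from the freshly created PTEs. */
	int val = 1;
	int i;

	/*
	 * The first caller to mark the page as double-mapped also has to
	 * raise every sub-page mapcount by one, mirroring the
	 * TestSetPageDoubleMap() branch in the patch.
	 */
	if (compound_mapcount > 1 && !page_double_map) {
		page_double_map = true;
		val++;
	}

	/* One pass over all sub-pages instead of two separate increments. */
	for (i = 0; i < HPAGE_PMD_NR; i++)
		subpage_mapcount[i] += val;
}

int main(void)
{
	account_subpage_mappings(2);	/* PMD mapped twice: val becomes 2 */
	printf("subpage 0 mapcount: %d, double map: %d\n",
	       subpage_mapcount[0], page_double_map);
	return 0;
}

Folding the two increments into a single addition per sub-page keeps the accounting in one place, which is what the hunk below does with "val" and atomic_add().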
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5483347291c..4751d03947da 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2101,21 +2101,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		pte = pte_offset_map(&_pmd, addr);
 		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, addr, pte, entry);
-		if (!pmd_migration)
-			atomic_inc(&page[i]._mapcount);
 		pte_unmap(pte);
 	}
 
 	if (!pmd_migration) {
+		/* Sub-page mapcount accounting for above small mappings. */
+		int val = 1;
+
 		/*
 		 * Set PG_double_map before dropping compound_mapcount to avoid
 		 * false-negative page_mapped().
+		 *
+		 * The first to set PageDoubleMap() has to increment all
+		 * sub-page mapcounts by one.
 		 */
-		if (compound_mapcount(page) > 1 &&
-		    !TestSetPageDoubleMap(page)) {
-			for (i = 0; i < HPAGE_PMD_NR; i++)
-				atomic_inc(&page[i]._mapcount);
-		}
+		if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page))
+			val++;
+
+		for (i = 0; i < HPAGE_PMD_NR; i++)
+			atomic_add(val, &page[i]._mapcount);
 
 		lock_page_memcg(page);
 		if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {