diff --git a/include/linux/mm.h b/include/linux/mm.h
index 978c17df053e..37091f8a6a12 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3463,5 +3463,6 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
  * default, the flag is not set.
  */
 #define  ZAP_FLAG_DROP_MARKER		((__force zap_flags_t) BIT(0))
+#define  ZAP_FLAG_UNMAP			((__force zap_flags_t) BIT(1))
 
 #endif /* _LINUX_MM_H */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4a8c8456555e..245954d85553 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5201,8 +5201,10 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 static void __unmap_hugepage_range_locking(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page,
-			  zap_flags_t zap_flags, bool final)
+			  zap_flags_t zap_flags)
 {
+	bool final = zap_flags & ZAP_FLAG_UNMAP;
+
 	hugetlb_vma_lock_write(vma);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 
@@ -5232,7 +5234,7 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 			  zap_flags_t zap_flags)
 {
 	__unmap_hugepage_range_locking(tlb, vma, start, end, ref_page,
-					zap_flags, true);
+					zap_flags);
 }
 
 #ifdef CONFIG_ADVISE_SYSCALLS
@@ -5252,7 +5254,7 @@ void clear_hugetlb_page_range(struct vm_area_struct *vma, unsigned long start,
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 
-	__unmap_hugepage_range_locking(&tlb, vma, start, end, NULL, 0, false);
+	__unmap_hugepage_range_locking(&tlb, vma, start, end, NULL, 0);
 
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
diff --git a/mm/memory.c b/mm/memory.c
index c5599a9279b1..679b702af4ce 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1671,7 +1671,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 {
 	struct mmu_notifier_range range;
 	struct zap_details details = {
-		.zap_flags = ZAP_FLAG_DROP_MARKER,
+		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
 		/* Careful - we need to zap private pages too! */
 		.even_cows = true,
 	};