Hi Jann,
On 2024/10/8 05:42, Jann Horn wrote:
[...]
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 24712f8dbb6b..dda09e957a5d 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -238,6 +238,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
>  {
>  	spinlock_t *old_ptl, *new_ptl;
>  	struct mm_struct *mm = vma->vm_mm;
> +	bool res = false;
>  	pmd_t pmd;
>  
>  	if (!arch_supports_page_table_move())
> @@ -277,19 +278,25 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
>  	if (new_ptl != old_ptl)
>  		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
>  
> -	/* Clear the pmd */
>  	pmd = *old_pmd;
> +
> +	/* Racing with collapse? */
> +	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
Since we already hold the exclusive mmap lock here, once a race with
collapse has occurred, the pmd entry cannot be refilled with new content
by a page fault. So maybe we only need to recheck pmd_none(pmd) here?
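Something like the following untested sketch, in place of the
pmd_present()/pmd_leaf() check above (just to illustrate the idea;
the narrower pmd_none() variant is my assumption of how it would look):

	/*
	 * Hypothetical narrower recheck: with the mmap lock held
	 * exclusively, a cleared entry cannot be refilled by a page
	 * fault, so only the empty case would need detecting here.
	 */
	if (unlikely(pmd_none(pmd)))
		goto out_unlock;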
Thanks,
Qi
> +		goto out_unlock;
> +	/* Clear the pmd */
>  	pmd_clear(old_pmd);
> +	res = true;
>  
>  	VM_BUG_ON(!pmd_none(*new_pmd));
>  
>  	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
>  	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
> +out_unlock:
>  	if (new_ptl != old_ptl)
>  		spin_unlock(new_ptl);
>  	spin_unlock(old_ptl);
>  
> -	return true;
> +	return res;
>  }
>  #else
>  static inline bool move_normal_pmd(struct vm_area_struct *vma,
> 
> base-commit: 8cf0b93919e13d1e8d4466eb4080a4c4d9d66d7b
> change-id: 20241007-move_normal_pmd-vs-collapse-fix-2-387e9a68c7d6