Let's add walk_page_range_vma(), which is similar to walk_page_vma() but only walks a subset of the VMA range.

It will be used in KSM code next, to stop using follow_page().
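For reference, a minimal sketch of how a caller might use the new
interface (hypothetical; the count_pte() callback and count_ptes_ops
are illustrative only, and the caller is assumed to hold the mmap lock,
as the function asserts):

	static int count_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		/* Count present PTEs in the walked subrange. */
		if (pte_present(*pte))
			(*count)++;
		return 0;
	}

	static const struct mm_walk_ops count_ptes_ops = {
		.pte_entry	= count_pte,
	};

	unsigned long count = 0;

	/* walk_page_range_vma() asserts that the mmap lock is held. */
	mmap_read_lock(vma->vm_mm);
	walk_page_range_vma(vma, start, end, &count_ptes_ops, &count);
	mmap_read_unlock(vma->vm_mm);

In contrast to walk_page_range(), start and end must lie within the
single given VMA, and the walk is skipped entirely (returning 0) if
ops->test_walk rejects the VMA.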
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/pagewalk.h |  3 +++
 mm/pagewalk.c            | 27 +++++++++++++++++++++++++++
 2 files changed, 30 insertions(+)
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index f3fafb731ffd..2f8f6cc980b4 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -99,6 +99,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 			  unsigned long end, const struct mm_walk_ops *ops,
 			  pgd_t *pgd,
 			  void *private);
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, const struct mm_walk_ops *ops,
+			void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 131b2b335b2c..757c075da231 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -514,6 +514,33 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 	return __walk_page_range(start, end, &walk);
 }
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, const struct mm_walk_ops *ops,
+			void *private)
+{
+	struct mm_walk walk = {
+		.ops		= ops,
+		.mm		= vma->vm_mm,
+		.vma		= vma,
+		.private	= private,
+	};
+	int err;
+
+	if (start >= end || !walk.mm)
+		return -EINVAL;
+	if (start < vma->vm_start || end > vma->vm_end)
+		return -EINVAL;
+
+	mmap_assert_locked(walk.mm);
+
+	err = walk_page_test(start, end, &walk);
+	if (err > 0)
+		return 0;
+	if (err < 0)
+		return err;
+	return __walk_page_range(start, end, &walk);
+}
+
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private)
 {