On 8/9/23 12:55 AM, Andrei Vagin wrote:
On Tue, Aug 8, 2023 at 12:35 PM Muhammad Usama Anjum usama.anjum@collabora.com wrote:
On 8/9/23 12:21 AM, Andrei Vagin wrote:
On Tue, Aug 8, 2023 at 3:43 AM Muhammad Usama Anjum usama.anjum@collabora.com wrote:
....
static int pagemap_scan_output(unsigned long categories,
			       struct pagemap_scan_private *p,
			       unsigned long addr, unsigned long *end)
{
	unsigned long n_pages, total_pages;
	int ret = 0;

	if (!p->vec_buf)
		return 0;

	categories &= p->arg.return_mask;

	n_pages = (*end - addr) / PAGE_SIZE;
	if (check_add_overflow(p->found_pages, n_pages, &total_pages) || //TODO
Need to fix this TODO.
Sorry, I forgot to remove the "//TODO". As far as I understood, the last discussion ended with keeping the check_add_overflow() [1]. I'll just remove the TODO comment; a rough sketch of the clamping it guards follows the quoted function below.

[1] https://lore.kernel.org/all/CABb0KFEfmRz+Z_-7GygTL12E5Y254dvoUfWe4uSv9-wOx+C...
	    total_pages > p->arg.max_pages) {
		size_t n_too_much = total_pages - p->arg.max_pages;

		*end -= n_too_much * PAGE_SIZE;
		n_pages -= n_too_much;
		ret = -ENOSPC;
	}

	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
		*end = addr;
		n_pages = 0;
		ret = -ENOSPC;
	}

	p->found_pages += n_pages;
	if (ret)
		p->walk_end_addr = *end;

	return ret;
}
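For reference, here is a rough, userspace-only sketch of the clamping arithmetic that the check guards (clamp_output() and the sample numbers are made up for illustration; the kernel's check_add_overflow() boils down to the same compiler builtin used here). With a cap of 6 pages and a 10-page candidate range, the range is trimmed and -ENOSPC tells the caller to stop:

/* Userspace illustration only -- not kernel code. */
#include <errno.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Mimics the trimming in pagemap_scan_output(): never account more than
 * max_pages pages in total; a wrapped addition counts as "over the cap".
 */
static int clamp_output(unsigned long *found_pages, unsigned long max_pages,
			unsigned long addr, unsigned long *end)
{
	unsigned long n_pages = (*end - addr) / PAGE_SIZE;
	unsigned long total_pages;
	int ret = 0;

	if (__builtin_add_overflow(*found_pages, n_pages, &total_pages) ||
	    total_pages > max_pages) {
		unsigned long n_too_much = total_pages - max_pages;

		*end -= n_too_much * PAGE_SIZE;
		n_pages -= n_too_much;
		ret = -ENOSPC;
	}

	*found_pages += n_pages;
	return ret;
}

int main(void)
{
	unsigned long found = 0;
	unsigned long end = 10 * PAGE_SIZE;

	/* Cap of 6 pages, candidate range of 10 pages starting at 0. */
	int ret = clamp_output(&found, 6, 0, &end);

	/* Prints: ret=-28 found=6 end=0x6000 */
	printf("ret=%d found=%lu end=%#lx\n", ret, found, end);
	return 0;
}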
...
static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
{
	struct mmu_notifier_range range;
	struct pagemap_scan_private p;
	unsigned long walk_start;
	size_t n_ranges_out = 0;
	int ret;

	memset(&p, 0, sizeof(p));
	ret = pagemap_scan_get_args(&p.arg, uarg);
	if (ret)
		return ret;

	p.masks_of_interest = MASKS_OF_INTEREST(p.arg);

	ret = pagemap_scan_init_bounce_buffer(&p);
	if (ret)
		return ret;

	/* Protection change for the range is going to happen. */
	if (p.arg.flags & PM_SCAN_WP_MATCHING) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
					mm, p.arg.start, p.arg.end);
		mmu_notifier_invalidate_range_start(&range);
	}

	walk_start = p.arg.start;
	for (; walk_start < p.arg.end; walk_start = p.arg.walk_end) {
		int n_out;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret)
			break;
		ret = walk_page_range(mm, walk_start, p.arg.end,
				      &pagemap_scan_ops, &p);
		mmap_read_unlock(mm);

		n_out = pagemap_scan_flush_buffer(&p);
		if (n_out < 0)
			ret = n_out;
		else
			n_ranges_out += n_out;

		if (ret != -ENOSPC || p.arg.vec_len - 1 == 0 ||
		    p.found_pages == p.arg.max_pages) {
			p.walk_end_addr = p.arg.end;
You should not change p.walk_end_addr if ret is ENOSPC. Please add a test case to check this.

Yeah, I'm not setting walk_end_addr when ret is ENOSPC; I set it only when ret is 0. I had added this as the result of a test case in my local test application. I can look at adding some tests to the pagemap_ioctl.c kselftest as well.

I am not sure that I understand what you mean here. ENOSPC can be returned when the vec array is full, and in that case walk_end_addr should be the address at which the scan stopped.

I'll copy a test case into the kselftest to prove or disprove the correctness of the walk_end address; a rough sketch of what I have in mind is at the end of this mail.
			break;
		}
	}

	if (p.cur_buf.start != p.cur_buf.end) {
		if (copy_to_user(p.vec_out, &p.cur_buf, sizeof(p.cur_buf)))
			ret = -EFAULT;
		else
			++n_ranges_out;
	}

	/* ENOSPC signifies early stop (buffer full) from the walk. */
	if (!ret || ret == -ENOSPC)
		ret = n_ranges_out;

	p.arg.walk_end = p.walk_end_addr ? p.walk_end_addr : walk_start;
	if (pagemap_scan_writeback_args(&p.arg, uarg))
		ret = -EFAULT;

	if (p.arg.flags & PM_SCAN_WP_MATCHING)
		mmu_notifier_invalidate_range_end(&range);

	kfree(p.vec_buf);
	return ret;
}
Thanks, Andrei
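For the walk_end check, something along the following lines is what I have in mind before folding it into pagemap_ioctl.c. It is a rough standalone sketch with error handling trimmed; it assumes the UAPI names from this series (PAGEMAP_SCAN, struct pm_scan_arg, struct page_region, PAGE_IS_PRESENT) and made-up sizes. It faults in every other page so that the present pages form several disjoint ranges, scans with vec_len == 1 so the vec fills after the first range, and then asserts that walk_end points at where the scan actually stopped rather than at arg.end, and that resuming from walk_end picks up the next range:

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fs.h>	/* PAGEMAP_SCAN UAPI from this series */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int npages = 8;
	struct page_region vec;
	struct pm_scan_arg arg;
	char *mem;
	long ret;
	int fd;

	mem = mmap(NULL, npages * page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	fd = open("/proc/self/pagemap", O_RDONLY);
	assert(mem != MAP_FAILED && fd >= 0);

	/* Fault in pages 0, 2, 4, 6 -> four disjoint "present" ranges. */
	for (int i = 0; i < npages; i += 2)
		mem[i * page] = 1;

	memset(&arg, 0, sizeof(arg));
	arg.size = sizeof(arg);
	arg.start = (unsigned long)mem;
	arg.end = (unsigned long)mem + npages * page;
	arg.vec = (unsigned long)&vec;
	arg.vec_len = 1;		/* force the vec to fill early */
	arg.max_pages = npages;
	arg.category_mask = PAGE_IS_PRESENT;
	arg.return_mask = PAGE_IS_PRESENT;

	/* First call: only one range fits, so the walk must stop early. */
	ret = ioctl(fd, PAGEMAP_SCAN, &arg);
	assert(ret == 1);
	assert(arg.walk_end > arg.start && arg.walk_end < arg.end);

	/* Second call: resuming from walk_end must find the next range. */
	arg.start = arg.walk_end;
	ret = ioctl(fd, PAGEMAP_SCAN, &arg);
	assert(ret == 1);
	assert(vec.start >= (unsigned long)mem + 2 * page);

	printf("walk_end is consistent across a resumed scan\n");
	close(fd);
	munmap(mem, npages * page);
	return 0;
}

If the vec-full path really does end up reporting arg.end as walk_end, the first assert should catch it, so this should settle the question either way.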
--
BR,
Muhammad Usama Anjum