Use per-vma locks to stabilize the vma after lookup instead of taking mmap_lock during PROCMAP_QUERY ioctl execution. If the vma lock is contended, fall back to mmap_lock, but take it only momentarily to lock the vma and then release the mmap_lock. In the very unlikely case of vm_refcnt overflow, this fallback path will fail as well and the ioctl is done under mmap_lock protection.
This change is designed to reduce mmap_lock contention and prevent PROCMAP_QUERY ioctl calls from blocking address space updates.
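The resulting lookup path (CONFIG_PER_VMA_LOCK=y case) is sketched below with comments marking the three locking outcomes; this is an annotated copy of query_vma_find_by_addr() from the diff, not additional code:

  static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_query_data *query,
                                                        unsigned long addr)
  {
          struct vm_area_struct *vma;
          struct vma_iterator vmi;

          unlock_vma(query);              /* drop the vma locked on the previous call */
          rcu_read_lock();
          vma_iter_init(&vmi, query->mm, addr);
          /* Takes the vma lock; if it is contended, mmap_lock is taken only momentarily. */
          vma = lock_next_vma(query->mm, &vmi, addr);
          rcu_read_unlock();

          if (!IS_ERR_OR_NULL(vma)) {
                  query->locked_vma = vma;        /* common case: vma read-locked, no mmap_lock held */
          } else if (PTR_ERR(vma) == -EAGAIN) {
                  /* vm_refcnt overflow: keep mmap_lock for the rest of the ioctl */
                  mmap_read_lock(query->mm);
                  vma = find_vma(query->mm, addr);
                  query->mmap_locked = true;
          }

          return vma;
  }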
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 fs/proc/task_mmu.c | 81 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 65 insertions(+), 16 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 509fa162760a..b504b798e8fe 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -517,28 +517,78 @@ static int pid_maps_open(struct inode *inode, struct file *file)
                 PROCMAP_QUERY_VMA_FLAGS                                \
 )
 
-static int query_vma_setup(struct mm_struct *mm)
+#ifdef CONFIG_PER_VMA_LOCK
+
+static int query_vma_setup(struct proc_maps_query_data *query)
 {
-        return mmap_read_lock_killable(mm);
+        query->locked_vma = NULL;
+        query->mmap_locked = false;
+
+        return 0;
 }
 
-static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
+static void query_vma_teardown(struct proc_maps_query_data *query)
 {
-        mmap_read_unlock(mm);
+        if (query->mmap_locked)
+                mmap_read_unlock(query->mm);
+        else
+                unlock_vma(query);
 }
 
-static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
+static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_query_data *query,
+                                                     unsigned long addr)
 {
-        return find_vma(mm, addr);
+        struct vm_area_struct *vma;
+        struct vma_iterator vmi;
+
+        unlock_vma(query);
+        rcu_read_lock();
+        vma_iter_init(&vmi, query->mm, addr);
+        vma = lock_next_vma(query->mm, &vmi, addr);
+        rcu_read_unlock();
+
+        if (!IS_ERR_OR_NULL(vma)) {
+                query->locked_vma = vma;
+        } else if (PTR_ERR(vma) == -EAGAIN) {
+                /* Fallback to mmap_lock on vma->vm_refcnt overflow */
+                mmap_read_lock(query->mm);
+                vma = find_vma(query->mm, addr);
+                query->mmap_locked = true;
+        }
+
+        return vma;
 }
 
-static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
+#else /* CONFIG_PER_VMA_LOCK */
+
+static int query_vma_setup(struct proc_maps_query_data *query)
+{
+        return mmap_read_lock_killable(query->mm);
+}
+
+static void query_vma_teardown(struct proc_maps_query_data *query)
+{
+        mmap_read_unlock(query->mm);
+}
+
+static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_query_data *query,
+                                                     unsigned long addr)
+{
+        return find_vma(query->mm, addr);
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
+
+static struct vm_area_struct *query_matching_vma(struct proc_maps_query_data *query,
                                                  unsigned long addr, u32 flags)
 {
         struct vm_area_struct *vma;
 
 next_vma:
-        vma = query_vma_find_by_addr(mm, addr);
+        vma = query_vma_find_by_addr(query, addr);
+        if (IS_ERR(vma))
+                return vma;
+
         if (!vma)
                 goto no_vma;
 
@@ -579,11 +629,11 @@ static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
         return ERR_PTR(-ENOENT);
 }
 
-static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
+static int do_procmap_query(struct mm_struct *mm, void __user *uarg)
 {
+        struct proc_maps_query_data query = { .mm = mm };
         struct procmap_query karg;
         struct vm_area_struct *vma;
-        struct mm_struct *mm;
         const char *name = NULL;
         char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
         __u64 usize;
@@ -610,17 +660,16 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
         if (!!karg.build_id_size != !!karg.build_id_addr)
                 return -EINVAL;
 
-        mm = priv->query.mm;
         if (!mm || !mmget_not_zero(mm))
                 return -ESRCH;
 
-        err = query_vma_setup(mm);
+        err = query_vma_setup(&query);
         if (err) {
                 mmput(mm);
                 return err;
         }
 
-        vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
+        vma = query_matching_vma(&query, karg.query_addr, karg.query_flags);
         if (IS_ERR(vma)) {
                 err = PTR_ERR(vma);
                 vma = NULL;
@@ -705,7 +754,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
         }
 
         /* unlock vma or mmap_lock, and put mm_struct before copying data to user */
-        query_vma_teardown(mm, vma);
+        query_vma_teardown(&query);
         mmput(mm);
 
         if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
@@ -725,7 +774,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
         return 0;
 
 out:
-        query_vma_teardown(mm, vma);
+        query_vma_teardown(&query);
         mmput(mm);
         kfree(name_buf);
         return err;
@@ -738,7 +787,7 @@ static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned l
 
         switch (cmd) {
         case PROCMAP_QUERY:
-                return do_procmap_query(priv, (void __user *)arg);
+                return do_procmap_query(priv->query.mm, (void __user *)arg);
         default:
                 return -ENOIOCTLCMD;
         }
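For context, below is a minimal userspace sketch of a PROCMAP_QUERY caller that benefits from this change. It is illustrative only and not part of the patch; it assumes a uapi <linux/fs.h> that already exports PROCMAP_QUERY and struct procmap_query.

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/fs.h>

  int main(void)
  {
          struct procmap_query q;
          char name[256];
          int fd, ret;

          fd = open("/proc/self/maps", O_RDONLY);
          if (fd < 0) {
                  perror("open(/proc/self/maps)");
                  return 1;
          }

          memset(&q, 0, sizeof(q));
          q.size = sizeof(q);
          /* Look up the VMA covering main(), i.e. this program's text mapping. */
          q.query_addr = (__u64)(uintptr_t)&main;
          q.query_flags = 0;
          /* Ask the kernel to copy the backing file name, if any. */
          q.vma_name_addr = (__u64)(uintptr_t)name;
          q.vma_name_size = sizeof(name);

          ret = ioctl(fd, PROCMAP_QUERY, &q);
          if (ret == 0)
                  printf("%llx-%llx %s\n",
                         (unsigned long long)q.vma_start,
                         (unsigned long long)q.vma_end,
                         q.vma_name_size ? name : "[anon]");
          else
                  perror("PROCMAP_QUERY");

          close(fd);
          return ret ? 1 : 0;
  }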