On 10/11/21 18:52, Suren Baghdasaryan wrote:
...
> [surenb: backport notes
> Since gup_pgd_range does not exist, made the equivalent changes to
> the gup_huge_pgd, gup_huge_pd and gup_pud_range calls instead.
> Replaced (gup_flags | FOLL_WRITE) with write=1 in gup_huge_pgd,
> gup_huge_pd and gup_pud_range.
> Removed FOLL_PIN usage in should_force_cow_break since it's missing in
> the earlier kernels.]
This backport looks accurate. At first I thought you had missed the
comment-only change to i915_gem_userptr.c, but then I noticed that the
older branch still uses non-fast gup, so it's not applicable there after
all. :)
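
One note for anyone comparing against upstream: the write=1 substitution
works out because the 4.4 fast-GUP helpers still take an int write rather
than gup_flags. Passing 1 unconditionally makes the fast path refuse any
pte that is not already writable, so read-only COW targets fall back to
the slow path, where should_force_cow_break() does the forced break. From
memory (so double-check against the actual 4.4 tree), the check in
gup_pte_range() is something along these lines:

	/*
	 * With write forced to 1 by the caller, any pte that is not
	 * already writable fails this test and we bail out of the
	 * fast path; the slow path then applies should_force_cow_break().
	 */
	if (!pte_present(pte) || pte_special(pte) ||
	    (write && !pte_write(pte)))
		goto pte_unmap;

which makes the backport the moral equivalent of upstream's
gup_flags | FOLL_WRITE.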
Agree with others that this whole area is still shaky, but it does sound as if
this will help.
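
For readers who want to trace should_force_cow_break() further: it leans
on is_cow_mapping(), which in trees of this vintage is the usual
one-liner (quoting from memory, so verify against your branch):

	/* A mapping can COW iff it is writable but not shared. */
	static inline bool is_cow_mapping(vm_flags_t flags)
	{
		return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
	}

i.e. the forced break only ever triggers on private mappings that can
COW, which is exactly the ambiguous case the new comments describe.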
--
John Hubbard
NVIDIA
> Reported-by: Jann Horn <jannh@google.com>
> Tested-by: Christoph Hellwig <hch@lst.de>
> Acked-by: Oleg Nesterov <oleg@redhat.com>
> Acked-by: Kirill Shutemov <kirill@shutemov.name>
> Acked-by: Jan Kara <jack@suse.cz>
> Cc: Andrea Arcangeli <aarcange@redhat.com>
> Cc: Matthew Wilcox <willy@infradead.org>
> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
> [surenb: backport to 4.4 kernel]
> Cc: stable@vger.kernel.org # 4.4.x
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  mm/gup.c         | 48 ++++++++++++++++++++++++++++++++++++++++--------
>  mm/huge_memory.c |  7 +++----
>  2 files changed, 43 insertions(+), 12 deletions(-)
>
> diff --git a/mm/gup.c b/mm/gup.c
> index 4c5857889e9d..c80cdc408228 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -59,13 +59,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
>  }
>
>  /*
> - * FOLL_FORCE can write to even unwritable pte's, but only
> - * after we've gone through a COW cycle and they are dirty.
> + * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
> + * but only after we've gone through a COW cycle and they are dirty.
>   */
>  static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
>  {
> -	return pte_write(pte) ||
> -		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
> +	return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
> +}
> +
> +/*
> + * A (separate) COW fault might break the page the other way and
> + * get_user_pages() would return the page from what is now the wrong
> + * VM. So we need to force a COW break at GUP time even for reads.
> + */
> +static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
> +{
> +	return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
>  }
>
>  static struct page *follow_page_pte(struct vm_area_struct *vma,
> @@ -509,12 +518,18 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
>  			if (!vma || check_vma_flags(vma, gup_flags))
>  				return i ? : -EFAULT;
>  			if (is_vm_hugetlb_page(vma)) {
> +				if (should_force_cow_break(vma, foll_flags))
> +					foll_flags |= FOLL_WRITE;
>  				i = follow_hugetlb_page(mm, vma, pages, vmas,
>  						&start, &nr_pages, i,
> -						gup_flags);
> +						foll_flags);
>  				continue;
>  			}
>  		}
> +
> +		if (should_force_cow_break(vma, foll_flags))
> +			foll_flags |= FOLL_WRITE;
> +
>  retry:
>  		/*
>  		 * If we have a pending SIGKILL, don't keep faulting pages and
> @@ -1346,6 +1361,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
>  /*
>   * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
>   * the regular GUP. It will only return non-negative values.
> + *
> + * Careful, careful! COW breaking can go either way, so a non-write
> + * access can get ambiguous page results. If you call this function without
> + * 'write' set, you'd better be sure that you're ok with that ambiguity.
>   */
>  int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
>  			  struct page **pages)
> @@ -1375,6 +1394,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
>  	 *
>  	 * We do not adopt an rcu_read_lock(.) here as we also want to
>  	 * block IPIs that come from THPs splitting.
> +	 *
> +	 * NOTE! We allow read-only gup_fast() here, but you'd better be
> +	 * careful about possible COW pages. You'll get _a_ COW page, but
> +	 * not necessarily the one you intended to get depending on what
> +	 * COW event happens after this. COW may break the page copy in a
> +	 * random direction.
>  	 */
>
>  	local_irq_save(flags);
> @@ -1385,15 +1410,22 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
>  		next = pgd_addr_end(addr, end);
>  		if (pgd_none(pgd))
>  			break;
> +		/*
> +		 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
> +		 * because get_user_pages() may need to cause an early COW in
> +		 * order to avoid confusing the normal COW routines. So only
> +		 * targets that are already writable are safe to do by just
> +		 * looking at the page tables.
> +		 */
>  		if (unlikely(pgd_huge(pgd))) {
> -			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
> +			if (!gup_huge_pgd(pgd, pgdp, addr, next, 1,
>  					  pages, &nr))
>  				break;
>  		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
>  			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
> -					 PGDIR_SHIFT, next, write, pages, &nr))
> +					 PGDIR_SHIFT, next, 1, pages, &nr))
>  				break;
> -		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
> +		} else if (!gup_pud_range(pgd, addr, next, 1, pages, &nr))
>  			break;
>  	} while (pgdp++, addr = next, addr != end);
>  	local_irq_restore(flags);
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 6404e4fcb4ed..fae45c56e2ee 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1268,13 +1268,12 @@ out_unlock:
>  }
>
>  /*
> - * FOLL_FORCE can write to even unwritable pmd's, but only
> - * after we've gone through a COW cycle and they are dirty.
> + * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
> + * but only after we've gone through a COW cycle and they are dirty.
>   */
>  static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
>  {
> -	return pmd_write(pmd) ||
> -		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
> +	return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
>  }
>
>  struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
>