4.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: Dan Williams <dan.j.williams@intel.com>
commit a9b6de77b1a3ff729f7bfc54b2e17711776a416c upstream.
get_user_pages_fast() for device pages is missing the typical validation that all page references have been taken while the mapping was valid. Without this validation, truncate operations cannot reliably coordinate against new page reference events like O_DIRECT.
Cc: stable@vger.kernel.org
Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings")
Reported-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 mm/gup.c |   36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)
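Note for reviewers: the fix below takes the page references first and only then re-reads the live page-table entry, unwinding the references if the entry changed underneath (e.g. a racing truncate/unmap). The following is a minimal userspace sketch of that snapshot-pin-revalidate pattern, not kernel code; demo_entry, demo_page, demo_pin_range and demo_put_pages are purely illustrative names, not kernel APIs.

/*
 * Illustrative only -- models the check added to __gup_device_huge_pmd()
 * and __gup_device_huge_pud(): pin pages against a snapshot of the entry,
 * then re-read the live entry and roll back if it changed (for example
 * because a concurrent truncate unmapped the range).
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_page  { atomic_int refcount; };          /* stand-in for struct page    */
struct demo_entry { _Atomic unsigned long val; };    /* stand-in for a pmd/pud slot */

static void demo_put_pages(struct demo_page **pages, int start, int end)
{
	/* Drop the references taken optimistically (undo_dev_pagemap() analogue). */
	for (int i = start; i < end; i++)
		atomic_fetch_sub(&pages[i]->refcount, 1);
}

/*
 * 'orig' is the snapshot the caller read before walking the range;
 * 'entryp' points at the live entry, mirroring the new pmdp/pudp argument.
 */
static bool demo_pin_range(unsigned long orig, struct demo_entry *entryp,
			   struct demo_page **range, int count,
			   struct demo_page **pages, int *nr)
{
	int nr_start = *nr;

	for (int i = 0; i < count; i++) {
		atomic_fetch_add(&range[i]->refcount, 1);
		pages[(*nr)++] = range[i];
	}

	/* Entry changed while we were pinning: undo and report failure. */
	if (atomic_load(&entryp->val) != orig) {
		demo_put_pages(pages, nr_start, *nr);
		*nr = nr_start;
		return false;
	}
	return true;
}

This is also why the helpers gain a pmdp/pudp argument alongside the original value: the pointer is needed to re-read the live entry after the references have been elevated, with undo_dev_pagemap() providing the rollback.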
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1469,32 +1469,48 @@ static int __gup_device_huge(unsigned lo
 	return 1;
 }
 
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		unsigned long end, struct page **pages, int *nr)
 {
 	unsigned long fault_pfn;
+	int nr_start = *nr;
 
-	fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
+		return 0;
+
+	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
+		undo_dev_pagemap(nr, nr_start, pages);
+		return 0;
+	}
+	return 1;
 }
 
-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
 		unsigned long end, struct page **pages, int *nr)
 {
 	unsigned long fault_pfn;
+	int nr_start = *nr;
 
-	fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
+		return 0;
+
+	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
+		undo_dev_pagemap(nr, nr_start, pages);
+		return 0;
+	}
+	return 1;
 }
 #else
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		unsigned long end, struct page **pages, int *nr)
 {
 	BUILD_BUG();
 	return 0;
 }
 
-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
 		unsigned long end, struct page **pages, int *nr)
 {
 	BUILD_BUG();
@@ -1512,7 +1528,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_
 		return 0;
 
 	if (pmd_devmap(orig))
-		return __gup_device_huge_pmd(orig, addr, end, pages, nr);
+		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
 
 	refs = 0;
 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
@@ -1550,7 +1566,7 @@ static int gup_huge_pud(pud_t orig, pud_
 		return 0;
 
 	if (pud_devmap(orig))
-		return __gup_device_huge_pud(orig, addr, end, pages, nr);
+		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
 
 	refs = 0;
 	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);