On Fri, Jan 31, 2020 at 07:40:24PM -0800, John Hubbard wrote:
> @@ -4405,7 +4392,13 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
>  same_page:
>  		if (pages) {
>  			pages[i] = mem_map_offset(page, pfn_offset);
> -			get_page(pages[i]);
> +			if (!try_grab_page(pages[i], flags)) {
> +				spin_unlock(ptl);
> +				remainder = 0;
> +				err = -ENOMEM;
> +				WARN_ON_ONCE(1);

The WARN_ON_ONCE deserves a comment. And I guess you can fold it into the
'if' condition.
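
Something like this, maybe (untested, and the comment wording is only my
guess at the rationale; WARN_ON_ONCE() returns the value of the condition
it tests, so it can sit directly in the 'if'):

	if (pages) {
		pages[i] = mem_map_offset(page, pfn_offset);
		/*
		 * try_grab_page() is not expected to fail here; a
		 * failure would mean the page refcount is about to
		 * overflow. Warn once and give up on the whole range.
		 */
		if (WARN_ON_ONCE(!try_grab_page(pages[i], flags))) {
			spin_unlock(ptl);
			remainder = 0;
			err = -ENOMEM;
			break;
		}
	}
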
> +				break;
> +			}
>  		}
> 
>  		if (vmas)
> @@ -4965,6 +4958,12 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
>  	struct page *page = NULL;
>  	spinlock_t *ptl;
>  	pte_t pte;
> +
> +	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
> +	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
> +			 (FOLL_PIN | FOLL_GET)))
> +		return NULL;
> +
>  retry:
>  	ptl = pmd_lockptr(mm, pmd);
>  	spin_lock(ptl);
> @@ -4977,8 +4976,11 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
>  	pte = huge_ptep_get((pte_t *)pmd);
>  	if (pte_present(pte)) {
>  		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
> -		if (flags & FOLL_GET)
> -			get_page(page);
> +		if (unlikely(!try_grab_page(page, flags))) {
> +			WARN_ON_ONCE(1);

Ditto.
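
Same fold would work here, e.g. (again untested, comment is a placeholder):

	/* try_grab_page() failing here would mean a refcount problem. */
	if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
		page = NULL;
		goto out;
	}
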
> +			page = NULL;
> +			goto out;
> +		}
>  	} else {
>  		if (is_hugetlb_entry_migration(pte)) {
>  			spin_unlock(ptl);