It is desirable to support "zero-copy" I/O on the numerous embedded systems that run the Linux kernel, since avoiding buffer copies saves both CPU cycles and memory bandwidth on resource-constrained hardware. Such systems usually rely on mechanisms like CMA to carve out regions of contiguous physical memory for use by bus-mastering peripherals, e.g. by reserving the region with the cma= kernel command-line parameter.
This patch adds logic to __get_user_pages() that detects when user space is attempting direct I/O on CMA-backed pages and, if so, pins those pages so the mapping succeeds.
This patch is a first attempt at allowing O_DIRECT on memory carved out by CMA. Any suggestions or comments are appreciated!
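For reference, the userspace pattern this is intended to enable looks roughly like the sketch below. The device node and the driver that exports its CMA buffer via mmap() are hypothetical placeholders:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define BUF_SIZE	(1 << 20)	/* 1 MiB; page- and block-size aligned */

int cma_direct_read(const char *path)
{
	/* "/dev/cma-buf" stands in for whatever driver mmap()s its
	 * CMA-backed DMA buffer out to userspace. */
	int dma_fd = open("/dev/cma-buf", O_RDWR);
	int file_fd = open(path, O_RDONLY | O_DIRECT);
	void *buf;
	ssize_t ret = -1;

	if (dma_fd < 0 || file_fd < 0)
		goto out;

	buf = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		   dma_fd, 0);
	if (buf == MAP_FAILED)
		goto out;

	/* Without this patch, get_user_pages() cannot pin the CMA
	 * pages and the read typically fails with -EFAULT. */
	ret = read(file_fd, buf, BUF_SIZE);
	munmap(buf, BUF_SIZE);
out:
	if (dma_fd >= 0)
		close(dma_fd);
	if (file_fd >= 0)
		close(file_fd);
	return ret == BUF_SIZE ? 0 : -1;
}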
Signed-off-by: Marc Carino <marc.ceeeee@gmail.com>
---
 mm/gup.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
diff --git a/mm/gup.c b/mm/gup.c
index cc5a9e7..5fd1bfa 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -353,6 +353,60 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 	return 0;
 }
 
+static int cma_get_page(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long start, struct page **page)
+{
+#ifdef CONFIG_CMA
+	const unsigned long pg = start & PAGE_MASK;
+	int ret = -EFAULT;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	struct page *tmp_page;
+
+	pgd = pgd_offset(mm, pg);
+	BUG_ON(pgd_none(*pgd));
+	pud = pud_offset(pgd, pg);
+	BUG_ON(pud_none(*pud));
+	pmd = pmd_offset(pud, pg);
+	if (pmd_none(*pmd))
+		return ret;
+
+	pte = pte_offset_map(pmd, pg);
+	if (!pte)
+		return ret;
+
+	if (pte_none(*pte))
+		goto out;
+
+	tmp_page = pte_page(*pte);
+	if (!tmp_page)
+		goto out;
+
+	if (!page_count(tmp_page))
+		goto out;
+
+	if (page_mapped(tmp_page))
+		goto out;
+
+	if (get_pageblock_migratetype(tmp_page) != MIGRATE_CMA)
+		goto out;
+
+	if (page) {
+		*page = tmp_page;
+		get_page(*page);
+	}
+	ret = 0;
+
+out:
+	pte_unmap(pte);
+	return ret;
+#else
+	return 0;
+#endif
+}
+
 /**
  * __get_user_pages() - pin user pages in memory
  * @tsk:	task_struct of target task
@@ -443,6 +497,16 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			goto next_page;
 		}
 
+		if (!cma_get_page(mm, vma, start,
+				pages ? &pages[i] : NULL)) {
+			if (vmas)
+				vmas[i] = vma;
+			i++;
+			start += PAGE_SIZE;
+			nr_pages--;
+			continue;
+		}
+
 		if (!vma || check_vma_flags(vma, gup_flags))
 			return i ? : -EFAULT;
 		if (is_vm_hugetlb_page(vma)) {
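For additional context, the CMA-backed VMAs this patch targets typically come from a driver mmap() handler built on the DMA API. A minimal sketch of how such a mapping is usually created, assuming a hypothetical driver whose coherent buffer was already allocated:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state; all names here are illustrative. */
struct cma_buf_dev {
	struct device *dev;
	void *vaddr;		/* from dma_alloc_coherent() */
	dma_addr_t dma_handle;
	size_t size;
};

static int cma_buf_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct cma_buf_dev *cdev = filp->private_data;
	size_t len = vma->vm_end - vma->vm_start;

	if (len > cdev->size)
		return -EINVAL;

	/* On CMA-enabled systems, large coherent allocations are
	 * commonly satisfied from the CMA region, so the pages mapped
	 * here are what cma_get_page() above looks for. */
	return dma_mmap_coherent(cdev->dev, vma, cdev->vaddr,
				 cdev->dma_handle, len);
}

Note that dma_mmap_coherent() boils down to remap_pfn_range(), so the resulting VMA carries VM_IO | VM_PFNMAP. That is why check_vma_flags() rejects such mappings today, and why the cma_get_page() check is inserted ahead of it.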