To be secure, when a userptr is invalidated the pages should be dma unmapped, ensuring the device can no longer touch the invalidated pages.
v2:
 - Don't free sg table in MMU notifier, just dma unmap pages
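The fix stashes a "needs unmap" flag in bit 0 of the cached sg table pointer (the allocation is at least 2-byte aligned, so the low bit is always clear), set when the pages are dma mapped and tested-and-cleared under vm->userptr.invalidated_lock, so exactly one of the MMU notifier path and xe_hmm_userptr_free_sg() performs the dma_unmap_sgtable(). Below is a minimal standalone sketch of that low-bit pointer-tagging technique; it is illustrative only and not part of the patch (the stub type and prints are invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    struct sg_table_stub { int dummy; };  /* stand-in for struct sg_table */

    /* The allocation is at least 2-byte aligned, so bit 0 of the pointer
     * is free to serve as a "DMA mappings live, unmap needed" flag. */
    #define need_unmap(__sg)       ((uintptr_t)(__sg) & 0x1)
    #define clear_need_unmap(__sg) \
            ((__sg) = (struct sg_table_stub *)((uintptr_t)(__sg) & ~(uintptr_t)0x1))
    #define set_need_unmap(__sg) \
            ((__sg) = (struct sg_table_stub *)((uintptr_t)(__sg) | 0x1))

    int main(void)
    {
            static struct sg_table_stub table;
            struct sg_table_stub *sg = &table;

            set_need_unmap(sg);       /* populate path: mappings created */
            printf("need unmap: %d\n", need_unmap(sg) ? 1 : 0);  /* 1 */

            if (need_unmap(sg)) {
                    /* invalidate/free path: unmap exactly once */
                    clear_need_unmap(sg);
            }
            printf("need unmap: %d\n", need_unmap(sg) ? 1 : 0);  /* 0 */
            return 0;
    }

Note that a tagged pointer must never be dereferenced directly; readers mask the bit off first, which in the patch is done by the new vma_to_userptr_sg() macro.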
Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Fixes: 12f4b58a37f4 ("drm/xe: Use hmm_range_fault to populate user pages")
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: stable@vger.kernel.org # 6.8
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_hmm.c      | 42 ++++++++++++++++++++++++++------
 drivers/gpu/drm/xe/xe_hmm.h      |  1 +
 drivers/gpu/drm/xe/xe_pt.c       |  2 +-
 drivers/gpu/drm/xe/xe_vm.c       |  5 +++-
 drivers/gpu/drm/xe/xe_vm_types.h |  5 +++-
 5 files changed, 45 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 2c32dc46f7d4..baf42514e1f9 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -112,16 +112,20 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
 	return ret;
 }
 
-/*
- * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
+#define need_unmap(__sg) ((u64)(__sg) & 0x1ull)
+#define clear_need_unmap(__sg) (__sg) = (struct sg_table *)((u64)(__sg) & ~0x1ull)
+#define set_need_unmap(__sg) (__sg) = (struct sg_table *)((u64)(__sg) | 0x1ull)
+
+/**
+ * xe_hmm_userptr_unmap_sg() - Unmap the scatter gather table of userptr
  *
  * @uvma: the userptr vma which hold the scatter gather table
  *
  * With function xe_userptr_populate_range, we allocate storage of
- * the userptr sg table. This is a helper function to free this
- * sg table, and dma unmap the address in the table.
+ * the userptr sg table. This is a helper function to dma unmap the address in
+ * the table.
  */
-void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
+void xe_hmm_userptr_unmap_sg(struct xe_userptr_vma *uvma)
 {
 	struct xe_userptr *userptr = &uvma->userptr;
 	struct xe_vma *vma = &uvma->vma;
@@ -129,11 +133,34 @@ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
 	struct xe_vm *vm = xe_vma_vm(vma);
 	struct xe_device *xe = vm->xe;
 	struct device *dev = xe->drm.dev;
+	bool do_unmap;
 
 	xe_assert(xe, userptr->sg);
-	dma_unmap_sgtable(dev, userptr->sg,
-			  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
 
+	spin_lock(&vm->userptr.invalidated_lock);
+	do_unmap = need_unmap(userptr->sg);
+	clear_need_unmap(userptr->sg);
+	spin_unlock(&vm->userptr.invalidated_lock);
+
+	if (do_unmap)
+		dma_unmap_sgtable(dev, userptr->sg,
+				  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+}
+
+/*
+ * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
+ *
+ * @uvma: the userptr vma which hold the scatter gather table
+ *
+ * With function xe_userptr_populate_range, we allocate storage of
+ * the userptr sg table. This is a helper function to free this
+ * sg table, and dma unmap the address in the table.
+ */
+void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
+{
+	struct xe_userptr *userptr = &uvma->userptr;
+
+	xe_hmm_userptr_unmap_sg(uvma);
 	sg_free_table(userptr->sg);
 	userptr->sg = NULL;
 }
@@ -244,6 +271,7 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
 
 	xe_mark_range_accessed(&hmm_range, write);
 	userptr->sg = &userptr->sgt;
+	set_need_unmap(userptr->sg);
 	userptr->notifier_seq = hmm_range.notifier_seq;
 
 free_pfns:
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
index 909dc2bdcd97..56e653dc9fa2 100644
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ b/drivers/gpu/drm/xe/xe_hmm.h
@@ -9,3 +9,4 @@ struct xe_userptr_vma;
 int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
 				  bool is_mm_mmap_locked);
 void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
+void xe_hmm_userptr_unmap_sg(struct xe_userptr_vma *uvma);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 8d3765d3351e..b095257dc684 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -635,7 +635,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
 
 	if (!xe_vma_is_null(vma)) {
 		if (xe_vma_is_userptr(vma))
-			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
+			xe_res_first_sg(vma_to_userptr_sg(vma), 0,
 					xe_vma_size(vma), &curs);
 		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
 			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index dfd31b346021..c3d54dcf2a3e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -637,6 +637,9 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
 		XE_WARN_ON(err);
 	}
 
+	if (userptr->sg)
+		xe_hmm_userptr_unmap_sg(uvma);
+
 	trace_xe_vma_userptr_invalidate_complete(vma);
 
 	return true;
@@ -3405,7 +3408,7 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
 		if (is_null) {
 			addr = 0;
 		} else if (is_userptr) {
-			struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
+			struct sg_table *sg = vma_to_userptr_sg(vma);
 			struct xe_res_cursor cur;
 
 			if (sg) {
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ce1a63a5e3e7..0478a2235076 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -34,6 +34,9 @@ struct xe_vm;
 #define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 9)
 #define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 10)
 
+#define vma_to_userptr_sg(__vma) \
+	(struct sg_table *)((u64)to_userptr_vma((__vma))->userptr.sg & ~0x1ull)
+
 /** struct xe_userptr - User pointer */
 struct xe_userptr {
 	/** @invalidate_link: Link for the vm::userptr.invalidated list */
@@ -206,7 +209,7 @@ struct xe_vm {
 	struct rw_semaphore notifier_lock;
 	/**
 	 * @userptr.invalidated_lock: Protects the
-	 * @userptr.invalidated list.
+	 * @userptr.invalidated list and dma mapped pages of userptrs
 	 */
 	spinlock_t invalidated_lock;
 	/**