6.18-stable review patch. If anyone has any objections, please let me know.
------------------
From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
commit 754c23238438600e9236719f7e67aff2c4d02093 upstream.
In situations where no system memory is migrated to devmem, and in upcoming patches where another GPU is performing the migration to the newly allocated devmem buffer, there is nothing to ensure any ongoing clear to the devmem allocation or async eviction from the devmem allocation is complete.
Address that by passing a struct dma_fence down to the copy functions, and ensure it is waited for before migration is marked complete.
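The pattern, roughly, is the following minimal sketch (for illustration
only; pipeline_pre_migrate_dep() is an invented name, the real logic
lives in the xe_migrate_vram() hunk below):

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Order a not-yet-signaled dependency fence before a migration job. */
static void pipeline_pre_migrate_dep(struct drm_sched_job *job,
				     struct dma_fence *deps)
{
	if (!deps || dma_fence_is_signaled(deps))
		return; /* Nothing outstanding to order against. */

	/* drm_sched_job_add_dependency() consumes a fence reference. */
	dma_fence_get(deps);
	if (drm_sched_job_add_dependency(job, deps))
		/* Allocation failure: fall back to a synchronous CPU wait. */
		dma_fence_wait(deps, false);
}

If attaching the dependency fails, the fallback blocks on the CPU
instead, so correctness never depends on the allocation succeeding.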
v3:
- New patch.
v4:
- Update the logic used for determining when to wait for the
  pre_migrate_fence.
- Update the logic used for determining when to warn for the
  pre_migrate_fence since the scheduler fences apparently can signal
  out-of-order.
v5:
- Fix a UAF. (CI)
- Remove references to source P2P migration. (Himal)
- Put the pre_migrate_fence after migration.
v6:
- Pipeline the pre_migrate_fence dependency. (Matt Brost)
Fixes: c5b3eb5a906c ("drm/xe: Add GPUSVM device memory copy vfunc functions")
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: stable@vger.kernel.org # v6.15+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> # For merging through drm-xe.
Link: https://patch.msgid.link/20251219113320.183860-4-thomas.hellstrom@linux.inte...
(cherry picked from commit 16b5ad31952476fb925c401897fc171cd37f536b)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/gpu/drm/drm_pagemap.c   | 17 ++++++++++---
 drivers/gpu/drm/xe/xe_migrate.c | 25 ++++++++++++++++----
 drivers/gpu/drm/xe/xe_migrate.h |  6 +++-
 drivers/gpu/drm/xe/xe_svm.c     | 49 ++++++++++++++++++++++++++++++----------
 include/drm/drm_pagemap.h       | 17 +++++++++++--
 5 files changed, 88 insertions(+), 26 deletions(-)
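Note for reviewers: the fence handed to drm_pagemap_devmem_init() is
captured from the newly allocated bo's reservation object in the
xe_drm_pagemap_populate_mm() hunk below. A minimal sketch of that
capture, assuming the reservation lock is held and with
capture_pre_migrate_fence() an invented name:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/sched.h>

static struct dma_fence *capture_pre_migrate_fence(struct dma_resv *resv)
{
	struct dma_fence *fence = NULL;

	/* Fast path: clears and async evictions have already signaled. */
	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_KERNEL))
		return NULL;

	/* Collapse all DMA_RESV_USAGE_KERNEL fences into a single fence. */
	if (dma_resv_get_singleton(resv, DMA_RESV_USAGE_KERNEL, &fence)) {
		/* Allocation failed: wait synchronously instead. */
		dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
				      false, MAX_SCHEDULE_TIMEOUT);
		return NULL;
	}
	if (fence)
		dma_fence_enable_sw_signaling(fence);
	return fence;
}

The returned reference is owned by the devmem allocation and is either
pipelined behind the copy job or put once migration completes.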
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -3,6 +3,7 @@
  * Copyright © 2024-2025 Intel Corporation
  */
 
+#include <linux/dma-fence.h>
 #include <linux/dma-mapping.h>
 #include <linux/migrate.h>
 #include <linux/pagemap.h>
@@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct
 			drm_pagemap_get_devmem_page(page, zdd);
 	}
 
-	err = ops->copy_to_devmem(pages, pagemap_addr, npages);
+	err = ops->copy_to_devmem(pages, pagemap_addr, npages,
+				  devmem_allocation->pre_migrate_fence);
 	if (err)
 		goto err_finalize;
 
+	dma_fence_put(devmem_allocation->pre_migrate_fence);
+	devmem_allocation->pre_migrate_fence = NULL;
+
 	/* Upon success bind devmem allocation to range and zdd */
 	devmem_allocation->timeslice_expiration = get_jiffies_64() +
 		msecs_to_jiffies(timeslice_ms);
@@ -596,7 +601,7 @@ retry:
 	for (i = 0; i < npages; ++i)
 		pages[i] = migrate_pfn_to_page(src[i]);
 
-	err = ops->copy_to_ram(pages, pagemap_addr, npages);
+	err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
 	if (err)
 		goto err_finalize;
 
@@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(
 	for (i = 0; i < npages; ++i)
 		pages[i] = migrate_pfn_to_page(migrate.src[i]);
 
-	err = ops->copy_to_ram(pages, pagemap_addr, npages);
+	err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
 	if (err)
 		goto err_finalize;
 
@@ -813,11 +818,14 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_op
  * @ops: Pointer to the operations structure for GPU SVM device memory
  * @dpagemap: The struct drm_pagemap we're allocating from.
  * @size: Size of device memory allocation
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
  */
 void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
 			     struct device *dev, struct mm_struct *mm,
 			     const struct drm_pagemap_devmem_ops *ops,
-			     struct drm_pagemap *dpagemap, size_t size)
+			     struct drm_pagemap *dpagemap, size_t size,
+			     struct dma_fence *pre_migrate_fence)
 {
 	init_completion(&devmem_allocation->detached);
 	devmem_allocation->dev = dev;
@@ -825,6 +833,7 @@ void drm_pagemap_devmem_init(struct drm_
 	devmem_allocation->ops = ops;
 	devmem_allocation->dpagemap = dpagemap;
 	devmem_allocation->size = size;
+	devmem_allocation->pre_migrate_fence = pre_migrate_fence;
 }
 EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
 
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1813,6 +1813,7 @@ static struct dma_fence *xe_migrate_vram
 					  unsigned long sram_offset,
 					  struct drm_pagemap_addr *sram_addr,
 					  u64 vram_addr,
+					  struct dma_fence *deps,
 					  const enum xe_migrate_copy_dir dir)
 {
 	struct xe_gt *gt = m->tile->primary_gt;
@@ -1890,6 +1891,14 @@ static struct dma_fence *xe_migrate_vram
 
 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
 
+	if (deps && !dma_fence_is_signaled(deps)) {
+		dma_fence_get(deps);
+		err = drm_sched_job_add_dependency(&job->drm, deps);
+		if (err)
+			dma_fence_wait(deps, false);
+		err = 0;
+	}
+
 	mutex_lock(&m->job_mutex);
 	xe_sched_job_arm(job);
 	fence = dma_fence_get(&job->drm.s_fence->finished);
@@ -1915,6 +1924,8 @@ err:
  * @npages: Number of pages to migrate.
  * @src_addr: Array of DMA information (source of migrate)
  * @dst_addr: Device physical address of VRAM (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
  *
  * Copy from an array dma addresses to a VRAM device physical address
  *
@@ -1924,10 +1935,11 @@ err:
 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
 				     unsigned long npages,
 				     struct drm_pagemap_addr *src_addr,
-				     u64 dst_addr)
+				     u64 dst_addr,
+				     struct dma_fence *deps)
 {
 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
-			       XE_MIGRATE_COPY_TO_VRAM);
+			       deps, XE_MIGRATE_COPY_TO_VRAM);
 }
 
 /**
@@ -1936,6 +1948,8 @@ struct dma_fence *xe_migrate_to_vram(str
  * @npages: Number of pages to migrate.
  * @src_addr: Device physical address of VRAM (source of migrate)
  * @dst_addr: Array of DMA information (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
  *
  * Copy from a VRAM device physical address to an array dma addresses
  *
@@ -1945,10 +1959,11 @@ struct dma_fence *xe_migrate_to_vram(str
 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
 				       unsigned long npages,
 				       u64 src_addr,
-				       struct drm_pagemap_addr *dst_addr)
+				       struct drm_pagemap_addr *dst_addr,
+				       struct dma_fence *deps)
 {
 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
-			       XE_MIGRATE_COPY_TO_SRAM);
+			       deps, XE_MIGRATE_COPY_TO_SRAM);
 }
 
 static void xe_migrate_dma_unmap(struct xe_device *xe,
@@ -2121,7 +2136,7 @@ int xe_migrate_access_memory(struct xe_m
 		__fence = xe_migrate_vram(m, current_bytes,
 					  (unsigned long)buf & ~PAGE_MASK,
 					  &pagemap_addr[current_page],
-					  vram_addr, write ?
+					  vram_addr, NULL, write ?
 					  XE_MIGRATE_COPY_TO_VRAM :
 					  XE_MIGRATE_COPY_TO_SRAM);
 		if (IS_ERR(__fence)) {
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -111,12 +111,14 @@ int xe_migrate_init(struct xe_migrate *m
 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
 				     unsigned long npages,
 				     struct drm_pagemap_addr *src_addr,
-				     u64 dst_addr);
+				     u64 dst_addr,
+				     struct dma_fence *deps);
 
 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
 					unsigned long npages,
 					u64 src_addr,
-					struct drm_pagemap_addr *dst_addr);
+					struct drm_pagemap_addr *dst_addr,
+					struct dma_fence *deps);
 
 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 				  struct xe_bo *src_bo,
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -477,7 +477,8 @@ static void xe_svm_copy_us_stats_incr(st
 
 static int xe_svm_copy(struct page **pages,
 		       struct drm_pagemap_addr *pagemap_addr,
-		       unsigned long npages, const enum xe_svm_copy_dir dir)
+		       unsigned long npages, const enum xe_svm_copy_dir dir,
+		       struct dma_fence *pre_migrate_fence)
 {
 	struct xe_vram_region *vr = NULL;
 	struct xe_gt *gt = NULL;
@@ -566,7 +567,8 @@ static int xe_svm_copy(struct page **pag
 				__fence = xe_migrate_from_vram(vr->migrate,
 							       i - pos + incr,
 							       vram_addr,
-							       &pagemap_addr[pos]);
+							       &pagemap_addr[pos],
+							       pre_migrate_fence);
 			} else {
 				vm_dbg(&xe->drm,
 				       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
@@ -575,13 +577,14 @@ static int xe_svm_copy(struct page **pag
 				__fence = xe_migrate_to_vram(vr->migrate,
 							     i - pos + incr,
 							     &pagemap_addr[pos],
-							     vram_addr);
+							     vram_addr,
+							     pre_migrate_fence);
 			}
 			if (IS_ERR(__fence)) {
 				err = PTR_ERR(__fence);
 				goto err_out;
 			}
-
+			pre_migrate_fence = NULL;
 			dma_fence_put(fence);
 			fence = __fence;
 		}
@@ -604,20 +607,22 @@ static int xe_svm_copy(struct page **pag
 				       vram_addr, (u64)pagemap_addr[pos].addr, 1);
 				__fence = xe_migrate_from_vram(vr->migrate, 1,
 							       vram_addr,
-							       &pagemap_addr[pos]);
+							       &pagemap_addr[pos],
+							       pre_migrate_fence);
 			} else {
 				vm_dbg(&xe->drm,
 				       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
 				       (u64)pagemap_addr[pos].addr, vram_addr, 1);
 				__fence = xe_migrate_to_vram(vr->migrate, 1,
 							     &pagemap_addr[pos],
-							     vram_addr);
+							     vram_addr,
+							     pre_migrate_fence);
 			}
 			if (IS_ERR(__fence)) {
 				err = PTR_ERR(__fence);
 				goto err_out;
 			}
-
+			pre_migrate_fence = NULL;
 			dma_fence_put(fence);
 			fence = __fence;
 		}
@@ -630,6 +635,8 @@ err_out:
 		dma_fence_wait(fence, false);
 		dma_fence_put(fence);
 	}
+	if (pre_migrate_fence)
+		dma_fence_wait(pre_migrate_fence, false);
 
 	/*
 	 * XXX: We can't derive the GT here (or anywhere in this functions, but
@@ -646,16 +653,20 @@ err_out:
 static int xe_svm_copy_to_devmem(struct page **pages,
 				 struct drm_pagemap_addr *pagemap_addr,
-				 unsigned long npages)
+				 unsigned long npages,
+				 struct dma_fence *pre_migrate_fence)
 {
-	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
+	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
+			   pre_migrate_fence);
 }
 
 static int xe_svm_copy_to_ram(struct page **pages,
 			      struct drm_pagemap_addr *pagemap_addr,
-			      unsigned long npages)
+			      unsigned long npages,
+			      struct dma_fence *pre_migrate_fence)
 {
-	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
+	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
+			   pre_migrate_fence);
 }
 
 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
 {
@@ -668,6 +679,7 @@ static void xe_svm_devmem_release(struct
 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
 	struct xe_device *xe = xe_bo_device(bo);
 
+	dma_fence_put(devmem_allocation->pre_migrate_fence);
 	xe_bo_put_async(bo);
 	xe_pm_runtime_put(xe);
 }
@@ -862,6 +874,7 @@ static int xe_drm_pagemap_populate_mm(st
 				      unsigned long timeslice_ms)
 {
 	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+	struct dma_fence *pre_migrate_fence = NULL;
 	struct xe_device *xe = vr->xe;
 	struct device *dev = xe->drm.dev;
 	struct drm_buddy_block *block;
@@ -888,8 +901,20 @@ static int xe_drm_pagemap_populate_mm(st
 		break;
 	}
 
+	/* Ensure that any clearing or async eviction will complete before migration. */
+	if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
+		err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+					     &pre_migrate_fence);
+		if (err)
+			dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+					      false, MAX_SCHEDULE_TIMEOUT);
+		else if (pre_migrate_fence)
+			dma_fence_enable_sw_signaling(pre_migrate_fence);
+	}
+
 	drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
-				&dpagemap_devmem_ops, dpagemap, end - start);
+				&dpagemap_devmem_ops, dpagemap, end - start,
+				pre_migrate_fence);
 
 	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
 	list_for_each_entry(block, blocks, link)
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -8,6 +8,7 @@
 
 #define NR_PAGES(order) (1U << (order))
 
+struct dma_fence;
 struct drm_pagemap;
 struct drm_pagemap_zdd;
 struct device;
@@ -174,6 +175,8 @@ struct drm_pagemap_devmem_ops {
 	 * @pages: Pointer to array of device memory pages (destination)
 	 * @pagemap_addr: Pointer to array of DMA information (source)
 	 * @npages: Number of pages to copy
+	 * @pre_migrate_fence: dma-fence to wait for before migration start.
+	 * May be NULL.
 	 *
 	 * Copy pages to device memory. If the order of a @pagemap_addr entry
 	 * is greater than 0, the entry is populated but subsequent entries
@@ -183,13 +186,16 @@ struct drm_pagemap_devmem_ops {
 	 */
 	int (*copy_to_devmem)(struct page **pages,
 			      struct drm_pagemap_addr *pagemap_addr,
-			      unsigned long npages);
+			      unsigned long npages,
+			      struct dma_fence *pre_migrate_fence);
 
 	/**
 	 * @copy_to_ram: Copy to system RAM (required for migration)
 	 * @pages: Pointer to array of device memory pages (source)
 	 * @pagemap_addr: Pointer to array of DMA information (destination)
 	 * @npages: Number of pages to copy
+	 * @pre_migrate_fence: dma-fence to wait for before migration start.
+	 * May be NULL.
 	 *
 	 * Copy pages to system RAM. If the order of a @pagemap_addr entry
 	 * is greater than 0, the entry is populated but subsequent entries
@@ -199,7 +205,8 @@ struct drm_pagemap_devmem_ops {
 	 */
 	int (*copy_to_ram)(struct page **pages,
 			   struct drm_pagemap_addr *pagemap_addr,
-			   unsigned long npages);
+			   unsigned long npages,
+			   struct dma_fence *pre_migrate_fence);
 };
 
 /**
@@ -212,6 +219,8 @@ struct drm_pagemap_devmem_ops {
  * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
  * @size: Size of device memory allocation
  * @timeslice_expiration: Timeslice expiration in jiffies
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
  */
 struct drm_pagemap_devmem {
 	struct device *dev;
@@ -221,6 +230,7 @@ struct drm_pagemap_devmem {
 	struct drm_pagemap *dpagemap;
 	size_t size;
 	u64 timeslice_expiration;
+	struct dma_fence *pre_migrate_fence;
 };
 
 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
@@ -238,7 +248,8 @@ struct drm_pagemap *drm_pagemap_page_to_
 void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
 			     struct device *dev, struct mm_struct *mm,
 			     const struct drm_pagemap_devmem_ops *ops,
-			     struct drm_pagemap *dpagemap, size_t size);
+			     struct drm_pagemap *dpagemap, size_t size,
+			     struct dma_fence *pre_migrate_fence);
 
 int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
 			    unsigned long start, unsigned long end,