On 11/21/25 11:12, Pierre-Eric Pelloux-Prayer wrote:
It's doing the same thing as amdgpu_fill_buffer(src_data=0), so drop it.
The only caveat is that amdgpu_res_cleared()'s return value is only valid right after allocation.
v2: introduce new "bool consider_clear_status" arg
Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
It would be better to have that earlier in the patch set, but I guess that gives you rebasing problems?
Christian.
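As an illustration of the consolidation (not part of the patch): a caller that previously went through amdgpu_ttm_clear_buffer() can be sketched roughly as below on top of the new interface, mirroring the amdgpu_bo_create() hunk further down. The helper name clear_vram_bo() is made up for the example; the other identifiers come from the patch itself.

    /* Sketch only: clear a freshly allocated VRAM BO through the
     * consolidated amdgpu_fill_buffer() path. src_data = 0 reproduces
     * the old clear, and consider_clear_status = true is acceptable
     * here because amdgpu_res_cleared() is only meaningful right
     * after allocation.
     */
    static int clear_vram_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo)
    {
            struct dma_fence *fence = NULL;
            int r;

            r = amdgpu_fill_buffer(adev, amdgpu_ttm_next_clear_entity(adev),
                                   bo, 0, NULL, &fence,
                                   true, AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
            if (r)
                    return r;

            /* the fence may be NULL if every region was already cleared */
            if (fence) {
                    dma_resv_add_fence(bo->tbo.base.resv, fence,
                                       DMA_RESV_USAGE_KERNEL);
                    dma_fence_put(fence);
            }
            return 0;
    }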
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 16 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 90 +++++----------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |  7 +-
 3 files changed, 33 insertions(+), 80 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7d8d70135cc2..dccc31d0128e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -725,13 +725,17 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
-		r = amdgpu_ttm_clear_buffer(adev, bo, bo->tbo.base.resv, &fence);
+		r = amdgpu_fill_buffer(adev, amdgpu_ttm_next_clear_entity(adev),
+				       bo, 0, NULL, &fence,
+				       true, AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
 		if (unlikely(r))
 			goto fail_unreserve;
 
-		dma_resv_add_fence(bo->tbo.base.resv, fence,
-				   DMA_RESV_USAGE_KERNEL);
-		dma_fence_put(fence);
+		if (fence) {
+			dma_resv_add_fence(bo->tbo.base.resv, fence,
+					   DMA_RESV_USAGE_KERNEL);
+			dma_fence_put(fence);
+		}
 	}
 	if (!bp->resv)
 		amdgpu_bo_unreserve(bo);
@@ -1323,8 +1327,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 		goto out;
 
 	r = amdgpu_fill_buffer(adev, amdgpu_ttm_next_clear_entity(adev),
-			       abo, 0, &bo->base._resv,
-			       &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
+			       abo, 0, &bo->base._resv, &fence,
+			       false, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
 	if (WARN_ON(r))
 		goto out;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 39cfe2dbdf03..c65c411ce26e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -459,7 +459,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 		r = amdgpu_fill_buffer(adev, entity, abo, 0, NULL, &wipe_fence,
-				       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
+				       false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
 		if (r) {
 			goto error;
 		} else if (wipe_fence) {
@@ -2459,79 +2459,28 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_ttm_clear_buffer - clear memory buffers
+ * amdgpu_fill_buffer - fill a buffer with a given value
  * @adev: amdgpu device object
- * @bo: amdgpu buffer object
- * @resv: reservation object
- * @fence: dma_fence associated with the operation
+ * @entity: optional entity to use. If NULL, the clearing entities will be
+ *          used to load-balance the partial clears
+ * @bo: the bo to fill
+ * @src_data: the value to set
+ * @resv: fences contained in this reservation will be used as dependencies.
+ * @out_fence: the fence from the last clear will be stored here. It might be
+ *             NULL if no job was run.
+ * @dependency: optional input dependency fence.
+ * @consider_clear_status: true if region reported as cleared by amdgpu_res_cleared()
+ *                         are skipped.
+ * @k_job_id: trace id
  *
- * Clear the memory buffer resource.
- *
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_ttm_clear_buffer(struct amdgpu_device *adev,
-			    struct amdgpu_bo *bo,
-			    struct dma_resv *resv,
-			    struct dma_fence **fence)
-{
-	struct amdgpu_ttm_buffer_entity *entity;
-	struct amdgpu_res_cursor cursor;
-	u64 addr;
-	int r = 0;
-
-	if (!adev->mman.buffer_funcs_enabled)
-		return -EINVAL;
-
-	if (!fence)
-		return -EINVAL;
-
-	entity = &adev->mman.clear_entities[0];
-
-	*fence = dma_fence_get_stub();
-
-	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
-
-	mutex_lock(&entity->lock);
-	while (cursor.remaining) {
-		struct dma_fence *next = NULL;
-		u64 size;
-
-		if (amdgpu_res_cleared(&cursor)) {
-			amdgpu_res_next(&cursor, cursor.size);
-			continue;
-		}
-
-		/* Never clear more than 256MiB at once to avoid timeouts */
-		size = min(cursor.size, 256ULL << 20);
-
-		r = amdgpu_ttm_map_buffer(adev, entity,
-					  &bo->tbo, bo->tbo.resource, &cursor,
-					  1, false, false, &size, &addr);
-		if (r)
-			goto err;
-
-		r = amdgpu_ttm_fill_mem(adev, entity, 0, addr, size, resv,
-					&next, true,
-					AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
-		if (r)
-			goto err;
-
-		dma_fence_put(*fence);
-		*fence = next;
-
-		amdgpu_res_next(&cursor, size);
-	}
-err:
-	mutex_unlock(&entity->lock);
-
-	return r;
-}
-
 int amdgpu_fill_buffer(struct amdgpu_device *adev,
 		       struct amdgpu_ttm_buffer_entity *entity,
 		       struct amdgpu_bo *bo,
 		       uint32_t src_data,
 		       struct dma_resv *resv,
-		       struct dma_fence **f,
+		       struct dma_fence **out_fence,
+		       bool consider_clear_status,
 		       u64 k_job_id)
 {
 	struct dma_fence *fence = NULL;
@@ -2551,6 +2500,11 @@ int amdgpu_fill_buffer(struct amdgpu_device *adev,
 		struct dma_fence *next;
 		uint64_t cur_size, to;
 
+		if (consider_clear_status && amdgpu_res_cleared(&dst)) {
+			amdgpu_res_next(&dst, dst.size);
+			continue;
+		}
+
 		/* Never fill more than 256MiB at once to avoid timeouts */
 		cur_size = min(dst.size, 256ULL << 20);
 
@@ -2574,9 +2528,7 @@ int amdgpu_fill_buffer(struct amdgpu_device *adev,
 	}
 error:
 	mutex_unlock(&entity->lock);
-	if (f)
-		*f = dma_fence_get(fence);
-	dma_fence_put(fence);
+	*out_fence = fence;
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 653a4d17543e..f3bdbcec9afc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -181,16 +181,13 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev,
 		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool vm_needs_flush,
 		       uint32_t copy_flags);
-int amdgpu_ttm_clear_buffer(struct amdgpu_device *adev,
-			    struct amdgpu_bo *bo,
-			    struct dma_resv *resv,
-			    struct dma_fence **fence);
 int amdgpu_fill_buffer(struct amdgpu_device *adev,
 		       struct amdgpu_ttm_buffer_entity *entity,
 		       struct amdgpu_bo *bo,
 		       uint32_t src_data,
 		       struct dma_resv *resv,
-		       struct dma_fence **f,
+		       struct dma_fence **out_fence,
+		       bool consider_clear_status,
 		       u64 k_job_id);
 struct amdgpu_ttm_buffer_entity *amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev);
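
One more illustration (outside the patch): how the new consider_clear_status and out_fence arguments are meant to be used once the BO has already been touched, based on the kernel-doc and the amdgpu_bo_release_notify()/amdgpu_move_blit() call sites above, which pass false. The helper name wipe_bo_on_release() is made up for the example.

    /* Sketch only: fill a BO whose clear status is no longer trustworthy
     * (e.g. wipe-on-release), so consider_clear_status must be false and
     * every region gets written. The caller owns the fence returned in
     * out_fence and must put it; it can still be NULL if no job ran.
     */
    static int wipe_bo_on_release(struct amdgpu_device *adev,
                                  struct amdgpu_bo *abo,
                                  struct dma_resv *resv)
    {
            struct dma_fence *fence = NULL;
            int r;

            r = amdgpu_fill_buffer(adev, amdgpu_ttm_next_clear_entity(adev),
                                   abo, 0, resv, &fence,
                                   false, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
            if (r)
                    return r;

            if (fence) {
                    dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
                    dma_fence_put(fence);
            }
            return 0;
    }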