Using the inline lock is now the recommended way for dma_fence implementations.
For the scheduler fence, use the inline lock for the scheduled fence and then use the scheduled fence's lock as the external lock for the finished fence.
This way there is no functional difference; the only change is that the space for the separate lock is saved.
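As a quick illustration of the resulting pattern (a sketch only; it assumes, as earlier patches in this series propose, that passing a NULL lock to dma_fence_init() selects the inline lock and that dma_fence_spinlock() returns a pointer to that inline lock):

	/* Scheduled fence: no external lock is given, so the inline lock
	 * embedded in struct dma_fence is used.
	 */
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       NULL, entity->fence_context, seq);

	/* Finished fence: reuse the scheduled fence's inline lock as its
	 * external lock, so both fences stay serialized by one spinlock.
	 */
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       dma_fence_spinlock(&fence->scheduled),
		       entity->fence_context + 1, seq);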
v2: re-work the patch to avoid any functional difference
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/scheduler/sched_fence.c | 6 +++---
 include/drm/gpu_scheduler.h             | 4 ----
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 724d77694246..112677231f9a 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -217,7 +217,6 @@ struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
 
 	fence->owner = owner;
 	fence->drm_client_id = drm_client_id;
-	spin_lock_init(&fence->lock);
 
 	return fence;
 }
@@ -230,9 +229,10 @@ void drm_sched_fence_init(struct drm_sched_fence *fence,
 	fence->sched = entity->rq->sched;
 	seq = atomic_inc_return(&entity->fence_seq);
 	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
-		       &fence->lock, entity->fence_context, seq);
+		       NULL, entity->fence_context, seq);
 	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
-		       &fence->lock, entity->fence_context + 1, seq);
+		       dma_fence_spinlock(&fence->scheduled),
+		       entity->fence_context + 1, seq);
 }
 
 module_init(drm_sched_fence_slab_init);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 78e07c2507c7..ad3704685163 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -297,10 +297,6 @@ struct drm_sched_fence {
 	 * belongs to.
 	 */
 	struct drm_gpu_scheduler	*sched;
-	/**
-	 * @lock: the lock used by the scheduled and the finished fences.
-	 */
-	spinlock_t			lock;
 	/**
 	 * @owner: job owner for debugging
 	 */