When the backend ops provide neither a release nor a wait callback, the dma_fence can live on independently of the module that issued it.
This makes it possible to unload drivers after only waiting for all their fences to signal.
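A hedged sketch, not part of this patch, of what such an issuer independent fence provider could look like; the example_* names are made up purely for illustration:

#include <linux/dma-fence.h>

static const char *example_get_driver_name(struct dma_fence *fence)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
	return "example-timeline";
}

/*
 * Neither .release nor .wait is set, so after signalling the core can
 * drop its reference to these ops and the fence no longer depends on
 * the module providing them.
 */
static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
};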
v2: fix typo in comment
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Reviewed-by: Philipp Stanner <phasta@kernel.org>
---
 drivers/dma-buf/dma-fence.c | 16 ++++++++++++----
 include/linux/dma-fence.h   |  4 ++--
 2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index e3e74f98f58d..481f1cd9aae2 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -371,6 +371,14 @@ void dma_fence_signal_timestamp_locked(struct dma_fence *fence,
 				      &fence->flags)))
 		return;
 
+	/*
+	 * When neither a release nor a wait operation is specified set the ops
+	 * pointer to NULL to allow the fence structure to become independent
+	 * from who originally issued it.
+	 */
+	if (!fence->ops->release && !fence->ops->wait)
+		RCU_INIT_POINTER(fence->ops, NULL);
+
 	/* Stash the cb_list before replacing it with the timestamp */
 	list_replace(&fence->cb_list, &cb_list);
 
@@ -537,7 +545,7 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
 	rcu_read_lock();
 	ops = rcu_dereference(fence->ops);
 	trace_dma_fence_wait_start(fence);
-	if (ops->wait) {
+	if (ops && ops->wait) {
 		/*
 		 * Implementing the wait ops is deprecated and not supported for
 		 * issuer independent fences, so it is ok to use the ops outside
@@ -602,7 +610,7 @@ void dma_fence_release(struct kref *kref)
 	}
 
 	ops = rcu_dereference(fence->ops);
-	if (ops->release)
+	if (ops && ops->release)
 		ops->release(fence);
 	else
 		dma_fence_free(fence);
@@ -638,7 +646,7 @@ static bool __dma_fence_enable_signaling(struct dma_fence *fence)
 
 	rcu_read_lock();
 	ops = rcu_dereference(fence->ops);
-	if (!was_set && ops->enable_signaling) {
+	if (!was_set && ops && ops->enable_signaling) {
 		trace_dma_fence_enable_signal(fence);
 
 		if (!ops->enable_signaling(fence)) {
@@ -1024,7 +1032,7 @@ void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
 
 	rcu_read_lock();
 	ops = rcu_dereference(fence->ops);
-	if (ops->set_deadline && !dma_fence_is_signaled(fence))
+	if (ops && ops->set_deadline && !dma_fence_is_signaled(fence))
 		ops->set_deadline(fence, deadline);
 	rcu_read_unlock();
 }
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index eea674acdfa6..e6d753107143 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -457,7 +457,7 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
 
 	rcu_read_lock();
 	ops = rcu_dereference(fence->ops);
-	if (ops->signaled && ops->signaled(fence)) {
+	if (ops && ops->signaled && ops->signaled(fence)) {
 		rcu_read_unlock();
 		dma_fence_signal_locked(fence);
 		return true;
@@ -493,7 +493,7 @@ dma_fence_is_signaled(struct dma_fence *fence)
 
 	rcu_read_lock();
 	ops = rcu_dereference(fence->ops);
-	if (ops->signaled && ops->signaled(fence)) {
+	if (ops && ops->signaled && ops->signaled(fence)) {
 		rcu_read_unlock();
 		dma_fence_signal(fence);
 		return true;
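For completeness, a hedged sketch of the lifecycle that results from the new NULL-ops path above; it is not part of the patch and assumes "fence" was initialised with ops like example_fence_ops from the earlier sketch, i.e. without .release and .wait:

#include <linux/dma-fence.h>

static void example_finish(struct dma_fence *fence)
{
	/* with no .release/.wait ops, signalling sets fence->ops to NULL */
	dma_fence_signal(fence);

	/* the generic wait path is used, ops->wait is never consulted */
	dma_fence_wait(fence, false);

	/* the final put falls back to dma_fence_free() instead of ops->release */
	dma_fence_put(fence);
}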