On 10/02/2026 10:01, Christian König wrote:
At first glance it is counter intuitive to protect a constant function pointer table by RCU, but this allows modules providing the function table to unload by waiting for an RCU grace period.
v2: make one of the now duplicated lockdep warnings a comment instead. v3: Add more documentation to ->wait and ->release callback. v4: fix typo in documentation v5: rebased on drm-tip
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/dma-fence.c | 69 +++++++++++++++++++++++++------------ include/linux/dma-fence.h | 29 ++++++++++++++-- 2 files changed, 73 insertions(+), 25 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index e05beae6e407..de9bf18be3d4 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -522,6 +522,7 @@ EXPORT_SYMBOL(dma_fence_signal); signed long dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) {
- const struct dma_fence_ops *ops; signed long ret;
if (WARN_ON(timeout < 0)) @@ -533,15 +534,21 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) dma_fence_enable_sw_signaling(fence);
- if (trace_dma_fence_wait_start_enabled()) {
rcu_read_lock();trace_dma_fence_wait_start(fence);
- rcu_read_lock();
- ops = rcu_dereference(fence->ops);
- trace_dma_fence_wait_start(fence);
- if (ops->wait) {
/** Implementing the wait ops is deprecated and not supported for* issuer independent fences, so it is ok to use the ops outside* the RCU protected section.*/rcu_read_unlock();ret = ops->wait(fence, intr, timeout);- } else { rcu_read_unlock();
- }
- if (fence->ops->wait)
ret = fence->ops->wait(fence, intr, timeout);- else ret = dma_fence_default_wait(fence, intr, timeout);
- } if (trace_dma_fence_wait_end_enabled()) { rcu_read_lock(); trace_dma_fence_wait_end(fence);
@@ -562,6 +569,7 @@ void dma_fence_release(struct kref *kref) { struct dma_fence *fence = container_of(kref, struct dma_fence, refcount);
- const struct dma_fence_ops *ops;
rcu_read_lock(); trace_dma_fence_destroy(fence); @@ -593,12 +601,12 @@ void dma_fence_release(struct kref *kref) spin_unlock_irqrestore(fence->lock, flags); }
- rcu_read_unlock();
- if (fence->ops->release)
fence->ops->release(fence);
- ops = rcu_dereference(fence->ops);
- if (ops->release)
else dma_fence_free(fence);ops->release(fence);- rcu_read_unlock(); } EXPORT_SYMBOL(dma_fence_release);
@@ -617,6 +625,7 @@ EXPORT_SYMBOL(dma_fence_free); static bool __dma_fence_enable_signaling(struct dma_fence *fence) {
- const struct dma_fence_ops *ops; bool was_set;
lockdep_assert_held(fence->lock); @@ -627,14 +636,18 @@ static bool __dma_fence_enable_signaling(struct dma_fence *fence) if (dma_fence_test_signaled_flag(fence)) return false;
- if (!was_set && fence->ops->enable_signaling) {
- rcu_read_lock();
- ops = rcu_dereference(fence->ops);
- if (!was_set && ops->enable_signaling) { trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
if (!ops->enable_signaling(fence)) { } }rcu_read_unlock(); dma_fence_signal_locked(fence); return false;- rcu_read_unlock();
return true; } @@ -1007,8 +1020,13 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout); */ void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) {
- if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
fence->ops->set_deadline(fence, deadline);
- const struct dma_fence_ops *ops;
- rcu_read_lock();
- ops = rcu_dereference(fence->ops);
- if (ops->set_deadline && !dma_fence_is_signaled(fence))
ops->set_deadline(fence, deadline);- rcu_read_unlock(); } EXPORT_SYMBOL(dma_fence_set_deadline);
@@ -1049,7 +1067,12 @@ __dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); kref_init(&fence->refcount);
- fence->ops = ops;
- /*
* At first glance it is counter intuitive to protect a constant* function pointer table by RCU, but this allows modules providing the* function table to unload by waiting for an RCU grace period.*/- RCU_INIT_POINTER(fence->ops, ops); INIT_LIST_HEAD(&fence->cb_list); fence->lock = lock; fence->context = context;
@@ -1129,11 +1152,12 @@ EXPORT_SYMBOL(dma_fence_init64); */ const char __rcu *dma_fence_driver_name(struct dma_fence *fence) {
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"RCU protection is required for safe access to returned string");
- const struct dma_fence_ops *ops;
- /* RCU protection is required for safe access to returned string */
- ops = rcu_dereference(fence->ops); if (!dma_fence_test_signaled_flag(fence))
return (const char __rcu *)fence->ops->get_driver_name(fence);
else return (const char __rcu *)"detached-driver"; }return (const char __rcu *)ops->get_driver_name(fence);@@ -1161,11 +1185,12 @@ EXPORT_SYMBOL(dma_fence_driver_name); */ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence) {
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"RCU protection is required for safe access to returned string");
- const struct dma_fence_ops *ops;
- /* RCU protection is required for safe access to returned string */
- ops = rcu_dereference(fence->ops); if (!dma_fence_test_signaled_flag(fence))
return (const char __rcu *)fence->ops->get_timeline_name(fence);
else return (const char __rcu *)"signaled-timeline"; }return (const char __rcu *)ops->get_timeline_name(fence);diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 9c4d25289239..6bf4feb0e01f 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -67,7 +67,7 @@ struct seq_file; */ struct dma_fence { spinlock_t *lock;
- const struct dma_fence_ops *ops;
- const struct dma_fence_ops __rcu *ops; /*
- We clear the callback list on kref_put so that by the time we
- release the fence it is unused. No one should be adding to the
@@ -220,6 +220,10 @@ struct dma_fence_ops { * timed out. Can also return other error values on custom implementations, * which should be treated as if the fence is signaled. For example a hardware * lockup could be reported like that.
** Implementing this callback prevents the fence from detaching after* signaling and so it is mandatory for the module providing the */ signed long (*wait)(struct dma_fence *fence, bool intr, signed long timeout);* dma_fence_ops to stay loaded as long as the dma_fence exists.@@ -231,6 +235,13 @@ struct dma_fence_ops { * Can be called from irq context. This callback is optional. If it is * NULL, then dma_fence_free() is instead called as the default * implementation.
** Implementing this callback prevents the fence from detaching after* signaling and so it is mandatory for the module providing the* dma_fence_ops to stay loaded as long as the dma_fence exists.** If the callback is implemented the memory backing the dma_fence */ void (*release)(struct dma_fence *fence);* object must be freed RCU safe.@@ -454,13 +465,19 @@ dma_fence_test_signaled_flag(struct dma_fence *fence) static inline bool dma_fence_is_signaled_locked(struct dma_fence *fence) {
- const struct dma_fence_ops *ops;
- if (dma_fence_test_signaled_flag(fence)) return true;
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- rcu_read_lock();
- ops = rcu_dereference(fence->ops);
- if (ops->signaled && ops->signaled(fence)) {
dma_fence_signal_locked(fence); return true; }rcu_read_unlock();- rcu_read_unlock();
return false; } @@ -484,13 +501,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence) static inline bool dma_fence_is_signaled(struct dma_fence *fence) {
- const struct dma_fence_ops *ops;
- if (dma_fence_test_signaled_flag(fence)) return true;
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- rcu_read_lock();
- ops = rcu_dereference(fence->ops);
- if (ops->signaled && ops->signaled(fence)) {
dma_fence_signal(fence); return true; }rcu_read_unlock();- rcu_read_unlock();
return false; }
Pending parallel discussion on the comment tweaks, the logic and implementation look good to me:
Reviewed-by: Tvrtko Ursulin tvrtko.ursulin@igalia.com
Regards,
Tvrtko