On 3/18/26 10:51, Tvrtko Ursulin wrote:
>
> On 17/03/2026 14:50, Christian König wrote:
>> Amdgpu was the only user of the signal on any feature and we dropped
>> that use case recently, so we can remove that functionality again.
>>
>> This allows to simplfy the dma_fence_array code a lot and saves us from
>
> simplify
>
>> the need to install a callback on all fences at the same time.
>>
>> Signed-off-by: Christian König <christian.koenig(a)amd.com>
>> ---
>>  drivers/dma-buf/dma-fence-array.c            | 133 +++++++-----------
>>  drivers/dma-buf/dma-fence-unwrap.c           |  3 +-
>>  drivers/dma-buf/dma-resv.c                   |  3 +-
>>  drivers/dma-buf/st-dma-fence-unwrap.c        |  2 +-
>>  .../gpu/drm/i915/gem/i915_gem_execbuffer.c   |  3 +-
>>  drivers/gpu/drm/xe/xe_sync.c                 |  2 +-
>>  drivers/gpu/drm/xe/xe_vm.c                   |  4 +-
>>  include/linux/dma-fence-array.h              | 28 +---
>>  8 files changed, 63 insertions(+), 115 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
>> index 089f69469524..0d54bf2e47ba 100644
>> --- a/drivers/dma-buf/dma-fence-array.c
>> +++ b/drivers/dma-buf/dma-fence-array.c
>> @@ -42,97 +42,71 @@ static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
>> Â Â Â Â Â cmpxchg(&array->base.error, PENDING_ERROR, 0);
>> Â }
>> Â -static void irq_dma_fence_array_work(struct irq_work *wrk)
>> +static void dma_fence_array_cb_func(struct dma_fence *f,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â struct dma_fence_cb *cb)
>> Â {
>> -Â Â Â struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
>> +Â Â Â struct dma_fence_array *array =
>> +Â Â Â Â Â Â Â container_of(cb, struct dma_fence_array, callback);
>> Â -Â Â Â dma_fence_array_clear_pending_error(array);
>> -
>> -Â Â Â dma_fence_signal(&array->base);
>> -Â Â Â dma_fence_put(&array->base);
>> +Â Â Â irq_work_queue(&array->work);
>> Â }
>> Â -static void dma_fence_array_cb_func(struct dma_fence *f,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â struct dma_fence_cb *cb)
>> +static void irq_dma_fence_array_work(struct irq_work *wrk)
>> Â {
>> -Â Â Â struct dma_fence_array_cb *array_cb =
>> -Â Â Â Â Â Â Â container_of(cb, struct dma_fence_array_cb, cb);
>> -Â Â Â struct dma_fence_array *array = array_cb->array;
>> +Â Â Â struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
>> Â -Â Â Â dma_fence_array_set_pending_error(array, f->error);
>> +Â Â Â while (array->num_pending--) {
>> +Â Â Â Â Â Â Â struct dma_fence *f = array->fences[array->num_pending];
>> Â -Â Â Â if (atomic_dec_and_test(&array->num_pending))
>> -Â Â Â Â Â Â Â irq_work_queue(&array->work);
>> -Â Â Â else
>> -Â Â Â Â Â Â Â dma_fence_put(&array->base);
>> +Â Â Â Â Â Â Â if (!dma_fence_add_callback(f, &array->callback,
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_array_cb_func))
>> +Â Â Â Â Â Â Â Â Â Â Â return;
>> +
>> +Â Â Â Â Â Â Â dma_fence_array_set_pending_error(array, f->error);
>> +Â Â Â }
>> +
>> +Â Â Â dma_fence_signal(&array->base);
>> +Â Â Â dma_fence_put(&array->base);
>> Â }
>> Â Â static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
>> Â {
>> Â Â Â Â Â struct dma_fence_array *array = to_dma_fence_array(fence);
>> -Â Â Â struct dma_fence_array_cb *cb = array->callbacks;
>> -Â Â Â unsigned i;
>> -
>> -Â Â Â for (i = 0; i < array->num_fences; ++i) {
>> -Â Â Â Â Â Â Â cb[i].array = array;
>> -Â Â Â Â Â Â Â /*
>> -Â Â Â Â Â Â Â Â * As we may report that the fence is signaled before all
>> -Â Â Â Â Â Â Â Â * callbacks are complete, we need to take an additional
>> -Â Â Â Â Â Â Â Â * reference count on the array so that we do not free it too
>> -Â Â Â Â Â Â Â Â * early. The core fence handling will only hold the reference
>> -Â Â Â Â Â Â Â Â * until we signal the array as complete (but that is now
>> -Â Â Â Â Â Â Â Â * insufficient).
>> -Â Â Â Â Â Â Â Â */
>> -Â Â Â Â Â Â Â dma_fence_get(&array->base);
>> -Â Â Â Â Â Â Â if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_array_cb_func)) {
>> -Â Â Â Â Â Â Â Â Â Â Â int error = array->fences[i]->error;
>> -
>> -Â Â Â Â Â Â Â Â Â Â Â dma_fence_array_set_pending_error(array, error);
>> -Â Â Â Â Â Â Â Â Â Â Â dma_fence_put(&array->base);
>> -Â Â Â Â Â Â Â Â Â Â Â if (atomic_dec_and_test(&array->num_pending)) {
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_array_clear_pending_error(array);
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â return false;
>> -Â Â Â Â Â Â Â Â Â Â Â }
>> -Â Â Â Â Â Â Â }
>> -Â Â Â }
>> Â +Â Â Â /*
>> +Â Â Â Â * As we may report that the fence is signaled before all
>> +Â Â Â Â * callbacks are complete, we need to take an additional
>> +Â Â Â Â * reference count on the array so that we do not free it too
>> +Â Â Â Â * early. The core fence handling will only hold the reference
>> +Â Â Â Â * until we signal the array as complete (but that is now
>> +Â Â Â Â * insufficient).
>> +Â Â Â Â */
>> +Â Â Â dma_fence_get(&array->base);
>> +Â Â Â irq_dma_fence_array_work(&array->work);
>> Â Â Â Â Â return true;
>> Â }
>> Â Â static bool dma_fence_array_signaled(struct dma_fence *fence)
>> Â {
>> Â Â Â Â Â struct dma_fence_array *array = to_dma_fence_array(fence);
>> -Â Â Â int num_pending;
>> +Â Â Â int num_pending, error = 0;
>> Â Â Â Â Â unsigned int i;
>> Â Â Â Â Â Â /*
>> -Â Â Â Â * We need to read num_pending before checking the enable_signal bit
>> -Â Â Â Â * to avoid racing with the enable_signaling() implementation, which
>> -Â Â Â Â * might decrement the counter, and cause a partial check.
>> -Â Â Â Â * atomic_read_acquire() pairs with atomic_dec_and_test() in
>> -Â Â Â Â * dma_fence_array_enable_signaling()
>> -Â Â Â Â *
>> -Â Â Â Â * The !--num_pending check is here to account for the any_signaled case
>> -Â Â Â Â * if we race with enable_signaling(), that means the !num_pending check
>> -Â Â Â Â * in the is_signalling_enabled branch might be outdated (num_pending (
>> -Â Â Â Â * might have been decremented), but that's fine. The user will get the
>> -Â Â Â Â * right value when testing again later.
>> +Â Â Â Â * Reading num_pending is just an optimization, it is perfectly
>> +Â Â Â Â * acceptable to have a stale value for it.
>
> Bear with me please. I decided to look at this in the morning and maybe it is too early after all. At first I thought "is this true?". Because, for example:
>
> num_fences = 2
> num_pending = 1
>
> fences = [ signaled, unsignaled ]
>
> The loop below exits and the function returns the array's signaled status.
>
> Then I realised array->num_pending is not the number of unsignaled fences. Instead it seems to be the number of fences on which callbacks have not yet been installed, regardless of their signaled status. Or, from a different angle, the next fence index to install the callback on after the current one signals.
>
> But still the false positive seems possible. After installing the callback on the 2nd fence, num_pending will be 1, so the signaled check — at a point where the first fence has been signaled but the second one still has not — will return true. dma_fence_array_signaled() cannot know, since it doesn't look at the second fence.
Good point, I will rework the handling to take that into account.
Thanks for the review,
Christian.
>
> Regards,
>
> Tvrtko
>
>> Â Â Â Â Â Â */
>> -Â Â Â num_pending = atomic_read_acquire(&array->num_pending);
>> -Â Â Â if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
>> -Â Â Â Â Â Â Â if (num_pending <= 0)
>> -Â Â Â Â Â Â Â Â Â Â Â goto signal;
>> -Â Â Â Â Â Â Â return false;
>> -Â Â Â }
>> +Â Â Â num_pending = READ_ONCE(array->num_pending);
>> +Â Â Â for (i = 0; i < num_pending; ++i) {
>> +Â Â Â Â Â Â Â struct dma_fence *f = array->fences[i];
>> Â -Â Â Â for (i = 0; i < array->num_fences; ++i) {
>> -Â Â Â Â Â Â Â if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
>> -Â Â Â Â Â Â Â Â Â Â Â goto signal;
>> -Â Â Â }
>> -Â Â Â return false;
>> +Â Â Â Â Â Â Â if (!dma_fence_is_signaled(f))
>> +Â Â Â Â Â Â Â Â Â Â Â return false;
>> Â -signal:
>> +Â Â Â Â Â Â Â if (!error)
>> +Â Â Â Â Â Â Â Â Â Â Â error = f->error;
>> +Â Â Â }
>> +Â Â Â dma_fence_array_set_pending_error(array, error);
>> Â Â Â Â Â dma_fence_array_clear_pending_error(array);
>> Â Â Â Â Â return true;
>> Â }
>> @@ -171,15 +145,12 @@ EXPORT_SYMBOL(dma_fence_array_ops);
>> Â Â /**
>> Â Â * dma_fence_array_alloc - Allocate a custom fence array
>> - * @num_fences:Â Â Â Â Â Â Â [in]Â Â Â number of fences to add in the array
>> Â Â *
>> Â Â * Return dma fence array on success, NULL on failure
>> Â Â */
>> -struct dma_fence_array *dma_fence_array_alloc(int num_fences)
>> +struct dma_fence_array *dma_fence_array_alloc(void)
>> Â {
>> -Â Â Â struct dma_fence_array *array;
>> -
>> -Â Â Â return kzalloc_flex(*array, callbacks, num_fences);
>> +Â Â Â return kzalloc_obj(struct dma_fence_array);
>> Â }
>> Â EXPORT_SYMBOL(dma_fence_array_alloc);
>> Â @@ -190,21 +161,22 @@ EXPORT_SYMBOL(dma_fence_array_alloc);
>> Â Â * @fences:Â Â Â Â Â Â Â [in]Â Â Â array containing the fences
>> Â Â * @context:Â Â Â Â Â Â Â [in]Â Â Â fence context to use
>> Â Â * @seqno:Â Â Â Â Â Â Â [in]Â Â Â sequence number to use
>> - * @signal_on_any:Â Â Â [in]Â Â Â signal on any fence in the array
>> Â Â *
>> Â Â * Implementation of @dma_fence_array_create without allocation. Useful to init
>> Â Â * a preallocated dma fence array in the path of reclaim or dma fence signaling.
>> Â Â */
>> Â void dma_fence_array_init(struct dma_fence_array *array,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â int num_fences, struct dma_fence **fences,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â u64 context, unsigned seqno,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â bool signal_on_any)
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â u64 context, unsigned seqno)
>> Â {
>> Â Â Â Â Â static struct lock_class_key dma_fence_array_lock_key;
>> Â Â Â Â Â Â WARN_ON(!num_fences || !fences);
>> Â Â Â Â Â Â array->num_fences = num_fences;
>> +Â Â Â array->num_pending = num_fences;
>> +Â Â Â array->fences = fences;
>> +Â Â Â array->base.error = PENDING_ERROR;
>> Â Â Â Â Â Â dma_fence_init(&array->base, &dma_fence_array_ops, NULL, context,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â seqno);
>> @@ -222,11 +194,6 @@ void dma_fence_array_init(struct dma_fence_array *array,
>> Â Â Â Â Â Â */
>> Â Â Â Â Â lockdep_set_class(&array->base.inline_lock, &dma_fence_array_lock_key);
>> Â -Â Â Â atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
>> -Â Â Â array->fences = fences;
>> -
>> -Â Â Â array->base.error = PENDING_ERROR;
>> -
>> Â Â Â Â Â /*
>> Â Â Â Â Â Â * dma_fence_array objects should never contain any other fence
>> Â Â Â Â Â Â * containers or otherwise we run into recursion and potential kernel
>> @@ -249,7 +216,6 @@ EXPORT_SYMBOL(dma_fence_array_init);
>> Â Â * @fences:Â Â Â Â Â Â Â [in]Â Â Â array containing the fences
>> Â Â * @context:Â Â Â Â Â Â Â [in]Â Â Â fence context to use
>> Â Â * @seqno:Â Â Â Â Â Â Â [in]Â Â Â sequence number to use
>> - * @signal_on_any:Â Â Â [in]Â Â Â signal on any fence in the array
>> Â Â *
>> Â Â * Allocate a dma_fence_array object and initialize the base fence with
>> Â Â * dma_fence_init().
>> @@ -264,17 +230,16 @@ EXPORT_SYMBOL(dma_fence_array_init);
>> Â Â */
>> Â struct dma_fence_array *dma_fence_array_create(int num_fences,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â struct dma_fence **fences,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â u64 context, unsigned seqno,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â bool signal_on_any)
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â u64 context, unsigned seqno)
>> Â {
>> Â Â Â Â Â struct dma_fence_array *array;
>> Â -Â Â Â array = dma_fence_array_alloc(num_fences);
>> +Â Â Â array = dma_fence_array_alloc();
>> Â Â Â Â Â if (!array)
>> Â Â Â Â Â Â Â Â Â return NULL;
>> Â Â Â Â Â Â dma_fence_array_init(array, num_fences, fences,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â context, seqno, signal_on_any);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â context, seqno);
>> Â Â Â Â Â Â return array;
>> Â }
>> diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
>> index 07fe9bf45aea..53bb40e70b27 100644
>> --- a/drivers/dma-buf/dma-fence-unwrap.c
>> +++ b/drivers/dma-buf/dma-fence-unwrap.c
>> @@ -180,8 +180,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
>> Â Â Â Â Â Â if (count > 1) {
>> Â Â Â Â Â Â Â Â Â result = dma_fence_array_create(count, array,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_context_alloc(1),
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, false);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_context_alloc(1), 1);
>> Â Â Â Â Â Â Â Â Â if (!result) {
>> Â Â Â Â Â Â Â Â Â Â Â Â Â for (i = 0; i < count; i++)
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_put(array[i]);
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index ce9e6c04897f..39a92d9f2413 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -648,8 +648,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
>> Â Â Â Â Â }
>> Â Â Â Â Â Â array = dma_fence_array_create(count, fences,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_context_alloc(1),
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, false);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_context_alloc(1), 1);
>> Â Â Â Â Â if (!array) {
>> Â Â Â Â Â Â Â Â Â while (count--)
>> Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_put(fences[count]);
>> diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
>> index 9c74195f47fd..45413666970e 100644
>> --- a/drivers/dma-buf/st-dma-fence-unwrap.c
>> +++ b/drivers/dma-buf/st-dma-fence-unwrap.c
>> @@ -65,7 +65,7 @@ static struct dma_fence *mock_array(unsigned int num_fences, ...)
>> Â Â Â Â Â Â array = dma_fence_array_create(num_fences, fences,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_context_alloc(1),
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, false);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1);
>> Â Â Â Â Â if (!array)
>> Â Â Â Â Â Â Â Â Â goto error_free;
>> Â Â Â Â Â return &array->base;
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> index e7918f896a26..1ac91a46d87f 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> @@ -3203,8 +3203,7 @@ eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
>> Â Â Â Â Â fence_array = dma_fence_array_create(eb->num_batches,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â fences,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â eb->context->parallel.fence_context,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â eb->context->parallel.seqno++,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â false);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â eb->context->parallel.seqno++);
>> Â Â Â Â Â if (!fence_array) {
>> Â Â Â Â Â Â Â Â Â kfree(fences);
>> Â Â Â Â Â Â Â Â Â return ERR_PTR(-ENOMEM);
>> diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
>> index 24d6d9af20d6..37866768d64c 100644
>> --- a/drivers/gpu/drm/xe/xe_sync.c
>> +++ b/drivers/gpu/drm/xe/xe_sync.c
>> @@ -376,7 +376,7 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
>> Â Â Â Â Â Â Â Â Â xe_assert(vm->xe, current_fence == num_fence);
>> Â Â Â Â Â Â Â Â Â cf = dma_fence_array_create(num_fence, fences,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â dma_fence_context_alloc(1),
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, false);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1);
>> Â Â Â Â Â Â Â Â Â if (!cf)
>> Â Â Â Â Â Â Â Â Â Â Â Â Â goto err_out;
>> Â diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index 548b0769b3ef..b916a9d90104 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -3196,7 +3196,7 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
>> Â Â Â Â Â Â Â Â Â goto err_trace;
>> Â Â Â Â Â }
>> Â -Â Â Â cf = dma_fence_array_alloc(n_fence);
>> +Â Â Â cf = dma_fence_array_alloc();
>> Â Â Â Â Â if (!cf) {
>> Â Â Â Â Â Â Â Â Â fence = ERR_PTR(-ENOMEM);
>> Â Â Â Â Â Â Â Â Â goto err_out;
>> @@ -3240,7 +3240,7 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
>> Â Â Â Â Â Â xe_assert(vm->xe, current_fence == n_fence);
>> Â Â Â Â Â dma_fence_array_init(cf, n_fence, fences, dma_fence_context_alloc(1),
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, false);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1);
>> Â Â Â Â Â fence = &cf->base;
>> Â Â Â Â Â Â for_each_tile(tile, vm->xe, id) {
>> diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
>> index 370b3d2bba37..3ee55c0e2fa4 100644
>> --- a/include/linux/dma-fence-array.h
>> +++ b/include/linux/dma-fence-array.h
>> @@ -15,16 +15,6 @@
>> Â #include <linux/dma-fence.h>
>> Â #include <linux/irq_work.h>
>> Â -/**
>> - * struct dma_fence_array_cb - callback helper for fence array
>> - * @cb: fence callback structure for signaling
>> - * @array: reference to the parent fence array object
>> - */
>> -struct dma_fence_array_cb {
>> -Â Â Â struct dma_fence_cb cb;
>> -Â Â Â struct dma_fence_array *array;
>> -};
>> -
>> Â /**
>> Â Â * struct dma_fence_array - fence to represent an array of fences
>> Â Â * @base: fence base class
>> @@ -33,18 +23,17 @@ struct dma_fence_array_cb {
>> Â Â * @num_pending: fences in the array still pending
>> Â Â * @fences: array of the fences
>> Â Â * @work: internal irq_work function
>> - * @callbacks: array of callback helpers
>> + * @callback: callback structure for signaling
>> Â Â */
>> Â struct dma_fence_array {
>> Â Â Â Â Â struct dma_fence base;
>> Â -Â Â Â unsigned num_fences;
>> -Â Â Â atomic_t num_pending;
>> +Â Â Â unsigned int num_fences;
>> +Â Â Â unsigned int num_pending;
>> Â Â Â Â Â struct dma_fence **fences;
>> Â Â Â Â Â Â struct irq_work work;
>> -
>> -Â Â Â struct dma_fence_array_cb callbacks[] __counted_by(num_fences);
>> +Â Â Â struct dma_fence_cb callback;
>> Â };
>> Â Â /**
>> @@ -78,16 +67,13 @@ to_dma_fence_array(struct dma_fence *fence)
>> Â Â Â Â Â for (index = 0, fence = dma_fence_array_first(head); fence;Â Â Â \
>> Â Â Â Â Â Â Â Â Â Â ++(index), fence = dma_fence_array_next(head, index))
>> Â -struct dma_fence_array *dma_fence_array_alloc(int num_fences);
>> +struct dma_fence_array *dma_fence_array_alloc(void);
>> Â void dma_fence_array_init(struct dma_fence_array *array,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â int num_fences, struct dma_fence **fences,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â u64 context, unsigned seqno,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â bool signal_on_any);
>> -
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â u64 context, unsigned seqno);
>> Â struct dma_fence_array *dma_fence_array_create(int num_fences,
>> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â struct dma_fence **fences,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â u64 context, unsigned seqno,
>> -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â bool signal_on_any);
>> +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â u64 context, unsigned seqno);
>> Â Â bool dma_fence_match_context(struct dma_fence *fence, u64 context);
>> Â
>