Consolidate the wrapper functions used to check for dma_fence subclasses into the dma_fence header.
This makes it easier to document them and to check the different requirements for fence containers in the subclasses.
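For illustration only (not part of this patch; example_ctx and example_store_single_fence() are made-up names), a caller-side check using the consolidated helper could look roughly like this:

#include <linux/dma-fence.h>

struct example_ctx {
	struct dma_fence *fence;
};

/* Hypothetical sketch: reject containers where only leaf fences are expected. */
static int example_store_single_fence(struct example_ctx *ctx,
				      struct dma_fence *fence)
{
	/* Containers must be flattened by the caller first. */
	if (dma_fence_is_container(fence))
		return -EINVAL;

	ctx->fence = dma_fence_get(fence);
	return 0;
}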
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 include/linux/dma-fence-array.h | 15 +------------
 include/linux/dma-fence-chain.h |  3 +--
 include/linux/dma-fence.h       | 38 +++++++++++++++++++++++++++++++++
 3 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index 303dd712220f..fec374f69e12 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -45,19 +45,6 @@ struct dma_fence_array { struct irq_work work; };
-extern const struct dma_fence_ops dma_fence_array_ops; - -/** - * dma_fence_is_array - check if a fence is from the array subsclass - * @fence: fence to test - * - * Return true if it is a dma_fence_array and false otherwise. - */ -static inline bool dma_fence_is_array(struct dma_fence *fence) -{ - return fence->ops == &dma_fence_array_ops; -} - /** * to_dma_fence_array - cast a fence to a dma_fence_array * @fence: fence to cast to a dma_fence_array @@ -68,7 +55,7 @@ static inline bool dma_fence_is_array(struct dma_fence *fence) static inline struct dma_fence_array * to_dma_fence_array(struct dma_fence *fence) { - if (fence->ops != &dma_fence_array_ops) + if (!fence || !dma_fence_is_array(fence)) return NULL;
return container_of(fence, struct dma_fence_array, base); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index 54fe3443fd2c..ee906b659694 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -49,7 +49,6 @@ struct dma_fence_chain { spinlock_t lock; };
-extern const struct dma_fence_ops dma_fence_chain_ops;
/** * to_dma_fence_chain - cast a fence to a dma_fence_chain @@ -61,7 +60,7 @@ extern const struct dma_fence_ops dma_fence_chain_ops; static inline struct dma_fence_chain * to_dma_fence_chain(struct dma_fence *fence) { - if (!fence || fence->ops != &dma_fence_chain_ops) + if (!fence || !dma_fence_is_chain(fence)) return NULL;
return container_of(fence, struct dma_fence_chain, base); diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 1ea691753bd3..775cdc0b4f24 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -587,4 +587,42 @@ struct dma_fence *dma_fence_get_stub(void); struct dma_fence *dma_fence_allocate_private_stub(void); u64 dma_fence_context_alloc(unsigned num);
+extern const struct dma_fence_ops dma_fence_array_ops; +extern const struct dma_fence_ops dma_fence_chain_ops; + +/** + * dma_fence_is_array - check if a fence is from the array subclass + * @fence: the fence to test + * + * Return true if it is a dma_fence_array and false otherwise. + */ +static inline bool dma_fence_is_array(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_array_ops; +} + +/** + * dma_fence_is_chain - check if a fence is from the chain subclass + * @fence: the fence to test + * + * Return true if it is a dma_fence_chain and false otherwise. + */ +static inline bool dma_fence_is_chain(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_chain_ops; +} + +/** + * dma_fence_is_container - check if a fence is a container for other fences + * @fence: the fence to test + * + * Return true if this fence is a container for other fences, false otherwise. + * This is important since we can't build up large fence structure or otherwise + * we run into recursion during operation on those fences. + */ +static inline bool dma_fence_is_container(struct dma_fence *fence) +{ + return dma_fence_is_array(fence) || dma_fence_is_chain(fence); +} + #endif /* __LINUX_DMA_FENCE_H */
Nesting another dma_fence container inside a dma_fence_array is not allowed, since otherwise we can run into recursion.
Warn about that when we create a dma_fence_array.
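For illustration (a sketch assuming at most one level of nesting and inputs that are either plain fences or dma_fence_array objects; example_flatten() is a made-up helper, not part of this patch), a caller could flatten its input before calling dma_fence_array_create() roughly like this:

#include <linux/slab.h>
#include <linux/dma-fence-array.h>

/* Hypothetical caller-side flattening, one level of nesting only. */
static struct dma_fence **example_flatten(struct dma_fence **fences,
					  unsigned int count,
					  unsigned int *out_count)
{
	struct dma_fence **flat;
	unsigned int i, j, n = 0;

	for (i = 0; i < count; i++) {
		struct dma_fence_array *array = to_dma_fence_array(fences[i]);

		n += array ? array->num_fences : 1;
	}

	flat = kmalloc_array(n, sizeof(*flat), GFP_KERNEL);
	if (!flat)
		return NULL;

	*out_count = 0;
	for (i = 0; i < count; i++) {
		struct dma_fence_array *array = to_dma_fence_array(fences[i]);

		if (!array) {
			flat[(*out_count)++] = dma_fence_get(fences[i]);
			continue;
		}

		/* Take references on the leaves, not on the container. */
		for (j = 0; j < array->num_fences; j++)
			flat[(*out_count)++] = dma_fence_get(array->fences[j]);
	}

	return flat;
}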
v2: fix comment style and typo in the warning pointed out by Thomas
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/dma-buf/dma-fence-array.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 3e07f961e2f3..cb1bacb5a42b 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -176,6 +176,20 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
array->base.error = PENDING_ERROR;
+ /* + * dma_fence_array objects should never contain any other fence + * containers or otherwise we run into recursion and potential kernel + * stack overflow on operations on the dma_fence_array. + * + * The correct way of handling this is to flatten out the array by the + * caller instead. + * + * Enforce this here by checking that we don't create a dma_fence_array + * with any container inside. + */ + while (num_fences--) + WARN_ON(dma_fence_is_container(fences[num_fences])); + return array; } EXPORT_SYMBOL(dma_fence_array_create);
Chaining of dma_fence_chain objects is only allowed through the prev fence and not through the contained fence.
Warn about that when we create a dma_fence_chain.
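For illustration only (example_append() is a made-up name, error handling trimmed), the allowed way to extend a timeline is to link new chain nodes through the prev fence rather than nesting chains inside the contained fence:

#include <linux/dma-fence-chain.h>

/* Hypothetical sketch: append @fence to the timeline ending at @prev. */
static struct dma_fence *example_append(struct dma_fence *prev,
					struct dma_fence *fence, u64 seqno)
{
	struct dma_fence_chain *chain = dma_fence_chain_alloc();

	if (!chain)
		return NULL;

	/* dma_fence_chain_init() takes over the references to prev and fence. */
	dma_fence_chain_init(chain, dma_fence_get(prev), dma_fence_get(fence),
			     seqno);
	return &chain->base;
}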
v2: fix comment style
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/dma-buf/dma-fence-chain.c | 9 +++++++++
 1 file changed, 9 insertions(+)
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 1b4cb3e5cec9..084c6927b735 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -254,5 +254,14 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock, context, seqno); + + /* + * Chaining dma_fence_chain container together is only allowed through + * the prev fence and not through the contained fence. + * + * The correct way of handling this is to flatten out the fence + * structure into a dma_fence_array by the caller instead. + */ + WARN_ON(dma_fence_is_chain(fence)); } EXPORT_SYMBOL(dma_fence_chain_init);
Drivers should not add containers as shared fences to the dma_resv object; instead, each fence should be added individually.
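For illustration (example_add_shared() is a made-up helper; the sketch assumes the reservation lock is held and enough shared slots were reserved beforehand), a caller would add array members one by one instead of the container:

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>

/* Hypothetical sketch: add each leaf fence instead of the container. */
static void example_add_shared(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned int i;

	if (!array) {
		dma_resv_add_shared_fence(obj, fence);
		return;
	}

	for (i = 0; i < array->num_fences; i++)
		dma_resv_add_shared_fence(obj, array->fences[i]);
}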
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/dma-buf/dma-resv.c | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 6dd9a40b55d4..e8a0c1d51da2 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -256,6 +256,11 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
dma_resv_assert_held(obj);
+ /* Drivers should not add containers here, instead add each fence + * individually. + */ + WARN_ON(dma_fence_is_container(fence)); + fobj = dma_resv_shared_list(obj); count = fobj->shared_count;
From: Christian König <ckoenig.leichtzumerken@gmail.com>
Add a helper to iterate over all fences in a dma_fence_array object.
v2 (Jason Ekstrand):
- Return NULL from dma_fence_array_first if head == NULL. This matches the iterator behavior of dma_fence_chain_for_each in that it iterates zero times if head == NULL.
- Return NULL from dma_fence_array_next if index >= array->num_fences.
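A usage sketch (example_all_signaled() is a made-up name; the caller is assumed to hold a reference on @head):

#include <linux/dma-fence-array.h>

/* Hypothetical sketch: check whether all fences behind @head have signaled. */
static bool example_all_signaled(struct dma_fence *head)
{
	struct dma_fence *fence;
	unsigned int index;

	dma_fence_array_for_each(fence, index, head) {
		if (!dma_fence_is_signaled(fence))
			return false;
	}

	return true;
}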
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Christian König <christian.koenig@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/dma-buf/dma-fence-array.c | 27 +++++++++++++++++++++++++++
 include/linux/dma-fence-array.h   | 17 +++++++++++++++++
 2 files changed, 44 insertions(+)
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index cb1bacb5a42b..52b85d292383 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -219,3 +219,30 @@ bool dma_fence_match_context(struct dma_fence *fence, u64 context) return true; } EXPORT_SYMBOL(dma_fence_match_context); + +struct dma_fence *dma_fence_array_first(struct dma_fence *head) +{ + struct dma_fence_array *array; + + if (!head) + return NULL; + + array = to_dma_fence_array(head); + if (!array) + return head; + + return array->fences[0]; +} +EXPORT_SYMBOL(dma_fence_array_first); + +struct dma_fence *dma_fence_array_next(struct dma_fence *head, + unsigned int index) +{ + struct dma_fence_array *array = to_dma_fence_array(head); + + if (!array || index >= array->num_fences) + return NULL; + + return array->fences[index]; +} +EXPORT_SYMBOL(dma_fence_array_next); diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index fec374f69e12..e34dcb0bb462 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -61,6 +61,19 @@ to_dma_fence_array(struct dma_fence *fence) return container_of(fence, struct dma_fence_array, base); }
+/** + * dma_fence_array_for_each - iterate over all fences in array + * @fence: current fence + * @index: index into the array + * @head: potential dma_fence_array object + * + * Test if @head is a dma_fence_array object and if yes iterate over all fences + * in the array. If not just iterate over the fence in @head itself. + */ +#define dma_fence_array_for_each(fence, index, head) \ + for (index = 0, fence = dma_fence_array_first(head); fence; \ + ++(index), fence = dma_fence_array_next(head, index)) + struct dma_fence_array *dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context, unsigned seqno, @@ -68,4 +81,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+struct dma_fence *dma_fence_array_first(struct dma_fence *head); +struct dma_fence *dma_fence_array_next(struct dma_fence *head, + unsigned int index); + #endif /* __LINUX_DMA_FENCE_ARRAY_H */
It's a recurring pattern that we need to extract the fence from a dma_fence_chain object. Add a helper for this.
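A usage sketch (example_first_unsignaled() is a made-up name) showing the helper together with dma_fence_chain_for_each():

#include <linux/dma-fence-chain.h>

/* Hypothetical sketch: find the first unsignaled fence behind a chain. */
static struct dma_fence *example_first_unsignaled(struct dma_fence *head)
{
	struct dma_fence *fence;

	dma_fence_chain_for_each(fence, head) {
		struct dma_fence *f = dma_fence_chain_contained(fence);

		/* The iterator's reference is handed over to the caller here. */
		if (!dma_fence_is_signaled(f))
			return fence;
	}

	return NULL;
}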
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-fence-chain.c |  6 ++----
 include/linux/dma-fence-chain.h   | 15 +++++++++++++++
 2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 084c6927b735..06f8ef97c6e8 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -148,8 +148,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
dma_fence_get(&head->base); dma_fence_chain_for_each(fence, &head->base) { - struct dma_fence_chain *chain = to_dma_fence_chain(fence); - struct dma_fence *f = chain ? chain->fence : fence; + struct dma_fence *f = dma_fence_chain_contained(fence);
dma_fence_get(f); if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) { @@ -165,8 +164,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) static bool dma_fence_chain_signaled(struct dma_fence *fence) { dma_fence_chain_for_each(fence, fence) { - struct dma_fence_chain *chain = to_dma_fence_chain(fence); - struct dma_fence *f = chain ? chain->fence : fence; + struct dma_fence *f = dma_fence_chain_contained(fence);
if (!dma_fence_is_signaled(f)) { dma_fence_put(fence); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index ee906b659694..10d51bcdf7b7 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -66,6 +66,21 @@ to_dma_fence_chain(struct dma_fence *fence) return container_of(fence, struct dma_fence_chain, base); }
+/** + * dma_fence_chain_contained - return the contained fence + * @fence: the fence to test + * + * If the fence is a dma_fence_chain the function returns the fence contained + * inside the chain object, otherwise it returns the fence itself. + */ +static inline struct dma_fence * +dma_fence_chain_contained(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + + return chain ? chain->fence : fence; +} + /** * dma_fence_chain_alloc *
Use the new dma_fence_chain_contained() helper instead of manually extracting the contained fence.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index f7d8487799b2..40e06745fae9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -261,10 +261,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
dma_resv_for_each_fence(&cursor, resv, true, f) { dma_fence_chain_for_each(f, f) { - struct dma_fence_chain *chain = to_dma_fence_chain(f); + struct dma_fence *tmp = dma_fence_chain_contained(f);
- if (amdgpu_sync_test_fence(adev, mode, owner, chain ? - chain->fence : f)) { + if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) { r = amdgpu_sync_fence(sync, f); dma_fence_put(f); if (r)
Instead of manually messing with the data structures, use the iterators and extraction helpers provided by the framework.
This whole handling should essentially go away when boost handling is implemented in the core dma-buf framework.
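For illustration (example_visit_all() is a made-up name), the combined pattern the code below switches to looks roughly like this: walk the chain and, for each link, walk the array potentially contained in it:

/* Hypothetical sketch: visit every leaf fence behind a chain of arrays. */
static void example_visit_all(struct dma_fence *fence,
			      void (*visit)(struct dma_fence *f))
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence *array = dma_fence_chain_contained(fence);
		struct dma_fence *element;
		unsigned int i;

		dma_fence_array_for_each(element, i, array)
			visit(element);
	}
}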
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_busy.c | 40 ++++++--------------
 drivers/gpu/drm/i915/gem/i915_gem_wait.c | 31 ++++------------
 drivers/gpu/drm/i915/i915_request.c      | 47 +++++++-----------------
 3 files changed, 33 insertions(+), 85 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 470fdfd61a0f..4ea7a5b26405 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -41,6 +41,8 @@ static __always_inline unsigned int __busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id)) { const struct i915_request *rq; + struct dma_fence *current_fence; + unsigned int i;
/* * We have to check the current hw status of the fence as the uABI @@ -58,40 +60,22 @@ __busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id)) * * 2. A single i915 request. */ - if (dma_fence_is_array(fence)) { - struct dma_fence_array *array = to_dma_fence_array(fence); - struct dma_fence **child = array->fences; - unsigned int nchild = array->num_fences; - - do { - struct dma_fence *current_fence = *child++; - - /* Not an i915 fence, can't be busy per above */ - if (!dma_fence_is_i915(current_fence) || - !test_bit(I915_FENCE_FLAG_COMPOSITE, - ¤t_fence->flags)) { - return 0; - } - - rq = to_request(current_fence); - if (!i915_request_completed(rq)) - return flag(rq->engine->uabi_class); - } while (--nchild); - - /* All requests in array complete, not busy */ - return 0; - } else { - if (!dma_fence_is_i915(fence)) - return 0; + dma_fence_array_for_each(current_fence, i, fence) {
- rq = to_request(fence); - if (i915_request_completed(rq)) + /* Not an i915 fence, can't be busy per above */ + if (!dma_fence_is_i915(current_fence) || + !test_bit(I915_FENCE_FLAG_COMPOSITE, ¤t_fence->flags)) return 0;
+ rq = to_request(current_fence); /* Beware type-expansion follies! */ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); - return flag(rq->engine->uabi_class); + if (!i915_request_completed(rq)) + return flag(rq->engine->uabi_class); } + + /* All requests in array complete, not busy */ + return 0; }
static __always_inline unsigned int diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index dab3d30c09a0..13f37b6aedf7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -72,11 +72,6 @@ static void fence_set_priority(struct dma_fence *fence, rcu_read_unlock(); }
-static inline bool __dma_fence_is_chain(const struct dma_fence *fence) -{ - return fence->ops == &dma_fence_chain_ops; -} - void i915_gem_fence_wait_priority(struct dma_fence *fence, const struct i915_sched_attr *attr) { @@ -85,25 +80,15 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
local_bh_disable();
- /* Recurse once into a fence-array */ - if (dma_fence_is_array(fence)) { - struct dma_fence_array *array = to_dma_fence_array(fence); - int i; + /* The chain is ordered; if we boost the last, we boost all */ + dma_fence_chain_for_each(fence, fence) { + struct dma_fence *array, *element; + unsigned int i;
- for (i = 0; i < array->num_fences; i++) - fence_set_priority(array->fences[i], attr); - } else if (__dma_fence_is_chain(fence)) { - struct dma_fence *iter; - - /* The chain is ordered; if we boost the last, we boost all */ - dma_fence_chain_for_each(iter, fence) { - fence_set_priority(to_dma_fence_chain(iter)->fence, - attr); - break; - } - dma_fence_put(iter); - } else { - fence_set_priority(fence, attr); + /* Recurse once into a fence-array */ + array = dma_fence_chain_contained(fence); + dma_fence_array_for_each(element, i, array) + fence_set_priority(element, attr); }
local_bh_enable(); /* kick the tasklets if queues were reprioritised */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index ad175d662b4e..2d8add5b5da9 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1345,18 +1345,15 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) struct dma_fence *iter; int err = 0;
- if (!to_dma_fence_chain(fence)) - return __i915_request_await_external(rq, fence); - dma_fence_chain_for_each(iter, fence) { - struct dma_fence_chain *chain = to_dma_fence_chain(iter); + struct dma_fence *tmp = dma_fence_chain_contained(iter);
- if (!dma_fence_is_i915(chain->fence)) { + if (!dma_fence_is_i915(tmp)) { err = __i915_request_await_external(rq, iter); break; }
- err = i915_request_await_dma_fence(rq, chain->fence); + err = i915_request_await_dma_fence(rq, tmp); if (err < 0) break; } @@ -1386,24 +1383,14 @@ static bool is_same_parallel_context(struct i915_request *to,
int i915_request_await_execution(struct i915_request *rq, - struct dma_fence *fence) + struct dma_fence *array) { - struct dma_fence **child = &fence; - unsigned int nchild = 1; + struct dma_fence *fence; + unsigned int i; int ret;
- if (dma_fence_is_array(fence)) { - struct dma_fence_array *array = to_dma_fence_array(fence); - - /* XXX Error for signal-on-any fence arrays */ + dma_fence_array_for_each(fence, i, array) {
- child = array->fences; - nchild = array->num_fences; - GEM_BUG_ON(!nchild); - } - - do { - fence = *child++; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) continue;
@@ -1425,7 +1412,7 @@ i915_request_await_execution(struct i915_request *rq, } if (ret < 0) return ret; - } while (--nchild); + }
return 0; } @@ -1482,10 +1469,10 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) }
int -i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) +i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *array) { - struct dma_fence **child = &fence; - unsigned int nchild = 1; + struct dma_fence *fence; + unsigned int i; int ret;
/* @@ -1496,16 +1483,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) * amdgpu and we should not see any incoming fence-array from * sync-file being in signal-on-any mode. */ - if (dma_fence_is_array(fence)) { - struct dma_fence_array *array = to_dma_fence_array(fence); - - child = array->fences; - nchild = array->num_fences; - GEM_BUG_ON(!nchild); - } + dma_fence_array_for_each(fence, i, array) {
- do { - fence = *child++; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) continue;
@@ -1537,7 +1516,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) if (fence->context) intel_timeline_sync_set(i915_request_timeline(rq), fence); - } while (--nchild); + }
return 0; }
Greetings,
FYI, we noticed the following commit (built with gcc-9):
commit: 26b145b982ff9178951aab5849a2e2b716527865 ("[PATCH 8/9] drm/i915: use dma_fence extractor functions")
url: https://github.com/0day-ci/linux/commits/Christian-K-nig/dma-buf-consolidate...
base: git://anongit.freedesktop.org/drm/drm drm-next
patch link: https://lore.kernel.org/linux-media/20220124074046.4865-8-christian.koenig@a...
in testcase: igt
version: igt-x86_64-0d559158-1_20220115
with following parameters:
	group: group-15
	ucode: 0xc2
on test machine: 20 threads, 1 socket, Comet Lake with 16G memory
caused below changes (please refer to attached dmesg/kmsg for entire log/backtrace):
If you fix the issue, kindly add following tag
Reported-by: kernel test robot <oliver.sang@intel.com>
[ 49.300827][ T504] 2022-01-28 04:51:06 build/tests/gem_exec_balancer --run-subtest busy
[ 49.330242][ T1415] Console: switching to colour dummy device 80x25
[ 49.336613][ T1415] [IGT] gem_exec_balancer: executing
[ 49.344946][ T504] IGT-Version: 1.26-gae2eb9e1 (x86_64) (Linux: 5.16.0-rc5-01357-g26b145b982ff x86_64)
[ 49.345071][ T1415] [IGT] gem_exec_balancer: starting subtest busy
[ 49.363800][ T504] Starting subtest: busy
[ 49.383443][ T504] (gem_exec_balancer:1415) CRITICAL: Test assertion failure function busy, file ../tests/i915/gem_exec_balancer.c:1608:
[ 49.399169][ T504] (gem_exec_balancer:1415) CRITICAL: Failed assertion: busy.busy == 1u << (class + 16)
[ 49.411607][ T504] (gem_exec_balancer:1415) CRITICAL: error: 0 != 0x10000
[ 49.421006][ T504] Subtest busy failed.
[ 49.427399][ T504] **** DEBUG ****
[ 49.434150][ T504] (gem_exec_balancer:1415) igt_dummyload-DEBUG: Test requirement passed: nengine
[ 49.446379][ T504] (gem_exec_balancer:1415) igt_dummyload-DEBUG: Test requirement passed: nengine
[ 49.459151][ T504] (gem_exec_balancer:1415) CRITICAL: Test assertion failure function busy, file ../tests/i915/gem_exec_balancer.c:1608:
[ 49.474854][ T504] (gem_exec_balancer:1415) CRITICAL: Failed assertion: busy.busy == 1u << (class + 16)
[ 49.487273][ T504] (gem_exec_balancer:1415) CRITICAL: error: 0 != 0x10000
[ 49.497084][ T504] (gem_exec_balancer:1415) igt_core-INFO: Stack trace:
[ 49.507226][ T504] (gem_exec_balancer:1415) igt_core-INFO: #0 ../lib/igt_core.c:1752 __igt_fail_assert()
[ 49.520808][ T504] (gem_exec_balancer:1415) igt_core-INFO: #1 ../tests/i915/gem_exec_balancer.c:1607 __igt_unique____real_main3276()
[ 49.536465][ T504] (gem_exec_balancer:1415) igt_core-INFO: #2 ../tests/i915/gem_exec_balancer.c:3276 main()
[ 49.549683][ T504] (gem_exec_balancer:1415) igt_core-INFO: #3 [__libc_start_main+0xeb]
[ 49.560901][ T504] (gem_exec_balancer:1415) igt_core-INFO: #4 [_start+0x2a]
[ 49.570571][ T504] **** END ****
To reproduce:
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
sudo bin/lkp install job.yaml           # job file is attached in this email
bin/lkp split-job --compatible job.yaml # generate the yaml file for lkp run
sudo bin/lkp run generated-yaml-file
# if come across any failure that blocks the test,
# please remove ~/.lkp and /lkp dir to run from a clean state.
---
0DAY/LKP+ Test Infrastructure                   Open Source Technology Center
https://lists.01.org/hyperkitty/list/lkp@lists.01.org       Intel Corporation
Thanks,
Oliver Sang
Decomposing fence containers doesn't seem to make any sense here.
So just remove the function entirely and call dma_fence_wait() directly.
Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
Cc: Zack Rusin <zackr@vmware.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   | 46 -------------------------
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.h   |  3 --
 3 files changed, 1 insertion(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 44ca23b0ea4e..0ff28f0e3eb4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -4500,7 +4500,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, goto mksstats_out; }
- ret = vmw_wait_dma_fence(dev_priv->fman, in_fence); + ret = dma_fence_wait(in_fence, true); if (ret) goto out; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index c60d395f9e2e..430f83a1847c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -621,52 +621,6 @@ int vmw_user_fence_create(struct drm_file *file_priv, return ret; }
- -/** - * vmw_wait_dma_fence - Wait for a dma fence - * - * @fman: pointer to a fence manager - * @fence: DMA fence to wait on - * - * This function handles the case when the fence is actually a fence - * array. If that's the case, it'll wait on each of the child fence - */ -int vmw_wait_dma_fence(struct vmw_fence_manager *fman, - struct dma_fence *fence) -{ - struct dma_fence_array *fence_array; - int ret = 0; - int i; - - - if (dma_fence_is_signaled(fence)) - return 0; - - if (!dma_fence_is_array(fence)) - return dma_fence_wait(fence, true); - - /* From i915: Note that if the fence-array was created in - * signal-on-any mode, we should *not* decompose it into its individual - * fences. However, we don't currently store which mode the fence-array - * is operating in. Fortunately, the only user of signal-on-any is - * private to amdgpu and we should not see any incoming fence-array - * from sync-file being in signal-on-any mode. - */ - - fence_array = to_dma_fence_array(fence); - for (i = 0; i < fence_array->num_fences; i++) { - struct dma_fence *child = fence_array->fences[i]; - - ret = dma_fence_wait(child, true); - - if (ret < 0) - return ret; - } - - return 0; -} - - /* * vmw_fence_fifo_down - signal all unsignaled fence objects. */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 079ab4f3ba51..a7eee579c76a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h @@ -104,9 +104,6 @@ extern int vmw_user_fence_create(struct drm_file *file_priv, struct vmw_fence_obj **p_fence, uint32_t *p_handle);
-extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman, - struct dma_fence *fence); - extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);