Abstract the complexity of iterating over all the fences in a dma_resv object.
The new loop handles the whole RCU and retry dance and returns only fences where we can be sure we grabbed the right one.
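A typical user of the new iterator looks roughly like this (only a sketch, the resv pointer and the loop body are made up):

	struct dma_resv_cursor cursor;
	struct dma_fence *fence;

	rcu_read_lock();
	dma_resv_for_each_fence_unlocked(resv, &cursor, true, fence) {
		if (cursor.is_first) {
			/* the walk (re)started, e.g. because of a concurrent
			 * modification, so drop any state collected so far */
		}
		/* fence was unsignaled when sampled and carries a local
		 * reference; the macro drops it before fetching the next
		 * fence, so breaking out means the caller owns it */
	}
	rcu_read_unlock();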
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 63 ++++++++++++++++++++++++++++++++++++++
 include/linux/dma-resv.h   | 36 ++++++++++++++++++++++
 2 files changed, 99 insertions(+)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 84fbe60629e3..213a9b7251ca 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -323,6 +323,69 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence);
+/** + * dma_resv_walk_unlocked - walk over fences in a dma_resv obj + * @obj: the dma_resv object + * @cursor: cursor to record the current position + * @all_fences: true returns also the shared fences + * @first: if we should start over + * + * Return all the fences in the dma_resv object which are not yet signaled. + * The returned fence has an extra local reference so will stay alive. + * If a concurrent modify is detected the whole iterator is started over again. + */ +struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj, + struct dma_resv_cursor *cursor, + bool all_fences, bool first) +{ + struct dma_fence *fence = NULL; + + do { + /* Drop the reference from the previous round */ + dma_fence_put(fence); + + cursor->is_first = first; + if (first) { + cursor->seq = read_seqcount_begin(&obj->seq); + cursor->index = -1; + cursor->fences = dma_resv_shared_list(obj); + cursor->is_exclusive = true; + + fence = dma_resv_excl_fence(obj); + if (fence && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags)) + fence = NULL; + } else { + fence = NULL; + } + + if (fence) { + fence = dma_fence_get_rcu(fence); + } else if (all_fences && cursor->fences) { + struct dma_resv_list *fences = cursor->fences; + + cursor->is_exclusive = false; + while (++cursor->index < fences->shared_count) { + fence = rcu_dereference(fences->shared[ + cursor->index]); + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags)) + break; + } + if (cursor->index < fences->shared_count) + fence = dma_fence_get_rcu(fence); + else + fence = NULL; + } + + /* For the eventually next round */ + first = true; + } while (read_seqcount_retry(&obj->seq, cursor->seq)); + + return fence; +} +EXPORT_SYMBOL_GPL(dma_resv_walk_unlocked); + /** * dma_resv_copy_fences - Copy all fences from src to dst. * @dst: the destination reservation object diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 9100dd3dc21f..f5b91c292ee0 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -149,6 +149,39 @@ struct dma_resv { struct dma_resv_list __rcu *fence; };
+/** + * struct dma_resv_cursor - current position into the dma_resv fences + * @seq: sequence number to check + * @index: index into the shared fences + * @shared: the shared fences + * @is_first: true if this is the first returned fence + * @is_exclusive: if the current fence is the exclusive one + */ +struct dma_resv_cursor { + unsigned int seq; + unsigned int index; + struct dma_resv_list *fences; + bool is_first; + bool is_exclusive; +}; + +/** + * dma_resv_for_each_fence_unlocked - fence iterator + * @obj: a dma_resv object pointer + * @cursor: a struct dma_resv_cursor pointer + * @all_fences: true if all fences should be returned + * @fence: the current fence + * + * Iterate over the fences in a struct dma_resv object without holding the + * dma_resv::lock. The RCU read side lock must be hold when using this, but can + * be dropped and re-taken as necessary inside the loop. @all_fences controls + * if the shared fences are returned as well. + */ +#define dma_resv_for_each_fence_unlocked(obj, cursor, all_fences, fence) \ + for (fence = dma_resv_walk_unlocked(obj, cursor, all_fences, true); \ + fence; dma_fence_put(fence), \ + fence = dma_resv_walk_unlocked(obj, cursor, all_fences, false)) + #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
@@ -366,6 +399,9 @@ void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); +struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj, + struct dma_resv_cursor *cursor, + bool first, bool all_fences); int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, unsigned *pshared_count, struct dma_fence ***pshared); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
A simpler version of the iterator to be used when the dma_resv object is locked.
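With the object locked a typical walk then shrinks to something like this sketch (resv and the loop body are made up):

	struct dma_resv_cursor cursor;
	struct dma_fence *fence;

	dma_resv_assert_held(resv);
	dma_resv_for_each_fence(resv, &cursor, true, fence) {
		/* no extra reference is taken here, the held dma_resv
		 * lock keeps the fences alive during the walk */
	}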
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 38 ++++++++++++++++++++++++++++++++++++++
 include/linux/dma-resv.h   | 18 ++++++++++++++++++
 2 files changed, 56 insertions(+)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 213a9b7251ca..8cbccaae169d 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -323,6 +323,44 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence);
+/** + * dma_resv_walk - walk over fences in a dma_resv obj + * @obj: the dma_resv object + * @cursor: cursor to record the current position + * @all_fences: true returns also the shared fences + * @first: if we should start over + * + * Return all the fences in the dma_resv object while holding the + * dma_resv::lock. + */ +struct dma_fence *dma_resv_walk(struct dma_resv *obj, + struct dma_resv_cursor *cursor, + bool all_fences, bool first) +{ + dma_resv_assert_held(obj); + + cursor->is_first = first; + if (first) { + struct dma_fence *fence; + + cursor->index = -1; + cursor->fences = dma_resv_shared_list(obj); + cursor->is_exclusive = true; + + fence = dma_resv_excl_fence(obj); + if (fence) + return fence; + } + + if (!all_fences || !cursor->fences || + ++cursor->index >= cursor->fences->shared_count) + return NULL; + + return rcu_dereference_protected(cursor->fences->shared[cursor->index], + dma_resv_held(obj)); +} +EXPORT_SYMBOL_GPL(dma_resv_walk); + /** * dma_resv_walk_unlocked - walk over fences in a dma_resv obj * @obj: the dma_resv object diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index f5b91c292ee0..6f9bb7e4c538 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -165,6 +165,21 @@ struct dma_resv_cursor { bool is_exclusive; };
+/** + * dma_resv_for_each_fence - fence iterator + * @obj: a dma_resv object pointer + * @cursor: a struct dma_resv_cursor pointer + * @all_fences: true if all fences should be returned + * @fence: the current fence + * + * Iterate over the fences in a struct dma_resv object while holding the + * dma_resv::lock. @all_fences controls if the shared fences are returned as + * well. + */ +#define dma_resv_for_each_fence(obj, cursor, all_fences, fence) \ + for (fence = dma_resv_walk(obj, cursor, all_fences, true); fence; \ + fence = dma_resv_walk(obj, cursor, all_fences, false)) + /** * dma_resv_for_each_fence_unlocked - fence iterator * @obj: a dma_resv object pointer @@ -399,6 +414,9 @@ void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); +struct dma_fence *dma_resv_walk(struct dma_resv *obj, + struct dma_resv_cursor *cursor, + bool first, bool all_fences); struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj, struct dma_resv_cursor *cursor, bool first, bool all_fences);
This makes the function much simpler since the complex retry logic is now handled elsewhere.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 81 +++++++++++++-----------------------
 1 file changed, 32 insertions(+), 49 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 8cbccaae169d..9a9c0bba772b 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -433,74 +433,57 @@ EXPORT_SYMBOL_GPL(dma_resv_walk_unlocked); */ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) { - struct dma_resv_list *src_list, *dst_list; - struct dma_fence *old, *new; - unsigned int i; + struct dma_resv_cursor cursor; + struct dma_resv_list *list; + struct dma_fence *f, *excl;
dma_resv_assert_held(dst);
- rcu_read_lock(); - src_list = dma_resv_shared_list(src); + list = NULL; + excl = NULL;
-retry: - if (src_list) { - unsigned int shared_count = src_list->shared_count; + rcu_read_lock(); + dma_resv_for_each_fence_unlocked(dst, &cursor, true, f) {
- rcu_read_unlock(); + if (cursor.is_first) { + dma_resv_list_free(list); + dma_fence_put(excl);
- dst_list = dma_resv_list_alloc(shared_count); - if (!dst_list) - return -ENOMEM; + if (cursor.fences) { + unsigned int cnt = cursor.fences->shared_count;
- rcu_read_lock(); - src_list = dma_resv_shared_list(src); - if (!src_list || src_list->shared_count > shared_count) { - kfree(dst_list); - goto retry; - } + rcu_read_unlock(); + list = dma_resv_list_alloc(cnt); + if (!list) + return -ENOMEM;
- dst_list->shared_count = 0; - for (i = 0; i < src_list->shared_count; ++i) { - struct dma_fence __rcu **dst; - struct dma_fence *fence; + list->shared_count = 0; + rcu_read_lock();
- fence = rcu_dereference(src_list->shared[i]); - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &fence->flags)) - continue; - - if (!dma_fence_get_rcu(fence)) { - dma_resv_list_free(dst_list); - src_list = dma_resv_shared_list(src); - goto retry; + } else { + list = NULL; } + excl = NULL; + }
- if (dma_fence_is_signaled(fence)) { - dma_fence_put(fence); - continue; - } + if (cursor.is_exclusive) + excl = f; + else + RCU_INIT_POINTER(list->shared[list->shared_count++], f);
- dst = &dst_list->shared[dst_list->shared_count++]; - rcu_assign_pointer(*dst, fence); - } - } else { - dst_list = NULL; + /* Don't drop the reference */ + f = NULL; }
- new = dma_fence_get_rcu_safe(&src->fence_excl); rcu_read_unlock();
- src_list = dma_resv_shared_list(dst); - old = dma_resv_excl_fence(dst); - write_seqcount_begin(&dst->seq); - /* write_seqcount_begin provides the necessary memory barrier */ - RCU_INIT_POINTER(dst->fence_excl, new); - RCU_INIT_POINTER(dst->fence, dst_list); + excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst)); + list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst)); write_seqcount_end(&dst->seq);
- dma_resv_list_free(src_list); - dma_fence_put(old); + dma_resv_list_free(list); + dma_fence_put(excl);
return 0; }
This makes the function much simpler since the complex retry logic is now handled elsewhere.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 110 +++++++++++++------------------------
 1 file changed, 37 insertions(+), 73 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 9a9c0bba772b..2dfb04e6a62f 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -493,99 +493,63 @@ EXPORT_SYMBOL(dma_resv_copy_fences); * dma_resv_get_fences - Get an object's shared and exclusive * fences without update side lock held * @obj: the reservation object - * @pfence_excl: the returned exclusive fence (or NULL) - * @pshared_count: the number of shared fences returned - * @pshared: the array of shared fence ptrs returned (array is krealloc'd to + * @fence_excl: the returned exclusive fence (or NULL) + * @shared_count: the number of shared fences returned + * @shared: the array of shared fence ptrs returned (array is krealloc'd to * the required size, and must be freed by caller) * * Retrieve all fences from the reservation object. If the pointer for the * exclusive fence is not specified the fence is put into the array of the * shared fences as well. Returns either zero or -ENOMEM. */ -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, - unsigned int *pshared_count, - struct dma_fence ***pshared) +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl, + unsigned int *shared_count, struct dma_fence ***shared) { - struct dma_fence **shared = NULL; - struct dma_fence *fence_excl; - unsigned int shared_count; - int ret = 1; - - do { - struct dma_resv_list *fobj; - unsigned int i, seq; - size_t sz = 0; - - shared_count = i = 0; - - rcu_read_lock(); - seq = read_seqcount_begin(&obj->seq); - - fence_excl = dma_resv_excl_fence(obj); - if (fence_excl && !dma_fence_get_rcu(fence_excl)) - goto unlock; + struct dma_resv_cursor cursor; + struct dma_fence *fence;
- fobj = dma_resv_shared_list(obj); - if (fobj) - sz += sizeof(*shared) * fobj->shared_max; + *shared_count = 0; + *shared = NULL;
- if (!pfence_excl && fence_excl) - sz += sizeof(*shared); + if (fence_excl) + *fence_excl = NULL;
- if (sz) { - struct dma_fence **nshared; + rcu_read_lock(); + dma_resv_for_each_fence_unlocked(obj, &cursor, true, fence) {
- nshared = krealloc(shared, sz, - GFP_NOWAIT | __GFP_NOWARN); - if (!nshared) { - rcu_read_unlock(); + if (cursor.is_first) { + unsigned int count;
- dma_fence_put(fence_excl); - fence_excl = NULL; + while (*shared_count) + dma_fence_put((*shared)[--(*shared_count)]);
- nshared = krealloc(shared, sz, GFP_KERNEL); - if (nshared) { - shared = nshared; - continue; - } + if (fence_excl) + dma_fence_put(*fence_excl);
- ret = -ENOMEM; - break; - } - shared = nshared; - shared_count = fobj ? fobj->shared_count : 0; - for (i = 0; i < shared_count; ++i) { - shared[i] = rcu_dereference(fobj->shared[i]); - if (!dma_fence_get_rcu(shared[i])) - break; - } - } + count = cursor.fences ? cursor.fences->shared_count : 0; + count += fence_excl ? 0 : 1; + rcu_read_unlock();
- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { - while (i--) - dma_fence_put(shared[i]); - dma_fence_put(fence_excl); - goto unlock; + /* Eventually re-allocate the array */ + *shared = krealloc_array(*shared, count, + sizeof(*shared), + GFP_KERNEL); + if (count && !*shared) + return -ENOMEM; + rcu_read_lock(); }
- ret = 0; -unlock: - rcu_read_unlock(); - } while (ret); - - if (pfence_excl) - *pfence_excl = fence_excl; - else if (fence_excl) - shared[shared_count++] = fence_excl; + if (cursor.is_exclusive && fence_excl) + *fence_excl = fence; + else + (*shared)[(*shared_count)++] = fence;
- if (!shared_count) { - kfree(shared); - shared = NULL; + /* Don't drop the reference */ + fence = NULL; } + rcu_read_unlock();
- *pshared_count = shared_count; - *pshared = shared; - return ret; + return 0; } EXPORT_SYMBOL_GPL(dma_resv_get_fences);
From: kernel test robot <lkp@intel.com>
drivers/dma-buf/dma-resv.c:525:7-13: ERROR: application of sizeof to pointer
sizeof when applied to a pointer typed expression gives the size of the pointer
Generated by: scripts/coccinelle/misc/noderef.cocci
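In the patched call below *shared is only the pointer to the fence array while **shared is one stored element (a struct dma_fence *), so the element size handed to krealloc_array() should be sizeof(**shared). A minimal illustration of the difference (mirroring the function parameter, not a complete snippet):

	struct dma_fence ***shared;	/* function parameter: array passed by reference */

	/* flagged: sizeof(*shared) is the size of the array pointer */
	*shared = krealloc_array(*shared, count, sizeof(*shared), GFP_KERNEL);

	/* intended: sizeof(**shared) is the size of one array element */
	*shared = krealloc_array(*shared, count, sizeof(**shared), GFP_KERNEL);

Both expressions happen to be pointer sized here, so this is a robustness/intent fix rather than a functional change.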
CC: Christian König <ckoenig.leichtzumerken@gmail.com>
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: kernel test robot <lkp@intel.com>
---

url:    https://github.com/0day-ci/linux/commits/Christian-K-nig/dma-buf-add-dma_res...
base:   git://anongit.freedesktop.org/drm-intel for-linux-next
:::::: branch date: 28 hours ago
:::::: commit date: 28 hours ago

 dma-resv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -522,7 +522,7 @@ int dma_resv_get_fences(struct dma_resv
/* Eventually re-allocate the array */ *shared = krealloc_array(*shared, count, - sizeof(*shared), + sizeof(**shared), GFP_KERNEL); if (count && !*shared) return -ENOMEM;
This makes the function much simpler since the complex retry logic is now handled elsewhere.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 64 +++++---------------------------
 1 file changed, 7 insertions(+), 57 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 2dfb04e6a62f..645cf52a6a6c 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -571,74 +571,24 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, unsigned long timeout) { long ret = timeout ? timeout : 1; - unsigned int seq, shared_count; + struct dma_resv_cursor cursor; struct dma_fence *fence; - int i;
-retry: - shared_count = 0; - seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); - i = -1; - - fence = dma_resv_excl_fence(obj); - if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - if (!dma_fence_get_rcu(fence)) - goto unlock_retry; + dma_resv_for_each_fence_unlocked(obj, &cursor, wait_all, fence) { + rcu_read_unlock();
- if (dma_fence_is_signaled(fence)) { + ret = dma_fence_wait_timeout(fence, intr, ret); + if (ret <= 0) { dma_fence_put(fence); - fence = NULL; + return ret; }
- } else { - fence = NULL; - } - - if (wait_all) { - struct dma_resv_list *fobj = dma_resv_shared_list(obj); - - if (fobj) - shared_count = fobj->shared_count; - - for (i = 0; !fence && i < shared_count; ++i) { - struct dma_fence *lfence; - - lfence = rcu_dereference(fobj->shared[i]); - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &lfence->flags)) - continue; - - if (!dma_fence_get_rcu(lfence)) - goto unlock_retry; - - if (dma_fence_is_signaled(lfence)) { - dma_fence_put(lfence); - continue; - } - - fence = lfence; - break; - } + rcu_read_lock(); } - rcu_read_unlock(); - if (fence) { - if (read_seqcount_retry(&obj->seq, seq)) { - dma_fence_put(fence); - goto retry; - }
- ret = dma_fence_wait_timeout(fence, intr, ret); - dma_fence_put(fence); - if (ret > 0 && wait_all && (i + 1 < shared_count)) - goto retry; - } return ret; - -unlock_retry: - rcu_read_unlock(); - goto retry; } EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
This makes the function much simpler since the complex retry logic is now handled elsewhere.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 54 +++++---------------------------
 1 file changed, 7 insertions(+), 47 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 645cf52a6a6c..cde5d448d029 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -593,22 +593,6 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
-static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) -{ - struct dma_fence *fence, *lfence = passed_fence; - int ret = 1; - - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { - fence = dma_fence_get_rcu(lfence); - if (!fence) - return -1; - - ret = !!dma_fence_is_signaled(fence); - dma_fence_put(fence); - } - return ret; -} - /** * dma_resv_test_signaled - Test if a reservation object's fences have been * signaled. @@ -625,43 +609,19 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) */ bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) { + struct dma_resv_cursor cursor; struct dma_fence *fence; - unsigned int seq; - int ret;
rcu_read_lock(); -retry: - ret = true; - seq = read_seqcount_begin(&obj->seq); - - if (test_all) { - struct dma_resv_list *fobj = dma_resv_shared_list(obj); - unsigned int i, shared_count; - - shared_count = fobj ? fobj->shared_count : 0; - for (i = 0; i < shared_count; ++i) { - fence = rcu_dereference(fobj->shared[i]); - ret = dma_resv_test_signaled_single(fence); - if (ret < 0) - goto retry; - else if (!ret) - break; + dma_resv_for_each_fence_unlocked(obj, &cursor, test_all, fence) { + if (!dma_fence_is_signaled(fence)) { + rcu_read_unlock(); + dma_fence_put(fence); + return false; } } - - fence = dma_resv_excl_fence(obj); - if (ret && fence) { - ret = dma_resv_test_signaled_single(fence); - if (ret < 0) - goto retry; - - } - - if (read_seqcount_retry(&obj->seq, seq)) - goto retry; - rcu_read_unlock(); - return ret; + return true; } EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
This makes the function much simpler since the complex retry logic is now handled elsewhere.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_busy.c | 30 +++++++-----------------
 1 file changed, 9 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 6234e17259c1..c6c6d747b33e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_busy *args = data; struct drm_i915_gem_object *obj; - struct dma_resv_list *list; - unsigned int seq; + struct dma_resv_cursor cursor; + struct dma_fence *fence; int err;
err = -ENOENT; @@ -109,28 +109,16 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * to report the overall busyness. This is what the wait-ioctl does. * */ -retry: - seq = raw_read_seqcount(&obj->base.resv->seq); - - /* Translate the exclusive fence to the READ *and* WRITE engine */ - args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv)); - - /* Translate shared fences to READ set of engines */ - list = dma_resv_shared_list(obj->base.resv); - if (list) { - unsigned int shared_count = list->shared_count, i; - - for (i = 0; i < shared_count; ++i) { - struct dma_fence *fence = - rcu_dereference(list->shared[i]); - + args->busy = false; + dma_resv_for_each_fence_unlocked(obj->base.resv, &cursor, true, fence) { + if (cursor.is_exclusive) + /* Translate the exclusive fence to the READ *and* WRITE engine */ + args->busy = busy_check_writer(fence); + else + /* Translate shared fences to READ set of engines */ args->busy |= busy_check_reader(fence); - } }
- if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq)) - goto retry; - err = 0; out: rcu_read_unlock();
This is probably a fix since we didn't even grab a reference to the fences.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 0a3127436f61..5dd0c3dfec3c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -269,19 +269,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) { struct dma_resv *resv = &bo->base._resv; - struct dma_resv_list *fobj; + struct dma_resv_cursor cursor; struct dma_fence *fence; - int i;
rcu_read_lock(); - fobj = dma_resv_shared_list(resv); - fence = dma_resv_excl_fence(resv); - if (fence && !fence->ops->signaled) - dma_fence_enable_sw_signaling(fence); - - for (i = 0; fobj && i < fobj->shared_count; ++i) { - fence = rcu_dereference(fobj->shared[i]); - + dma_resv_for_each_fence_unlocked(resv, &cursor, true, fence) { if (!fence->ops->signaled) dma_fence_enable_sw_signaling(fence); }
Use the new iterator instead of hand-rolling the logic.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.c | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index b8fa6ed3dd73..6808dbef5c79 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -437,19 +437,17 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, static void etnaviv_gem_describe_fence(struct dma_fence *fence, const char *type, struct seq_file *m) { - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - seq_printf(m, "\t%9s: %s %s seq %llu\n", - type, - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - fence->seqno); + seq_printf(m, "\t%9s: %s %s seq %llu\n", type, + fence->ops->get_driver_name(fence), + fence->ops->get_timeline_name(fence), + fence->seqno); }
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); struct dma_resv *robj = obj->resv; - struct dma_resv_list *fobj; + struct dma_resv_cursor cursor; struct dma_fence *fence; unsigned long off = drm_vma_node_start(&obj->vma_node);
@@ -459,19 +457,12 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) off, etnaviv_obj->vaddr, obj->size);
rcu_read_lock(); - fobj = dma_resv_shared_list(robj); - if (fobj) { - unsigned int i, shared_count = fobj->shared_count; - - for (i = 0; i < shared_count; i++) { - fence = rcu_dereference(fobj->shared[i]); + dma_resv_for_each_fence_unlocked(robj, &cursor, true, fence) { + if (cursor.is_exclusive) + etnaviv_gem_describe_fence(fence, "Exclusive", m); + else etnaviv_gem_describe_fence(fence, "Shared", m); - } } - - fence = dma_resv_excl_fence(robj); - if (fence) - etnaviv_gem_describe_fence(fence, "Exclusive", m); rcu_read_unlock(); }
Simplifying the code a bit.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 44 ++++++++----------------
 1 file changed, 14 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 862eb3c1c4c5..031ba20debb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -252,41 +252,25 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode, void *owner) { - struct dma_resv_list *flist; + struct dma_resv_cursor cursor; struct dma_fence *f; - unsigned i; - int r = 0; + int r;
if (resv == NULL) return -EINVAL;
- /* always sync to the exclusive fence */ - f = dma_resv_excl_fence(resv); - dma_fence_chain_for_each(f, f) { - struct dma_fence_chain *chain = to_dma_fence_chain(f); - - if (amdgpu_sync_test_fence(adev, mode, owner, chain ? - chain->fence : f)) { - r = amdgpu_sync_fence(sync, f); - dma_fence_put(f); - if (r) - return r; - break; - } - } - - flist = dma_resv_shared_list(resv); - if (!flist) - return 0; - - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(resv)); - - if (amdgpu_sync_test_fence(adev, mode, owner, f)) { - r = amdgpu_sync_fence(sync, f); - if (r) - return r; + dma_resv_for_each_fence(resv, &cursor, true, f) { + dma_fence_chain_for_each(f, f) { + struct dma_fence_chain *chain = to_dma_fence_chain(f); + + if (amdgpu_sync_test_fence(adev, mode, owner, chain ? + chain->fence : f)) { + r = amdgpu_sync_fence(sync, f); + dma_fence_put(f); + if (r) + return r; + break; + } } } return 0;
Simplifying the code a bit.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 489e22190e29..0a927006ba9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1332,10 +1332,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { unsigned long num_pages = bo->resource->num_pages; + struct dma_resv_cursor resv_cursor; struct amdgpu_res_cursor cursor; - struct dma_resv_list *flist; struct dma_fence *f; - int i;
/* Swapout? */ if (bo->resource->mem_type == TTM_PL_SYSTEM) @@ -1349,14 +1348,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - flist = dma_resv_shared_list(bo->base.resv); - if (flist) { - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(bo->base.resv)); - if (amdkfd_fence_check_mm(f, current->mm)) - return false; - } + dma_resv_for_each_fence(bo->base.resv, &resv_cursor, true, f) { + if (amdkfd_fence_check_mm(f, current->mm)) + return false; }
switch (bo->resource->mem_type) {
Simplifying the code a bit. Also drop the RCU read side lock since the object is locked anyway.
Untested since I can't get the driver to compile on !ARM.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/msm/msm_gem.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 5db07fc287ad..8ee4e8881b03 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -906,7 +906,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct dma_resv *robj = obj->resv; - struct dma_resv_list *fobj; + struct dma_resv_cursor cursor; struct dma_fence *fence; struct msm_gem_vma *vma; uint64_t off = drm_vma_node_start(&obj->vma_node); @@ -981,22 +981,13 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, seq_puts(m, "\n"); }
- rcu_read_lock(); - fobj = dma_resv_shared_list(robj); - if (fobj) { - unsigned int i, shared_count = fobj->shared_count; - - for (i = 0; i < shared_count; i++) { - fence = rcu_dereference(fobj->shared[i]); + dma_resv_for_each_fence(robj, &cursor, true, fence) { + if (cursor.is_exclusive) + describe_fence(fence, "Exclusive", m); + else describe_fence(fence, "Shared", m); - } }
- fence = dma_resv_excl_fence(robj); - if (fence) - describe_fence(fence, "Exclusive", m); - rcu_read_unlock(); - msm_gem_unlock(obj); }
Simplifying the code a bit.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/nouveau/nouveau_fence.c | 48 +++++++------------------
 1 file changed, 12 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 05d0b3eb3690..dc8d7ca1e239 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -339,14 +339,15 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) }
int -nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) +nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, + bool exclusive, bool intr) { struct nouveau_fence_chan *fctx = chan->fence; - struct dma_fence *fence; struct dma_resv *resv = nvbo->bo.base.resv; - struct dma_resv_list *fobj; + struct dma_resv_cursor cursor; + struct dma_fence *fence; struct nouveau_fence *f; - int ret = 0, i; + int ret;
if (!exclusive) { ret = dma_resv_reserve_shared(resv, 1); @@ -355,10 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e return ret; }
- fobj = dma_resv_shared_list(resv); - fence = dma_resv_excl_fence(resv); - - if (fence) { + dma_resv_for_each_fence(resv, &cursor, exclusive, fence) { struct nouveau_channel *prev = NULL; bool must_wait = true;
@@ -366,41 +364,19 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e if (f) { rcu_read_lock(); prev = rcu_dereference(f->channel); - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) + if (prev && (prev == chan || + fctx->sync(f, prev, chan) == 0)) must_wait = false; rcu_read_unlock(); }
- if (must_wait) + if (must_wait) { ret = dma_fence_wait(fence, intr); - - return ret; - } - - if (!exclusive || !fobj) - return ret; - - for (i = 0; i < fobj->shared_count && !ret; ++i) { - struct nouveau_channel *prev = NULL; - bool must_wait = true; - - fence = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(resv)); - - f = nouveau_local_fence(fence, chan->drm); - if (f) { - rcu_read_lock(); - prev = rcu_dereference(f->channel); - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) - must_wait = false; - rcu_read_unlock(); + if (ret) + return ret; } - - if (must_wait) - ret = dma_fence_wait(fence, intr); } - - return ret; + return 0; }
void
Simplifying the code a bit.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon_sync.c | 22 +++-------------------
 1 file changed, 3 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 9257b60144c4..14a4d8135bad 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -91,33 +91,17 @@ int radeon_sync_resv(struct radeon_device *rdev, struct dma_resv *resv, bool shared) { - struct dma_resv_list *flist; - struct dma_fence *f; + struct dma_resv_cursor cursor; struct radeon_fence *fence; - unsigned i; + struct dma_fence *f; int r = 0;
- /* always sync to the exclusive fence */ - f = dma_resv_excl_fence(resv); - fence = f ? to_radeon_fence(f) : NULL; - if (fence && fence->rdev == rdev) - radeon_sync_fence(sync, fence); - else if (f) - r = dma_fence_wait(f, true); - - flist = dma_resv_shared_list(resv); - if (shared || !flist || r) - return r; - - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(resv)); + dma_resv_for_each_fence(resv, &cursor, shared, f) { fence = to_radeon_fence(f); if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); else r = dma_fence_wait(f, true); - if (r) break; }
On Fri, Sep 10, 2021 at 10:26:42AM +0200, Christian König wrote:
Abstract the complexity of iterating over all the fences in a dma_resv object.
The new loop handles the whole RCU and retry dance and returns only fences where we can be sure we grabbed the right one.
Signed-off-by: Christian König christian.koenig@amd.com
Bigger picture discussion, picking up from the discussion about maybe removing rcu for fence protections entirely.
Iirc the main hold-up was that in ttm delayed destroy we really want to take a snapshot of all the fences, even if we can't get at the dma_resv_lock. I was wondering whether we can't solve this problem differently with something like this:
- demidlayer delayed destroy a bit, ttm core would only guarantee that dma_resv_lock is called already (so we don't have to handle the delayed destroy internally), then call into drivers
- drivers would iterate over the fences currently attached to dma_resv, maybe with a deep iterator thing that walks through chain/array fences too. The driver then filters out all fences which aren't its own, i.e. which don't pertain to the device. Driver can do that with the usual ops upcasting trickery, ttm cannot do that.
- driver then stuffs all these fences into the local dma_resv, so that we have an unshared dma_resv fence list with only the fences that matter, and nothing that was added meanwhile
- driver calls the exported ttm function to clean up the mess with the unshared dma_resv
Would this work?
I'm simply trying to figure out whether we have any option to get rid of all this rcu trickery. At least from all the places where we really don't need it ...
Meanwhile I'll try and do at least a bit of high-level review for your series here. -Daniel
drivers/dma-buf/dma-resv.c | 63 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-resv.h | 36 ++++++++++++++++++++++ 2 files changed, 99 insertions(+)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 84fbe60629e3..213a9b7251ca 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -323,6 +323,69 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence); +/**
- dma_resv_walk_unlocked - walk over fences in a dma_resv obj
- @obj: the dma_resv object
- @cursor: cursor to record the current position
- @all_fences: true returns also the shared fences
- @first: if we should start over
- Return all the fences in the dma_resv object which are not yet signaled.
- The returned fence has an extra local reference so will stay alive.
- If a concurrent modify is detected the whole iterator is started over again.
- */
+struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj,
struct dma_resv_cursor *cursor,
bool all_fences, bool first)
+{
- struct dma_fence *fence = NULL;
- do {
/* Drop the reference from the previous round */
dma_fence_put(fence);
cursor->is_first = first;
if (first) {
cursor->seq = read_seqcount_begin(&obj->seq);
cursor->index = -1;
cursor->fences = dma_resv_shared_list(obj);
cursor->is_exclusive = true;
fence = dma_resv_excl_fence(obj);
if (fence && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
fence = NULL;
} else {
fence = NULL;
}
if (fence) {
fence = dma_fence_get_rcu(fence);
} else if (all_fences && cursor->fences) {
struct dma_resv_list *fences = cursor->fences;
cursor->is_exclusive = false;
while (++cursor->index < fences->shared_count) {
fence = rcu_dereference(fences->shared[
cursor->index]);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
break;
}
if (cursor->index < fences->shared_count)
fence = dma_fence_get_rcu(fence);
else
fence = NULL;
}
/* For the eventually next round */
first = true;
- } while (read_seqcount_retry(&obj->seq, cursor->seq));
- return fence;
+} +EXPORT_SYMBOL_GPL(dma_resv_walk_unlocked);
/**
- dma_resv_copy_fences - Copy all fences from src to dst.
- @dst: the destination reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 9100dd3dc21f..f5b91c292ee0 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -149,6 +149,39 @@ struct dma_resv { struct dma_resv_list __rcu *fence; }; +/**
- struct dma_resv_cursor - current position into the dma_resv fences
- @seq: sequence number to check
- @index: index into the shared fences
- @shared: the shared fences
- @is_first: true if this is the first returned fence
- @is_exclusive: if the current fence is the exclusive one
- */
+struct dma_resv_cursor {
- unsigned int seq;
- unsigned int index;
- struct dma_resv_list *fences;
- bool is_first;
- bool is_exclusive;
+};
+/**
- dma_resv_for_each_fence_unlocked - fence iterator
- @obj: a dma_resv object pointer
- @cursor: a struct dma_resv_cursor pointer
- @all_fences: true if all fences should be returned
- @fence: the current fence
- Iterate over the fences in a struct dma_resv object without holding the
- dma_resv::lock. The RCU read side lock must be hold when using this, but can
- be dropped and re-taken as necessary inside the loop. @all_fences controls
- if the shared fences are returned as well.
- */
+#define dma_resv_for_each_fence_unlocked(obj, cursor, all_fences, fence) \
- for (fence = dma_resv_walk_unlocked(obj, cursor, all_fences, true); \
fence; dma_fence_put(fence), \
fence = dma_resv_walk_unlocked(obj, cursor, all_fences, false))
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) @@ -366,6 +399,9 @@ void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); +struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj,
struct dma_resv_cursor *cursor,
bool first, bool all_fences);
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, unsigned *pshared_count, struct dma_fence ***pshared); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); -- 2.25.1
On Fri, Sep 10, 2021 at 10:26:42AM +0200, Christian König wrote:
Abstract the complexity of iterating over all the fences in a dma_resv object.
The new loop handles the whole RCU and retry dance and returns only fences where we can be sure we grabbed the right one.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/dma-resv.c | 63 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-resv.h | 36 ++++++++++++++++++++++ 2 files changed, 99 insertions(+)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 84fbe60629e3..213a9b7251ca 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -323,6 +323,69 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence); +/**
- dma_resv_walk_unlocked - walk over fences in a dma_resv obj
- @obj: the dma_resv object
- @cursor: cursor to record the current position
- @all_fences: true returns also the shared fences
- @first: if we should start over
- Return all the fences in the dma_resv object which are not yet signaled.
- The returned fence has an extra local reference so will stay alive.
- If a concurrent modify is detected the whole iterator is started over again.
- */
+struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj,
struct dma_resv_cursor *cursor,
bool all_fences, bool first)
+{
- struct dma_fence *fence = NULL;
- do {
/* Drop the reference from the previous round */
dma_fence_put(fence);
cursor->is_first = first;
if (first) {
cursor->seq = read_seqcount_begin(&obj->seq);
cursor->index = -1;
cursor->fences = dma_resv_shared_list(obj);
cursor->is_exclusive = true;
fence = dma_resv_excl_fence(obj);
if (fence && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
fence = NULL;
} else {
fence = NULL;
}
if (fence) {
fence = dma_fence_get_rcu(fence);
} else if (all_fences && cursor->fences) {
struct dma_resv_list *fences = cursor->fences;
cursor->is_exclusive = false;
while (++cursor->index < fences->shared_count) {
fence = rcu_dereference(fences->shared[
cursor->index]);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
break;
}
if (cursor->index < fences->shared_count)
fence = dma_fence_get_rcu(fence);
else
fence = NULL;
}
/* For the eventually next round */
first = true;
- } while (read_seqcount_retry(&obj->seq, cursor->seq));
- return fence;
+} +EXPORT_SYMBOL_GPL(dma_resv_walk_unlocked);
/**
- dma_resv_copy_fences - Copy all fences from src to dst.
- @dst: the destination reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 9100dd3dc21f..f5b91c292ee0 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -149,6 +149,39 @@ struct dma_resv { struct dma_resv_list __rcu *fence; }; +/**
- struct dma_resv_cursor - current position into the dma_resv fences
- @seq: sequence number to check
- @index: index into the shared fences
- @shared: the shared fences
- @is_first: true if this is the first returned fence
- @is_exclusive: if the current fence is the exclusive one
- */
+struct dma_resv_cursor {
- unsigned int seq;
- unsigned int index;
- struct dma_resv_list *fences;
- bool is_first;
- bool is_exclusive;
+};
A bit of a bikeshed, but I think it'd be nice to align this with the other iterators we have, e.g. for the drm_connector list.
So struct dma_resv_fence_iter, dma_resv_fence_iter_begin/next/end().
Also I think the for_each macro must not include begin/end calls. If we include that then it saves 2 lines of code at the cost of a pile of awkward bugs because people break; out of the loop or return early (only continue is safe) and we leak a fence. Or worse.
Explicit begin/end is much more robust at a very marginal cost imo.
Otherwise I think this fence iterator is a solid concept that yeah we should roll out everywhere. -Daniel
+/**
- dma_resv_for_each_fence_unlocked - fence iterator
- @obj: a dma_resv object pointer
- @cursor: a struct dma_resv_cursor pointer
- @all_fences: true if all fences should be returned
- @fence: the current fence
- Iterate over the fences in a struct dma_resv object without holding the
- dma_resv::lock. The RCU read side lock must be hold when using this, but can
- be dropped and re-taken as necessary inside the loop. @all_fences controls
- if the shared fences are returned as well.
- */
+#define dma_resv_for_each_fence_unlocked(obj, cursor, all_fences, fence) \
- for (fence = dma_resv_walk_unlocked(obj, cursor, all_fences, true); \
fence; dma_fence_put(fence), \
fence = dma_resv_walk_unlocked(obj, cursor, all_fences, false))
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) @@ -366,6 +399,9 @@ void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); +struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj,
struct dma_resv_cursor *cursor,
bool first, bool all_fences);
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, unsigned *pshared_count, struct dma_fence ***pshared); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); -- 2.25.1
On 14.09.21 at 19:04, Daniel Vetter wrote:
On Fri, Sep 10, 2021 at 10:26:42AM +0200, Christian König wrote:
Abstract the complexity of iterating over all the fences in a dma_resv object.
The new loop handles the whole RCU and retry dance and returns only fences where we can be sure we grabbed the right one.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/dma-resv.c | 63 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-resv.h | 36 ++++++++++++++++++++++ 2 files changed, 99 insertions(+)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 84fbe60629e3..213a9b7251ca 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -323,6 +323,69 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence); +/**
- dma_resv_walk_unlocked - walk over fences in a dma_resv obj
- @obj: the dma_resv object
- @cursor: cursor to record the current position
- @all_fences: true returns also the shared fences
- @first: if we should start over
- Return all the fences in the dma_resv object which are not yet signaled.
- The returned fence has an extra local reference so will stay alive.
- If a concurrent modify is detected the whole iterator is started over again.
- */
+struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj,
struct dma_resv_cursor *cursor,
bool all_fences, bool first)
+{
- struct dma_fence *fence = NULL;
- do {
/* Drop the reference from the previous round */
dma_fence_put(fence);
cursor->is_first = first;
if (first) {
cursor->seq = read_seqcount_begin(&obj->seq);
cursor->index = -1;
cursor->fences = dma_resv_shared_list(obj);
cursor->is_exclusive = true;
fence = dma_resv_excl_fence(obj);
if (fence && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
fence = NULL;
} else {
fence = NULL;
}
if (fence) {
fence = dma_fence_get_rcu(fence);
} else if (all_fences && cursor->fences) {
struct dma_resv_list *fences = cursor->fences;
cursor->is_exclusive = false;
while (++cursor->index < fences->shared_count) {
fence = rcu_dereference(fences->shared[
cursor->index]);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
break;
}
if (cursor->index < fences->shared_count)
fence = dma_fence_get_rcu(fence);
else
fence = NULL;
}
/* For the eventually next round */
first = true;
- } while (read_seqcount_retry(&obj->seq, cursor->seq));
- return fence;
+} +EXPORT_SYMBOL_GPL(dma_resv_walk_unlocked);
- /**
- dma_resv_copy_fences - Copy all fences from src to dst.
- @dst: the destination reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 9100dd3dc21f..f5b91c292ee0 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -149,6 +149,39 @@ struct dma_resv { struct dma_resv_list __rcu *fence; }; +/**
- struct dma_resv_cursor - current position into the dma_resv fences
- @seq: sequence number to check
- @index: index into the shared fences
- @shared: the shared fences
- @is_first: true if this is the first returned fence
- @is_exclusive: if the current fence is the exclusive one
- */
+struct dma_resv_cursor {
- unsigned int seq;
- unsigned int index;
- struct dma_resv_list *fences;
- bool is_first;
- bool is_exclusive;
+};
A bit of a bikeshed, but I think it'd be nice to align this with the other iterators we have, e.g. for the drm_connector list.
So struct dma_resv_fence_iter, dma_resv_fence_iter_begin/next/end().
I've renamed the structure to dma_resv_iter.
Also I think the for_each macro must not include begin/end calls. If we include that then it saves 2 lines of code at the cost of a pile of awkward bugs because people break; out of the loop or return early (only continue is safe) and we leak a fence. Or worse.
Explicit begin/end is much more robust at a very marginal cost imo.
The key point is that this makes it quite a bit more complicated to implement. See, those functions are easiest when you centralize them and try not to spread the functionality into begin/end.
The only thing I could see in the end function would be to drop the reference for the dma_fence and that is not really something I would like to do because we actually need to keep that reference in a bunch of cases.
Regards, Christian.
Otherwise I think this fence iterator is a solid concept that yeah we should roll out everywhere. -Daniel
+/**
- dma_resv_for_each_fence_unlocked - fence iterator
- @obj: a dma_resv object pointer
- @cursor: a struct dma_resv_cursor pointer
- @all_fences: true if all fences should be returned
- @fence: the current fence
- Iterate over the fences in a struct dma_resv object without holding the
- dma_resv::lock. The RCU read side lock must be hold when using this, but can
- be dropped and re-taken as necessary inside the loop. @all_fences controls
- if the shared fences are returned as well.
- */
+#define dma_resv_for_each_fence_unlocked(obj, cursor, all_fences, fence) \
- for (fence = dma_resv_walk_unlocked(obj, cursor, all_fences, true); \
fence; dma_fence_put(fence), \
fence = dma_resv_walk_unlocked(obj, cursor, all_fences, false))
- #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
@@ -366,6 +399,9 @@ void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); +struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj,
struct dma_resv_cursor *cursor,
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, unsigned *pshared_count, struct dma_fence ***pshared); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);bool first, bool all_fences);
-- 2.25.1
On Thu, Sep 16, 2021 at 10:50 AM Christian König ckoenig.leichtzumerken@gmail.com wrote:
Am 14.09.21 um 19:04 schrieb Daniel Vetter:
On Fri, Sep 10, 2021 at 10:26:42AM +0200, Christian König wrote:
Abstract the complexity of iterating over all the fences in a dma_resv object.
The new loop handles the whole RCU and retry dance and returns only fences where we can be sure we grabbed the right one.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/dma-resv.c | 63 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-resv.h | 36 ++++++++++++++++++++++ 2 files changed, 99 insertions(+)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 84fbe60629e3..213a9b7251ca 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -323,6 +323,69 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence); +/**
- dma_resv_walk_unlocked - walk over fences in a dma_resv obj
- @obj: the dma_resv object
- @cursor: cursor to record the current position
- @all_fences: true returns also the shared fences
- @first: if we should start over
- Return all the fences in the dma_resv object which are not yet signaled.
- The returned fence has an extra local reference so will stay alive.
- If a concurrent modify is detected the whole iterator is started over again.
- */
+struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj,
struct dma_resv_cursor *cursor,
bool all_fences, bool first)
+{
- struct dma_fence *fence = NULL;
- do {
/* Drop the reference from the previous round */
dma_fence_put(fence);
cursor->is_first = first;
if (first) {
cursor->seq = read_seqcount_begin(&obj->seq);
cursor->index = -1;
cursor->fences = dma_resv_shared_list(obj);
cursor->is_exclusive = true;
fence = dma_resv_excl_fence(obj);
if (fence && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
fence = NULL;
} else {
fence = NULL;
}
if (fence) {
fence = dma_fence_get_rcu(fence);
} else if (all_fences && cursor->fences) {
struct dma_resv_list *fences = cursor->fences;
cursor->is_exclusive = false;
while (++cursor->index < fences->shared_count) {
fence = rcu_dereference(fences->shared[
cursor->index]);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
break;
}
if (cursor->index < fences->shared_count)
fence = dma_fence_get_rcu(fence);
else
fence = NULL;
}
/* For the eventually next round */
first = true;
- } while (read_seqcount_retry(&obj->seq, cursor->seq));
- return fence;
+} +EXPORT_SYMBOL_GPL(dma_resv_walk_unlocked);
- /**
- dma_resv_copy_fences - Copy all fences from src to dst.
- @dst: the destination reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 9100dd3dc21f..f5b91c292ee0 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -149,6 +149,39 @@ struct dma_resv { struct dma_resv_list __rcu *fence; }; +/**
- struct dma_resv_cursor - current position into the dma_resv fences
- @seq: sequence number to check
- @index: index into the shared fences
- @shared: the shared fences
- @is_first: true if this is the first returned fence
- @is_exclusive: if the current fence is the exclusive one
- */
+struct dma_resv_cursor {
- unsigned int seq;
- unsigned int index;
- struct dma_resv_list *fences;
- bool is_first;
- bool is_exclusive;
+};
A bit of a bikeshed, but I think it'd be nice to align this with the other iterators we have, e.g. for the drm_connector list.
So struct dma_resv_fence_iter, dma_resv_fence_iter_begin/next/end().
I've renamed the structure to dma_resv_iter.
Also I think the for_each macro must not include begin/end calls. If we include that then it saves 2 lines of code at the cost of a pile of awkward bugs because people break; out of the loop or return early (only continue is safe) and we leak a fence. Or worse.
Explicit begin/end is much more robust at a very marginal cost imo.
The key point is that this makes it quite a bit more complicated to implement. See, those functions are easiest when you centralize them and try not to spread the functionality into begin/end.
The only thing I could see in the end function would be to drop the reference for the dma_fence and that is not really something I would like to do because we actually need to keep that reference in a bunch of cases.
Yeah but it's extremely fragile. See with drm_connector_iter we also have the need to grab a reference to that connector in a few places, and I do think that open-coding that is much clearer instead of inheriting a reference that the for_each macro acquired for you, and which you cleverly leaked through a break; Compare
for_each_fence(fence) {
	if (fence) {
		found_fence = fence;
		break;
	}
}

/* do some interesting stuff with found_fence */

dma_fence_put(found_fence); /* wtf, where is this fence reference from */
Versus what I'm proposing:
fence_iter_init(&fence_iter)
for_each_fence(fence, &fence_iter) {
	if (fence) {
		found_fence = fence;
		dma_fence_get(found_fence);
		break;
	}
}
fence_iter_end(&fence_iter)
/* do some interesting stuff with found_fence */
dma_fence_put(found_fence); /* 100% clear which reference we're putting here */
One of these patterns is maintainable and clear, at the cost of 3 more lines. The other one is frankly just clever but fragile nonsense.
So yeah I really think we need the iter_init/end/next triple of functions here. Too clever is no good at all. And yes that version means you have an additional kref_get/put in there for the found fence, but I really don't think that matters in any of these paths here.
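Spelled out with plain function calls instead of hiding everything in the macro, the same "grab one fence" example would look like this (all the dma_resv_iter_* names here are just placeholders for whatever we end up calling them):

	struct dma_resv_iter cursor;
	struct dma_fence *fence, *found = NULL;

	dma_resv_iter_begin(&cursor, resv, true);
	while ((fence = dma_resv_iter_next(&cursor))) {
		if (dma_fence_is_signaled(fence))
			continue;

		/* explicitly grab the reference we want to keep */
		found = dma_fence_get(fence);
		break;
	}
	dma_resv_iter_end(&cursor);

	/* use found, then drop the one reference we clearly own */
	dma_fence_put(found);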
Cheers, Daniel
Regards, Christian.
Otherwise I think this fence iterator is a solid concept that yeah we should roll out everywhere. -Daniel
+/**
+ * dma_resv_for_each_fence_unlocked - fence iterator
+ * @obj: a dma_resv object pointer
+ * @cursor: a struct dma_resv_cursor pointer
+ * @all_fences: true if all fences should be returned
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * dma_resv::lock. The RCU read side lock must be held when using this, but can
+ * be dropped and re-taken as necessary inside the loop. @all_fences controls
+ * if the shared fences are returned as well.
+ */
+#define dma_resv_for_each_fence_unlocked(obj, cursor, all_fences, fence) \
+	for (fence = dma_resv_walk_unlocked(obj, cursor, all_fences, true); \
+	     fence; dma_fence_put(fence), \
+	     fence = dma_resv_walk_unlocked(obj, cursor, all_fences, false))
 #define dma_resv_held(obj)	lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

@@ -366,6 +399,9 @@ void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
+struct dma_fence *dma_resv_walk_unlocked(struct dma_resv *obj,
+					 struct dma_resv_cursor *cursor,
+					 bool all_fences, bool first);
 int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			 unsigned *pshared_count, struct dma_fence ***pshared);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
--
2.25.1
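To make the fragility concrete, here is roughly how the macro as posted would be used; a minimal sketch, assuming obj points to a live struct dma_resv:

	struct dma_resv_cursor cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	rcu_read_lock();
	dma_resv_for_each_fence_unlocked(obj, &cursor, true, fence) {
		/* fence is unsignaled and carries a local reference here */
		count++;
		/*
		 * A break; or return at this point would leak that
		 * reference, because the macro's dma_fence_put() only runs
		 * in the loop header on the next iteration -- exactly the
		 * hazard discussed above.
		 */
	}
	rcu_read_unlock();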
Am 16.09.21 um 14:14 schrieb Daniel Vetter:
On Thu, Sep 16, 2021 at 10:50 AM Christian König ckoenig.leichtzumerken@gmail.com wrote:
Am 14.09.21 um 19:04 schrieb Daniel Vetter:
On Fri, Sep 10, 2021 at 10:26:42AM +0200, Christian König wrote:
[quoted patch and earlier discussion snipped]
Yeah, that's what I've pondered on as well, but I thought avoiding the extra get/put dance was more important.
Anyway, going to change that to make clear what happens here.
But the question is: can you go over the patch set and see whether we can replace some more dma_resv_for_each_fence_unlocked() with dma_resv_for_each_fence() where the lock is either held or can be taken? I would feel much better if we avoided the unlocked access in the first place.
Thanks, Christian.
On Thu, Sep 16, 2021 at 02:49:26PM +0200, Christian König wrote:
Am 16.09.21 um 14:14 schrieb Daniel Vetter:
On Thu, Sep 16, 2021 at 10:50 AM Christian König ckoenig.leichtzumerken@gmail.com wrote:
Am 14.09.21 um 19:04 schrieb Daniel Vetter:
On Fri, Sep 10, 2021 at 10:26:42AM +0200, Christian König wrote:
[quoted patch and earlier discussion snipped]
Yeah, that's what I've pondered on as well, but I thought avoiding the extra get/put dance was more important.
Yeah, but if that shows up in a benchmark/profile, we can fix it with some fence_iter_get_fence() or so wrapper which explicitly hands the reference over to you (by clearing the pointer in the iter or wherever, so that _next() or _end() no longer call dma_fence_put()). So if necessary, we can have both clarity and speed here.
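Such a hand-over wrapper might look roughly like this; a sketch only, assuming the renamed dma_resv_iter structure keeps the current fence in a ->fence member:

	/* Sketch: transfer the iterator's fence reference to the caller,
	 * so the following _next()/_end() no longer dma_fence_put() it. */
	static inline struct dma_fence *
	dma_resv_iter_get_fence(struct dma_resv_iter *iter)
	{
		struct dma_fence *fence = iter->fence;

		iter->fence = NULL;	/* the iterator no longer owns it */
		return fence;
	}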
But the question is: can you go over the patch set and see whether we can replace some more dma_resv_for_each_fence_unlocked() with dma_resv_for_each_fence() where the lock is either held or can be taken? I would feel much better if we avoided the unlocked access in the first place.
Yeah fully agreed, I think we should aim as much as possible for fully locked. Btw, on that, did you see my other reply where I toss around an idea for the dma_resv unsharing problem? -Daniel
Am 16.09.21 um 16:09 schrieb Daniel Vetter:
On Thu, Sep 16, 2021 at 02:49:26PM +0200, Christian König wrote:
Am 16.09.21 um 14:14 schrieb Daniel Vetter:
On Thu, Sep 16, 2021 at 10:50 AM Christian König ckoenig.leichtzumerken@gmail.com wrote:
Am 14.09.21 um 19:04 schrieb Daniel Vetter:
On Fri, Sep 10, 2021 at 10:26:42AM +0200, Christian König wrote:
[quoted patch and earlier discussion snipped]
Yeah, but if that shows up in a benchmark/profile, we can fix it with some fence_iter_get_fence() or so wrapper which explicitly hands the reference over to you (by clearing the pointer in the iter or wherever, so that _next() or _end() no longer call dma_fence_put()). So if necessary, we can have both clarity and speed here.
Ok fine with me, going to rework the code.
Yeah fully agreed, I think we should aim as much as possible for fully locked.
The problem is that I can't really say for a lot of the code whether we should use the locked or the unlocked variant.
For example, Tvrtko suggested using the locked variant in i915_request_await_object(), and I mixed that up with i915_sw_fence_await_reservation(). The end result was that the CI system blew up immediately.
Good that the CI system caught that, but I will certainly only move to the locked variant if somebody explicitly confirms to me that we can do that for a use case.
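One way to make a wrong conversion fail loudly rather than only in CI is the existing lockdep annotation; a sketch, assuming a locked dma_resv_for_each_fence() counterpart with the same argument layout:

	/* Sketch: the locked walk can assert the reservation lock up
	 * front, so a bad conversion trips lockdep immediately. The
	 * dma_resv_for_each_fence() macro itself is an assumption here. */
	dma_resv_assert_held(obj);
	dma_resv_for_each_fence(obj, &cursor, true, fence) {
		/* no RCU read lock and no seqcount retry needed here */
	}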
Btw, on that, did you see my other reply where I toss around an idea for the dma_resv unsharing problem?
At least I don't know what you are talking about. So no, I probably somehow missed that.
Christian.
On Fri, Sep 17, 2021 at 08:32:49AM +0200, Christian König wrote:
Am 16.09.21 um 16:09 schrieb Daniel Vetter:
On Thu, Sep 16, 2021 at 02:49:26PM +0200, Christian König wrote:
Am 16.09.21 um 14:14 schrieb Daniel Vetter:
On Thu, Sep 16, 2021 at 10:50 AM Christian König ckoenig.leichtzumerken@gmail.com wrote:
Am 14.09.21 um 19:04 schrieb Daniel Vetter:
[quoted patch and earlier discussion snipped]
Yeah, I think going to the locked version on a case-by-case basis is probably best.
At least I don't know what you are talking about. So no, I probably somehow missed that.
https://lore.kernel.org/dri-devel/YUC0hPE7gx7E+tEx@phenom.ffwll.local/
Cheers, Daniel