4.19-stable review patch. If anyone has any objections, please let me know.
------------------
From: Gao Xiang <gaoxiang25@huawei.com>
commit 73f5c66df3e26ab750cefcb9a3e08c71c9f79cad upstream.
There are two minor issues in the current freeze interface:
1) The freeze interfaces have nothing to do with CONFIG_DEBUG_SPINLOCK, so fix the incorrect #if conditions;
2) On SMP platforms, preemption must also be disabled before doing atomic_cmpxchg; otherwise a high-priority task could preempt between the atomic_cmpxchg and preempt_disable and then spin on the locked refcount.
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/staging/erofs/internal.h | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)
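[Not part of the patch] Point 2 of the commit message is the key ordering change: on SMP the refcount must not be "locked" by the cmpxchg while the winning task can still be preempted, or a higher-priority task on the same CPU may spin on the locked value while the holder cannot run. Below is a minimal annotated sketch of the fixed ordering; the sketch_* names are hypothetical stand-ins for the erofs_workgroup helpers changed in the diff, not code from this patch.

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/preempt.h>

#define SKETCH_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)	/* mirrors EROFS_LOCKED_MAGIC */

struct sketch_workgroup {
	atomic_t refcount;		/* stand-in for erofs_workgroup.refcount */
};

/*
 * With the old ordering (cmpxchg first, preempt_disable() second), the
 * winner of the cmpxchg could be preempted right after locking the
 * refcount; a higher-priority task on the same CPU would then spin on
 * SKETCH_LOCKED_MAGIC while the lock holder cannot run.  Disabling
 * preemption first closes that window.
 */
static inline bool sketch_try_to_freeze(struct sketch_workgroup *grp, int val)
{
	preempt_disable();		/* cannot be preempted from here on */
	if (val != atomic_cmpxchg(&grp->refcount, val, SKETCH_LOCKED_MAGIC)) {
		preempt_enable();	/* lost the race, back off */
		return false;
	}
	return true;			/* refcount "locked", preemption still off */
}

static inline void sketch_unfreeze(struct sketch_workgroup *grp, int orig_val)
{
	atomic_set(&grp->refcount, orig_val);	/* publish the original count */
	preempt_enable();
}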
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -184,40 +184,49 @@ struct erofs_workgroup {
 
 #define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)
 
-static inline bool erofs_workgroup_try_to_freeze(
-	struct erofs_workgroup *grp, int v)
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	if (v != atomic_cmpxchg(&grp->refcount,
-				v, EROFS_LOCKED_MAGIC))
-		return false;
 	preempt_disable();
-#else
-	preempt_disable();
-	if (atomic_read(&grp->refcount) != v) {
+	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
 		preempt_enable();
 		return false;
 	}
-#endif
 	return true;
 }
 
-static inline void erofs_workgroup_unfreeze(
-	struct erofs_workgroup *grp, int v)
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	atomic_set(&grp->refcount, v);
-#endif
+	atomic_set(&grp->refcount, orig_val);
 	preempt_enable();
 }
 
-#if defined(CONFIG_SMP)
 static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 {
 	return atomic_cond_read_relaxed(&grp->refcount,
 					VAL != EROFS_LOCKED_MAGIC);
 }
 #else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	/* no need to spin on UP platforms, let's just disable preemption. */
+	if (val != atomic_read(&grp->refcount)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	preempt_enable();
+}
+
 static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 {
 	int v = atomic_read(&grp->refcount);
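[Not part of the patch] For completeness, a hypothetical caller sketch showing how the freeze/unfreeze pair is intended to be used: the freeze only succeeds if the refcount still equals the expected value, the frozen section must not sleep because preemption stays disabled, and unfreeze republishes the count. It assumes the file is built inside drivers/staging/erofs so that "internal.h" (as patched above) is available; the function name is made up for illustration.

#include "internal.h"	/* assumed: compiled as part of drivers/staging/erofs */

/* Hypothetical example, not taken from the erofs code. */
static inline bool example_update_workgroup(struct erofs_workgroup *grp,
					    int expected)
{
	/*
	 * Succeeds only if refcount is still `expected`; on success the
	 * refcount reads as EROFS_LOCKED_MAGIC and preemption is disabled.
	 */
	if (!erofs_workgroup_try_to_freeze(grp, expected))
		return false;

	/*
	 * Exclusive, non-sleeping work on the workgroup goes here; on SMP,
	 * other tasks spin in erofs_wait_on_workgroup_freezed() until the
	 * count is restored below.
	 */

	/* Restore the original count and re-enable preemption. */
	erofs_workgroup_unfreeze(grp, expected);
	return true;
}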