Remove the arch-specific __atomic_acquire_fence()/__atomic_release_fence() operations, since they use fence instructions to simulate acquire/release ordering and do not mix well with real acquire/release instructions (the .aq/.rl annotations).
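
To make the distinction concrete, here is a hypothetical sketch (the helper names are invented for illustration, not taken from the kernel): the removed barrier turned a relaxed AMO plus a trailing "fence r, rw" into an acquire operation, while the .aq annotation encodes the same intent in the AMO itself.

/* Illustrative only: two ways to get an acquire xchg on RISC-V. */

/* (a) fence-based: relaxed amoswap followed by the removed
 *     RISCV_ACQUIRE_BARRIER ("fence r, rw").
 */
static inline int xchg32_acquire_fence(int *p, int n)
{
	int ret;

	__asm__ __volatile__ (
		"	amoswap.w %0, %2, %1\n"
		"	fence r, rw\n"
		: "=r" (ret), "+A" (*p)
		: "r" (n)
		: "memory");
	return ret;
}

/* (b) native: the .aq bit on the AMO itself provides acquire ordering. */
static inline int xchg32_acquire_aq(int *p, int n)
{
	int ret;

	__asm__ __volatile__ (
		"	amoswap.w.aq %0, %2, %1\n"
		: "=r" (ret), "+A" (*p)
		: "r" (n)
		: "memory");
	return ret;
}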
The default generic __atomic_acquire_fence()/__atomic_release_fence() now provide sequential ordering via 'fence rw, rw'. They are rarely called, since real acquire/release instructions are used most of the time.
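
For reference, a simplified sketch of how the generic fallback layer composes an acquire op from a relaxed op plus this fence (modeled on include/linux/atomic/atomic-arch-fallback.h; simplified here, not the literal generated code):

static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
	/* Relaxed op first, then the (now full) acquire fence. */
	int ret = raw_atomic_fetch_add_relaxed(i, v);

	__atomic_acquire_fence();
	return ret;
}

This fallback is only taken when the architecture does not provide the _acquire variant directly, which is why the fence is rarely reached on riscv.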
Signed-off-by: Xu Lu <luxu.kernel@bytedance.com>
---
 arch/riscv/include/asm/atomic.h | 6 ------
 arch/riscv/include/asm/fence.h  | 4 ----
 2 files changed, 10 deletions(-)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 86291de07de62..6ed50a283bf8b 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -18,12 +18,6 @@
 
 #include <asm/cmpxchg.h>
 
-#define __atomic_acquire_fence()					\
-	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
-
-#define __atomic_release_fence()					\
-	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
-
 static __always_inline int arch_atomic_read(const atomic_t *v)
 {
 	return READ_ONCE(v->counter);
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 182db7930edc2..9ce83e4793948 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -7,12 +7,8 @@
 	({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
 
 #ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER	RISCV_FENCE_ASM(r, rw)
-#define RISCV_RELEASE_BARRIER	RISCV_FENCE_ASM(rw, w)
 #define RISCV_FULL_BARRIER	RISCV_FENCE_ASM(rw, rw)
 #else
-#define RISCV_ACQUIRE_BARRIER
-#define RISCV_RELEASE_BARRIER
 #define RISCV_FULL_BARRIER
 #endif
 