Linux-stable-mirror
October 2022
linux-stable-mirror@lists.linaro.org
386 participants
1706 discussions
backport of patches 8238b4579866b7c1bb99883cfe102a43db5506ff and d6ffe6067a54972564552ea45d320fb98db1ac5e
by Mikulas Patocka
Hi

Here I'm submitting a backport of patches 8238b4579866b7c1bb99883cfe102a43db5506ff
and d6ffe6067a54972564552ea45d320fb98db1ac5e to the stable branches.

Mikulas
2 years, 2 months · 2 participants · 40 comments
[PATCH 4.9 2/2] provide arch_test_bit_acquire for architectures that define test_bit
by Mikulas Patocka
commit d6ffe6067a54972564552ea45d320fb98db1ac5e upstream.

Some architectures define their own arch_test_bit and they also need
arch_test_bit_acquire, otherwise they won't compile. We also clean up
the code by using the generic test_bit if that is equivalent to the
arch-specific version.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier")
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/alpha/include/asm/bitops.h   |  7 +++++++
 arch/arc/include/asm/bitops.h     |  7 +++++++
 arch/frv/include/asm/bitops.h     |  7 +++++++
 arch/h8300/include/asm/bitops.h   |  3 ++-
 arch/hexagon/include/asm/bitops.h | 15 +++++++++++++++
 arch/ia64/include/asm/bitops.h    |  7 +++++++
 arch/m68k/include/asm/bitops.h    |  6 ++++++
 arch/mn10300/include/asm/bitops.h |  7 +++++++
 arch/s390/include/asm/bitops.h    |  7 +++++++
 arch/sh/include/asm/bitops-op32.h |  7 +++++++
 10 files changed, 72 insertions(+), 1 deletion(-)

Index: linux-stable/arch/alpha/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/alpha/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/alpha/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -288,6 +288,13 @@ test_bit(int nr, const volatile void * a
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
Index: linux-stable/arch/hexagon/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/hexagon/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/hexagon/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -186,7 +186,22 @@ static inline int __test_bit(int nr, con
 	return retval;
 }
 
+static inline int __test_bit_acquire(int nr, const volatile unsigned long *addr)
+{
+	int retval;
+
+	asm volatile(
+	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+	: "=&r" (retval)
+	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+	: "p0", "memory"
+	);
+
+	return retval;
+}
+
 #define test_bit(nr, addr) __test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) __test_bit_acquire(nr, addr)
 
 /*
  * ffz - find first zero in word.
Index: linux-stable/arch/ia64/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/ia64/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/ia64/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -336,6 +336,13 @@ test_bit (int nr, const volatile void *a
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /**
  * ffz - find the first zero bit in a long word
  * @x: The long word to find the bit in
Index: linux-stable/arch/m68k/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/m68k/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/m68k/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -153,6 +153,12 @@ static inline int test_bit(int nr, const
 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
 
 static inline int bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
Index: linux-stable/arch/s390/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/s390/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/s390/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -270,6 +270,13 @@ static inline int test_bit(unsigned long
 	return (*addr >> (nr & 7)) & 1;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 static inline int test_and_set_bit_lock(unsigned long nr,
 					volatile unsigned long *ptr)
 {
Index: linux-stable/arch/sh/include/asm/bitops-op32.h
===================================================================
--- linux-stable.orig/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:24:31.000000000 +0200
@@ -139,4 +139,11 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #endif /* __ASM_SH_BITOPS_OP32_H */
Index: linux-stable/arch/arc/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/arc/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/arc/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -254,6 +254,13 @@ test_bit(unsigned int nr, const volatile
 	return ((mask & *addr) != 0);
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #ifdef CONFIG_ISA_ARCOMPACT
 
 /*
Index: linux-stable/arch/h8300/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/h8300/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/h8300/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -88,7 +88,8 @@ static inline int test_bit(int nr, const
 	return ret;
 }
 
-#define __test_bit(nr, addr) test_bit(nr, addr)
+#define __test_bit(nr, addr) test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) test_bit(nr, addr)
 
 #define H8300_GEN_TEST_BITOP(FNNAME, OP)			\
 static inline int FNNAME(int nr, void *addr)			\
Index: linux-stable/arch/frv/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/frv/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/frv/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -156,6 +156,13 @@ static inline int __test_bit(unsigned lo
 	 __constant_test_bit((nr),(addr)) :	\
 	 __test_bit((nr),(addr)))
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #include <asm-generic/bitops/find.h>
 
 /**
Index: linux-stable/arch/mn10300/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/mn10300/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
+++ linux-stable/arch/mn10300/include/asm/bitops.h	2022-10-27 14:24:31.000000000 +0200
@@ -73,6 +73,13 @@ static inline int test_bit(unsigned long
 	return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * change bit
 */
2 years, 2 months · 1 participant · 0 comments
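The generic fallback added by this patch is just an acquire load of the word that contains the bit, followed by the usual shift and mask. For readers outside the kernel tree, the following is a minimal user-space C11 analogue of that pattern; the name test_bit_acquire_c11 is made up for illustration, and atomic_load_explicit(..., memory_order_acquire) stands in for the kernel's smp_load_acquire():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

/* Acquire-load the word that holds bit 'nr', then extract the bit,
 * mirroring the generic arch_test_bit_acquire() fallback above. */
static bool test_bit_acquire_c11(unsigned long nr, _Atomic unsigned long *addr)
{
        unsigned long word = atomic_load_explicit(&addr[BIT_WORD(nr)],
                                                  memory_order_acquire);
        return 1UL & (word >> (nr % BITS_PER_LONG));
}

int main(void)
{
        _Atomic unsigned long bitmap[2] = {0, 0};

        /* Release-set bit 3 of the second word... */
        atomic_fetch_or_explicit(&bitmap[1], 1UL << 3, memory_order_release);

        /* ...and observe it as bit BITS_PER_LONG + 3 of the bitmap. */
        printf("%d\n", (int)test_bit_acquire_c11(BITS_PER_LONG + 3, bitmap));
        return 0;
}

The acquire ordering only matters when the bit is observed set: it guarantees that loads issued after the bit test cannot return values older than the stores that preceded the release operation which set the bit.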
[PATCH 4.9 1/2] wait_on_bit: add an acquire memory barrier
by Mikulas Patocka
commit 8238b4579866b7c1bb99883cfe102a43db5506ff upstream.

There are several places in the kernel where wait_on_bit is not followed
by a memory barrier (for example, in drivers/md/dm-bufio.c:new_read).

On architectures with weak memory ordering, it may happen that memory
accesses that follow wait_on_bit are reordered before wait_on_bit and
they may return invalid data.

Fix this class of bugs by introducing a new function "test_bit_acquire"
that works like test_bit, but has acquire memory ordering semantics.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Will Deacon <will@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/x86/include/asm/bitops.h           | 21 +++++++++++++++++++++
 include/asm-generic/bitops/non-atomic.h | 14 ++++++++++++++
 include/linux/buffer_head.h             |  2 +-
 include/linux/wait.h                    |  8 ++++----
 kernel/sched/wait.c                     |  2 +-
 5 files changed, 41 insertions(+), 6 deletions(-)

Index: linux-stable/arch/x86/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/x86/include/asm/bitops.h	2022-10-27 14:24:29.000000000 +0200
+++ linux-stable/arch/x86/include/asm/bitops.h	2022-10-27 14:24:29.000000000 +0200
@@ -314,6 +314,20 @@ static __always_inline bool constant_tes
 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+	bool oldbit;
+
+	asm volatile("testb %2,%1"
+		     CC_SET(nz)
+		     : CC_OUT(nz) (oldbit)
+		     : "m" (((unsigned char *)addr)[nr >> 3]),
+		       "i" (1 << (nr & 7))
+		     :"memory");
+
+	return oldbit;
+}
+
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
 	bool oldbit;
@@ -340,6 +354,13 @@ static bool test_bit(int nr, const volat
 	 ? constant_test_bit((nr), (addr))	\
 	 : variable_test_bit((nr), (addr)))
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+					  variable_test_bit(nr, addr);
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
Index: linux-stable/include/asm-generic/bitops/non-atomic.h
===================================================================
--- linux-stable.orig/include/asm-generic/bitops/non-atomic.h	2022-10-27 14:24:29.000000000 +0200
+++ linux-stable/include/asm-generic/bitops/non-atomic.h	2022-10-27 14:24:29.000000000 +0200
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /**
  * __set_bit - Set a bit in memory
@@ -105,4 +106,17 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+/**
+ * arch_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+#define test_bit_acquire arch_test_bit_acquire
+
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
Index: linux-stable/include/linux/buffer_head.h
===================================================================
--- linux-stable.orig/include/linux/buffer_head.h	2022-10-27 14:24:29.000000000 +0200
+++ linux-stable/include/linux/buffer_head.h	2022-10-27 14:24:29.000000000 +0200
@@ -162,7 +162,7 @@ static __always_inline int buffer_uptoda
 	 * make it consistent with folio_test_uptodate
 	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
 	 */
-	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+	return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
Index: linux-stable/include/linux/wait.h
===================================================================
--- linux-stable.orig/include/linux/wait.h	2022-10-27 14:24:29.000000000 +0200
+++ linux-stable/include/linux/wait.h	2022-10-27 14:24:29.000000000 +0200
@@ -1066,7 +1066,7 @@ static inline int
 wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait,
@@ -1091,7 +1091,7 @@ static inline int
 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait_io,
@@ -1118,7 +1118,7 @@ wait_on_bit_timeout(unsigned long *word,
 		    unsigned long timeout)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_timeout(word, bit,
 					       bit_wait_timeout,
@@ -1146,7 +1146,7 @@ wait_on_bit_action(unsigned long *word,
 		   unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit, action, mode);
 }
Index: linux-stable/kernel/sched/wait.c
===================================================================
--- linux-stable.orig/kernel/sched/wait.c	2022-10-27 14:24:29.000000000 +0200
+++ linux-stable/kernel/sched/wait.c	2022-10-27 14:24:29.000000000 +0200
@@ -389,7 +389,7 @@ __wait_on_bit(wait_queue_head_t *wq, str
 		prepare_to_wait(wq, &q->wait, mode);
 		if (test_bit(q->key.bit_nr, q->key.flags))
 			ret = (*action)(&q->key, mode);
-	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
+	} while (test_bit_acquire(q->key.bit_nr, q->key.flags) && !ret);
 	finish_wait(wq, &q->wait);
 	return ret;
 }
2 years, 2 months · 1 participant · 0 comments
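The reordering hazard this patch closes is the classic flag-publishes-data handshake. Below is a self-contained C11 sketch of the same situation; writer(), payload, and the DM_BUF_DONE bit are hypothetical stand-ins for the dm-bufio case cited in the commit message, with the acquire load playing the role of test_bit_acquire() (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define DM_BUF_DONE 0UL  /* hypothetical flag bit, bit 0 of the flag word */

static int payload;                  /* plain data published by the writer */
static _Atomic unsigned long flags;  /* flag word */

static void *writer(void *arg)
{
        (void)arg;
        payload = 42;                                   /* 1: write the data   */
        atomic_fetch_or_explicit(&flags, 1UL << DM_BUF_DONE,
                                 memory_order_release); /* 2: then set the bit */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, writer, NULL);

        /* Acquire-test of the bit: once it reads as set, the subsequent
         * load of 'payload' cannot be satisfied with a stale value. */
        while (!(atomic_load_explicit(&flags, memory_order_acquire) &
                 (1UL << DM_BUF_DONE)))
                ;       /* spin until the writer publishes */

        printf("payload = %d\n", payload);      /* prints 42, never garbage */

        pthread_join(t, NULL);
        return 0;
}

With a relaxed load in the reader loop (the analogue of plain test_bit), a weakly ordered CPU such as ARM or POWER would be allowed to satisfy the read of payload before the flag check, which is exactly the stale-data window the commit message describes.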
[PATCH 4.14 2/2] provide arch_test_bit_acquire for architectures that define test_bit
by Mikulas Patocka
commit d6ffe6067a54972564552ea45d320fb98db1ac5e upstream.

Some architectures define their own arch_test_bit and they also need
arch_test_bit_acquire, otherwise they won't compile. We also clean up
the code by using the generic test_bit if that is equivalent to the
arch-specific version.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier")
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/alpha/include/asm/bitops.h   |  7 +++++++
 arch/arc/include/asm/bitops.h     |  7 +++++++
 arch/frv/include/asm/bitops.h     |  7 +++++++
 arch/h8300/include/asm/bitops.h   |  3 ++-
 arch/hexagon/include/asm/bitops.h | 15 +++++++++++++++
 arch/ia64/include/asm/bitops.h    |  7 +++++++
 arch/m68k/include/asm/bitops.h    |  6 ++++++
 arch/mn10300/include/asm/bitops.h |  7 +++++++
 arch/s390/include/asm/bitops.h    |  7 +++++++
 arch/sh/include/asm/bitops-op32.h |  7 +++++++
 10 files changed, 72 insertions(+), 1 deletion(-)

Index: linux-stable/arch/alpha/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/alpha/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/alpha/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -289,6 +289,13 @@ test_bit(int nr, const volatile void * a
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
Index: linux-stable/arch/hexagon/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/hexagon/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/hexagon/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -186,7 +186,22 @@ static inline int __test_bit(int nr, con
 	return retval;
 }
 
+static inline int __test_bit_acquire(int nr, const volatile unsigned long *addr)
+{
+	int retval;
+
+	asm volatile(
+	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+	: "=&r" (retval)
+	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+	: "p0", "memory"
+	);
+
+	return retval;
+}
+
 #define test_bit(nr, addr) __test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) __test_bit_acquire(nr, addr)
 
 /*
  * ffz - find first zero in word.
Index: linux-stable/arch/ia64/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/ia64/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/ia64/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -337,6 +337,13 @@ test_bit (int nr, const volatile void *a
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /**
  * ffz - find the first zero bit in a long word
  * @x: The long word to find the bit in
Index: linux-stable/arch/m68k/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/m68k/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/m68k/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -153,6 +153,12 @@ static inline int test_bit(int nr, const
 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
 
 static inline int bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
Index: linux-stable/arch/s390/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/s390/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/s390/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -215,6 +215,13 @@ static inline int test_bit(unsigned long
 	return (*addr >> (nr & 7)) & 1;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 static inline int test_and_set_bit_lock(unsigned long nr,
 					volatile unsigned long *ptr)
 {
Index: linux-stable/arch/sh/include/asm/bitops-op32.h
===================================================================
--- linux-stable.orig/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:24:02.000000000 +0200
@@ -140,4 +140,11 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #endif /* __ASM_SH_BITOPS_OP32_H */
Index: linux-stable/arch/arc/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/arc/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/arc/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -254,6 +254,13 @@ test_bit(unsigned int nr, const volatile
 	return ((mask & *addr) != 0);
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #ifdef CONFIG_ISA_ARCOMPACT
 
 /*
Index: linux-stable/arch/h8300/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/h8300/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/h8300/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -89,7 +89,8 @@ static inline int test_bit(int nr, const
 	return ret;
 }
 
-#define __test_bit(nr, addr) test_bit(nr, addr)
+#define __test_bit(nr, addr) test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) test_bit(nr, addr)
 
 #define H8300_GEN_TEST_BITOP(FNNAME, OP)			\
 static inline int FNNAME(int nr, void *addr)			\
Index: linux-stable/arch/frv/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/frv/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/frv/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -156,6 +156,13 @@ static inline int __test_bit(unsigned lo
 	 __constant_test_bit((nr),(addr)) :	\
 	 __test_bit((nr),(addr)))
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #include <asm-generic/bitops/find.h>
 
 /**
Index: linux-stable/arch/mn10300/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/mn10300/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
+++ linux-stable/arch/mn10300/include/asm/bitops.h	2022-10-27 14:24:02.000000000 +0200
@@ -73,6 +73,13 @@ static inline int test_bit(unsigned long
 	return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * change bit
 */
2 years, 2 months · 1 participant · 0 comments
[PATCH 5.14 1/2] wait_on_bit: add an acquire memory barrier
by Mikulas Patocka
commit 8238b4579866b7c1bb99883cfe102a43db5506ff upstream.

There are several places in the kernel where wait_on_bit is not followed
by a memory barrier (for example, in drivers/md/dm-bufio.c:new_read).

On architectures with weak memory ordering, it may happen that memory
accesses that follow wait_on_bit are reordered before wait_on_bit and
they may return invalid data.

Fix this class of bugs by introducing a new function "test_bit_acquire"
that works like test_bit, but has acquire memory ordering semantics.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Will Deacon <will@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/x86/include/asm/bitops.h           | 21 +++++++++++++++++++++
 include/asm-generic/bitops/non-atomic.h | 14 ++++++++++++++
 include/linux/buffer_head.h             |  2 +-
 include/linux/wait_bit.h                |  8 ++++----
 kernel/sched/wait_bit.c                 |  2 +-
 5 files changed, 41 insertions(+), 6 deletions(-)

Index: linux-stable/arch/x86/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/x86/include/asm/bitops.h	2022-10-27 14:24:00.000000000 +0200
+++ linux-stable/arch/x86/include/asm/bitops.h	2022-10-27 14:24:00.000000000 +0200
@@ -328,6 +328,20 @@ static __always_inline bool constant_tes
 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+	bool oldbit;
+
+	asm volatile("testb %2,%1"
+		     CC_SET(nz)
+		     : CC_OUT(nz) (oldbit)
+		     : "m" (((unsigned char *)addr)[nr >> 3]),
+		       "i" (1 << (nr & 7))
+		     :"memory");
+
+	return oldbit;
+}
+
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
 	bool oldbit;
@@ -354,6 +368,13 @@ static bool test_bit(int nr, const volat
 	 ? constant_test_bit((nr), (addr))	\
 	 : variable_test_bit((nr), (addr)))
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+					  variable_test_bit(nr, addr);
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
Index: linux-stable/include/asm-generic/bitops/non-atomic.h
===================================================================
--- linux-stable.orig/include/asm-generic/bitops/non-atomic.h	2022-10-27 14:24:00.000000000 +0200
+++ linux-stable/include/asm-generic/bitops/non-atomic.h	2022-10-27 14:24:00.000000000 +0200
@@ -3,6 +3,7 @@
 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /**
  * __set_bit - Set a bit in memory
@@ -106,4 +107,17 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+/**
+ * arch_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+#define test_bit_acquire arch_test_bit_acquire
+
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
Index: linux-stable/include/linux/buffer_head.h
===================================================================
--- linux-stable.orig/include/linux/buffer_head.h	2022-10-27 14:24:00.000000000 +0200
+++ linux-stable/include/linux/buffer_head.h	2022-10-27 14:24:00.000000000 +0200
@@ -163,7 +163,7 @@ static __always_inline int buffer_uptoda
 	 * make it consistent with folio_test_uptodate
 	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
 	 */
-	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+	return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
Index: linux-stable/include/linux/wait_bit.h
===================================================================
--- linux-stable.orig/include/linux/wait_bit.h	2022-10-27 14:24:00.000000000 +0200
+++ linux-stable/include/linux/wait_bit.h	2022-10-27 14:24:00.000000000 +0200
@@ -76,7 +76,7 @@ static inline int
 wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait,
@@ -101,7 +101,7 @@ static inline int
 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait_io,
@@ -128,7 +128,7 @@ wait_on_bit_timeout(unsigned long *word,
 		    unsigned long timeout)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_timeout(word, bit,
 					       bit_wait_timeout,
@@ -156,7 +156,7 @@ wait_on_bit_action(unsigned long *word,
 		   unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit, action, mode);
 }
Index: linux-stable/kernel/sched/wait_bit.c
===================================================================
--- linux-stable.orig/kernel/sched/wait_bit.c	2022-10-27 14:24:00.000000000 +0200
+++ linux-stable/kernel/sched/wait_bit.c	2022-10-27 14:24:00.000000000 +0200
@@ -49,7 +49,7 @@ __wait_on_bit(struct wait_queue_head *wq
 		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
 		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
 			ret = (*action)(&wbq_entry->key, mode);
-	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+	} while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
 	finish_wait(wq_head, &wbq_entry->wq_entry);
 	return ret;
 }
2 years, 2 months · 1 participant · 0 comments
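A note on the x86 half of this patch: constant_test_bit_acquire() can be an ordinary testb with only a "memory" clobber because x86's TSO memory model already gives every load acquire semantics, so only the compiler has to be kept from reordering. The two small functions below illustrate this; they are not part of the patch, just a sketch to inspect in a disassembler:

#include <stdatomic.h>

/* On x86-64 both functions typically compile to the same plain mov:
 * the hardware already orders loads, so memory_order_acquire only
 * constrains the compiler. On a weakly ordered target such as AArch64
 * the acquire variant instead emits a load-acquire (ldar). */
unsigned long load_relaxed(_Atomic unsigned long *p)
{
        return atomic_load_explicit(p, memory_order_relaxed);
}

unsigned long load_acquire(_Atomic unsigned long *p)
{
        return atomic_load_explicit(p, memory_order_acquire);
}

That codegen difference on weakly ordered CPUs is exactly what the generic smp_load_acquire()-based fallback captures for the non-x86 architectures.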
[PATCH 4.19 2/2] provide arch_test_bit_acquire for architectures that define test_bit
by Mikulas Patocka
commit d6ffe6067a54972564552ea45d320fb98db1ac5e upstream.

Some architectures define their own arch_test_bit and they also need
arch_test_bit_acquire, otherwise they won't compile. We also clean up
the code by using the generic test_bit if that is equivalent to the
arch-specific version.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier")
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/alpha/include/asm/bitops.h   |  7 +++++++
 arch/arc/include/asm/bitops.h     |  7 +++++++
 arch/h8300/include/asm/bitops.h   |  3 ++-
 arch/hexagon/include/asm/bitops.h | 15 +++++++++++++++
 arch/ia64/include/asm/bitops.h    |  7 +++++++
 arch/m68k/include/asm/bitops.h    |  6 ++++++
 arch/s390/include/asm/bitops.h    |  7 +++++++
 arch/sh/include/asm/bitops-op32.h |  7 +++++++
 8 files changed, 58 insertions(+), 1 deletion(-)

Index: linux-stable/arch/alpha/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/alpha/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
+++ linux-stable/arch/alpha/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
@@ -289,6 +289,13 @@ test_bit(int nr, const volatile void * a
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
Index: linux-stable/arch/hexagon/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/hexagon/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
+++ linux-stable/arch/hexagon/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
@@ -186,7 +186,22 @@ static inline int __test_bit(int nr, con
 	return retval;
 }
 
+static inline int __test_bit_acquire(int nr, const volatile unsigned long *addr)
+{
+	int retval;
+
+	asm volatile(
+	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+	: "=&r" (retval)
+	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+	: "p0", "memory"
+	);
+
+	return retval;
+}
+
 #define test_bit(nr, addr) __test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) __test_bit_acquire(nr, addr)
 
 /*
  * ffz - find first zero in word.
Index: linux-stable/arch/ia64/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/ia64/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
+++ linux-stable/arch/ia64/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
@@ -337,6 +337,13 @@ test_bit (int nr, const volatile void *a
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /**
  * ffz - find the first zero bit in a long word
  * @x: The long word to find the bit in
Index: linux-stable/arch/m68k/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/m68k/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
+++ linux-stable/arch/m68k/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
@@ -153,6 +153,12 @@ static inline int test_bit(int nr, const
 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
 
 static inline int bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
Index: linux-stable/arch/s390/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/s390/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
+++ linux-stable/arch/s390/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
@@ -215,6 +215,13 @@ static inline int test_bit(unsigned long
 	return (*addr >> (nr & 7)) & 1;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 static inline int test_and_set_bit_lock(unsigned long nr,
 					volatile unsigned long *ptr)
 {
Index: linux-stable/arch/sh/include/asm/bitops-op32.h
===================================================================
--- linux-stable.orig/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:23:33.000000000 +0200
+++ linux-stable/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:23:33.000000000 +0200
@@ -140,4 +140,11 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #endif /* __ASM_SH_BITOPS_OP32_H */
Index: linux-stable/arch/arc/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/arc/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
+++ linux-stable/arch/arc/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
@@ -254,6 +254,13 @@ test_bit(unsigned int nr, const volatile
 	return ((mask & *addr) != 0);
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #ifdef CONFIG_ISA_ARCOMPACT
 
 /*
Index: linux-stable/arch/h8300/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/h8300/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
+++ linux-stable/arch/h8300/include/asm/bitops.h	2022-10-27 14:23:33.000000000 +0200
@@ -89,7 +89,8 @@ static inline int test_bit(int nr, const
 	return ret;
 }
 
-#define __test_bit(nr, addr) test_bit(nr, addr)
+#define __test_bit(nr, addr) test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) test_bit(nr, addr)
 
 #define H8300_GEN_TEST_BITOP(FNNAME, OP)			\
 static inline int FNNAME(int nr, void *addr)			\
2 years, 2 months · 1 participant · 0 comments
[PATCH 4.19 1/2] wait_on_bit: add an acquire memory barrier
by Mikulas Patocka
commit 8238b4579866b7c1bb99883cfe102a43db5506ff upstream.

There are several places in the kernel where wait_on_bit is not followed
by a memory barrier (for example, in drivers/md/dm-bufio.c:new_read).

On architectures with weak memory ordering, it may happen that memory
accesses that follow wait_on_bit are reordered before wait_on_bit and
they may return invalid data.

Fix this class of bugs by introducing a new function "test_bit_acquire"
that works like test_bit, but has acquire memory ordering semantics.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Will Deacon <will@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/x86/include/asm/bitops.h           | 21 +++++++++++++++++++++
 include/asm-generic/bitops/non-atomic.h | 14 ++++++++++++++
 include/linux/buffer_head.h             |  2 +-
 include/linux/wait_bit.h                |  8 ++++----
 kernel/sched/wait_bit.c                 |  2 +-
 5 files changed, 41 insertions(+), 6 deletions(-)

Index: linux-stable/arch/x86/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/x86/include/asm/bitops.h	2022-10-27 14:23:32.000000000 +0200
+++ linux-stable/arch/x86/include/asm/bitops.h	2022-10-27 14:23:32.000000000 +0200
@@ -317,6 +317,20 @@ static __always_inline bool constant_tes
 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+	bool oldbit;
+
+	asm volatile("testb %2,%1"
+		     CC_SET(nz)
+		     : CC_OUT(nz) (oldbit)
+		     : "m" (((unsigned char *)addr)[nr >> 3]),
+		       "i" (1 << (nr & 7))
+		     :"memory");
+
+	return oldbit;
+}
+
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
 	bool oldbit;
@@ -343,6 +357,13 @@ static bool test_bit(int nr, const volat
 	 ? constant_test_bit((nr), (addr))	\
 	 : variable_test_bit((nr), (addr)))
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+					  variable_test_bit(nr, addr);
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
Index: linux-stable/include/asm-generic/bitops/non-atomic.h
===================================================================
--- linux-stable.orig/include/asm-generic/bitops/non-atomic.h	2022-10-27 14:23:32.000000000 +0200
+++ linux-stable/include/asm-generic/bitops/non-atomic.h	2022-10-27 14:23:32.000000000 +0200
@@ -3,6 +3,7 @@
 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /**
  * __set_bit - Set a bit in memory
@@ -106,4 +107,17 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+/**
+ * arch_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+#define test_bit_acquire arch_test_bit_acquire
+
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
Index: linux-stable/include/linux/buffer_head.h
===================================================================
--- linux-stable.orig/include/linux/buffer_head.h	2022-10-27 14:23:32.000000000 +0200
+++ linux-stable/include/linux/buffer_head.h	2022-10-27 14:23:32.000000000 +0200
@@ -166,7 +166,7 @@ static __always_inline int buffer_uptoda
 	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
 	 */
-	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+	return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
Index: linux-stable/include/linux/wait_bit.h
===================================================================
--- linux-stable.orig/include/linux/wait_bit.h	2022-10-27 14:23:32.000000000 +0200
+++ linux-stable/include/linux/wait_bit.h	2022-10-27 14:23:32.000000000 +0200
@@ -71,7 +71,7 @@ static inline int
 wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait,
@@ -96,7 +96,7 @@ static inline int
 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word,
 		    unsigned long timeout)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_timeout(word, bit,
 					       bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word,
 		   unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit, action, mode);
 }
Index: linux-stable/kernel/sched/wait_bit.c
===================================================================
--- linux-stable.orig/kernel/sched/wait_bit.c	2022-10-27 14:23:32.000000000 +0200
+++ linux-stable/kernel/sched/wait_bit.c	2022-10-27 14:23:32.000000000 +0200
@@ -46,7 +46,7 @@ __wait_on_bit(struct wait_queue_head *wq
 		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
 		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
 			ret = (*action)(&wbq_entry->key, mode);
-	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+	} while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
 	finish_wait(wq_head, &wbq_entry->wq_entry);
 	return ret;
 }
2 years, 2 months · 1 participant · 0 comments
[PATCH 5.4 2/2] provide arch_test_bit_acquire for architectures that define test_bit
by Mikulas Patocka
commit d6ffe6067a54972564552ea45d320fb98db1ac5e upstream.

Some architectures define their own arch_test_bit and they also need
arch_test_bit_acquire, otherwise they won't compile. We also clean up
the code by using the generic test_bit if that is equivalent to the
arch-specific version.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier")
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/alpha/include/asm/bitops.h   |  7 +++++++
 arch/arc/include/asm/bitops.h     |  7 +++++++
 arch/h8300/include/asm/bitops.h   |  3 ++-
 arch/hexagon/include/asm/bitops.h | 15 +++++++++++++++
 arch/ia64/include/asm/bitops.h    |  7 +++++++
 arch/m68k/include/asm/bitops.h    |  6 ++++++
 arch/s390/include/asm/bitops.h    |  7 +++++++
 arch/sh/include/asm/bitops-op32.h |  7 +++++++
 8 files changed, 58 insertions(+), 1 deletion(-)

Index: linux-stable/arch/alpha/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/alpha/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
+++ linux-stable/arch/alpha/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
@@ -289,6 +289,13 @@ test_bit(int nr, const volatile void * a
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
Index: linux-stable/arch/hexagon/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/hexagon/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
+++ linux-stable/arch/hexagon/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
@@ -172,7 +172,22 @@ static inline int __test_bit(int nr, con
 	return retval;
 }
 
+static inline int __test_bit_acquire(int nr, const volatile unsigned long *addr)
+{
+	int retval;
+
+	asm volatile(
+	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+	: "=&r" (retval)
+	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+	: "p0", "memory"
+	);
+
+	return retval;
+}
+
 #define test_bit(nr, addr) __test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) __test_bit_acquire(nr, addr)
 
 /*
  * ffz - find first zero in word.
Index: linux-stable/arch/ia64/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/ia64/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
+++ linux-stable/arch/ia64/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
@@ -337,6 +337,13 @@ test_bit (int nr, const volatile void *a
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /**
  * ffz - find the first zero bit in a long word
  * @x: The long word to find the bit in
Index: linux-stable/arch/m68k/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/m68k/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
+++ linux-stable/arch/m68k/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
@@ -153,6 +153,12 @@ static inline int test_bit(int nr, const
 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
 
 static inline int bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
Index: linux-stable/arch/s390/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/s390/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
+++ linux-stable/arch/s390/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
@@ -219,6 +219,13 @@ static inline bool arch_test_bit(unsigne
 	return (*addr >> (nr & 7)) & 1;
 }
 
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 static inline bool arch_test_and_set_bit_lock(unsigned long nr,
 					      volatile unsigned long *ptr)
 {
Index: linux-stable/arch/sh/include/asm/bitops-op32.h
===================================================================
--- linux-stable.orig/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:23:06.000000000 +0200
+++ linux-stable/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:23:06.000000000 +0200
@@ -140,4 +140,11 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #endif /* __ASM_SH_BITOPS_OP32_H */
Index: linux-stable/arch/arc/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/arc/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
+++ linux-stable/arch/arc/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
@@ -251,6 +251,13 @@ test_bit(unsigned int nr, const volatile
 	return ((mask & *addr) != 0);
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #ifdef CONFIG_ISA_ARCOMPACT
 
 /*
Index: linux-stable/arch/h8300/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/h8300/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
+++ linux-stable/arch/h8300/include/asm/bitops.h	2022-10-27 14:23:06.000000000 +0200
@@ -83,7 +83,8 @@ static inline int test_bit(int nr, const
 	return ret;
 }
 
-#define __test_bit(nr, addr) test_bit(nr, addr)
+#define __test_bit(nr, addr) test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) test_bit(nr, addr)
 
 #define H8300_GEN_TEST_BITOP(FNNAME, OP)			\
 static inline int FNNAME(int nr, void *addr)			\
2 years, 2 months · 1 participant · 0 comments
[PATCH 5.4 1/2] wait_on_bit: add an acquire memory barrier
by Mikulas Patocka
commit 8238b4579866b7c1bb99883cfe102a43db5506ff upstream.

There are several places in the kernel where wait_on_bit is not followed
by a memory barrier (for example, in drivers/md/dm-bufio.c:new_read).

On architectures with weak memory ordering, it may happen that memory
accesses that follow wait_on_bit are reordered before wait_on_bit and
they may return invalid data.

Fix this class of bugs by introducing a new function "test_bit_acquire"
that works like test_bit, but has acquire memory ordering semantics.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Will Deacon <will@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/x86/include/asm/bitops.h             | 21 +++++++++++++++++++++
 include/asm-generic/bitops-instrumented.h |  6 ++++++
 include/asm-generic/bitops/non-atomic.h   | 14 ++++++++++++++
 include/linux/buffer_head.h               |  2 +-
 include/linux/wait_bit.h                  |  8 ++++----
 kernel/sched/wait_bit.c                   |  2 +-
 6 files changed, 47 insertions(+), 6 deletions(-)

Index: linux-stable/arch/x86/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/x86/include/asm/bitops.h	2022-10-27 14:23:04.000000000 +0200
+++ linux-stable/arch/x86/include/asm/bitops.h	2022-10-27 14:23:04.000000000 +0200
@@ -207,6 +207,20 @@ static __always_inline bool constant_tes
 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+	bool oldbit;
+
+	asm volatile("testb %2,%1"
+		     CC_SET(nz)
+		     : CC_OUT(nz) (oldbit)
+		     : "m" (((unsigned char *)addr)[nr >> 3]),
+		       "i" (1 << (nr & 7))
+		     :"memory");
+
+	return oldbit;
+}
+
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
 	bool oldbit;
@@ -224,6 +238,13 @@ static __always_inline bool variable_tes
 	 ? constant_test_bit((nr), (addr))	\
 	 : variable_test_bit((nr), (addr)))
 
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+					  variable_test_bit(nr, addr);
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
Index: linux-stable/include/asm-generic/bitops/non-atomic.h
===================================================================
--- linux-stable.orig/include/asm-generic/bitops/non-atomic.h	2022-10-27 14:23:04.000000000 +0200
+++ linux-stable/include/asm-generic/bitops/non-atomic.h	2022-10-27 14:23:04.000000000 +0200
@@ -3,6 +3,7 @@
 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /**
  * __set_bit - Set a bit in memory
@@ -106,4 +107,17 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+/**
+ * arch_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+#define test_bit_acquire arch_test_bit_acquire
+
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
Index: linux-stable/include/linux/buffer_head.h
===================================================================
--- linux-stable.orig/include/linux/buffer_head.h	2022-10-27 14:23:04.000000000 +0200
+++ linux-stable/include/linux/buffer_head.h	2022-10-27 14:23:04.000000000 +0200
@@ -166,7 +166,7 @@ static __always_inline int buffer_uptoda
 	 * make it consistent with folio_test_uptodate
 	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
 	 */
-	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+	return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
Index: linux-stable/include/linux/wait_bit.h
===================================================================
--- linux-stable.orig/include/linux/wait_bit.h	2022-10-27 14:23:04.000000000 +0200
+++ linux-stable/include/linux/wait_bit.h	2022-10-27 14:23:04.000000000 +0200
@@ -71,7 +71,7 @@ static inline int
 wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait,
@@ -96,7 +96,7 @@ static inline int
 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word,
 		    unsigned long timeout)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_timeout(word, bit,
 					       bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word,
 		   unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit, action, mode);
 }
Index: linux-stable/kernel/sched/wait_bit.c
===================================================================
--- linux-stable.orig/kernel/sched/wait_bit.c	2022-10-27 14:23:04.000000000 +0200
+++ linux-stable/kernel/sched/wait_bit.c	2022-10-27 14:23:04.000000000 +0200
@@ -47,7 +47,7 @@ __wait_on_bit(struct wait_queue_head *wq
 		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
 		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
 			ret = (*action)(&wbq_entry->key, mode);
-	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+	} while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
 	finish_wait(wq_head, &wbq_entry->wq_entry);
 	return ret;
 }
Index: linux-stable/include/asm-generic/bitops-instrumented.h
===================================================================
--- linux-stable.orig/include/asm-generic/bitops-instrumented.h	2022-10-27 14:23:04.000000000 +0200
+++ linux-stable/include/asm-generic/bitops-instrumented.h	2022-10-27 14:23:04.000000000 +0200
@@ -238,6 +238,12 @@ static inline bool test_bit(long nr, con
 	return arch_test_bit(nr, addr);
 }
 
+static inline bool test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+	kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_bit_acquire(nr, addr);
+}
+
 #if defined(arch_clear_bit_unlock_is_negative_byte)
 /**
 * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
[PATCH 5.10 2/2] provide arch_test_bit_acquire for architectures that define test_bit
by Mikulas Patocka
commit d6ffe6067a54972564552ea45d320fb98db1ac5e upstream.

Some architectures define their own arch_test_bit and they also need
arch_test_bit_acquire, otherwise they won't compile.  We also clean up
the code by using the generic test_bit if that is equivalent to the
arch-specific version.

Signed-off-by: Mikulas Patocka <mpatocka(a)redhat.com>
Cc: stable(a)vger.kernel.org
Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier")
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>

---
 arch/alpha/include/asm/bitops.h   |  7 +++++++
 arch/arc/include/asm/bitops.h     |  7 +++++++
 arch/h8300/include/asm/bitops.h   |  3 ++-
 arch/hexagon/include/asm/bitops.h | 15 +++++++++++++++
 arch/ia64/include/asm/bitops.h    |  7 +++++++
 arch/m68k/include/asm/bitops.h    |  6 ++++++
 arch/s390/include/asm/bitops.h    |  7 +++++++
 arch/sh/include/asm/bitops-op32.h |  7 +++++++
 8 files changed, 58 insertions(+), 1 deletion(-)

Index: linux-stable/arch/alpha/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/alpha/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
+++ linux-stable/arch/alpha/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
@@ -289,6 +289,13 @@ test_bit(int nr, const volatile void * a
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
Index: linux-stable/arch/hexagon/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/hexagon/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
+++ linux-stable/arch/hexagon/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
@@ -172,7 +172,22 @@ static inline int __test_bit(int nr, con
 	return retval;
 }
 
+static inline int __test_bit_acquire(int nr, const volatile unsigned long *addr)
+{
+	int retval;
+
+	asm volatile(
+	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+	: "=&r" (retval)
+	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+	: "p0", "memory"
+	);
+
+	return retval;
+}
+
 #define test_bit(nr, addr) __test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) __test_bit_acquire(nr, addr)
 
 /*
  * ffz - find first zero in word.
Index: linux-stable/arch/ia64/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/ia64/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
+++ linux-stable/arch/ia64/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
@@ -337,6 +337,13 @@ test_bit (int nr, const volatile void *a
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /**
  * ffz - find the first zero bit in a long word
  * @x: The long word to find the bit in
Index: linux-stable/arch/m68k/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/m68k/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
+++ linux-stable/arch/m68k/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
@@ -153,6 +153,12 @@ static inline int test_bit(int nr, const
 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
 
 static inline int bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
Index: linux-stable/arch/s390/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/s390/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
+++ linux-stable/arch/s390/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
@@ -219,6 +219,13 @@ static inline bool arch_test_bit(unsigne
 	return (*addr >> (nr & 7)) & 1;
 }
 
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 static inline bool
 arch_test_and_set_bit_lock(unsigned long nr, volatile unsigned long *ptr)
 {
Index: linux-stable/arch/sh/include/asm/bitops-op32.h
===================================================================
--- linux-stable.orig/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:22:39.000000000 +0200
+++ linux-stable/arch/sh/include/asm/bitops-op32.h	2022-10-27 14:22:39.000000000 +0200
@@ -138,4 +138,11 @@ static inline int test_bit(int nr, const
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #endif /* __ASM_SH_BITOPS_OP32_H */
Index: linux-stable/arch/h8300/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/h8300/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
+++ linux-stable/arch/h8300/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
@@ -83,7 +83,8 @@ static inline int test_bit(int nr, const
 	return ret;
 }
 
-#define __test_bit(nr, addr) test_bit(nr, addr)
+#define __test_bit(nr, addr) test_bit(nr, addr)
+#define test_bit_acquire(nr, addr) test_bit(nr, addr)
 
 #define H8300_GEN_TEST_BITOP(FNNAME, OP)			\
 static inline int FNNAME(int nr, void *addr)			\
Index: linux-stable/arch/arc/include/asm/bitops.h
===================================================================
--- linux-stable.orig/arch/arc/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
+++ linux-stable/arch/arc/include/asm/bitops.h	2022-10-27 14:22:39.000000000 +0200
@@ -197,6 +197,13 @@ test_bit(unsigned int nr, const volatile
 	return ((mask & *addr) != 0);
 }
 
+static __always_inline bool
+test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 #ifdef CONFIG_ISA_ARCOMPACT
 
 /*
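
A side note on the build requirement the commit message describes: an architecture that overrides the plain bit test must now also provide the acquire variant, because generic code calls it unconditionally, and the h8300 hunk shows the shortcut of aliasing the acquire variant to the plain one. Both ideas can be sketched in ordinary C. This is a hypothetical, deliberately simplified toy (my_arch_test_bit and friends are made-up names, and the alias is only safe where plain loads already give the required ordering, e.g. on a uniprocessor-only port like h8300):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

/* "arch" layer: this toy architecture overrides the plain bit test. */
static inline bool my_arch_test_bit(long nr, const unsigned long *addr)
{
        return 1UL & (addr[BIT_WORD(nr)] >> (nr % BITS_PER_LONG));
}
#define test_bit         my_arch_test_bit
/* The per-arch fix in this patch: the override must come in pairs.
 * Here the acquire variant simply aliases the plain test (cf. the
 * h8300 hunk above). */
#define test_bit_acquire my_arch_test_bit

/* "generic" layer: supplies a default only when the arch did not.
 * Without the alias above, generic callers of test_bit_acquire()
 * would fail to build on arches that bypass this header -- the
 * compile break the commit message describes. */
#ifndef test_bit_acquire
static inline bool test_bit_acquire(long nr, const unsigned long *addr)
{
        /* a real implementation would load the word with acquire ordering */
        return 1UL & (addr[BIT_WORD(nr)] >> (nr % BITS_PER_LONG));
}
#endif

int main(void)
{
        unsigned long word = 1UL << 3;

        /* both resolve to my_arch_test_bit() on this toy arch */
        printf("%d %d\n", test_bit(3, &word), test_bit_acquire(3, &word));
        return 0;
}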