On Wed, 6 Aug 2025 16:19:59 +0000 Jimmy Tran jtoantran@google.com wrote:
From: Linus Torvalds torvalds@linux-foundation.org
commit e3c92e81711d14b46c3121d36bc8e152cb843923 upstream.
This implements the runtime constant infrastructure for x86, allowing the dcache d_hash() function to be generated using a constant for the hash table address, followed by a shift by a constant of the hash index.
Cc: stable@vger.kernel.org # 6.10.x: e60cc61: vfs: dcache: move hashlen_hash Cc: stable@vger.kernel.org # 6.10.x: e782985: runtime constants: add default Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Jimmy Tran jtoantran@google.com
arch/x86/include/asm/runtime-const.h | 61 ++++++++++++++++++++++++++++ arch/x86/kernel/vmlinux.lds.S | 3 ++ 2 files changed, 64 insertions(+) create mode 100644 arch/x86/include/asm/runtime-const.h
diff --git a/arch/x86/include/asm/runtime-const.h b/arch/x86/include/asm/runtime-const.h new file mode 100644 index 0000000000000..76fdeaa0faa3f --- /dev/null +++ b/arch/x86/include/asm/runtime-const.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RUNTIME_CONST_H +#define _ASM_RUNTIME_CONST_H
+#define runtime_const_ptr(sym) ({ \
- typeof(sym) __ret; \
- asm_inline("mov %1,%0\n1:\n" \
".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
".long 1b - %c2 - .\n\t" \
".popsection" \
: "=r" (__ret) \
: "i" (0x0123456789abcdefULL), \
Surely for 32bit that should only be (say) 0x01234567? Something ought to error out on the out-of-range constant.
"i" (sizeof(long))); \
- __ret; })
+// The 'typeof' will create at _least_ a 32-bit type, but +// will happily also take a bigger type and the 'shrl' will +// clear the upper bits
Eh? What are you trying to achieve? 'shrl' is a 32bit instruction, will be C0 /5 imm8. It only makes sense to have 'unsigned int __ret = (val)'. For a 64bit input you'd need to use 'shrd r/m32,r32,imm8' (0F AC ...) (that is Intel syntax - that's the book I've got). Since that is 'shrd low_32,high_32,count' you'd need separate asm parameters for the low and high parts. Only the low part is updated - which makes returning __ret incorrect even if a 'shrd' instruction is 'magically' generated.
OTOH I've no idea what "+r" does for a 64bit variable and %k0 doesn't seem to be in the gcc docs I'm looking at.
+#define runtime_const_shift_right_32(val, sym) ({ \
- typeof(0u+(val)) __ret = (val); \
- asm_inline("shrl $12,%k0\n1:\n" \
".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
".long 1b - 1 - .\n\t" \
".popsection" \
: "+r" (__ret)); \
- __ret; })
If you want to allow a 64bit input, then something like this might work: u64 v = 0u + (val); u32 lo = v, hi = v >> 32; if (statically_true(hi == 0)) asm_inline("shrl $12,%0", "+r" (lo)); else asm_inline("shrd $12,%0,%1", "+r" (lo) : "r" (hi)); lo The "+r" could be "+rm" - but I'm not sure that'll work in a #define and the values are pretty much needed in registers. (Do check the at&t argument order for shrd.)
David
+#define runtime_const_init(type, sym) do { \
- extern s32 __start_runtime_##type##_##sym[]; \
- extern s32 __stop_runtime_##type##_##sym[]; \
- runtime_const_fixup(__runtime_fixup_##type, \
(unsigned long)(sym), \
__start_runtime_##type##_##sym, \
__stop_runtime_##type##_##sym); \
+} while (0)
+/*
- The text patching is trivial - you can only do this at init time,
- when the text section hasn't been marked RO, and before the text
- has ever been executed.
- */
+static inline void __runtime_fixup_ptr(void *where, unsigned long val) +{
- *(unsigned long *)where = val;
+}
+static inline void __runtime_fixup_shift(void *where, unsigned long val) +{
- *(unsigned char *)where = val;
+}
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
- unsigned long val, s32 *start, s32 *end)
+{
- while (start < end) {
fn(*start + (void *)start, val);
start++;
- }
+}
+#endif diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index c57d5df1abc60..cb5b41480a848 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -371,6 +371,9 @@ SECTIONS PERCPU_SECTION(INTERNODE_CACHE_BYTES) #endif
- RUNTIME_CONST(shift, d_hash_shift)
- RUNTIME_CONST(ptr, dentry_hashtable)
- . = ALIGN(PAGE_SIZE);
/* freed after init ends here */