runtime constants: add x86 architecture support
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 10 Jun 2024 19:32:14 +0000 (12:32 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 19 Jun 2024 19:34:34 +0000 (12:34 -0700)
This implements the runtime constant infrastructure for x86, allowing
the dcache d_hash() function to be generated with the hash table address
as a runtime constant, followed by a shift by a runtime-constant hash
index width.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/include/asm/runtime-const.h [new file with mode: 0644]
arch/x86/kernel/vmlinux.lds.S

diff --git a/arch/x86/include/asm/runtime-const.h b/arch/x86/include/asm/runtime-const.h
new file mode 100644 (file)
index 0000000..24e3a53
--- /dev/null
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RUNTIME_CONST_H
+#define _ASM_RUNTIME_CONST_H
+
+/*
+ * Load a pointer "constant": the mov's full-word immediate starts out
+ * as a recognizable placeholder (0x0123456789abcdef) and is patched
+ * with the real value at init time via runtime_const_init().  Each use
+ * records the location of the immediate in the runtime_ptr_<sym>
+ * section: label 1 marks the end of the mov, so "1b - %c2" (with
+ * %c2 == sizeof(long)) is the address where the immediate begins,
+ * stored as a PC-relative s32 offset ("... - .").
+ */
+#define runtime_const_ptr(sym) ({                              \
+       typeof(sym) __ret;                                      \
+       asm_inline("mov %1,%0\n1:\n"                            \
+               ".pushsection runtime_ptr_" #sym ",\"a\"\n\t"   \
+               ".long 1b - %c2 - .\n\t"                        \
+               ".popsection"                                   \
+               :"=r" (__ret)                                   \
+               :"i" ((unsigned long)0x0123456789abcdefull),    \
+                "i" (sizeof(long)));                           \
+       __ret; })
+
+// The 'typeof' will create at _least_ a 32-bit type, but
+// will happily also take a bigger type and the 'shrl' will
+// clear the upper bits
+//
+// The shift count 12 is only a placeholder, patched at init time:
+// label 1 follows the shrl, so "1b - 1" is the address of the imm8
+// shift-count byte, recorded as a PC-relative s32 offset in the
+// runtime_shift_<sym> section for __runtime_fixup_shift() to rewrite.
+#define runtime_const_shift_right_32(val, sym) ({              \
+       typeof(0u+(val)) __ret = (val);                         \
+       asm_inline("shrl $12,%k0\n1:\n"                         \
+               ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
+               ".long 1b - 1 - .\n\t"                          \
+               ".popsection"                                   \
+               :"+r" (__ret));                                 \
+       __ret; })
+
+/*
+ * Patch every recorded use of 'sym' with its now-known value.  The
+ * linker collects the per-use offset records into one section per
+ * (type, sym) pair and provides __start_/__stop_ bounds for it (see
+ * the RUNTIME_CONST() entries in the linker script).
+ */
+#define runtime_const_init(type, sym) do {             \
+       extern s32 __start_runtime_##type##_##sym[];    \
+       extern s32 __stop_runtime_##type##_##sym[];     \
+       runtime_const_fixup(__runtime_fixup_##type,     \
+               (unsigned long)(sym),                   \
+               __start_runtime_##type##_##sym,         \
+               __stop_runtime_##type##_##sym);         \
+} while (0)
+
+/*
+ * The text patching is trivial - you can only do this at init time,
+ * when the text section hasn't been marked RO, and before the text
+ * has ever been executed.
+ */
+/*
+ * Overwrite the mov's full-word immediate with the real pointer value.
+ * 'where' points at the immediate (see runtime_const_ptr()); x86
+ * tolerates the potentially unaligned store.
+ */
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+       *(unsigned long *)where = val;
+}
+
+/*
+ * Overwrite the shrl's imm8 shift count with the low byte of 'val'
+ * (see runtime_const_shift_right_32()).
+ */
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+       *(unsigned char *)where = val;
+}
+
+/*
+ * Walk the [start, end) table of PC-relative s32 offsets and apply the
+ * patching callback at each recorded instruction location.  Each table
+ * entry holds "target - entry", so adding the entry's own address back
+ * recovers the absolute patch address.
+ */
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+       unsigned long val, s32 *start, s32 *end)
+{
+       s32 *p;
+
+       for (p = start; p < end; p++)
+               fn((void *)p + *p, val);
+}
+
+#endif
index 3509afc..6e73403 100644 (file)
@@ -357,6 +357,9 @@ SECTIONS
        PERCPU_SECTION(INTERNODE_CACHE_BYTES)
 #endif
 
+       RUNTIME_CONST(shift, d_hash_shift)
+       RUNTIME_CONST(ptr, dentry_hashtable)
+
        . = ALIGN(PAGE_SIZE);
 
        /* freed after init ends here */