Implement the runtime constant infrastructure for riscv. Use this
infrastructure to generate constants to be used by the d_hash()
function.
This is the riscv variant of commit 94a2bc0f611c ("arm64: add 'runtime
constant' support") and commit e3c92e81711d ("runtime constants: add
x86 architecture support").
Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
---
Ard brought this to my attention in this patch [1].
[1] https://lore.kernel.org/lkml/CAMj1kXE4DJnwFejNWQu784GvyJO=aGNrzuLjSxiowX_e7nW8QA@mail.gmail.com/
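
For context, the generic consumer added by those commits lives in
fs/dcache.c and looks roughly like this (a sketch reconstructed from the
referenced arm64/x86 commits, not part of this patch):

	static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
	{
		return runtime_const_ptr(dentry_hashtable) +
			runtime_const_shift_right_32(hashlen, d_hash_shift);
	}

The placeholder immediates emitted by the macros below are patched with
the real values at boot, once the hash table is allocated, via
runtime_const_init(ptr, dentry_hashtable) and
runtime_const_init(shift, d_hash_shift).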
---
arch/riscv/include/asm/runtime-const.h | 174 +++++++++++++++++++++++++++++++++
arch/riscv/kernel/vmlinux.lds.S | 3 +
2 files changed, 177 insertions(+)
diff --git a/arch/riscv/include/asm/runtime-const.h b/arch/riscv/include/asm/runtime-const.h
new file mode 100644
index 0000000000000000000000000000000000000000..a551ff596d904a7a2ad2627c0cfaa0fff3a2280a
--- /dev/null
+++ b/arch/riscv/include/asm/runtime-const.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_RUNTIME_CONST_H
+#define _ASM_RISCV_RUNTIME_CONST_H
+
+#include <asm/cacheflush.h>
+#include <asm/text-patching.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_32BIT
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret; \
+ asm_inline("1:\t" \
+ ".option push\n\t" \
+ ".option norvc\n\t" \
+ "lui %[__ret],0x89abd\n\t" \
+ "addi %[__ret],%[__ret],-0x211\n\t" \
+ ".option pop\n\t" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ : [__ret] "=r" (__ret)); \
+ __ret; \
+})
+#else
+/*
+ * Loading 64-bit constants into a register from immediates is a non-trivial
+ * task on riscv64. To get it somewhat performant, load 32 bits into two
+ * different registers and then combine the results. If the processor supports
+ * the Zba extension, we can combine the final "slli,srli,add" into the single
+ * instruction "add.uw".
+ */
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline("1:\t" \
+ ".option push\n\t" \
+ ".option norvc\n\t" \
+ "lui %[__ret],0x89abd\n\t" \
+ "lui %[__tmp],0x1234\n\t" \
+ "addiw %[__ret],%[__ret],-0x211\n\t" \
+ "addiw %[__tmp],%[__tmp],0x567\n\t" \
+ "slli %[__tmp],%[__tmp],32\n\t" \
+ ALTERNATIVE( \
+ "slli %[__ret],%[__ret],32\n\t" \
+ "srli %[__ret],%[__ret],32\n\t" \
+ "add %[__ret],%[__ret],%[__tmp]\n\t", \
+ ".option push\n\t" \
+ ".option arch,+zba\n\t" \
+ "add.uw %[__ret],%[__ret],%[__tmp]\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".option pop\n\t", 0, RISCV_ISA_EXT_ZBA, 1) \
+ ".option pop\n\t" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#endif
+
+#ifdef CONFIG_32BIT
+#define SRLI "srli "
+#else
+#define SRLI "srliw "
+#endif
+
+#define runtime_const_shift_right_32(val, sym) \
+({ \
+ u32 __ret; \
+ asm_inline("1:\t" \
+ ".option push\n\t" \
+ ".option norvc\n\t" \
+ SRLI "%[__ret],%[__val],12\n\t" \
+ ".option pop\n\t" \
+ ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ : [__ret] "=r" (__ret) \
+ : [__val] "r" (val)); \
+ __ret; \
+})
+
+#define runtime_const_init(type, sym) do { \
+ extern s32 __start_runtime_##type##_##sym[]; \
+ extern s32 __stop_runtime_##type##_##sym[]; \
+ \
+ runtime_const_fixup(__runtime_fixup_##type, \
+ (unsigned long)(sym), \
+ __start_runtime_##type##_##sym, \
+ __stop_runtime_##type##_##sym); \
+} while (0)
+
+static inline void __runtime_fixup_caches(void *where, unsigned int insns)
+{
+ /* On riscv there are currently only cache-wide flushes so va is ignored. */
+ __always_unused uintptr_t va = (uintptr_t)where;
+
+ flush_icache_range(va, va + 4*insns);
+}
+
+/*
+ * The 32-bit immediate is stored in a lui+addi pairing.
+ * lui holds the upper 20 bits of the immediate in instruction bits 31:12.
+ * addi holds the lower 12 bits of the immediate in instruction bits 31:20.
+ */
+static inline void __runtime_fixup_32(u32 *lui, u32 *addi, unsigned int val)
+{
+ unsigned int lower_immediate, upper_immediate;
+ u32 lui_insn = *(u32 *)lui;
+ u32 addi_insn = *(u32 *)addi;
+
+ lower_immediate = sign_extend32(val, 11);
+ upper_immediate = (val - lower_immediate);
+
+ if (upper_immediate & 0xfffff000) {
+ /* replace upper 20 bits of lui with upper immediate */
+ lui_insn &= 0x00000fff;
+ lui_insn |= upper_immediate & 0xfffff000;
+ } else {
+ /* replace lui with nop if immediate is small enough to fit in addi */
+ lui_insn = 0x00000013;
+ }
+
+ if (lower_immediate & 0x00000fff) {
+ /* replace upper 12 bits of addi with lower 12 bits of val */
+ addi_insn &= 0x000fffff;
+ addi_insn |= (lower_immediate & 0x00000fff) << 20;
+ } else {
+ /* replace addi with nop if lower_immediate is empty */
+ addi_insn = 0x00000013;
+ }
+
+ patch_insn_write(lui, &lui_insn, sizeof(lui_insn));
+ patch_insn_write(addi, &addi_insn, sizeof(addi_insn));
+}
+
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ __runtime_fixup_32(where, where + 4, val);
+ __runtime_fixup_caches(where, 2);
+ } else {
+ __runtime_fixup_32(where, where + 8, val);
+ __runtime_fixup_32(where + 4, where + 12, val >> 32);
+ __runtime_fixup_caches(where, 4);
+ }
+}
+
+/*
+ * Replace the 5-bit shift amount of the srli/srliw instruction, which is
+ * located in instruction bits 24:20.
+ */
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+ u32 insn = *(u32 *)where;
+
+ insn &= 0xfe0fffff;
+ insn |= (val & 0b11111) << 20;
+
+ patch_text_nosync(where, &insn, sizeof(insn));
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+ unsigned long val, s32 *start, s32 *end)
+{
+ while (start < end) {
+ fn(*start + (void *)start, val);
+ start++;
+ }
+}
+
+#endif /* _ASM_RISCV_RUNTIME_CONST_H */
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 002ca58dd998cb78b662837b5ebac988fb6c77bb..61bd5ba6680a786bf1db7dc37bf1acda0639b5c7 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -97,6 +97,9 @@ SECTIONS
{
EXIT_DATA
}
+
+ RUNTIME_CONST_VARIABLES
+
PERCPU_SECTION(L1_CACHE_BYTES)
.rel.dyn : {
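
For reference, RUNTIME_CONST_VARIABLES is defined by
include/asm-generic/vmlinux.lds.h and, as of the referenced commits,
expands to roughly:

	#define RUNTIME_CONST_VARIABLES			\
		RUNTIME_CONST(shift, d_hash_shift)	\
		RUNTIME_CONST(ptr, dentry_hashtable)

where each RUNTIME_CONST(t, x) emits a runtime_##t##_##x output section
collecting the ".long 1b - ." entries pushed by the macros in
runtime-const.h; the linker-provided __start_/__stop_ symbols for those
sections are what runtime_const_init() walks.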
---
base-commit: ffd294d346d185b70e28b1a28abe367bbfe53c04
change-id: 20250123-runtime_const_riscv-6cd854ee2817
--
- Charlie
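
As an aside, the lui/addi immediate split performed by
__runtime_fixup_32() can be sanity-checked in isolation. A stand-alone
user-space sketch mirroring the patch's logic (not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t val = 0x89abcdef;	/* the placeholder constant */
		/* sign_extend32(val, 11): addi sign-extends its 12-bit immediate */
		int32_t lower = (int32_t)(val << 20) >> 20;
		/* lui must be biased up by 0x1000 when bit 11 of val is set */
		uint32_t upper = val - (uint32_t)lower;

		/* for this value: lui 0x89abd, addi -529 (-0x211) */
		printf("lui 0x%x, addi %d\n", upper >> 12, (int)lower);
		return (upper + (uint32_t)lower == val) ? 0 : 1;
	}

which reproduces the "lui 0x89abd" / "addi -0x211" placeholder pair used
in the asm above.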
On 27/01/2025 08:18, Charlie Jenkins wrote:
> Implement the runtime constant infrastructure for riscv. Use this
> infrastructure to generate constants to be used by the d_hash()
> function.
>
> This is the riscv variant of commit 94a2bc0f611c ("arm64: add 'runtime
> constant' support") and commit e3c92e81711d ("runtime constants: add
> x86 architecture support").
>
> Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>

I think my only comment on this is we should treat instructions as
__le32 type and add the proper conversions when manipulating them.

--
Ben Dooks				http://www.codethink.co.uk/
Senior Engineer				Codethink - Providing Genius

https://www.codethink.co.uk/privacy.html
On Mon, Jan 27, 2025 at 08:51:04AM +0000, Ben Dooks wrote:
> On 27/01/2025 08:18, Charlie Jenkins wrote:
> > Implement the runtime constant infrastructure for riscv. Use this
> > infrastructure to generate constants to be used by the d_hash()
> > function.
> >
> > This is the riscv variant of commit 94a2bc0f611c ("arm64: add 'runtime
> > constant' support") and commit e3c92e81711d ("runtime constants: add
> > x86 architecture support").
> >
> > Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
>
> I think my only comment on this is we should treat instructions as
> __le32 type and add the proper conversions when manipulating them.

Fair enough, I will make the proper adjustments.

- Charlie

> --
> Ben Dooks				http://www.codethink.co.uk/
> Senior Engineer				Codethink - Providing Genius
>
> https://www.codethink.co.uk/privacy.html
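
For completeness, a minimal sketch of the adjustment being discussed
(assuming the usual byteorder helpers; the final respin may look
different):

	/* Instructions are stored little-endian in memory, so convert to
	 * CPU byte order before editing the immediates, and back before
	 * writing the words out. */
	static inline void __runtime_fixup_32(__le32 *lui, __le32 *addi,
					      unsigned int val)
	{
		u32 lui_insn = le32_to_cpu(*lui);
		u32 addi_insn = le32_to_cpu(*addi);
		__le32 lui_res, addi_res;

		/* ... edit lui_insn/addi_insn exactly as in the patch ... */

		lui_res = cpu_to_le32(lui_insn);
		addi_res = cpu_to_le32(addi_insn);
		patch_insn_write(lui, &lui_res, sizeof(lui_res));
		patch_insn_write(addi, &addi_res, sizeof(addi_res));
	}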