This config allows building the kernel as a Position Independent
Executable (PIE) and relocating it to any virtual address at runtime:
this paves the way for KASLR. Runtime relocation is possible because
the relocation metadata is embedded in the kernel image.
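
For reference, applying one R_LARCH_RELATIVE entry at boot conceptually
reduces to the sketch below; this is only an illustration of the scheme,
a simplified form of what relocate.c in this patch does:

	/*
	 * Delta between the address the kernel is running at and the
	 * address it was linked at.
	 */
	unsigned long offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;

	/*
	 * For an R_LARCH_RELATIVE entry, r_offset is the place to patch
	 * and r_addend is the link-time target address; both need the
	 * same load offset applied.
	 */
	*(Elf64_Addr *)(rela->r_offset + offset) = rela->r_addend + offset;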
Signed-off-by: Youling Tang <tangyouling@loongson.cn>
Signed-off-by: Xi Ruoyao <xry111@xry111.site> # Use arch_initcall
---
arch/loongarch/Kconfig | 15 ++++++
arch/loongarch/Makefile | 5 ++
arch/loongarch/kernel/Makefile | 2 +
arch/loongarch/kernel/head.S | 5 ++
arch/loongarch/kernel/relocate.c | 78 +++++++++++++++++++++++++++++
arch/loongarch/kernel/vmlinux.lds.S | 11 +++-
6 files changed, 114 insertions(+), 2 deletions(-)
create mode 100644 arch/loongarch/kernel/relocate.c
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 9cc8b84f7eb0..089a4695b1b3 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -48,6 +48,7 @@ config LOONGARCH
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
+ select SYS_SUPPORTS_RELOCATABLE
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
@@ -229,6 +230,11 @@ config SCHED_OMIT_FRAME_POINTER
config AS_HAS_EXPLICIT_RELOCS
def_bool $(as-instr,x:pcalau12i \$t0$(comma)%pc_hi20(x))
+config SYS_SUPPORTS_RELOCATABLE
+ bool
+ help
+ Selected if the platform supports relocating the kernel.
+
menu "Kernel type and options"
source "kernel/Kconfig.hz"
@@ -474,6 +480,15 @@ config PHYSICAL_START
specified in the "crashkernel=YM@XM" command line boot parameter
passed to the panic-ed kernel).
+config RELOCATABLE
+ bool "Relocatable kernel"
+ depends on SYS_SUPPORTS_RELOCATABLE
+ help
+ This builds the kernel as a Position Independent Executable (PIE),
+ which retains all relocation metadata required to relocate the
+ kernel binary at runtime to a different virtual address than the
+ address it was linked at.
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 4402387d2755..27b5a70ff31c 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -71,6 +71,11 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
+ifeq ($(CONFIG_RELOCATABLE),y)
+LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
+KBUILD_CFLAGS_KERNEL += -fPIE
+endif
+
cflags-y += -ffreestanding
cflags-y += $(call cc-option, -mno-check-zero-division)
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index c8cfbd562921..3341dd5f0926 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -31,6 +31,8 @@ endif
obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_RELOCATABLE) += relocate.o
+
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index d2ac26b5b22b..499edc80d8ab 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -86,6 +86,11 @@ SYM_CODE_START(kernel_entry) # kernel entry point
PTR_ADD sp, sp, tp
set_saved_sp sp, t0, t1
+#ifdef CONFIG_RELOCATABLE
+ /* Apply the relocations */
+ bl relocate_kernel
+#endif
+
bl start_kernel
ASM_BUG()
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
new file mode 100644
index 000000000000..91ce92433bab
--- /dev/null
+++ b/arch/loongarch/kernel/relocate.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Kernel relocation at boot time
+ *
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/panic_notifier.h>
+#include <asm/sections.h>
+
+#define RELOCATED(x) ((void *)((long)x + reloc_offset))
+
+extern long __rela_dyn_start;
+extern long __rela_dyn_end;
+
+static unsigned long reloc_offset;
+
+void __init relocate_kernel(void)
+{
+ reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+
+ if (reloc_offset) {
+ Elf64_Rela *rela, *rela_end;
+ rela = (Elf64_Rela *)&__rela_dyn_start;
+ rela_end = (Elf64_Rela *)&__rela_dyn_end;
+
+ for ( ; rela < rela_end; rela++) {
+ Elf64_Addr addr = rela->r_offset;
+ Elf64_Addr relocated_addr = rela->r_addend;
+
+ if (rela->r_info != R_LARCH_RELATIVE)
+ continue;
+
+ if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
+ relocated_addr =
+ (Elf64_Addr)RELOCATED(relocated_addr);
+
+ *(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
+ }
+ }
+}
+
+/*
+ * Show relocation information on panic.
+ */
+static void show_kernel_relocation(const char *level)
+{
+ if (reloc_offset > 0) {
+ printk(level);
+ pr_cont("Kernel relocated offset @ 0x%lx\n", reloc_offset);
+ pr_cont(" .text @ 0x%lx\n", (unsigned long)&_text);
+ pr_cont(" .data @ 0x%lx\n", (unsigned long)&_sdata);
+ pr_cont(" .bss @ 0x%lx\n", (unsigned long)&__bss_start);
+ }
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ show_kernel_relocation(KERN_EMERG);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+ .notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_location_notifier);
+ return 0;
+}
+
+arch_initcall(register_kernel_offset_dumper);
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index 733b16e8d55d..aec0b6567d24 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -70,6 +70,8 @@ SECTIONS
.plt : ALIGN(16) { *(.plt) }
.got.plt : ALIGN(16) { *(.got.plt) }
+ .data.rel : { *(.data.rel*) }
+
. = ALIGN(PECOFF_SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
@@ -93,8 +95,6 @@ SECTIONS
PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
#endif
- .rela.dyn : ALIGN(8) { *(.rela.dyn) *(.rela*) }
-
.init.bss : {
*(.init.bss)
}
@@ -107,6 +107,12 @@ SECTIONS
RO_DATA(4096)
RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
+ .rela.dyn : ALIGN(8) {
+ __rela_dyn_start = .;
+ *(.rela.dyn) *(.rela*)
+ __rela_dyn_end = .;
+ }
+
.sdata : {
*(.sdata)
}
@@ -133,6 +139,7 @@ SECTIONS
DISCARDS
/DISCARD/ : {
+ *(.dynamic .dynsym .dynstr .hash .gnu.hash)
*(.gnu.attributes)
*(.options)
*(.eh_frame)
--
2.37.3
Hi Youling,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v6.2-rc7 next-20230210]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Youling-Tang/LoongArch-Use-la-pcrel-instead-of-la-abs-when-it-s-trivially-possible/20230210-165022
patch link:    https://lore.kernel.org/r/1676018856-26520-5-git-send-email-tangyouling%40loongson.cn
patch subject: [PATCH v4 4/5] LoongArch: Add support for kernel relocation
config: loongarch-allmodconfig (https://download.01.org/0day-ci/archive/20230211/202302110150.tIuRIiTp-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/26dc7750408c7f232632db44fab905df7b48d83c
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Youling-Tang/LoongArch-Use-la-pcrel-instead-of-la-abs-when-it-s-trivially-possible/20230210-165022
        git checkout 26dc7750408c7f232632db44fab905df7b48d83c
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch SHELL=/bin/bash arch/loongarch/kernel/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202302110150.tIuRIiTp-lkp@intel.com/

Note: functions only called from assembly code should be annotated with the asmlinkage attribute

All warnings (new ones prefixed by >>):

>> arch/loongarch/kernel/relocate.c:21:13: warning: no previous prototype for 'relocate_kernel' [-Wmissing-prototypes]
      21 | void __init relocate_kernel(void)
         |             ^~~~~~~~~~~~~~~


vim +/relocate_kernel +21 arch/loongarch/kernel/relocate.c

    20
  > 21	void __init relocate_kernel(void)
    22	{
    23		reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
    24
    25		if (reloc_offset) {
    26			Elf64_Rela *rela, *rela_end;
    27			rela = (Elf64_Rela *)&__rela_dyn_start;
    28			rela_end = (Elf64_Rela *)&__rela_dyn_end;
    29
    30			for ( ; rela < rela_end; rela++) {
    31				Elf64_Addr addr = rela->r_offset;
    32				Elf64_Addr relocated_addr = rela->r_addend;
    33
    34				if (rela->r_info != R_LARCH_RELATIVE)
    35					continue;
    36
    37				if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
    38					relocated_addr =
    39						(Elf64_Addr)RELOCATED(relocated_addr);
    40
    41				*(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
    42			}
    43		}
    44	}
    45

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests
Hi, Youling,

On Fri, Feb 10, 2023 at 4:48 PM Youling Tang <tangyouling@loongson.cn> wrote:
>
> This config allows building the kernel as a Position Independent
> Executable (PIE) and relocating it to any virtual address at runtime:
> this paves the way for KASLR. Runtime relocation is possible because
> the relocation metadata is embedded in the kernel image.
>
> Signed-off-by: Youling Tang <tangyouling@loongson.cn>
> Signed-off-by: Xi Ruoyao <xry111@xry111.site> # Use arch_initcall
> ---
> arch/loongarch/Kconfig | 15 ++++++
> arch/loongarch/Makefile | 5 ++
> arch/loongarch/kernel/Makefile | 2 +
> arch/loongarch/kernel/head.S | 5 ++
> arch/loongarch/kernel/relocate.c | 78 +++++++++++++++++++++++++++++
> arch/loongarch/kernel/vmlinux.lds.S | 11 +++-
> 6 files changed, 114 insertions(+), 2 deletions(-)
> create mode 100644 arch/loongarch/kernel/relocate.c
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 9cc8b84f7eb0..089a4695b1b3 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -48,6 +48,7 @@ config LOONGARCH
> select ARCH_SUPPORTS_ATOMIC_RMW
> select ARCH_SUPPORTS_HUGETLBFS
> select ARCH_SUPPORTS_NUMA_BALANCING
> + select SYS_SUPPORTS_RELOCATABLE

We don't need such a Kconfig option since it is always true.

Huacai