1 | From: Ard Biesheuvel <ardb@kernel.org> | 1 | From: Ard Biesheuvel <ardb@kernel.org> |
---|---|---|---|
2 | 2 | ||
3 | Start refactoring the x86 startup code so we keep all the code that is | 3 | Reorganize C code that is used during early boot, either in the |
4 | shared between different boot stages (EFI stub, decompressor, early | 4 | decompressor/EFI stub or the kernel proper, but before the kernel |
5 | startup in the core kernel [*]) and/or needs to be built in a special way | 5 | virtual mapping is up. |
6 | (because it is C code that runs from the 1:1 mapping of | ||
7 | RAM) in a single place, sharing all the C flags and other runes that are | ||
8 | needed to disable instrumentation, sanitizers, etc. | ||
9 | 6 | ||
10 | This is an RFC, so I have left some things for later, e.g., the SEV-SNP | 7 | v3: |
11 | init code in arch/x86/coco that is shared between all of the above [*] | 8 | - keep rip_rel_ptr() around in PIC code; sadly, it is still needed in |
12 | and will be tricky to disentangle; there are also some known issues in | 9 | some cases |
13 | that code related to EFI boot that we are addressing in parallel. | 10 | - remove RIP_REL_REF() uses in separate patches |
11 | - keep __head annotations for now; they will all be removed later ||
12 | - disable objtool validation for library objects (i.e., pieces that are | ||
13 | not linked into vmlinux) | ||
14 | |||
15 | I will follow up with a series that gets rid of .head.text altogether, | ||
16 | as it will no longer be needed at all once the startup code is checked | ||
17 | for absolute relocations. | ||
18 | |||
19 | The SEV startup code needs to be moved first, though, and this is a bit | ||
20 | more complicated, so I will decouple that effort from this series, also | ||
21 | because there is a known issue related to memory acceptance from the ||
22 | EFI stub that needs to be fixed first. ||
14 | 23 | ||
15 | Cc: Tom Lendacky <thomas.lendacky@amd.com> | 24 | Cc: Tom Lendacky <thomas.lendacky@amd.com> |
16 | Cc: Dionna Amalie Glaze <dionnaglaze@google.com> | 25 | Cc: Dionna Amalie Glaze <dionnaglaze@google.com> |
17 | Cc: Kevin Loughlin <kevinloughlin@google.com> | 26 | Cc: Kevin Loughlin <kevinloughlin@google.com> |
18 | 27 | ||
19 | Ard Biesheuvel (6): | 28 | Ard Biesheuvel (7): |
20 | x86/boot/compressed: Merge local pgtable.h include into asm/boot.h | 29 | x86/boot/startup: Disable objtool validation for library code |
21 | x86/boot: Move 5-level paging trampoline into startup code | 30 | x86/asm: Make rip_rel_ptr() usable from fPIC code |
22 | x86/boot: Move EFI mixed mode startup code back under arch/x86 | 31 | x86/boot: Move the early GDT/IDT setup code into startup/ |
23 | x86/boot: Move early GDT/IDT setup code into startup/ | ||
24 | x86/boot: Move early kernel mapping code into startup/ | 32 | x86/boot: Move early kernel mapping code into startup/ |
33 | x86/boot: Drop RIP_REL_REF() uses from early mapping code | ||
25 | x86/boot: Move early SME init code into startup/ | 34 | x86/boot: Move early SME init code into startup/ |
35 | x86/boot: Drop RIP_REL_REF() uses from SME startup code | ||
26 | 36 | ||
27 | arch/x86/Makefile | 1 + | 37 | arch/x86/boot/compressed/Makefile | 2 +- |
28 | arch/x86/boot/compressed/Makefile | 4 +- | 38 | arch/x86/boot/startup/Makefile | 22 ++ |
29 | arch/x86/boot/compressed/head_64.S | 1 - | 39 | arch/x86/boot/startup/gdt_idt.c | 83 ++++++ |
30 | arch/x86/boot/compressed/misc.c | 1 - | 40 | arch/x86/boot/startup/map_kernel.c | 225 ++++++++++++++++ |
31 | arch/x86/boot/compressed/pgtable.h | 18 -- | 41 | arch/x86/{mm/mem_encrypt_identity.c => boot/startup/sme.c} | 19 +- |
32 | arch/x86/boot/compressed/pgtable_64.c | 1 - | 42 | arch/x86/coco/sev/core.c | 2 +- |
33 | arch/x86/boot/startup/Makefile | 22 ++ | 43 | arch/x86/coco/sev/shared.c | 4 +- |
34 | drivers/firmware/efi/libstub/x86-mixed.S => arch/x86/boot/startup/efi-mixed.S | 0 | 44 | arch/x86/include/asm/asm.h | 2 +- |
35 | arch/x86/boot/startup/gdt_idt.c | 82 ++++++ | 45 | arch/x86/include/asm/coco.h | 2 +- |
36 | arch/x86/boot/{compressed => startup}/la57toggle.S | 1 - | 46 | arch/x86/include/asm/mem_encrypt.h | 2 +- |
37 | arch/x86/boot/startup/map_kernel.c | 232 +++++++++++++++ | 47 | arch/x86/kernel/head64.c | 285 +------------------- |
38 | arch/x86/{mm/mem_encrypt_identity.c => boot/startup/sme.c} | 45 ++- | 48 | arch/x86/mm/Makefile | 6 - |
39 | arch/x86/include/asm/boot.h | 10 + | 49 | 12 files changed, 346 insertions(+), 308 deletions(-) |
40 | arch/x86/include/asm/mem_encrypt.h | 2 +- | ||
41 | arch/x86/kernel/head64.c | 302 +------------------- | ||
42 | arch/x86/mm/Makefile | 6 - | ||
43 | drivers/firmware/efi/libstub/Makefile | 1 - | ||
44 | 17 files changed, 372 insertions(+), 357 deletions(-) | ||
45 | delete mode 100644 arch/x86/boot/compressed/pgtable.h | ||
46 | create mode 100644 arch/x86/boot/startup/Makefile | ||
47 | rename drivers/firmware/efi/libstub/x86-mixed.S => arch/x86/boot/startup/efi-mixed.S (100%) | ||
48 | create mode 100644 arch/x86/boot/startup/gdt_idt.c | 50 | create mode 100644 arch/x86/boot/startup/gdt_idt.c |
49 | rename arch/x86/boot/{compressed => startup}/la57toggle.S (99%) | ||
50 | create mode 100644 arch/x86/boot/startup/map_kernel.c | 51 | create mode 100644 arch/x86/boot/startup/map_kernel.c |
51 | rename arch/x86/{mm/mem_encrypt_identity.c => boot/startup/sme.c} (92%) | 52 | rename arch/x86/{mm/mem_encrypt_identity.c => boot/startup/sme.c} (97%) |
52 | 53 | ||
54 | |||
55 | base-commit: 4f2d1bbc2c92a32fd612e6c3b51832d5c1c3678e | ||
53 | -- | 56 | -- |
54 | 2.49.0.472.ge94155a9ec-goog | 57 | 2.49.0.504.g3bcea36a83-goog |
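A minimal sketch of the underlying problem the cover letter keeps referring
to (the variable name below is hypothetical; the helper mirrors the
rip_rel_ptr() used later in this series, assuming x86-64 GCC/Clang inline
asm semantics):

    #define __always_inline inline __attribute__((__always_inline__))

    extern unsigned long some_global;   /* hypothetical variable */

    static __always_inline void *rip_rel_ptr(void *p)
    {
            /* "i" passes the symbol address as a link-time constant;
             * the %c modifier prints it without immediate punctuation
             * so it can be used as a RIP-relative displacement. */
            asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
            return p;
    }

    static unsigned long read_it(void)
    {
            /* &some_global may be emitted as an absolute link-time
             * virtual address, which is not mapped yet this early in
             * boot; rip_rel_ptr() yields the address in whatever
             * mapping the code is currently executing from. */
            return *(unsigned long *)rip_rel_ptr(&some_global);
    }

Code that must run from the 1:1 mapping therefore either avoids absolute
symbol references entirely (PIC codegen) or routes every address-of through
a helper like the one above.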
1 | From: Ard Biesheuvel <ardb@kernel.org> | 1 | From: Ard Biesheuvel <ardb@kernel.org> |
---|---|---|---|
2 | 2 | ||
3 | Linus expressed a strong preference for arch-specific asm code (i.e., | 3 | The library code built under arch/x86/boot/startup is not intended to be |
4 | virtually all of it) to reside under arch/ rather than anywhere else. | 4 | linked into vmlinux but only into the decompressor and/or the EFI stub. |
5 | 5 | ||
6 | So move the EFI mixed mode startup code back, and put it under | 6 | This means objtool validation is not needed here, and may result in |
7 | arch/x86/boot/startup/ where all shared x86 startup code is going to | 7 | false positive errors for things like missing retpolines. |
8 | live. | 8 | |
9 | So disable it for all objects added to lib-y. ||
9 | 10 | ||
10 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> | 11 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> |
11 | --- | 12 | --- |
12 | arch/x86/boot/startup/Makefile | 3 +++ | 13 | arch/x86/boot/startup/Makefile | 6 ++++++ |
13 | drivers/firmware/efi/libstub/x86-mixed.S => arch/x86/boot/startup/efi-mixed.S | 0 | 14 | 1 file changed, 6 insertions(+) |
14 | drivers/firmware/efi/libstub/Makefile | 1 - | ||
15 | 3 files changed, 3 insertions(+), 1 deletion(-) | ||
16 | 15 | ||
17 | diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile | 16 | diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile |
18 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/arch/x86/boot/startup/Makefile | 18 | --- a/arch/x86/boot/startup/Makefile |
20 | +++ b/arch/x86/boot/startup/Makefile | 19 | +++ b/arch/x86/boot/startup/Makefile |
21 | @@ -XXX,XX +XXX,XX @@ | 20 | @@ -XXX,XX +XXX,XX @@ KBUILD_AFLAGS += -D__DISABLE_EXPORTS |
22 | # SPDX-License-Identifier: GPL-2.0 | 21 | |
23 | 22 | lib-$(CONFIG_X86_64) += la57toggle.o | |
24 | +KBUILD_AFLAGS += -D__DISABLE_EXPORTS | 23 | lib-$(CONFIG_EFI_MIXED) += efi-mixed.o |
25 | + | 24 | + |
26 | lib-$(CONFIG_X86_64) += la57toggle.o | 25 | +# |
27 | +lib-$(CONFIG_EFI_MIXED) += efi-mixed.o | 26 | +# Disable objtool validation for all library code, which is intended |
28 | diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/arch/x86/boot/startup/efi-mixed.S | 27 | +# to be linked into the decompressor or the EFI stub but not vmlinux |
29 | similarity index 100% | 28 | +# |
30 | rename from drivers/firmware/efi/libstub/x86-mixed.S | 29 | +$(patsubst %.o,$(obj)/%.o,$(lib-y)): OBJECT_FILES_NON_STANDARD := y |
31 | rename to arch/x86/boot/startup/efi-mixed.S | ||
32 | diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/drivers/firmware/efi/libstub/Makefile | ||
35 | +++ b/drivers/firmware/efi/libstub/Makefile | ||
36 | @@ -XXX,XX +XXX,XX @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \ | ||
37 | lib-$(CONFIG_ARM) += arm32-stub.o | ||
38 | lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o | ||
39 | lib-$(CONFIG_X86) += x86-stub.o smbios.o | ||
40 | -lib-$(CONFIG_EFI_MIXED) += x86-mixed.o | ||
41 | lib-$(CONFIG_X86_64) += x86-5lvl.o | ||
42 | lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o | ||
43 | lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o | ||
44 | -- | 30 | -- |
45 | 2.49.0.472.ge94155a9ec-goog | 31 | 2.49.0.504.g3bcea36a83-goog |
1 | From: Ard Biesheuvel <ardb@kernel.org> | 1 | From: Ard Biesheuvel <ardb@kernel.org> |
---|---|---|---|
2 | 2 | ||
3 | Merge the local include "pgtable.h" (which declares the API of the | 3 | RIP_REL_REF() is used in non-PIC C code that is called very early, |
4 | 5-level paging trampoline) into <asm/boot.h> so that its implementation | 4 | before the kernel virtual mapping is up, which is the mapping that the |
5 | in la57toggle.S as well as the calling code can be decoupled from the | 5 | linker expects. It is currently used in two different ways: |
6 | traditional decompressor. | 6 | - to refer to the value of a global variable, including as an lvalue in |
7 | assignments; | ||
8 | - to take the address of a global variable via the mapping that the code | ||
9 | currently executes at. | ||
10 | |||
11 | The former case is only needed in non-PIC code, as PIC code will never | ||
12 | use absolute symbol references when the address of the symbol is not | ||
13 | being used. But taking the address of a variable in PIC code may still | ||
14 | require extra care, as a stack allocated struct assignment may be | ||
15 | emitted as a memcpy() from a statically allocated copy in .rodata. | ||
16 | |||
17 | For instance, this | ||
18 | |||
19 | void startup_64_setup_gdt_idt(void) | ||
20 | { | ||
21 | struct desc_ptr startup_gdt_descr = { | ||
22 | .address = (__force unsigned long)gdt_page.gdt, | ||
23 | .size = GDT_SIZE - 1, | ||
24 | }; | ||
25 | |||
26 | may result in an absolute symbol reference in PIC code, even though the | ||
27 | struct is allocated on the stack and populated at runtime. | ||
28 | |||
29 | To address this case, make rip_rel_ptr() accessible in PIC code, and | ||
30 | update any existing uses where the address of a global variable is | ||
31 | taken using RIP_REL_REF. | ||
32 | |||
33 | Once all code of this nature has been moved into arch/x86/boot/startup | ||
34 | and built with -fPIC, RIP_REL_REF() can be retired, and only | ||
35 | rip_rel_ptr() will remain. | ||
7 | 36 | ||
8 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> | 37 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> |
9 | --- | 38 | --- |
10 | arch/x86/boot/compressed/head_64.S | 1 - | 39 | arch/x86/coco/sev/core.c | 2 +- |
11 | arch/x86/boot/compressed/la57toggle.S | 1 - | 40 | arch/x86/coco/sev/shared.c | 4 ++-- |
12 | arch/x86/boot/compressed/misc.c | 1 - | 41 | arch/x86/include/asm/asm.h | 2 +- |
13 | arch/x86/boot/compressed/pgtable.h | 18 ------------------ | 42 | arch/x86/kernel/head64.c | 23 ++++++++++---------- |
14 | arch/x86/boot/compressed/pgtable_64.c | 1 - | 43 | arch/x86/mm/mem_encrypt_identity.c | 6 ++--- |
15 | arch/x86/include/asm/boot.h | 10 ++++++++++ | 44 | 5 files changed, 18 insertions(+), 19 deletions(-) |
16 | 6 files changed, 10 insertions(+), 22 deletions(-) | 45 | |
17 | 46 | diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c | |
18 | diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S | 47 | index XXXXXXX..XXXXXXX 100644 |
19 | index XXXXXXX..XXXXXXX 100644 | 48 | --- a/arch/x86/coco/sev/core.c |
20 | --- a/arch/x86/boot/compressed/head_64.S | 49 | +++ b/arch/x86/coco/sev/core.c |
21 | +++ b/arch/x86/boot/compressed/head_64.S | 50 | @@ -XXX,XX +XXX,XX @@ static __head void svsm_setup(struct cc_blob_sev_info *cc_info) |
51 | * kernel was loaded (physbase), so the get the CA address using | ||
52 | * RIP-relative addressing. | ||
53 | */ | ||
54 | - pa = (u64)&RIP_REL_REF(boot_svsm_ca_page); | ||
55 | + pa = (u64)rip_rel_ptr(&boot_svsm_ca_page); | ||
56 | |||
57 | /* | ||
58 | * Switch over to the boot SVSM CA while the current CA is still | ||
59 | diff --git a/arch/x86/coco/sev/shared.c b/arch/x86/coco/sev/shared.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/arch/x86/coco/sev/shared.c | ||
62 | +++ b/arch/x86/coco/sev/shared.c | ||
63 | @@ -XXX,XX +XXX,XX @@ static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid | ||
64 | */ | ||
65 | static const struct snp_cpuid_table *snp_cpuid_get_table(void) | ||
66 | { | ||
67 | - return &RIP_REL_REF(cpuid_table_copy); | ||
68 | + return rip_rel_ptr(&cpuid_table_copy); | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | @@ -XXX,XX +XXX,XX @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info) | ||
73 | * routine is running identity mapped when called, both by the decompressor | ||
74 | * code and the early kernel code. | ||
75 | */ | ||
76 | - if (!rmpadjust((unsigned long)&RIP_REL_REF(boot_ghcb_page), RMP_PG_SIZE_4K, 1)) | ||
77 | + if (!rmpadjust((unsigned long)rip_rel_ptr(&boot_ghcb_page), RMP_PG_SIZE_4K, 1)) | ||
78 | return false; | ||
79 | |||
80 | /* | ||
81 | diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h | ||
82 | index XXXXXXX..XXXXXXX 100644 | ||
83 | --- a/arch/x86/include/asm/asm.h | ||
84 | +++ b/arch/x86/include/asm/asm.h | ||
22 | @@ -XXX,XX +XXX,XX @@ | 85 | @@ -XXX,XX +XXX,XX @@ |
23 | #include <asm/bootparam.h> | ||
24 | #include <asm/desc_defs.h> | ||
25 | #include <asm/trapnr.h> | ||
26 | -#include "pgtable.h" | ||
27 | |||
28 | /* | ||
29 | * Fix alignment at 16 bytes. Following CONFIG_FUNCTION_ALIGNMENT will result | ||
30 | diff --git a/arch/x86/boot/compressed/la57toggle.S b/arch/x86/boot/compressed/la57toggle.S | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/arch/x86/boot/compressed/la57toggle.S | ||
33 | +++ b/arch/x86/boot/compressed/la57toggle.S | ||
34 | @@ -XXX,XX +XXX,XX @@ | ||
35 | #include <asm/boot.h> | ||
36 | #include <asm/msr.h> | ||
37 | #include <asm/processor-flags.h> | ||
38 | -#include "pgtable.h" | ||
39 | |||
40 | /* | ||
41 | * This is the 32-bit trampoline that will be copied over to low memory. It | ||
42 | diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/arch/x86/boot/compressed/misc.c | ||
45 | +++ b/arch/x86/boot/compressed/misc.c | ||
46 | @@ -XXX,XX +XXX,XX @@ | ||
47 | |||
48 | #include "misc.h" | ||
49 | #include "error.h" | ||
50 | -#include "pgtable.h" | ||
51 | #include "../string.h" | ||
52 | #include "../voffset.h" | ||
53 | #include <asm/bootparam_utils.h> | ||
54 | diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h | ||
55 | deleted file mode 100644 | ||
56 | index XXXXXXX..XXXXXXX | ||
57 | --- a/arch/x86/boot/compressed/pgtable.h | ||
58 | +++ /dev/null | ||
59 | @@ -XXX,XX +XXX,XX @@ | ||
60 | -#ifndef BOOT_COMPRESSED_PAGETABLE_H | ||
61 | -#define BOOT_COMPRESSED_PAGETABLE_H | ||
62 | - | ||
63 | -#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE) | ||
64 | - | ||
65 | -#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE | ||
66 | -#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0 | ||
67 | - | ||
68 | -#ifndef __ASSEMBLER__ | ||
69 | - | ||
70 | -extern unsigned long *trampoline_32bit; | ||
71 | - | ||
72 | -extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl); | ||
73 | - | ||
74 | -extern const u16 trampoline_ljmp_imm_offset; | ||
75 | - | ||
76 | -#endif /* __ASSEMBLER__ */ | ||
77 | -#endif /* BOOT_COMPRESSED_PAGETABLE_H */ | ||
78 | diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c | ||
79 | index XXXXXXX..XXXXXXX 100644 | ||
80 | --- a/arch/x86/boot/compressed/pgtable_64.c | ||
81 | +++ b/arch/x86/boot/compressed/pgtable_64.c | ||
82 | @@ -XXX,XX +XXX,XX @@ | ||
83 | #include <asm/bootparam_utils.h> | ||
84 | #include <asm/e820/types.h> | ||
85 | #include <asm/processor.h> | ||
86 | -#include "pgtable.h" | ||
87 | #include "../string.h" | ||
88 | #include "efi.h" | ||
89 | |||
90 | diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h | ||
91 | index XXXXXXX..XXXXXXX 100644 | ||
92 | --- a/arch/x86/include/asm/boot.h | ||
93 | +++ b/arch/x86/include/asm/boot.h | ||
94 | @@ -XXX,XX +XXX,XX @@ | ||
95 | # define BOOT_STACK_SIZE 0x1000 | ||
96 | #endif | 86 | #endif |
97 | 87 | ||
98 | +#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE) | ||
99 | + | ||
100 | +#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE | ||
101 | +#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0 | ||
102 | + | ||
103 | #ifndef __ASSEMBLER__ | 88 | #ifndef __ASSEMBLER__ |
104 | extern unsigned int output_len; | 89 | -#ifndef __pic__ |
105 | extern const unsigned long kernel_text_size; | 90 | static __always_inline __pure void *rip_rel_ptr(void *p) |
106 | @@ -XXX,XX +XXX,XX @@ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr, | 91 | { |
107 | void (*error)(char *x)); | 92 | asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p)); |
108 | 93 | ||
109 | extern struct boot_params *boot_params_ptr; | 94 | return p; |
110 | +extern unsigned long *trampoline_32bit; | 95 | } |
111 | +extern const u16 trampoline_ljmp_imm_offset; | 96 | +#ifndef __pic__ |
112 | + | 97 | #define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var))) |
113 | +void trampoline_32bit_src(void *trampoline, bool enable_5lvl); | 98 | #else |
114 | + | 99 | #define RIP_REL_REF(var) (var) |
115 | #endif | 100 | diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c |
116 | 101 | index XXXXXXX..XXXXXXX 100644 | |
117 | #endif /* _ASM_X86_BOOT_H */ | 102 | --- a/arch/x86/kernel/head64.c |
103 | +++ b/arch/x86/kernel/head64.c | ||
104 | @@ -XXX,XX +XXX,XX @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, | ||
105 | * attribute. | ||
106 | */ | ||
107 | if (sme_get_me_mask()) { | ||
108 | - paddr = (unsigned long)&RIP_REL_REF(__start_bss_decrypted); | ||
109 | - paddr_end = (unsigned long)&RIP_REL_REF(__end_bss_decrypted); | ||
110 | + paddr = (unsigned long)rip_rel_ptr(__start_bss_decrypted); | ||
111 | + paddr_end = (unsigned long)rip_rel_ptr(__end_bss_decrypted); | ||
112 | |||
113 | for (; paddr < paddr_end; paddr += PMD_SIZE) { | ||
114 | /* | ||
115 | @@ -XXX,XX +XXX,XX @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, | ||
116 | unsigned long __head __startup_64(unsigned long p2v_offset, | ||
117 | struct boot_params *bp) | ||
118 | { | ||
119 | - pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts); | ||
120 | - unsigned long physaddr = (unsigned long)&RIP_REL_REF(_text); | ||
121 | + pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts); | ||
122 | + unsigned long physaddr = (unsigned long)rip_rel_ptr(_text); | ||
123 | unsigned long va_text, va_end; | ||
124 | unsigned long pgtable_flags; | ||
125 | unsigned long load_delta; | ||
126 | @@ -XXX,XX +XXX,XX @@ unsigned long __head __startup_64(unsigned long p2v_offset, | ||
127 | for (;;); | ||
128 | |||
129 | va_text = physaddr - p2v_offset; | ||
130 | - va_end = (unsigned long)&RIP_REL_REF(_end) - p2v_offset; | ||
131 | + va_end = (unsigned long)rip_rel_ptr(_end) - p2v_offset; | ||
132 | |||
133 | /* Include the SME encryption mask in the fixup value */ | ||
134 | load_delta += sme_get_me_mask(); | ||
135 | |||
136 | /* Fixup the physical addresses in the page table */ | ||
137 | |||
138 | - pgd = &RIP_REL_REF(early_top_pgt)->pgd; | ||
139 | + pgd = rip_rel_ptr(early_top_pgt); | ||
140 | pgd[pgd_index(__START_KERNEL_map)] += load_delta; | ||
141 | |||
142 | if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) { | ||
143 | - p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt); | ||
144 | + p4d = (p4dval_t *)rip_rel_ptr(level4_kernel_pgt); | ||
145 | p4d[MAX_PTRS_PER_P4D - 1] += load_delta; | ||
146 | |||
147 | pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE; | ||
148 | @@ -XXX,XX +XXX,XX @@ unsigned long __head __startup_64(unsigned long p2v_offset, | ||
149 | * error, causing the BIOS to halt the system. | ||
150 | */ | ||
151 | |||
152 | - pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd; | ||
153 | + pmd = rip_rel_ptr(level2_kernel_pgt); | ||
154 | |||
155 | /* invalidate pages before the kernel image */ | ||
156 | for (i = 0; i < pmd_index(va_text); i++) | ||
157 | @@ -XXX,XX +XXX,XX @@ static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data; | ||
158 | static void __head startup_64_load_idt(void *vc_handler) | ||
159 | { | ||
160 | struct desc_ptr desc = { | ||
161 | - .address = (unsigned long)&RIP_REL_REF(bringup_idt_table), | ||
162 | + .address = (unsigned long)rip_rel_ptr(bringup_idt_table), | ||
163 | .size = sizeof(bringup_idt_table) - 1, | ||
164 | }; | ||
165 | struct idt_data data; | ||
166 | @@ -XXX,XX +XXX,XX @@ void early_setup_idt(void) | ||
167 | */ | ||
168 | void __head startup_64_setup_gdt_idt(void) | ||
169 | { | ||
170 | - struct desc_struct *gdt = (void *)(__force unsigned long)gdt_page.gdt; | ||
171 | void *handler = NULL; | ||
172 | |||
173 | struct desc_ptr startup_gdt_descr = { | ||
174 | - .address = (unsigned long)&RIP_REL_REF(*gdt), | ||
175 | + .address = (unsigned long)rip_rel_ptr((__force void *)&gdt_page), | ||
176 | .size = GDT_SIZE - 1, | ||
177 | }; | ||
178 | |||
179 | @@ -XXX,XX +XXX,XX @@ void __head startup_64_setup_gdt_idt(void) | ||
180 | "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory"); | ||
181 | |||
182 | if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) | ||
183 | - handler = &RIP_REL_REF(vc_no_ghcb); | ||
184 | + handler = rip_rel_ptr(vc_no_ghcb); | ||
185 | |||
186 | startup_64_load_idt(handler); | ||
187 | } | ||
188 | diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c | ||
189 | index XXXXXXX..XXXXXXX 100644 | ||
190 | --- a/arch/x86/mm/mem_encrypt_identity.c | ||
191 | +++ b/arch/x86/mm/mem_encrypt_identity.c | ||
192 | @@ -XXX,XX +XXX,XX @@ void __head sme_encrypt_kernel(struct boot_params *bp) | ||
193 | * memory from being cached. | ||
194 | */ | ||
195 | |||
196 | - kernel_start = (unsigned long)RIP_REL_REF(_text); | ||
197 | - kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE); | ||
198 | + kernel_start = (unsigned long)rip_rel_ptr(_text); | ||
199 | + kernel_end = ALIGN((unsigned long)rip_rel_ptr(_end), PMD_SIZE); | ||
200 | kernel_len = kernel_end - kernel_start; | ||
201 | |||
202 | initrd_start = 0; | ||
203 | @@ -XXX,XX +XXX,XX @@ void __head sme_encrypt_kernel(struct boot_params *bp) | ||
204 | * pagetable structures for the encryption of the kernel | ||
205 | * pagetable structures for workarea (in case not currently mapped) | ||
206 | */ | ||
207 | - execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea); | ||
208 | + execute_start = workarea_start = (unsigned long)rip_rel_ptr(sme_workarea); | ||
209 | execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE; | ||
210 | execute_len = execute_end - execute_start; | ||
211 | |||
118 | -- | 212 | -- |
119 | 2.49.0.472.ge94155a9ec-goog | 213 | 2.49.0.504.g3bcea36a83-goog |
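A worked expansion may help here (a sketch reusing the declarations from the
hunks above; nothing below is new kernel code). With the non-PIC definition

    #define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))

an lvalue use such as

    RIP_REL_REF(next_early_pgt) = 2;

expands to

    *(unsigned int *)rip_rel_ptr(&next_early_pgt) = 2;

since typeof(&next_early_pgt) is 'unsigned int *': the store goes through
the RIP-relative address of the variable instead of its absolute link-time
address. The PIC definition reduces to a plain (var) because PIC codegen
already reaches globals RIP-relatively.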
1 | From: Ard Biesheuvel <ardb@kernel.org> | 1 | From: Ard Biesheuvel <ardb@kernel.org> |
---|---|---|---|
2 | 2 | ||
3 | Move the early GDT/IDT setup code that runs long before the kernel | 3 | Move the early GDT/IDT setup code that runs long before the kernel |
4 | virtual mapping is up into arch/x86/boot/startup/, and build it in a way | 4 | virtual mapping is up into arch/x86/boot/startup/, and build it in a way |
5 | that ensures that the code tolerates being called from the 1:1 mapping | 5 | that ensures that the code tolerates being called from the 1:1 mapping |
6 | of memory. | 6 | of memory. The code itself is left unchanged by this patch. |
7 | |||
8 | This allows the RIP_REL_REF() macro uses to be dropped, and removes the | ||
9 | need for emitting the code into the special .head.text section. | ||
10 | 7 | ||
11 | Also tweak the sed symbol-matching pattern in the decompressor to match | 8 | Also tweak the sed symbol-matching pattern in the decompressor to match |
12 | on lowercase 't' or 'b', as these will be emitted by Clang for symbols | 9 | on lowercase 't' or 'b', as these will be emitted by Clang for symbols |
13 | with hidden linkage. | 10 | with hidden linkage. |
14 | 11 | ||
15 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> | 12 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> |
16 | --- | 13 | --- |
17 | arch/x86/boot/compressed/Makefile | 2 +- | 14 | arch/x86/boot/compressed/Makefile | 2 +- |
18 | arch/x86/boot/startup/Makefile | 15 ++++ | 15 | arch/x86/boot/startup/Makefile | 15 ++++ |
19 | arch/x86/boot/startup/gdt_idt.c | 82 ++++++++++++++++++++ | 16 | arch/x86/boot/startup/gdt_idt.c | 83 ++++++++++++++++++++ |
20 | arch/x86/kernel/head64.c | 74 ------------------ | 17 | arch/x86/kernel/head64.c | 73 ----------------- |
21 | 4 files changed, 98 insertions(+), 75 deletions(-) | 18 | 4 files changed, 99 insertions(+), 74 deletions(-) |
22 | 19 | ||
23 | diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile | 20 | diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile |
24 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/arch/x86/boot/compressed/Makefile | 22 | --- a/arch/x86/boot/compressed/Makefile |
26 | +++ b/arch/x86/boot/compressed/Makefile | 23 | +++ b/arch/x86/boot/compressed/Makefile |
... | ... | ||
69 | + | 66 | + |
70 | +#include <linux/linkage.h> | 67 | +#include <linux/linkage.h> |
71 | +#include <linux/types.h> | 68 | +#include <linux/types.h> |
72 | + | 69 | + |
73 | +#include <asm/desc.h> | 70 | +#include <asm/desc.h> |
71 | +#include <asm/init.h> | ||
74 | +#include <asm/setup.h> | 72 | +#include <asm/setup.h> |
75 | +#include <asm/sev.h> | 73 | +#include <asm/sev.h> |
76 | +#include <asm/trapnr.h> | 74 | +#include <asm/trapnr.h> |
77 | + | 75 | + |
78 | +/* | 76 | +/* |
... | ... | ||
88 | + * which also hasn't happened yet in early CPU bringup. | 86 | + * which also hasn't happened yet in early CPU bringup. |
89 | + */ | 87 | + */ |
90 | +static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data; | 88 | +static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data; |
91 | + | 89 | + |
92 | +/* This may run while still in the direct mapping */ | 90 | +/* This may run while still in the direct mapping */ |
93 | +static void startup_64_load_idt(void *vc_handler) | 91 | +static void __head startup_64_load_idt(void *vc_handler) |
94 | +{ | 92 | +{ |
95 | + struct desc_ptr desc = { | 93 | + struct desc_ptr desc = { |
96 | + .address = (unsigned long)bringup_idt_table, | 94 | + .address = (unsigned long)rip_rel_ptr(bringup_idt_table), |
97 | + .size = sizeof(bringup_idt_table) - 1, | 95 | + .size = sizeof(bringup_idt_table) - 1, |
98 | + }; | 96 | + }; |
99 | + struct idt_data data; | 97 | + struct idt_data data; |
100 | + gate_desc idt_desc; | 98 | + gate_desc idt_desc; |
101 | + | 99 | + |
... | ... | ||
123 | +} | 121 | +} |
124 | + | 122 | + |
125 | +/* | 123 | +/* |
126 | + * Setup boot CPU state needed before kernel switches to virtual addresses. | 124 | + * Setup boot CPU state needed before kernel switches to virtual addresses. |
127 | + */ | 125 | + */ |
128 | +void __init startup_64_setup_gdt_idt(void) | 126 | +void __head startup_64_setup_gdt_idt(void) |
129 | +{ | 127 | +{ |
130 | + void *handler = NULL; | 128 | + void *handler = NULL; |
131 | + | 129 | + |
132 | + struct desc_ptr startup_gdt_descr = { | 130 | + struct desc_ptr startup_gdt_descr = { |
133 | + .address = (__force unsigned long)gdt_page.gdt, | 131 | + .address = (unsigned long)rip_rel_ptr((__force void *)&gdt_page), |
134 | + .size = GDT_SIZE - 1, | 132 | + .size = GDT_SIZE - 1, |
135 | + }; | 133 | + }; |
136 | + | 134 | + |
137 | + /* Load GDT */ | 135 | + /* Load GDT */ |
138 | + native_load_gdt(&startup_gdt_descr); | 136 | + native_load_gdt(&startup_gdt_descr); |
... | ... | ||
141 | + asm volatile("movl %%eax, %%ds\n" | 139 | + asm volatile("movl %%eax, %%ds\n" |
142 | + "movl %%eax, %%ss\n" | 140 | + "movl %%eax, %%ss\n" |
143 | + "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory"); | 141 | + "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory"); |
144 | + | 142 | + |
145 | + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) | 143 | + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) |
146 | + handler = vc_no_ghcb; | 144 | + handler = rip_rel_ptr(vc_no_ghcb); |
147 | + | 145 | + |
148 | + startup_64_load_idt(handler); | 146 | + startup_64_load_idt(handler); |
149 | +} | 147 | +} |
150 | diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c | 148 | diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c |
151 | index XXXXXXX..XXXXXXX 100644 | 149 | index XXXXXXX..XXXXXXX 100644 |
... | ... | ||
172 | - | 170 | - |
173 | -/* This may run while still in the direct mapping */ | 171 | -/* This may run while still in the direct mapping */ |
174 | -static void __head startup_64_load_idt(void *vc_handler) | 172 | -static void __head startup_64_load_idt(void *vc_handler) |
175 | -{ | 173 | -{ |
176 | - struct desc_ptr desc = { | 174 | - struct desc_ptr desc = { |
177 | - .address = (unsigned long)&RIP_REL_REF(bringup_idt_table), | 175 | - .address = (unsigned long)rip_rel_ptr(bringup_idt_table), |
178 | - .size = sizeof(bringup_idt_table) - 1, | 176 | - .size = sizeof(bringup_idt_table) - 1, |
179 | - }; | 177 | - }; |
180 | - struct idt_data data; | 178 | - struct idt_data data; |
181 | - gate_desc idt_desc; | 179 | - gate_desc idt_desc; |
182 | - | 180 | - |
... | ... | ||
206 | -/* | 204 | -/* |
207 | - * Setup boot CPU state needed before kernel switches to virtual addresses. | 205 | - * Setup boot CPU state needed before kernel switches to virtual addresses. |
208 | - */ | 206 | - */ |
209 | -void __head startup_64_setup_gdt_idt(void) | 207 | -void __head startup_64_setup_gdt_idt(void) |
210 | -{ | 208 | -{ |
211 | - struct desc_struct *gdt = (void *)(__force unsigned long)gdt_page.gdt; | ||
212 | - void *handler = NULL; | 209 | - void *handler = NULL; |
213 | - | 210 | - |
214 | - struct desc_ptr startup_gdt_descr = { | 211 | - struct desc_ptr startup_gdt_descr = { |
215 | - .address = (unsigned long)&RIP_REL_REF(*gdt), | 212 | - .address = (unsigned long)rip_rel_ptr((__force void *)&gdt_page), |
216 | - .size = GDT_SIZE - 1, | 213 | - .size = GDT_SIZE - 1, |
217 | - }; | 214 | - }; |
218 | - | 215 | - |
219 | - /* Load GDT */ | 216 | - /* Load GDT */ |
220 | - native_load_gdt(&startup_gdt_descr); | 217 | - native_load_gdt(&startup_gdt_descr); |
... | ... | ||
223 | - asm volatile("movl %%eax, %%ds\n" | 220 | - asm volatile("movl %%eax, %%ds\n" |
224 | - "movl %%eax, %%ss\n" | 221 | - "movl %%eax, %%ss\n" |
225 | - "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory"); | 222 | - "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory"); |
226 | - | 223 | - |
227 | - if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) | 224 | - if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) |
228 | - handler = &RIP_REL_REF(vc_no_ghcb); | 225 | - handler = rip_rel_ptr(vc_no_ghcb); |
229 | - | 226 | - |
230 | - startup_64_load_idt(handler); | 227 | - startup_64_load_idt(handler); |
231 | -} | 228 | -} |
232 | -- | 229 | -- |
233 | 2.49.0.472.ge94155a9ec-goog | 230 | 2.49.0.504.g3bcea36a83-goog |
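Why the .address member must be computed with rip_rel_ptr(): the pointer is
consumed directly by the CPU, not by C code. A sketch of the underlying
format and load, with renamed identifiers (the kernel's own struct desc_ptr
and native_load_gdt() are equivalent in shape):

    /* 10-byte pseudo-descriptor consumed by lgdt/lidt in 64-bit mode */
    struct demo_desc_ptr {
            unsigned short size;     /* table limit: size in bytes - 1 */
            unsigned long  address;  /* linear base address of the table */
    } __attribute__((packed));

    static inline void demo_load_gdt(const struct demo_desc_ptr *dtr)
    {
            asm volatile("lgdt %0" :: "m" (*dtr));
    }

The CPU dereferences .address using the mapping that is live at the time of
the load, so a link-time kernel virtual address would fault here.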
1 | From: Ard Biesheuvel <ardb@kernel.org> | 1 | From: Ard Biesheuvel <ardb@kernel.org> |
---|---|---|---|
2 | 2 | ||
3 | The startup code that constructs the kernel virtual mapping runs from | 3 | The startup code that constructs the kernel virtual mapping runs from |
4 | the 1:1 mapping of memory itself, and therefore cannot use absolute | 4 | the 1:1 mapping of memory itself, and therefore cannot use absolute |
5 | symbol references. Move this code into a separate source file under | 5 | symbol references. Before making changes in subsequent patches, move |
6 | arch/x86/boot/startup/ where all such code will be kept from now on. | 6 | this code into a separate source file under arch/x86/boot/startup/ where |
7 | 7 | all such code will be kept from now on. | |
8 | Since all code here is constructed in a manner that ensures that it | ||
9 | tolerates running from the 1:1 mapping of memory, any uses of the | ||
10 | RIP_REL_REF() macro can be dropped, along with __head annotations for | ||
11 | placing this code in a dedicated startup section. | ||
12 | 8 | ||
13 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> | 9 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> |
14 | --- | 10 | --- |
15 | arch/x86/boot/startup/Makefile | 2 +- | 11 | arch/x86/boot/startup/Makefile | 2 +- |
16 | arch/x86/boot/startup/map_kernel.c | 232 ++++++++++++++++++++ | 12 | arch/x86/boot/startup/map_kernel.c | 224 ++++++++++++++++++++ |
17 | arch/x86/kernel/head64.c | 228 +------------------ | 13 | arch/x86/kernel/head64.c | 211 +----------------- |
18 | 3 files changed, 234 insertions(+), 228 deletions(-) | 14 | 3 files changed, 226 insertions(+), 211 deletions(-) |
19 | 15 | ||
20 | diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile | 16 | diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile |
21 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/arch/x86/boot/startup/Makefile | 18 | --- a/arch/x86/boot/startup/Makefile |
23 | +++ b/arch/x86/boot/startup/Makefile | 19 | +++ b/arch/x86/boot/startup/Makefile |
... | ... | ||
42 | +#include <linux/linkage.h> | 38 | +#include <linux/linkage.h> |
43 | +#include <linux/types.h> | 39 | +#include <linux/types.h> |
44 | +#include <linux/kernel.h> | 40 | +#include <linux/kernel.h> |
45 | +#include <linux/pgtable.h> | 41 | +#include <linux/pgtable.h> |
46 | + | 42 | + |
43 | +#include <asm/init.h> | ||
47 | +#include <asm/sections.h> | 44 | +#include <asm/sections.h> |
48 | +#include <asm/setup.h> | 45 | +#include <asm/setup.h> |
49 | +#include <asm/sev.h> | 46 | +#include <asm/sev.h> |
50 | + | 47 | + |
51 | +extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; | 48 | +extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; |
52 | +extern unsigned int next_early_pgt; | 49 | +extern unsigned int next_early_pgt; |
53 | + | ||
54 | +#ifdef CONFIG_X86_5LEVEL | ||
55 | +unsigned int __pgtable_l5_enabled __ro_after_init; | ||
56 | +unsigned int pgdir_shift __ro_after_init = 39; | ||
57 | +EXPORT_SYMBOL(pgdir_shift); | ||
58 | +unsigned int ptrs_per_p4d __ro_after_init = 1; | ||
59 | +EXPORT_SYMBOL(ptrs_per_p4d); | ||
60 | +#endif | ||
61 | + | ||
62 | +#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT | ||
63 | +unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4; | ||
64 | +EXPORT_SYMBOL(page_offset_base); | ||
65 | +unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4; | ||
66 | +EXPORT_SYMBOL(vmalloc_base); | ||
67 | +unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4; | ||
68 | +EXPORT_SYMBOL(vmemmap_base); | ||
69 | +#endif | ||
70 | + | 50 | + |
71 | +static inline bool check_la57_support(void) | 51 | +static inline bool check_la57_support(void) |
72 | +{ | 52 | +{ |
73 | + if (!IS_ENABLED(CONFIG_X86_5LEVEL)) | 53 | + if (!IS_ENABLED(CONFIG_X86_5LEVEL)) |
74 | + return false; | 54 | + return false; |
... | ... | ||
78 | + * stage. Only check if it has been enabled there. | 58 | + * stage. Only check if it has been enabled there. |
79 | + */ | 59 | + */ |
80 | + if (!(native_read_cr4() & X86_CR4_LA57)) | 60 | + if (!(native_read_cr4() & X86_CR4_LA57)) |
81 | + return false; | 61 | + return false; |
82 | + | 62 | + |
83 | + __pgtable_l5_enabled = 1; | 63 | + RIP_REL_REF(__pgtable_l5_enabled) = 1; |
84 | + pgdir_shift = 48; | 64 | + RIP_REL_REF(pgdir_shift) = 48; |
85 | + ptrs_per_p4d = 512; | 65 | + RIP_REL_REF(ptrs_per_p4d) = 512; |
86 | + page_offset_base = __PAGE_OFFSET_BASE_L5; | 66 | + RIP_REL_REF(page_offset_base) = __PAGE_OFFSET_BASE_L5; |
87 | + vmalloc_base = __VMALLOC_BASE_L5; | 67 | + RIP_REL_REF(vmalloc_base) = __VMALLOC_BASE_L5; |
88 | + vmemmap_base = __VMEMMAP_BASE_L5; | 68 | + RIP_REL_REF(vmemmap_base) = __VMEMMAP_BASE_L5; |
89 | + | 69 | + |
90 | + return true; | 70 | + return true; |
91 | +} | 71 | +} |
92 | + | 72 | + |
93 | +static unsigned long sme_postprocess_startup(struct boot_params *bp, | 73 | +static unsigned long __head sme_postprocess_startup(struct boot_params *bp, |
94 | + pmdval_t *pmd, | 74 | + pmdval_t *pmd, |
95 | + unsigned long p2v_offset) | 75 | + unsigned long p2v_offset) |
96 | +{ | 76 | +{ |
97 | + unsigned long paddr, paddr_end; | 77 | + unsigned long paddr, paddr_end; |
98 | + int i; | 78 | + int i; |
99 | + | 79 | + |
100 | + /* Encrypt the kernel and related (if SME is active) */ | 80 | + /* Encrypt the kernel and related (if SME is active) */ |
... | ... | ||
105 | + * The bss section will be memset to zero later in the initialization so | 85 | + * The bss section will be memset to zero later in the initialization so |
106 | + * there is no need to zero it after changing the memory encryption | 86 | + * there is no need to zero it after changing the memory encryption |
107 | + * attribute. | 87 | + * attribute. |
108 | + */ | 88 | + */ |
109 | + if (sme_get_me_mask()) { | 89 | + if (sme_get_me_mask()) { |
110 | + paddr = (unsigned long)__start_bss_decrypted; | 90 | + paddr = (unsigned long)rip_rel_ptr(__start_bss_decrypted); |
111 | + paddr_end = (unsigned long)__end_bss_decrypted; | 91 | + paddr_end = (unsigned long)rip_rel_ptr(__end_bss_decrypted); |
112 | + | 92 | + |
113 | + for (; paddr < paddr_end; paddr += PMD_SIZE) { | 93 | + for (; paddr < paddr_end; paddr += PMD_SIZE) { |
114 | + /* | 94 | + /* |
115 | + * On SNP, transition the page to shared in the RMP table so that | 95 | + * On SNP, transition the page to shared in the RMP table so that |
116 | + * it is consistent with the page table attribute change. | 96 | + * it is consistent with the page table attribute change. |
... | ... | ||
133 | + * modifier for the initial pgdir entry programmed into CR3. | 113 | + * modifier for the initial pgdir entry programmed into CR3. |
134 | + */ | 114 | + */ |
135 | + return sme_get_me_mask(); | 115 | + return sme_get_me_mask(); |
136 | +} | 116 | +} |
137 | + | 117 | + |
138 | +unsigned long __init __startup_64(unsigned long p2v_offset, | 118 | +/* Code in __startup_64() can be relocated during execution, but the compiler |
119 | + * doesn't have to generate PC-relative relocations when accessing globals from | ||
120 | + * that function. Clang actually does not generate them, which leads to | ||
121 | + * boot-time crashes. To work around this problem, every global pointer must | ||
122 | + * be accessed using RIP_REL_REF(). Kernel virtual addresses can be determined | ||
123 | + * by subtracting p2v_offset from the RIP-relative address. | ||
124 | + */ | ||
125 | +unsigned long __head __startup_64(unsigned long p2v_offset, | ||
139 | + struct boot_params *bp) | 126 | + struct boot_params *bp) |
140 | +{ | 127 | +{ |
141 | + pmd_t (*early_pgts)[PTRS_PER_PMD] = early_dynamic_pgts; | 128 | + pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts); |
142 | + unsigned long physaddr = (unsigned long)_text; | 129 | + unsigned long physaddr = (unsigned long)rip_rel_ptr(_text); |
143 | + unsigned long va_text, va_end; | 130 | + unsigned long va_text, va_end; |
144 | + unsigned long pgtable_flags; | 131 | + unsigned long pgtable_flags; |
145 | + unsigned long load_delta; | 132 | + unsigned long load_delta; |
146 | + pgdval_t *pgd; | 133 | + pgdval_t *pgd; |
147 | + p4dval_t *p4d; | 134 | + p4dval_t *p4d; |
... | ... | ||
158 | + | 145 | + |
159 | + /* | 146 | + /* |
160 | + * Compute the delta between the address I am compiled to run at | 147 | + * Compute the delta between the address I am compiled to run at |
161 | + * and the address I am actually running at. | 148 | + * and the address I am actually running at. |
162 | + */ | 149 | + */ |
163 | + phys_base = load_delta = __START_KERNEL_map + p2v_offset; | 150 | + load_delta = __START_KERNEL_map + p2v_offset; |
151 | + RIP_REL_REF(phys_base) = load_delta; | ||
164 | + | 152 | + |
165 | + /* Is the address not 2M aligned? */ | 153 | + /* Is the address not 2M aligned? */ |
166 | + if (load_delta & ~PMD_MASK) | 154 | + if (load_delta & ~PMD_MASK) |
167 | + for (;;); | 155 | + for (;;); |
168 | + | 156 | + |
169 | + va_text = physaddr - p2v_offset; | 157 | + va_text = physaddr - p2v_offset; |
170 | + va_end = (unsigned long)_end - p2v_offset; | 158 | + va_end = (unsigned long)rip_rel_ptr(_end) - p2v_offset; |
171 | + | 159 | + |
172 | + /* Include the SME encryption mask in the fixup value */ | 160 | + /* Include the SME encryption mask in the fixup value */ |
173 | + load_delta += sme_get_me_mask(); | 161 | + load_delta += sme_get_me_mask(); |
174 | + | 162 | + |
175 | + /* Fixup the physical addresses in the page table */ | 163 | + /* Fixup the physical addresses in the page table */ |
176 | + | 164 | + |
177 | + pgd = &early_top_pgt[0].pgd; | 165 | + pgd = rip_rel_ptr(early_top_pgt); |
178 | + pgd[pgd_index(__START_KERNEL_map)] += load_delta; | 166 | + pgd[pgd_index(__START_KERNEL_map)] += load_delta; |
179 | + | 167 | + |
180 | + if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) { | 168 | + if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) { |
181 | + p4d = (p4dval_t *)level4_kernel_pgt; | 169 | + p4d = (p4dval_t *)rip_rel_ptr(level4_kernel_pgt); |
182 | + p4d[MAX_PTRS_PER_P4D - 1] += load_delta; | 170 | + p4d[MAX_PTRS_PER_P4D - 1] += load_delta; |
183 | + | 171 | + |
184 | + pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE; | 172 | + pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE; |
185 | + } | 173 | + } |
186 | + | 174 | + |
187 | + level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta; | 175 | + RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta; |
188 | + level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta; | 176 | + RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta; |
189 | + | 177 | + |
190 | + for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) | 178 | + for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) |
191 | + level2_fixmap_pgt[i].pmd += load_delta; | 179 | + RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta; |
192 | + | 180 | + |
193 | + /* | 181 | + /* |
194 | + * Set up the identity mapping for the switchover. These | 182 | + * Set up the identity mapping for the switchover. These |
195 | + * entries should *NOT* have the global bit set! This also | 183 | + * entries should *NOT* have the global bit set! This also |
196 | + * creates a bunch of nonsense entries but that is fine -- | 184 | + * creates a bunch of nonsense entries but that is fine -- |
197 | + * it avoids problems around wraparound. | 185 | + * it avoids problems around wraparound. |
198 | + */ | 186 | + */ |
199 | + | 187 | + |
200 | + pud = &early_pgts[0]->pmd; | 188 | + pud = &early_pgts[0]->pmd; |
201 | + pmd = &early_pgts[1]->pmd; | 189 | + pmd = &early_pgts[1]->pmd; |
202 | + next_early_pgt = 2; | 190 | + RIP_REL_REF(next_early_pgt) = 2; |
203 | + | 191 | + |
204 | + pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); | 192 | + pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); |
205 | + | 193 | + |
206 | + if (la57) { | 194 | + if (la57) { |
207 | + p4d = &early_pgts[next_early_pgt++]->pmd; | 195 | + p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd; |
208 | + | 196 | + |
209 | + i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; | 197 | + i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; |
210 | + pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; | 198 | + pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; |
211 | + pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; | 199 | + pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; |
212 | + | 200 | + |
... | ... | ||
223 | + pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; | 211 | + pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; |
224 | + pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; | 212 | + pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; |
225 | + | 213 | + |
226 | + pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; | 214 | + pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; |
227 | + /* Filter out unsupported __PAGE_KERNEL_* bits: */ | 215 | + /* Filter out unsupported __PAGE_KERNEL_* bits: */ |
228 | + pmd_entry &= __supported_pte_mask; | 216 | + pmd_entry &= RIP_REL_REF(__supported_pte_mask); |
229 | + pmd_entry += sme_get_me_mask(); | 217 | + pmd_entry += sme_get_me_mask(); |
230 | + pmd_entry += physaddr; | 218 | + pmd_entry += physaddr; |
231 | + | 219 | + |
232 | + for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) { | 220 | + for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) { |
233 | + int idx = i + (physaddr >> PMD_SHIFT); | 221 | + int idx = i + (physaddr >> PMD_SHIFT); |
... | ... | ||
249 | + * and on some hardware (particularly the UV platform) even | 237 | + * and on some hardware (particularly the UV platform) even |
250 | + * speculative access to some reserved areas is caught as an | 238 | + * speculative access to some reserved areas is caught as an |
251 | + * error, causing the BIOS to halt the system. | 239 | + * error, causing the BIOS to halt the system. |
252 | + */ | 240 | + */ |
253 | + | 241 | + |
254 | + pmd = &level2_kernel_pgt[0].pmd; | 242 | + pmd = rip_rel_ptr(level2_kernel_pgt); |
255 | + | 243 | + |
256 | + /* invalidate pages before the kernel image */ | 244 | + /* invalidate pages before the kernel image */ |
257 | + for (i = 0; i < pmd_index(va_text); i++) | 245 | + for (i = 0; i < pmd_index(va_text); i++) |
258 | + pmd[i] &= ~_PAGE_PRESENT; | 246 | + pmd[i] &= ~_PAGE_PRESENT; |
259 | + | 247 | + |
... | ... | ||
278 | extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; | 266 | extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; |
279 | -static unsigned int __initdata next_early_pgt; | 267 | -static unsigned int __initdata next_early_pgt; |
280 | +unsigned int __initdata next_early_pgt; | 268 | +unsigned int __initdata next_early_pgt; |
281 | pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); | 269 | pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); |
282 | 270 | ||
283 | -#ifdef CONFIG_X86_5LEVEL | 271 | #ifdef CONFIG_X86_5LEVEL |
284 | -unsigned int __pgtable_l5_enabled __ro_after_init; | 272 | @@ -XXX,XX +XXX,XX @@ unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4; |
285 | -unsigned int pgdir_shift __ro_after_init = 39; | 273 | EXPORT_SYMBOL(vmemmap_base); |
286 | -EXPORT_SYMBOL(pgdir_shift); | 274 | #endif |
287 | -unsigned int ptrs_per_p4d __ro_after_init = 1; | 275 | |
288 | -EXPORT_SYMBOL(ptrs_per_p4d); | ||
289 | -#endif | ||
290 | - | ||
291 | -#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT | ||
292 | -unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4; | ||
293 | -EXPORT_SYMBOL(page_offset_base); | ||
294 | -unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4; | ||
295 | -EXPORT_SYMBOL(vmalloc_base); | ||
296 | -unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4; | ||
297 | -EXPORT_SYMBOL(vmemmap_base); | ||
298 | -#endif | ||
299 | - | ||
300 | -static inline bool check_la57_support(void) | 276 | -static inline bool check_la57_support(void) |
301 | -{ | 277 | -{ |
302 | - if (!IS_ENABLED(CONFIG_X86_5LEVEL)) | 278 | - if (!IS_ENABLED(CONFIG_X86_5LEVEL)) |
303 | - return false; | 279 | - return false; |
304 | - | 280 | - |
... | ... | ||
334 | - * The bss section will be memset to zero later in the initialization so | 310 | - * The bss section will be memset to zero later in the initialization so |
335 | - * there is no need to zero it after changing the memory encryption | 311 | - * there is no need to zero it after changing the memory encryption |
336 | - * attribute. | 312 | - * attribute. |
337 | - */ | 313 | - */ |
338 | - if (sme_get_me_mask()) { | 314 | - if (sme_get_me_mask()) { |
339 | - paddr = (unsigned long)&RIP_REL_REF(__start_bss_decrypted); | 315 | - paddr = (unsigned long)rip_rel_ptr(__start_bss_decrypted); |
340 | - paddr_end = (unsigned long)&RIP_REL_REF(__end_bss_decrypted); | 316 | - paddr_end = (unsigned long)rip_rel_ptr(__end_bss_decrypted); |
341 | - | 317 | - |
342 | - for (; paddr < paddr_end; paddr += PMD_SIZE) { | 318 | - for (; paddr < paddr_end; paddr += PMD_SIZE) { |
343 | - /* | 319 | - /* |
344 | - * On SNP, transition the page to shared in the RMP table so that | 320 | - * On SNP, transition the page to shared in the RMP table so that |
345 | - * it is consistent with the page table attribute change. | 321 | - * it is consistent with the page table attribute change. |
... | ... | ||
372 | - * by subtracting p2v_offset from the RIP-relative address. | 348 | - * by subtracting p2v_offset from the RIP-relative address. |
373 | - */ | 349 | - */ |
374 | -unsigned long __head __startup_64(unsigned long p2v_offset, | 350 | -unsigned long __head __startup_64(unsigned long p2v_offset, |
375 | - struct boot_params *bp) | 351 | - struct boot_params *bp) |
376 | -{ | 352 | -{ |
377 | - pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts); | 353 | - pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts); |
378 | - unsigned long physaddr = (unsigned long)&RIP_REL_REF(_text); | 354 | - unsigned long physaddr = (unsigned long)rip_rel_ptr(_text); |
379 | - unsigned long va_text, va_end; | 355 | - unsigned long va_text, va_end; |
380 | - unsigned long pgtable_flags; | 356 | - unsigned long pgtable_flags; |
381 | - unsigned long load_delta; | 357 | - unsigned long load_delta; |
382 | - pgdval_t *pgd; | 358 | - pgdval_t *pgd; |
383 | - p4dval_t *p4d; | 359 | - p4dval_t *p4d; |
... | ... | ||
402 | - /* Is the address not 2M aligned? */ | 378 | - /* Is the address not 2M aligned? */ |
403 | - if (load_delta & ~PMD_MASK) | 379 | - if (load_delta & ~PMD_MASK) |
404 | - for (;;); | 380 | - for (;;); |
405 | - | 381 | - |
406 | - va_text = physaddr - p2v_offset; | 382 | - va_text = physaddr - p2v_offset; |
407 | - va_end = (unsigned long)&RIP_REL_REF(_end) - p2v_offset; | 383 | - va_end = (unsigned long)rip_rel_ptr(_end) - p2v_offset; |
408 | - | 384 | - |
409 | - /* Include the SME encryption mask in the fixup value */ | 385 | - /* Include the SME encryption mask in the fixup value */ |
410 | - load_delta += sme_get_me_mask(); | 386 | - load_delta += sme_get_me_mask(); |
411 | - | 387 | - |
412 | - /* Fixup the physical addresses in the page table */ | 388 | - /* Fixup the physical addresses in the page table */ |
413 | - | 389 | - |
414 | - pgd = &RIP_REL_REF(early_top_pgt)->pgd; | 390 | - pgd = rip_rel_ptr(early_top_pgt); |
415 | - pgd[pgd_index(__START_KERNEL_map)] += load_delta; | 391 | - pgd[pgd_index(__START_KERNEL_map)] += load_delta; |
416 | - | 392 | - |
417 | - if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) { | 393 | - if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) { |
418 | - p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt); | 394 | - p4d = (p4dval_t *)rip_rel_ptr(level4_kernel_pgt); |
419 | - p4d[MAX_PTRS_PER_P4D - 1] += load_delta; | 395 | - p4d[MAX_PTRS_PER_P4D - 1] += load_delta; |
420 | - | 396 | - |
421 | - pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE; | 397 | - pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE; |
422 | - } | 398 | - } |
423 | - | 399 | - |
... | ... | ||
486 | - * and on some hardware (particularly the UV platform) even | 462 | - * and on some hardware (particularly the UV platform) even |
487 | - * speculative access to some reserved areas is caught as an | 463 | - * speculative access to some reserved areas is caught as an |
488 | - * error, causing the BIOS to halt the system. | 464 | - * error, causing the BIOS to halt the system. |
489 | - */ | 465 | - */ |
490 | - | 466 | - |
491 | - pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd; | 467 | - pmd = rip_rel_ptr(level2_kernel_pgt); |
492 | - | 468 | - |
493 | - /* invalidate pages before the kernel image */ | 469 | - /* invalidate pages before the kernel image */ |
494 | - for (i = 0; i < pmd_index(va_text); i++) | 470 | - for (i = 0; i < pmd_index(va_text); i++) |
495 | - pmd[i] &= ~_PAGE_PRESENT; | 471 | - pmd[i] &= ~_PAGE_PRESENT; |
496 | - | 472 | - |
... | ... | ||
508 | - | 484 | - |
509 | /* Wipe all early page tables except for the kernel symbol map */ | 485 | /* Wipe all early page tables except for the kernel symbol map */ |
510 | static void __init reset_early_page_tables(void) | 486 | static void __init reset_early_page_tables(void) |
511 | { | 487 | { |
512 | -- | 488 | -- |
513 | 2.49.0.472.ge94155a9ec-goog | 489 | 2.49.0.504.g3bcea36a83-goog |
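The address arithmetic at the top of __startup_64() is easier to follow with
concrete numbers. A worked example (the load addresses below are
illustrative, not taken from the patch):

    static void demo_startup_math(void)
    {
            /* kernel linked to run at __START_KERNEL_map + 16 MiB, but
             * placed by the boot loader at physical address 80 MiB */
            unsigned long physaddr   = 0x0000000005000000UL;  /* rip_rel_ptr(_text) */
            unsigned long link_vaddr = 0xffffffff81000000UL;  /* link-time VA of _text */
            unsigned long p2v_offset = physaddr - link_vaddr; /* wraps mod 2^64 */

            /* recovers the link-time VA: 0xffffffff81000000 */
            unsigned long va_text = physaddr - p2v_offset;

            /*
             * 0x4000000 (80 MiB - 16 MiB): the fixup added to every
             * physical address baked into the early page tables; it
             * also ends up in phys_base.
             */
            unsigned long load_delta = 0xffffffff80000000UL + p2v_offset;

            (void)va_text; (void)load_delta;
    }

load_delta is also what the "Is the address not 2M aligned?" check guards:
the fixup must preserve 2M alignment because the kernel is mapped with
PMD-sized pages.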
New patch | |||
---|---|---|---|
1 | From: Ard Biesheuvel <ardb@kernel.org> | ||
1 | 2 | ||
3 | Now that __startup_64() is built using -fPIC, RIP_REL_REF() has become a | ||
4 | NOP and can be removed. Only some occurrences of rip_rel_ptr() will | ||
5 | remain, to explicitly take the address of certain global structures in | ||
6 | the 1:1 mapping of memory. | ||
7 | |||
8 | While at it, update the code comment to describe why this is needed. | ||
9 | |||
10 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> | ||
11 | --- | ||
12 | arch/x86/boot/startup/map_kernel.c | 41 ++++++++++---------- | ||
13 | 1 file changed, 21 insertions(+), 20 deletions(-) | ||
14 | |||
15 | diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/arch/x86/boot/startup/map_kernel.c | ||
18 | +++ b/arch/x86/boot/startup/map_kernel.c | ||
19 | @@ -XXX,XX +XXX,XX @@ static inline bool check_la57_support(void) | ||
20 | if (!(native_read_cr4() & X86_CR4_LA57)) | ||
21 | return false; | ||
22 | |||
23 | - RIP_REL_REF(__pgtable_l5_enabled) = 1; | ||
24 | - RIP_REL_REF(pgdir_shift) = 48; | ||
25 | - RIP_REL_REF(ptrs_per_p4d) = 512; | ||
26 | - RIP_REL_REF(page_offset_base) = __PAGE_OFFSET_BASE_L5; | ||
27 | - RIP_REL_REF(vmalloc_base) = __VMALLOC_BASE_L5; | ||
28 | - RIP_REL_REF(vmemmap_base) = __VMEMMAP_BASE_L5; | ||
29 | + __pgtable_l5_enabled = 1; | ||
30 | + pgdir_shift = 48; | ||
31 | + ptrs_per_p4d = 512; | ||
32 | + page_offset_base = __PAGE_OFFSET_BASE_L5; | ||
33 | + vmalloc_base = __VMALLOC_BASE_L5; | ||
34 | + vmemmap_base = __VMEMMAP_BASE_L5; | ||
35 | |||
36 | return true; | ||
37 | } | ||
38 | @@ -XXX,XX +XXX,XX @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, | ||
39 | return sme_get_me_mask(); | ||
40 | } | ||
41 | |||
42 | -/* Code in __startup_64() can be relocated during execution, but the compiler | ||
43 | - * doesn't have to generate PC-relative relocations when accessing globals from | ||
44 | - * that function. Clang actually does not generate them, which leads to | ||
45 | - * boot-time crashes. To work around this problem, every global pointer must | ||
46 | - * be accessed using RIP_REL_REF(). Kernel virtual addresses can be determined | ||
47 | - * by subtracting p2v_offset from the RIP-relative address. | ||
48 | +/* | ||
49 | + * This code is compiled using PIC codegen because it will execute from the | ||
50 | + * early 1:1 mapping of memory, which deviates from the mapping expected by the | ||
51 | + * linker. Due to this deviation, taking the address of a global variable will | ||
52 | + * produce an ambiguous result when using the plain & operator. Instead, | ||
53 | + * rip_rel_ptr() must be used, which will return the RIP-relative address in | ||
54 | + * the 1:1 mapping of memory. Kernel virtual addresses can be determined by | ||
55 | + * subtracting p2v_offset from the RIP-relative address. | ||
56 | */ | ||
57 | unsigned long __head __startup_64(unsigned long p2v_offset, | ||
58 | struct boot_params *bp) | ||
59 | @@ -XXX,XX +XXX,XX @@ unsigned long __head __startup_64(unsigned long p2v_offset, | ||
60 | * Compute the delta between the address I am compiled to run at | ||
61 | * and the address I am actually running at. | ||
62 | */ | ||
63 | - load_delta = __START_KERNEL_map + p2v_offset; | ||
64 | - RIP_REL_REF(phys_base) = load_delta; | ||
65 | + phys_base = load_delta = __START_KERNEL_map + p2v_offset; | ||
66 | |||
67 | /* Is the address not 2M aligned? */ | ||
68 | if (load_delta & ~PMD_MASK) | ||
69 | @@ -XXX,XX +XXX,XX @@ unsigned long __head __startup_64(unsigned long p2v_offset, | ||
70 | pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE; | ||
71 | } | ||
72 | |||
73 | - RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta; | ||
74 | - RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta; | ||
75 | + level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta; | ||
76 | + level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta; | ||
77 | |||
78 | for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) | ||
79 | - RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta; | ||
80 | + level2_fixmap_pgt[i].pmd += load_delta; | ||
81 | |||
82 | /* | ||
83 | * Set up the identity mapping for the switchover. These | ||
84 | @@ -XXX,XX +XXX,XX @@ unsigned long __head __startup_64(unsigned long p2v_offset, | ||
85 | |||
86 | pud = &early_pgts[0]->pmd; | ||
87 | pmd = &early_pgts[1]->pmd; | ||
88 | - RIP_REL_REF(next_early_pgt) = 2; | ||
89 | + next_early_pgt = 2; | ||
90 | |||
91 | pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); | ||
92 | |||
93 | if (la57) { | ||
94 | - p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd; | ||
95 | + p4d = &early_pgts[next_early_pgt++]->pmd; | ||
96 | |||
97 | i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; | ||
98 | pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; | ||
99 | @@ -XXX,XX +XXX,XX @@ unsigned long __head __startup_64(unsigned long p2v_offset, | ||
100 | |||
101 | pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; | ||
102 | /* Filter out unsupported __PAGE_KERNEL_* bits: */ | ||
103 | - pmd_entry &= RIP_REL_REF(__supported_pte_mask); | ||
104 | + pmd_entry &= __supported_pte_mask; | ||
105 | pmd_entry += sme_get_me_mask(); | ||
106 | pmd_entry += physaddr; | ||
107 | |||
108 | -- | ||
109 | 2.49.0.504.g3bcea36a83-goog |
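
To make the new comment above concrete: with -fPIC, ordinary loads and stores of globals are already emitted RIP-relative, so only address-taking needs care. Below is a minimal sketch of the idiom, with rip_rel_ptr() modeled on the helper in arch/x86/include/asm/asm.h (the kernel's exact definition and asm constraints may differ, and have their own -fPIC fixups) and phys_base standing in for any global that must be reachable from the 1:1 mapping:

static inline void *rip_rel_ptr(void *p)
{
	/* Force a RIP-relative LEA, yielding the address we are running at */
	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
	return p;
}

unsigned long phys_base;	/* stand-in for the real global */

unsigned long example(unsigned long p2v_offset)
{
	phys_base = 0;		/* plain access: RIP-relative under -fPIC */

	/*
	 * Address-taking is the ambiguous case: rip_rel_ptr() returns the
	 * 1:1 (runtime) address, and subtracting p2v_offset converts it
	 * into the corresponding kernel virtual address, as the comment
	 * added by this patch describes.
	 */
	return (unsigned long)rip_rel_ptr(&phys_base) - p2v_offset;
}
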
... | ... | ||
---|---|---|---|
3 | Move the SME initialization code, which runs from the 1:1 mapping of | 3 | Move the SME initialization code, which runs from the 1:1 mapping of |
4 | memory because it operates on the kernel virtual mapping before it is | 4 | memory because it operates on the kernel virtual mapping before it is |
5 | activated, into the new sub-directory arch/x86/boot/startup/ where all | 5 | activated, into the new sub-directory arch/x86/boot/startup/ where all |
6 | startup code that must tolerate executing from the 1:1 mapping will reside. | 6 | startup code that must tolerate executing from the 1:1 mapping will reside. |
7 | 7 | ||
8 | This allows RIP_REL_REF() macro invocations and __head annotations to be | ||
9 | dropped. | ||
10 | |||
11 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> | 8 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> |
12 | --- | 9 | --- |
13 | arch/x86/boot/startup/Makefile | 1 + | 10 | arch/x86/boot/startup/Makefile | 1 + |
14 | arch/x86/{mm/mem_encrypt_identity.c => boot/startup/sme.c} | 45 +++++++++----------- | 11 | arch/x86/{mm/mem_encrypt_identity.c => boot/startup/sme.c} | 2 -- |
15 | arch/x86/include/asm/mem_encrypt.h | 2 +- | 12 | arch/x86/mm/Makefile | 6 ------ |
16 | arch/x86/mm/Makefile | 6 --- | 13 | 3 files changed, 1 insertion(+), 8 deletions(-) |
17 | 4 files changed, 23 insertions(+), 31 deletions(-) | ||
18 | 14 | ||
19 | diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile | 15 | diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile |
20 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/arch/x86/boot/startup/Makefile | 17 | --- a/arch/x86/boot/startup/Makefile |
22 | +++ b/arch/x86/boot/startup/Makefile | 18 | +++ b/arch/x86/boot/startup/Makefile |
... | ... | ||
27 | +obj-$(CONFIG_AMD_MEM_ENCRYPT) += sme.o | 23 | +obj-$(CONFIG_AMD_MEM_ENCRYPT) += sme.o |
28 | 24 | ||
29 | lib-$(CONFIG_X86_64) += la57toggle.o | 25 | lib-$(CONFIG_X86_64) += la57toggle.o |
30 | lib-$(CONFIG_EFI_MIXED) += efi-mixed.o | 26 | lib-$(CONFIG_EFI_MIXED) += efi-mixed.o |
31 | diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/boot/startup/sme.c | 27 | diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/boot/startup/sme.c |
32 | similarity index 92% | 28 | similarity index 99% |
33 | rename from arch/x86/mm/mem_encrypt_identity.c | 29 | rename from arch/x86/mm/mem_encrypt_identity.c |
34 | rename to arch/x86/boot/startup/sme.c | 30 | rename to arch/x86/boot/startup/sme.c |
35 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
36 | --- a/arch/x86/mm/mem_encrypt_identity.c | 32 | --- a/arch/x86/mm/mem_encrypt_identity.c |
37 | +++ b/arch/x86/boot/startup/sme.c | 33 | +++ b/arch/x86/boot/startup/sme.c |
... | ... | ||
42 | -#include "mm_internal.h" | 38 | -#include "mm_internal.h" |
43 | - | 39 | - |
44 | #define PGD_FLAGS _KERNPG_TABLE_NOENC | 40 | #define PGD_FLAGS _KERNPG_TABLE_NOENC |
45 | #define P4D_FLAGS _KERNPG_TABLE_NOENC | 41 | #define P4D_FLAGS _KERNPG_TABLE_NOENC |
46 | #define PUD_FLAGS _KERNPG_TABLE_NOENC | 42 | #define PUD_FLAGS _KERNPG_TABLE_NOENC |
47 | @@ -XXX,XX +XXX,XX @@ struct sme_populate_pgd_data { | ||
48 | */ | ||
49 | static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch"); | ||
50 | |||
51 | -static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd) | ||
52 | +static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) | ||
53 | { | ||
54 | unsigned long pgd_start, pgd_end, pgd_size; | ||
55 | pgd_t *pgd_p; | ||
56 | @@ -XXX,XX +XXX,XX @@ static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd) | ||
57 | memset(pgd_p, 0, pgd_size); | ||
58 | } | ||
59 | |||
60 | -static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) | ||
61 | +static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) | ||
62 | { | ||
63 | pgd_t *pgd; | ||
64 | p4d_t *p4d; | ||
65 | @@ -XXX,XX +XXX,XX @@ static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) | ||
66 | return pud; | ||
67 | } | ||
68 | |||
69 | -static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) | ||
70 | +static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) | ||
71 | { | ||
72 | pud_t *pud; | ||
73 | pmd_t *pmd; | ||
74 | @@ -XXX,XX +XXX,XX @@ static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) | ||
75 | set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags)); | ||
76 | } | ||
77 | |||
78 | -static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd) | ||
79 | +static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) | ||
80 | { | ||
81 | pud_t *pud; | ||
82 | pmd_t *pmd; | ||
83 | @@ -XXX,XX +XXX,XX @@ static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd) | ||
84 | set_pte(pte, __pte(ppd->paddr | ppd->pte_flags)); | ||
85 | } | ||
86 | |||
87 | -static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) | ||
88 | +static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) | ||
89 | { | ||
90 | while (ppd->vaddr < ppd->vaddr_end) { | ||
91 | sme_populate_pgd_large(ppd); | ||
92 | @@ -XXX,XX +XXX,XX @@ static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) | ||
93 | } | ||
94 | } | ||
95 | |||
96 | -static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd) | ||
97 | +static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd) | ||
98 | { | ||
99 | while (ppd->vaddr < ppd->vaddr_end) { | ||
100 | sme_populate_pgd(ppd); | ||
101 | @@ -XXX,XX +XXX,XX @@ static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd) | ||
102 | } | ||
103 | } | ||
104 | |||
105 | -static void __head __sme_map_range(struct sme_populate_pgd_data *ppd, | ||
106 | +static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, | ||
107 | pmdval_t pmd_flags, pteval_t pte_flags) | ||
108 | { | ||
109 | unsigned long vaddr_end; | ||
110 | @@ -XXX,XX +XXX,XX @@ static void __head __sme_map_range(struct sme_populate_pgd_data *ppd, | ||
111 | __sme_map_range_pte(ppd); | ||
112 | } | ||
113 | |||
114 | -static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) | ||
115 | +static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) | ||
116 | { | ||
117 | __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC); | ||
118 | } | ||
119 | |||
120 | -static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) | ||
121 | +static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) | ||
122 | { | ||
123 | __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC); | ||
124 | } | ||
125 | |||
126 | -static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) | ||
127 | +static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) | ||
128 | { | ||
129 | __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP); | ||
130 | } | ||
131 | |||
132 | -static unsigned long __head sme_pgtable_calc(unsigned long len) | ||
133 | +static unsigned long __init sme_pgtable_calc(unsigned long len) | ||
134 | { | ||
135 | unsigned long entries = 0, tables = 0; | ||
136 | |||
137 | @@ -XXX,XX +XXX,XX @@ static unsigned long __head sme_pgtable_calc(unsigned long len) | ||
138 | return entries + tables; | ||
139 | } | ||
140 | |||
141 | -void __head sme_encrypt_kernel(struct boot_params *bp) | ||
142 | +void __init sme_encrypt_kernel(struct boot_params *bp) | ||
143 | { | ||
144 | unsigned long workarea_start, workarea_end, workarea_len; | ||
145 | unsigned long execute_start, execute_end, execute_len; | ||
146 | @@ -XXX,XX +XXX,XX @@ void __head sme_encrypt_kernel(struct boot_params *bp) | ||
147 | * instrumentation or checking boot_cpu_data in the cc_platform_has() | ||
148 | * function. | ||
149 | */ | ||
150 | - if (!sme_get_me_mask() || | ||
151 | - RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED) | ||
152 | + if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED) | ||
153 | return; | ||
154 | |||
155 | /* | ||
156 | @@ -XXX,XX +XXX,XX @@ void __head sme_encrypt_kernel(struct boot_params *bp) | ||
157 | * memory from being cached. | ||
158 | */ | ||
159 | |||
160 | - kernel_start = (unsigned long)RIP_REL_REF(_text); | ||
161 | - kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE); | ||
162 | + kernel_start = (unsigned long)_text; | ||
163 | + kernel_end = ALIGN((unsigned long)_end, PMD_SIZE); | ||
164 | kernel_len = kernel_end - kernel_start; | ||
165 | |||
166 | initrd_start = 0; | ||
167 | @@ -XXX,XX +XXX,XX @@ void __head sme_encrypt_kernel(struct boot_params *bp) | ||
168 | * pagetable structures for the encryption of the kernel | ||
169 | * pagetable structures for workarea (in case not currently mapped) | ||
170 | */ | ||
171 | - execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea); | ||
172 | + execute_start = workarea_start = (unsigned long)sme_workarea; | ||
173 | execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE; | ||
174 | execute_len = execute_end - execute_start; | ||
175 | |||
176 | @@ -XXX,XX +XXX,XX @@ void __head sme_encrypt_kernel(struct boot_params *bp) | ||
177 | native_write_cr3(__native_read_cr3()); | ||
178 | } | ||
179 | |||
180 | -void __head sme_enable(struct boot_params *bp) | ||
181 | +void __init sme_enable(struct boot_params *bp) | ||
182 | { | ||
183 | unsigned int eax, ebx, ecx, edx; | ||
184 | unsigned long feature_mask; | ||
185 | @@ -XXX,XX +XXX,XX @@ void __head sme_enable(struct boot_params *bp) | ||
186 | me_mask = 1UL << (ebx & 0x3f); | ||
187 | |||
188 | /* Check the SEV MSR whether SEV or SME is enabled */ | ||
189 | - RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV); | ||
190 | + sev_status = msr = __rdmsr(MSR_AMD64_SEV); | ||
191 | feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT; | ||
192 | |||
193 | /* | ||
194 | @@ -XXX,XX +XXX,XX @@ void __head sme_enable(struct boot_params *bp) | ||
195 | return; | ||
196 | } | ||
197 | |||
198 | - RIP_REL_REF(sme_me_mask) = me_mask; | ||
199 | - RIP_REL_REF(physical_mask) &= ~me_mask; | ||
200 | - RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD; | ||
201 | + sme_me_mask = me_mask; | ||
202 | + physical_mask &= ~me_mask; | ||
203 | + cc_vendor = CC_VENDOR_AMD; | ||
204 | cc_set_mask(me_mask); | ||
205 | } | ||
206 | diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h | ||
207 | index XXXXXXX..XXXXXXX 100644 | ||
208 | --- a/arch/x86/include/asm/mem_encrypt.h | ||
209 | +++ b/arch/x86/include/asm/mem_encrypt.h | ||
210 | @@ -XXX,XX +XXX,XX @@ void __init sev_es_init_vc_handling(void); | ||
211 | |||
212 | static inline u64 sme_get_me_mask(void) | ||
213 | { | ||
214 | - return RIP_REL_REF(sme_me_mask); | ||
215 | + return sme_me_mask; | ||
216 | } | ||
217 | |||
218 | #define __bss_decrypted __section(".bss..decrypted") | ||
219 | diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile | 43 | diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile |
220 | index XXXXXXX..XXXXXXX 100644 | 44 | index XXXXXXX..XXXXXXX 100644 |
221 | --- a/arch/x86/mm/Makefile | 45 | --- a/arch/x86/mm/Makefile |
222 | +++ b/arch/x86/mm/Makefile | 46 | +++ b/arch/x86/mm/Makefile |
223 | @@ -XXX,XX +XXX,XX @@ | 47 | @@ -XXX,XX +XXX,XX @@ |
... | ... | ||
259 | obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o | 83 | obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o |
260 | 84 | ||
261 | -obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o | 85 | -obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o |
262 | obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o | 86 | obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o |
263 | -- | 87 | -- |
264 | 2.49.0.472.ge94155a9ec-goog | 88 | 2.49.0.504.g3bcea36a83-goog |
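
A note on the annotation churn visible only in the older revision above (every __head becoming __init): __head pins a function into the .head.text section, which is what makes it safe to call through the 1:1 mapping that this patch keeps referring to. Roughly, and not the kernel's verbatim definition (the real one lives in an x86 header):

#include <linux/compiler.h>

/* sketch: place early-boot code into the .head.text section */
#define __head __section(".head.text")

/*
 * A function annotated this way must tolerate running at a different
 * address than the one it was linked at, i.e. from the 1:1 mapping.
 */
void __head early_helper(void)
{
}
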
1 | From: Ard Biesheuvel <ardb@kernel.org> | 1 | From: Ard Biesheuvel <ardb@kernel.org> |
---|---|---|---|
2 | 2 | ||
3 | The 5-level paging trampoline is used by both the EFI stub and the | 3 | RIP_REL_REF() has no effect on code residing in arch/x86/boot/startup, |
4 | traditional decompressor. Move it out of the decompressor sources into | 4 | as it is built with -fPIC. So remove any occurrences from the SME |
5 | the newly minted arch/x86/boot/startup/ sub-directory which will hold | 5 | startup code. |
6 | startup code that may be shared between the decompressor, the EFI stub | ||
7 | and the kernel proper, and needs to tolerate being called during early | ||
8 | boot, before the kernel virtual mapping has been created. | ||
9 | 6 | ||
10 | This will allow the 5-level paging trampoline to be used by EFI boot | 7 | Note the SME is the only caller of cc_set_mask() that requires this, so |
11 | images such as zboot that omit the traditional decompressor entirely. | 8 | drop it from there as well. |
12 | 9 | ||
13 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> | 10 | Signed-off-by: Ard Biesheuvel <ardb@kernel.org> |
14 | --- | 11 | --- |
15 | arch/x86/Makefile | 1 + | 12 | arch/x86/boot/startup/sme.c | 11 +++++------ |
16 | arch/x86/boot/compressed/Makefile | 2 +- | 13 | arch/x86/include/asm/coco.h | 2 +- |
17 | arch/x86/boot/startup/Makefile | 3 +++ | 14 | arch/x86/include/asm/mem_encrypt.h | 2 +- |
18 | arch/x86/boot/{compressed => startup}/la57toggle.S | 0 | 15 | 3 files changed, 7 insertions(+), 8 deletions(-) |
19 | 4 files changed, 5 insertions(+), 1 deletion(-) | ||
20 | 16 | ||
21 | diff --git a/arch/x86/Makefile b/arch/x86/Makefile | 17 | diff --git a/arch/x86/boot/startup/sme.c b/arch/x86/boot/startup/sme.c |
22 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
23 | --- a/arch/x86/Makefile | 19 | --- a/arch/x86/boot/startup/sme.c |
24 | +++ b/arch/x86/Makefile | 20 | +++ b/arch/x86/boot/startup/sme.c |
25 | @@ -XXX,XX +XXX,XX @@ archprepare: $(cpufeaturemasks.hdr) | 21 | @@ -XXX,XX +XXX,XX @@ void __head sme_encrypt_kernel(struct boot_params *bp) |
26 | ### | 22 | * instrumentation or checking boot_cpu_data in the cc_platform_has() |
27 | # Kernel objects | 23 | * function. |
28 | 24 | */ | |
29 | +core-y += arch/x86/boot/startup/ | 25 | - if (!sme_get_me_mask() || |
30 | libs-y += arch/x86/lib/ | 26 | - RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED) |
31 | 27 | + if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED) | |
32 | # drivers-y are linked after core-y | 28 | return; |
33 | diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile | 29 | |
30 | /* | ||
31 | @@ -XXX,XX +XXX,XX @@ void __head sme_enable(struct boot_params *bp) | ||
32 | me_mask = 1UL << (ebx & 0x3f); | ||
33 | |||
34 | /* Check the SEV MSR whether SEV or SME is enabled */ | ||
35 | - RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV); | ||
36 | + sev_status = msr = __rdmsr(MSR_AMD64_SEV); | ||
37 | feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT; | ||
38 | |||
39 | /* | ||
40 | @@ -XXX,XX +XXX,XX @@ void __head sme_enable(struct boot_params *bp) | ||
41 | return; | ||
42 | } | ||
43 | |||
44 | - RIP_REL_REF(sme_me_mask) = me_mask; | ||
45 | - RIP_REL_REF(physical_mask) &= ~me_mask; | ||
46 | - RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD; | ||
47 | + sme_me_mask = me_mask; | ||
48 | + physical_mask &= ~me_mask; | ||
49 | + cc_vendor = CC_VENDOR_AMD; | ||
50 | cc_set_mask(me_mask); | ||
51 | } | ||
52 | diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h | ||
34 | index XXXXXXX..XXXXXXX 100644 | 53 | index XXXXXXX..XXXXXXX 100644 |
35 | --- a/arch/x86/boot/compressed/Makefile | 54 | --- a/arch/x86/include/asm/coco.h |
36 | +++ b/arch/x86/boot/compressed/Makefile | 55 | +++ b/arch/x86/include/asm/coco.h |
37 | @@ -XXX,XX +XXX,XX @@ ifdef CONFIG_X86_64 | 56 | @@ -XXX,XX +XXX,XX @@ static inline u64 cc_get_mask(void) |
38 | vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o | 57 | |
39 | vmlinux-objs-y += $(obj)/pgtable_64.o | 58 | static inline void cc_set_mask(u64 mask) |
40 | vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o | 59 | { |
41 | - vmlinux-objs-y += $(obj)/la57toggle.o | 60 | - RIP_REL_REF(cc_mask) = mask; |
42 | endif | 61 | + cc_mask = mask; |
43 | 62 | } | |
44 | vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o | 63 | |
45 | @@ -XXX,XX +XXX,XX @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o | 64 | u64 cc_mkenc(u64 val); |
46 | 65 | diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h | |
47 | vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o | 66 | index XXXXXXX..XXXXXXX 100644 |
48 | vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a | 67 | --- a/arch/x86/include/asm/mem_encrypt.h |
49 | +vmlinux-libs-$(CONFIG_X86_64) += $(objtree)/arch/x86/boot/startup/lib.a | 68 | +++ b/arch/x86/include/asm/mem_encrypt.h |
50 | 69 | @@ -XXX,XX +XXX,XX @@ void __init sev_es_init_vc_handling(void); | |
51 | $(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE | 70 | |
52 | $(call if_changed,ld) | 71 | static inline u64 sme_get_me_mask(void) |
53 | diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile | 72 | { |
54 | new file mode 100644 | 73 | - return RIP_REL_REF(sme_me_mask); |
55 | index XXXXXXX..XXXXXXX | 74 | + return sme_me_mask; |
56 | --- /dev/null | 75 | } |
57 | +++ b/arch/x86/boot/startup/Makefile | 76 | |
58 | @@ -XXX,XX +XXX,XX @@ | 77 | #define __bss_decrypted __section(".bss..decrypted") |
59 | +# SPDX-License-Identifier: GPL-2.0 | ||
60 | + | ||
61 | +lib-$(CONFIG_X86_64) += la57toggle.o | ||
62 | diff --git a/arch/x86/boot/compressed/la57toggle.S b/arch/x86/boot/startup/la57toggle.S | ||
63 | similarity index 100% | ||
64 | rename from arch/x86/boot/compressed/la57toggle.S | ||
65 | rename to arch/x86/boot/startup/la57toggle.S | ||
66 | -- | 78 | -- |
67 | 2.49.0.472.ge94155a9ec-goog | 79 | 2.49.0.504.g3bcea36a83-goog |
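
Taken together, the last two hunks leave both helpers as plain memory accesses, correct from either mapping. A condensed view, with the extern declarations repeated here only for self-containment (they live in <asm/coco.h> and <asm/mem_encrypt.h>):

#include <linux/types.h>

extern u64 cc_mask;		/* <asm/coco.h> */
extern u64 sme_me_mask;		/* <asm/mem_encrypt.h> */

/*
 * Plain accesses: when built with -fPIC in startup/ they are emitted
 * RIP-relative, and in the rest of the kernel the usual absolute
 * addressing is fine because that code only ever runs from the kernel
 * virtual mapping.
 */
static inline void cc_set_mask(u64 mask)
{
	cc_mask = mask;
}

static inline u64 sme_get_me_mask(void)
{
	return sme_me_mask;
}
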