From: Alexander Graf <graf@amazon.com>
KHO uses "scratch regions" to bootstrap a kexec'ed kernel. These regions are
guaranteed to not have any memory that KHO would preserve.
Teach KASLR in decompression code to only consider these scratch regions
when KHO is enabled to make sure preserved memory won't get overwritten.
Signed-off-by: Alexander Graf <graf@amazon.com>
Co-developed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Co-developed-by: Changyuan Lyu <changyuanl@google.com>
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
---
arch/x86/boot/compressed/kaslr.c | 52 +++++++++++++++++++++++++++++++-
1 file changed, 51 insertions(+), 1 deletion(-)
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index f03d59ea6e40f..25de8c3e17cdb 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -760,6 +760,55 @@ static void process_e820_entries(unsigned long minimum,
}
}
+/*
+ * If KHO is active, only process its scratch areas to ensure we are not
+ * stepping onto preserved memory.
+ */
+#ifdef CONFIG_KEXEC_HANDOVER
+static bool process_kho_entries(unsigned long minimum, unsigned long image_size)
+{
+ struct kho_scratch *kho_scratch;
+ struct setup_data *ptr;
+ int i, nr_areas = 0;
+
+ ptr = (struct setup_data *)boot_params_ptr->hdr.setup_data;
+ while (ptr) {
+ if (ptr->type == SETUP_KEXEC_KHO) {
+ struct kho_data *kho = (struct kho_data *)ptr->data;
+
+ kho_scratch = (void *)kho->scratch_addr;
+ nr_areas = kho->scratch_size / sizeof(*kho_scratch);
+
+ break;
+ }
+
+ ptr = (struct setup_data *)ptr->next;
+ }
+
+ if (!nr_areas)
+ return false;
+
+ for (i = 0; i < nr_areas; i++) {
+ struct kho_scratch *area = &kho_scratch[i];
+ struct mem_vector region = {
+ .start = area->addr,
+ .size = area->size,
+ };
+
+ if (process_mem_region(&region, minimum, image_size))
+ break;
+ }
+
+ return true;
+}
+#else
+static inline bool process_kho_entries(unsigned long minimum,
+ unsigned long image_size)
+{
+ return false;
+}
+#endif
+
static unsigned long find_random_phys_addr(unsigned long minimum,
unsigned long image_size)
{
@@ -775,7 +824,8 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
return 0;
}
- if (!process_efi_entries(minimum, image_size))
+ if (!process_kho_entries(minimum, image_size) &&
+ !process_efi_entries(minimum, image_size))
process_e820_entries(minimum, image_size);
phys_addr = slots_fetch_random();
--
2.49.0.906.g1f30a19c02-goog
Hi Andrew,
Based on Dave's feedback above, could you please take the following
fix and squash it with "x86/boot: make sure KASLR does not step over
KHO preserved memory" with the updated commit message in mm-unstable?
Thank you very much!
Best,
Changyuan
---- 8< ----
From 464b5750c55f978b47da242f50ec7dbcbac1948c Mon Sep 17 00:00:00 2001
From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
Date: Mon, 5 May 2025 11:29:23 -0700
Subject: [PATCH] fixup! x86/boot: make sure KASLR does not step over KHO
preserved memory
During kexec handover (KHO) memory contains data that should be
preserved and this data would be consumed by kexec'ed kernel.
To make sure that the preserved memory is not overwritten, KHO uses
"scratch regions" to bootstrap kexec'ed kernel. These regions are
guaranteed to not have any memory that KHO would preserve and are used as
the only memory the kernel sees during the early boot.
The scratch regions are passed in the setup_data by the first kernel with
other KHO parameters. If the setup_data contains the KHO parameters, limit
randomization to scratch areas only to make sure preserved memory won't get
overwritten.
Since all the pointers in setup_data are represented by u64, they require
double casting (first to unsigned long and then to the actual pointer type)
to compile on 32-bits. This looks goofy out of context, but it is
unfortunately the way that this is handled across the tree. There are at
least a dozen instances of casting like this.
Signed-off-by: Alexander Graf <graf@amazon.com>
Co-developed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Co-developed-by: Changyuan Lyu <changyuanl@google.com>
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
---
arch/x86/boot/compressed/kaslr.c | 26 ++++++++++++--------------
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 25de8c3e17cdb..3b0948ad449f9 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -764,25 +764,26 @@ static void process_e820_entries(unsigned long minimum,
* If KHO is active, only process its scratch areas to ensure we are not
* stepping onto preserved memory.
*/
-#ifdef CONFIG_KEXEC_HANDOVER
static bool process_kho_entries(unsigned long minimum, unsigned long image_size)
{
struct kho_scratch *kho_scratch;
struct setup_data *ptr;
+ struct kho_data *kho;
int i, nr_areas = 0;
- ptr = (struct setup_data *)boot_params_ptr->hdr.setup_data;
+ if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER))
+ return false;
+
+ ptr = (struct setup_data *)(unsigned long)boot_params_ptr->hdr.setup_data;
while (ptr) {
if (ptr->type == SETUP_KEXEC_KHO) {
- struct kho_data *kho = (struct kho_data *)ptr->data;
-
- kho_scratch = (void *)kho->scratch_addr;
+ kho = (struct kho_data *)(unsigned long)ptr->data;
+ kho_scratch = (void *)(unsigned long)kho->scratch_addr;
nr_areas = kho->scratch_size / sizeof(*kho_scratch);
-
break;
}
- ptr = (struct setup_data *)ptr->next;
+ ptr = (struct setup_data *)(unsigned long)ptr->next;
}
if (!nr_areas)
@@ -801,13 +802,6 @@ static bool process_kho_entries(unsigned long minimum, unsigned long image_size)
return true;
}
-#else
-static inline bool process_kho_entries(unsigned long minimum,
- unsigned long image_size)
-{
- return false;
-}
-#endif
static unsigned long find_random_phys_addr(unsigned long minimum,
unsigned long image_size)
@@ -824,6 +818,10 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
return 0;
}
+ /*
+ * During kexec handover only process KHO scratch areas that are known
+ * not to contain any data that must be preserved.
+ */
if (!process_kho_entries(minimum, image_size) &&
!process_efi_entries(minimum, image_size))
process_e820_entries(minimum, image_size);
--
2.49.0.967.g6a0df3ecc3-goog
On 5/1/25 15:54, Changyuan Lyu wrote:
> KHO uses "scratch regions" to bootstrap a kexec'ed kernel. These regions are
> guaranteed to not have any memory that KHO would preserve.
I understand how these changelogs got written. They were written by
someone thinking *only* about KHO and hacking it into the existing code.
That's fine and understandable.
But everyone else is coming at this from the perspective of not knowing
what scratch memory is.
"Scratch memory" in the KHO world is basically "normal kernel memory" to
anybody else. So I think it's a disservice to everyone else reading
these changelogs to act like it's something special.
The thing that *is* special is that KHO kernels don't have a lot of
"normal kernel memory". At least they're designed to tolerate lots of
handed-off memory and little "scratch memory".
When you run through these again, could you please try to write these
changelogs and comments for folks that are not familiar with KHO?
> +/*
> + * If KHO is active, only process its scratch areas to ensure we are not
> + * stepping onto preserved memory.
> + */
> +#ifdef CONFIG_KEXEC_HANDOVER
> +static bool process_kho_entries(unsigned long minimum, unsigned long image_size)
> +{
I thought we agreed to rework this to unconditionally define the
kho_scratch structures so the #ifdef can go away?
> + struct kho_scratch *kho_scratch;
> + struct setup_data *ptr;
> + int i, nr_areas = 0;
> +
> + ptr = (struct setup_data *)boot_params_ptr->hdr.setup_data;
> + while (ptr) {
> + if (ptr->type == SETUP_KEXEC_KHO) {
> + struct kho_data *kho = (struct kho_data *)ptr->data;
> +
> + kho_scratch = (void *)kho->scratch_addr;
> + nr_areas = kho->scratch_size / sizeof(*kho_scratch);
> +
> + break;
> + }
> +
> + ptr = (struct setup_data *)ptr->next;
> + }
> +
> + if (!nr_areas)
> + return false;
> +
> + for (i = 0; i < nr_areas; i++) {
> + struct kho_scratch *area = &kho_scratch[i];
> + struct mem_vector region = {
> + .start = area->addr,
> + .size = area->size,
> + };
> +
> + if (process_mem_region(&region, minimum, image_size))
> + break;
> + }
> +
> + return true;
> +}
> +#else
> +static inline bool process_kho_entries(unsigned long minimum,
> + unsigned long image_size)
> +{
> + return false;
> +}
> +#endif
> +
> static unsigned long find_random_phys_addr(unsigned long minimum,
> unsigned long image_size)
> {
> @@ -775,7 +824,8 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
> return 0;
> }
>
> - if (!process_efi_entries(minimum, image_size))
> + if (!process_kho_entries(minimum, image_size) &&
> + !process_efi_entries(minimum, image_size))
> process_e820_entries(minimum, image_size);
>
> phys_addr = slots_fetch_random();
I made a comment about this in the last round, making this the second
thing that I've noticed that was not addressed.
Could you please go back through the last round of comments before you
repost these?
Just to be clear: these are making progress, but they're not OK from the
x86 side yet.
On Fri, May 02, 2025 at 11:48:54AM -0700, Dave Hansen wrote:
> On 5/1/25 15:54, Changyuan Lyu wrote:
> > +/*
> > + * If KHO is active, only process its scratch areas to ensure we are not
> > + * stepping onto preserved memory.
> > + */
> > +#ifdef CONFIG_KEXEC_HANDOVER
> > +static bool process_kho_entries(unsigned long minimum, unsigned long image_size)
> > +{
>
> I thought we agreed to rework this to unconditionally define the
> kho_scratch structures so the #ifdef can go away?
It's either #ifdef or double casting and my understanding was that your
preference was to get rid of the double casting.
> > + struct kho_scratch *kho_scratch;
> > + struct setup_data *ptr;
> > + int i, nr_areas = 0;
> > +
> > + ptr = (struct setup_data *)boot_params_ptr->hdr.setup_data;
> > + while (ptr) {
> > + if (ptr->type == SETUP_KEXEC_KHO) {
> > + struct kho_data *kho = (struct kho_data *)ptr->data;
> > +
> > + kho_scratch = (void *)kho->scratch_addr;
> > + nr_areas = kho->scratch_size / sizeof(*kho_scratch);
> > +
> > + break;
> > + }
> > +
> > + ptr = (struct setup_data *)ptr->next;
> > + }
> > +
> > + if (!nr_areas)
> > + return false;
> > +
> > + for (i = 0; i < nr_areas; i++) {
> > + struct kho_scratch *area = &kho_scratch[i];
> > + struct mem_vector region = {
> > + .start = area->addr,
> > + .size = area->size,
> > + };
> > +
> > + if (process_mem_region(&region, minimum, image_size))
> > + break;
> > + }
> > +
> > + return true;
> > +}
> > +#else
> > +static inline bool process_kho_entries(unsigned long minimum,
> > + unsigned long image_size)
> > +{
> > + return false;
> > +}
> > +#endif
> > +
> > static unsigned long find_random_phys_addr(unsigned long minimum,
> > unsigned long image_size)
> > {
> > @@ -775,7 +824,8 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
> > return 0;
> > }
> >
> > - if (!process_efi_entries(minimum, image_size))
> > + if (!process_kho_entries(minimum, image_size) &&
> > + !process_efi_entries(minimum, image_size))
> > process_e820_entries(minimum, image_size);
> >
> > phys_addr = slots_fetch_random();
>
> I made a comment about this in the last round, making this the second
> thing that I've noticed that was not addressed.
>
> Could you please go back through the last round of comments before you
> repost these?
I presumed that changelog covers it. We'll add a comment here for the next
posting.
> Just to be clear: these are making progress, but they're not OK from the
> x86 side yet.
--
Sincerely yours,
Mike.
On 5/2/25 14:16, Mike Rapoport wrote:
>>> +/*
>>> + * If KHO is active, only process its scratch areas to ensure we are not
>>> + * stepping onto preserved memory.
>>> + */
>>> +#ifdef CONFIG_KEXEC_HANDOVER
>>> +static bool process_kho_entries(unsigned long minimum, unsigned long image_size)
>>> +{
>> I thought we agreed to rework this to unconditionally define the
>> kho_scratch structures so the #ifdef can go away?
> It's either #ifdef or double casting and my understanding was that your
> preference was to get rid of the double casting.
Looking back at the other message... Sorry, you did make it clear there
and it just didn't penetrate my thick skull.
The double cast is goofy, but it does seem to be the normal way of doing
things. There are lots of examples of it. So, grudgingly, I prefer the
double cast over the #ifdef.
BTW, _please_ changelog this stuff. It would have saved this round of
back-and-forth. The changelog is the perfect place to say something like:
This looks goofy out of context, but it is unfortunately the way
that this is handled across the tree. There are at least a dozen
instances of casting like this.
This series is generally a bit sparse in the changelog and comment
departments.
© 2016 - 2026 Red Hat, Inc.