Make head.S invoke a C function to retrieve MBI and SLRT addresses in a
platform-specific way.  This is also the place to perform sanity checks
of DRTM.
Signed-off-by: Krystian Hebel <krystian.hebel@3mdeb.com>
Signed-off-by: Sergii Dmytruk <sergii.dmytruk@3mdeb.com>
---
 xen/arch/x86/Makefile                |  1 +
 xen/arch/x86/boot/Makefile           |  5 +++-
 xen/arch/x86/boot/head.S             | 43 ++++++++++++++++++++++++++++
 xen/arch/x86/boot/slaunch-early.c    | 41 ++++++++++++++++++++++++++
 xen/arch/x86/include/asm/intel-txt.h | 16 +++++++++++
 xen/arch/x86/include/asm/slaunch.h   | 26 +++++++++++++++++
 xen/arch/x86/slaunch.c               | 27 +++++++++++++++++
 7 files changed, 158 insertions(+), 1 deletion(-)
 create mode 100644 xen/arch/x86/boot/slaunch-early.c
 create mode 100644 xen/arch/x86/include/asm/slaunch.h
 create mode 100644 xen/arch/x86/slaunch.c
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index ce724a9daa..aa20eb42b5 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_COMPAT) += x86_64/physdev.o
 obj-$(CONFIG_X86_PSR) += psr.o
 obj-y += setup.o
 obj-y += shutdown.o
+obj-y += slaunch.o
 obj-y += smp.o
 obj-y += smpboot.o
 obj-y += spec_ctrl.o
diff --git a/xen/arch/x86/boot/Makefile b/xen/arch/x86/boot/Makefile
index ff0d61d7ac..5471b966dd 100644
--- a/xen/arch/x86/boot/Makefile
+++ b/xen/arch/x86/boot/Makefile
@@ -5,6 +5,7 @@ obj-bin-y += $(obj64)
 obj32 := cmdline.32.o
 obj32 += reloc.32.o
 obj32 += reloc-trampoline.32.o
+obj32 += slaunch-early.32.o
 
 obj64 := reloc-trampoline.o
 
@@ -28,6 +29,8 @@ $(obj32): XEN_CFLAGS := $(CFLAGS_x86_32) -fpic
 $(obj)/%.32.o: $(src)/%.c FORCE
 	$(call if_changed_rule,cc_o_c)
 
+$(obj)/slaunch-early.32.o: XEN_CFLAGS += -D__EARLY_SLAUNCH__
+
 orphan-handling-$(call ld-option,--orphan-handling=error) := --orphan-handling=error
 LDFLAGS_DIRECT-$(call ld-option,--warn-rwx-segments) := --no-warn-rwx-segments
 LDFLAGS_DIRECT += $(LDFLAGS_DIRECT-y)
@@ -81,7 +84,7 @@ cmd_combine = \
               --bin1      $(obj)/built-in-32.base.bin \
               --bin2      $(obj)/built-in-32.offset.bin \
               --map       $(obj)/built-in-32.base.map \
-              --exports   cmdline_parse_early,reloc,reloc_trampoline32 \
+              --exports   cmdline_parse_early,reloc,reloc_trampoline32,slaunch_early_init \
               --output    $@
 
 targets += built-in-32.S
diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
index a69107bd81..b4cf423c80 100644
--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -472,6 +472,10 @@ __start:
         /* Bootloaders may set multiboot{1,2}.mem_lower to a nonzero value. */
         xor     %edx,%edx
 
+        /* Check for TrenchBoot slaunch bootloader. */
+        cmp     $SLAUNCH_BOOTLOADER_MAGIC, %eax
+        je      .Lslaunch_proto
+
         /* Check for Multiboot2 bootloader. */
         cmp     $MULTIBOOT2_BOOTLOADER_MAGIC,%eax
         je      .Lmultiboot2_proto
@@ -487,6 +491,45 @@ __start:
         cmovnz  MB_mem_lower(%ebx),%edx
         jmp     trampoline_bios_setup
 
+.Lslaunch_proto:
+        /*
+         * Upon reaching here, the CPU state mostly matches the one set up
+         * by the bootloader, with ESP, ESI and EDX clobbered above.
+         */
+
+        /* Save information that TrenchBoot slaunch was used. */
+        movb    $1, sym_esi(slaunch_active)
+
+        /*
+         * Prepare space for output parameter of slaunch_early_init(), which is
+         * a structure of two uint32_t fields.
+         */
+        sub     $8, %esp
+
+        push    %esp                             /* pointer to output structure */
+        lea     sym_offs(__2M_rwdata_end), %ecx  /* end of target image */
+        lea     sym_offs(_start), %edx           /* target base address */
+        mov     %esi, %eax                       /* load base address */
+        /*
+         * slaunch_early_init(load/eax, tgt/edx, tgt_end/ecx, ret/stk) using
+         * fastcall calling convention.
+         */
+        call    slaunch_early_init
+        add     $4, %esp                         /* pop the fourth parameter */
+
+        /* Move outputs of slaunch_early_init() from stack into registers. */
+        pop     %eax  /* physical MBI address */
+        pop     %edx  /* physical SLRT address */
+
+        /* Save physical address of SLRT for C code. */
+        mov     %edx, sym_esi(slaunch_slrt)
+
+        /* Store MBI address in EBX where MB2 code expects it. */
+        mov     %eax, %ebx
+
+        /* Move magic number expected by Multiboot 2 to EAX and fall through. */
+        movl    $MULTIBOOT2_BOOTLOADER_MAGIC, %eax
+
 .Lmultiboot2_proto:
         /* Skip Multiboot2 information fixed part. */
         lea     (MB2_fixed_sizeof+MULTIBOOT2_TAG_ALIGN-1)(%ebx),%ecx
diff --git a/xen/arch/x86/boot/slaunch-early.c b/xen/arch/x86/boot/slaunch-early.c
new file mode 100644
index 0000000000..c9d364bcd5
--- /dev/null
+++ b/xen/arch/x86/boot/slaunch-early.c
@@ -0,0 +1,41 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
+ */
+
+#include <xen/slr-table.h>
+#include <xen/types.h>
+#include <asm/intel-txt.h>
+
+struct early_init_results
+{
+    uint32_t mbi_pa;
+    uint32_t slrt_pa;
+} __packed;
+
+void asmlinkage slaunch_early_init(uint32_t load_base_addr,
+                                   uint32_t tgt_base_addr,
+                                   uint32_t tgt_end_addr,
+                                   struct early_init_results *result)
+{
+    void *txt_heap;
+    const struct txt_os_mle_data *os_mle;
+    const struct slr_table *slrt;
+    const struct slr_entry_intel_info *intel_info;
+
+    txt_heap = txt_init();
+    os_mle = txt_os_mle_data_start(txt_heap);
+
+    result->slrt_pa = os_mle->slrt;
+    result->mbi_pa = 0;
+
+    slrt = (const struct slr_table *)(uintptr_t)os_mle->slrt;
+
+    intel_info = (const struct slr_entry_intel_info *)
+        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);
+    if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
+        return;
+
+    result->mbi_pa = intel_info->boot_params_base;
+}
diff --git a/xen/arch/x86/include/asm/intel-txt.h b/xen/arch/x86/include/asm/intel-txt.h
index cc2d312f4d..7658457e9d 100644
--- a/xen/arch/x86/include/asm/intel-txt.h
+++ b/xen/arch/x86/include/asm/intel-txt.h
@@ -292,6 +292,22 @@ static inline void *txt_sinit_mle_data_start(const void *heap)
            sizeof(uint64_t);
 }
 
+static inline void *txt_init(void)
+{
+    void *txt_heap;
+
+    /* Clear the TXT error register for a clean start of the day. */
+    txt_write(TXTCR_ERRORCODE, 0);
+
+    txt_heap = _p(txt_read(TXTCR_HEAP_BASE));
+
+    if ( txt_os_mle_data_size(txt_heap) < sizeof(struct txt_os_mle_data) ||
+         txt_os_sinit_data_size(txt_heap) < sizeof(struct txt_os_sinit_data) )
+        txt_reset(SLAUNCH_ERROR_GENERIC);
+
+    return txt_heap;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* X86_INTEL_TXT_H */
diff --git a/xen/arch/x86/include/asm/slaunch.h b/xen/arch/x86/include/asm/slaunch.h
new file mode 100644
index 0000000000..df42defd92
--- /dev/null
+++ b/xen/arch/x86/include/asm/slaunch.h
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
+ */
+
+#ifndef X86_SLAUNCH_H
+#define X86_SLAUNCH_H
+
+#include <xen/types.h>
+
+/* Indicates an active Secure Launch boot. */
+extern bool slaunch_active;
+
+/*
+ * Holds physical address of SLRT.  Use slaunch_get_slrt() to access SLRT
+ * instead of mapping where this points to.
+ */
+extern uint32_t slaunch_slrt;
+
+/*
+ * Retrieves pointer to SLRT.  Checks table's validity and maps it as necessary.
+ */
+struct slr_table *slaunch_get_slrt(void);
+
+#endif /* X86_SLAUNCH_H */
diff --git a/xen/arch/x86/slaunch.c b/xen/arch/x86/slaunch.c
new file mode 100644
index 0000000000..a3e6ab8d71
--- /dev/null
+++ b/xen/arch/x86/slaunch.c
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
+ */
+
+#include <xen/compiler.h>
+#include <xen/init.h>
+#include <xen/macros.h>
+#include <xen/types.h>
+#include <asm/slaunch.h>
+
+/*
+ * These variables are assigned to by the code near Xen's entry point.
+ *
+ * slaunch_active is not __initdata to allow checking for an active Secure
+ * Launch boot.
+ */
+bool slaunch_active;
+uint32_t __initdata slaunch_slrt; /* physical address */
+
+/* Using slaunch_active in head.S assumes it's a single byte in size, so enforce
+ * this assumption. */
+static void __maybe_unused compile_time_checks(void)
+{
+    BUILD_BUG_ON(sizeof(slaunch_active) != 1);
+}
-- 
2.49.0

On 30.05.2025 15:17, Sergii Dmytruk wrote:
> Make head.S invoke a C function to retrieve MBI and SLRT addresses in a
> platform-specific way.  This is also the place to perform sanity checks
> of DRTM.
> 
> Signed-off-by: Krystian Hebel <krystian.hebel@3mdeb.com>
> Signed-off-by: Sergii Dmytruk <sergii.dmytruk@3mdeb.com>
> ---
>  xen/arch/x86/Makefile                |  1 +
>  xen/arch/x86/boot/Makefile           |  5 +++-
>  xen/arch/x86/boot/head.S             | 43 ++++++++++++++++++++++++++++
>  xen/arch/x86/boot/slaunch-early.c    | 41 ++++++++++++++++++++++++++
>  xen/arch/x86/include/asm/intel-txt.h | 16 +++++++++++
>  xen/arch/x86/include/asm/slaunch.h   | 26 +++++++++++++++++
>  xen/arch/x86/slaunch.c               | 27 +++++++++++++++++
>  7 files changed, 158 insertions(+), 1 deletion(-)
>  create mode 100644 xen/arch/x86/boot/slaunch-early.c
>  create mode 100644 xen/arch/x86/include/asm/slaunch.h
>  create mode 100644 xen/arch/x86/slaunch.c
As indicated in reply to patch 3 - imo all code additions here want to be
under some CONFIG_xyz. I repeat this here, but I don't think I'll repeat it
any further.
> --- a/xen/arch/x86/boot/head.S
> +++ b/xen/arch/x86/boot/head.S
> @@ -472,6 +472,10 @@ __start:
>          /* Bootloaders may set multiboot{1,2}.mem_lower to a nonzero value. */
>          xor     %edx,%edx
>  
> +        /* Check for TrenchBoot slaunch bootloader. */
> +        cmp     $SLAUNCH_BOOTLOADER_MAGIC, %eax
> +        je      .Lslaunch_proto
> +
>          /* Check for Multiboot2 bootloader. */
>          cmp     $MULTIBOOT2_BOOTLOADER_MAGIC,%eax
>          je      .Lmultiboot2_proto
> @@ -487,6 +491,45 @@ __start:
>          cmovnz  MB_mem_lower(%ebx),%edx
>          jmp     trampoline_bios_setup
>  
> +.Lslaunch_proto:
> +        /*
> +         * Upon reaching here, the CPU state mostly matches the one set up
> +         * by the bootloader, with ESP, ESI and EDX clobbered above.
> +         */
> +
> +        /* Save information that TrenchBoot slaunch was used. */
> +        movb    $1, sym_esi(slaunch_active)
> +
> +        /*
> +         * Prepare space for output parameter of slaunch_early_init(), which is
> +         * a structure of two uint32_t fields.
> +         */
> +        sub     $8, %esp
At the very least a textual reference to the struct type is needed here,
to be able to find it. Better would be to have the size calculated into
asm-offsets.h, to use a proper symbolic name here.
> +        push    %esp                             /* pointer to output structure */
> +        lea     sym_offs(__2M_rwdata_end), %ecx  /* end of target image */
> +        lea     sym_offs(_start), %edx           /* target base address */
Why LEA when this can be expressed with (shorter) MOV?
> +        mov     %esi, %eax                       /* load base address */
> +        /*
> +         * slaunch_early_init(load/eax, tgt/edx, tgt_end/ecx, ret/stk) using
> +         * fastcall calling convention.
> +         */
> +        call    slaunch_early_init
> +        add     $4, %esp                         /* pop the fourth parameter */
> +
> +        /* Move outputs of slaunch_early_init() from stack into registers. */
> +        pop     %eax  /* physical MBI address */
> +        pop     %edx  /* physical SLRT address */
> +
> +        /* Save physical address of SLRT for C code. */
> +        mov     %edx, sym_esi(slaunch_slrt)
Why go through %edx?
> +        /* Store MBI address in EBX where MB2 code expects it. */
> +        mov     %eax, %ebx
Why go through %eax?
> --- /dev/null
> +++ b/xen/arch/x86/boot/slaunch-early.c
> @@ -0,0 +1,41 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + *
> + * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
> + */
> +
> +#include <xen/slr-table.h>
> +#include <xen/types.h>
> +#include <asm/intel-txt.h>
> +
> +struct early_init_results
> +{
> +    uint32_t mbi_pa;
> +    uint32_t slrt_pa;
> +} __packed;
Why __packed?
> +void asmlinkage slaunch_early_init(uint32_t load_base_addr,
__init ?
> +                                   uint32_t tgt_base_addr,
> +                                   uint32_t tgt_end_addr,
> +                                   struct early_init_results *result)
> +{
> +    void *txt_heap;
> +    const struct txt_os_mle_data *os_mle;
> +    const struct slr_table *slrt;
> +    const struct slr_entry_intel_info *intel_info;
> +
> +    txt_heap = txt_init();
> +    os_mle = txt_os_mle_data_start(txt_heap);
> +
> +    result->slrt_pa = os_mle->slrt;
> +    result->mbi_pa = 0;
> +
> +    slrt = (const struct slr_table *)(uintptr_t)os_mle->slrt;
I think the cast to uintptr_t wants omitting here. This is 32-bit code, and
hence the conversion to a pointer ought to go fine without. Or else you're
silently discarding bits in the earlier assignment to ->slrt_pa.
> +    intel_info = (const struct slr_entry_intel_info *)
> +        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);
> +    if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
> +        return;
This size check is best effort only, isn't it? Or else how do you know
->hdr.size is actually within bounds? Further in txt_init() you use less-
than checks; why more relaxed there and more strict here?
> --- a/xen/arch/x86/include/asm/intel-txt.h
> +++ b/xen/arch/x86/include/asm/intel-txt.h
> @@ -292,6 +292,22 @@ static inline void *txt_sinit_mle_data_start(const void *heap)
>             sizeof(uint64_t);
>  }
>  
> +static inline void *txt_init(void)
__init ?
> --- /dev/null
> +++ b/xen/arch/x86/include/asm/slaunch.h
> @@ -0,0 +1,26 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + *
> + * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
> + */
> +
> +#ifndef X86_SLAUNCH_H
> +#define X86_SLAUNCH_H
> +
> +#include <xen/types.h>
> +
> +/* Indicates an active Secure Launch boot. */
> +extern bool slaunch_active;
> +
> +/*
> + * Holds physical address of SLRT.  Use slaunch_get_slrt() to access SLRT
> + * instead of mapping where this points to.
> + */
> +extern uint32_t slaunch_slrt;
> +
> +/*
> + * Retrieves pointer to SLRT.  Checks table's validity and maps it as necessary.
> + */
> +struct slr_table *slaunch_get_slrt(void);
There's no definition of this here, nor a use. Why is this living in this
patch? Misra objects to declarations without definitions, and you want to
be prepared that such a large series may go in piece by piece. Hence there
may not be new Misra violations at any patch boundary.
> --- /dev/null
> +++ b/xen/arch/x86/slaunch.c
> @@ -0,0 +1,27 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + *
> + * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
> + */
> +
> +#include <xen/compiler.h>
> +#include <xen/init.h>
> +#include <xen/macros.h>
> +#include <xen/types.h>
Looks like all you need here is xen/stdint.h?
> +#include <asm/slaunch.h>
We try to move to there being blanks lines between groups of #include-s,
e.g. all xen/ ones separated from all asm/ ones.
> +/*
> + * These variables are assigned to by the code near Xen's entry point.
> + *
> + * slaunch_active is not __initdata to allow checking for an active Secure
> + * Launch boot.
> + */
> +bool slaunch_active;
Not using __initdata is quite plausible, but why not __ro_after_init?
> +uint32_t __initdata slaunch_slrt; /* physical address */
> +
> +/* Using slaunch_active in head.S assumes it's a single byte in size, so enforce
> + * this assumption. */
Please follow comment style as per ./CODING_STYLE.
Jan

On Thu, Jul 03, 2025 at 12:50:39PM +0200, Jan Beulich wrote:
> As indicated in reply to patch 3 - imo all code additions here want to be
> under some CONFIG_xyz. I repeat this here, but I don't think I'll repeat it
> any further.
I'll add one.  In case this is problematic for some reason, I want to
mention that in the new version I don't add #ifdefs around
if-statements, because I did:
    #ifdef CONFIG_SLAUNCH
    ...
    #else
    static const bool slaunch_active = false;
    #endif
and that's enough for the compiler to discard
`if (slaunch_active ...) { ... }`.
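The config option itself would be roughly (exact name and help text to
be decided):
    config SLAUNCH
        bool "Secure Launch support"
        help
          Support for booting through a DRTM implementation such as
          TrenchBoot Secure Launch.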
> > +        /*
> > +         * Prepare space for output parameter of slaunch_early_init(), which is
> > +         * a structure of two uint32_t fields.
> > +         */
> > +        sub     $8, %esp
>
> At the very least a textual reference to the struct type is needed here,
> to be able to find it. Better would be to have the size calculated into
> asm-offsets.h, to use a proper symbolic name here.
Will do both of those things so it's easier to understand the behaviour
of the POPs.
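Roughly (the structure will have to move to a header so that
asm-offsets.c can see it; the names below are provisional):
    /* xen/arch/x86/x86_64/asm-offsets.c */
    DEFINE(SLAUNCH_EARLY_INIT_RESULTS_sizeof,
           sizeof(struct early_init_results));
and in head.S:
    sub     $SLAUNCH_EARLY_INIT_RESULTS_sizeof, %esp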
> > +        push    %esp                             /* pointer to output structure */
> > +        lea     sym_offs(__2M_rwdata_end), %ecx  /* end of target image */
> > +        lea     sym_offs(_start), %edx           /* target base address */
>
> Why LEA when this can be expressed with (shorter) MOV?
I'll change to MOVs for consistency.  The LEAs are probably there
because these are addresses, and that's what LEA is for.
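I.e.:
    mov     $sym_offs(__2M_rwdata_end), %ecx /* end of target image */
    mov     $sym_offs(_start), %edx          /* target base address */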
> > +        /* Move outputs of slaunch_early_init() from stack into registers. */
> > +        pop     %eax  /* physical MBI address */
> > +        pop     %edx  /* physical SLRT address */
> > +
> > +        /* Save physical address of SLRT for C code. */
> > +        mov     %edx, sym_esi(slaunch_slrt)
>
> Why go through %edx?
>
> > +        /* Store MBI address in EBX where MB2 code expects it. */
> > +        mov     %eax, %ebx
>
> Why go through %eax?
I think I just wanted to fully unpack the structure before processing
its fields, but there is no real need for that, so I'll combine them.
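Something like this (untested):
    /* Unpack outputs of slaunch_early_init() from the stack. */
    pop     %ebx                      /* physical MBI address */
    popl    sym_esi(slaunch_slrt)     /* physical SLRT address */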
> > +struct early_init_results
> > +{
> > +    uint32_t mbi_pa;
> > +    uint32_t slrt_pa;
> > +} __packed;
>
> Why __packed?
Just a bullet-proof form of documenting a requirement.
> > +void asmlinkage slaunch_early_init(uint32_t load_base_addr,
>
> __init ?
This is early code to which no such sections apply, as far as I can
tell.
> > +    slrt = (const struct slr_table *)(uintptr_t)os_mle->slrt;
>
> I think the cast to uintptr_t wants omitting here. This is 32-bit code, and
> hence the conversion to a pointer ought to go fine without. Or else you're
> silently discarding bits in the earlier assignment to ->slrt_pa.
`os_mle->slrt` is 64-bit and the compiler dislikes implicit narrowing in
pointer casts, so the cast can't just be dropped.  I'll use
`result->slrt_pa` (32-bit) to get rid of the cast and will add a check
that the address in fact fits in 32 bits.
The values of pointers are generally below 4 GiB, so no harm is done.
The address fields are 64-bit probably for extensibility and because
they are mostly consumed by 64-bit code.
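Roughly:
    result->slrt_pa = os_mle->slrt;
    result->mbi_pa = 0;
    if ( os_mle->slrt != result->slrt_pa )
        return; /* or reboot, as discussed in the other subthread */
    slrt = (const struct slr_table *)result->slrt_pa;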
> > +    intel_info = (const struct slr_entry_intel_info *)
> > +        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);
> > +    if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
> > +        return;
>
> This size check is best effort only, isn't it? Or else how do you
> know ->hdr.size is actually within bounds?
It's just a sanity check that the structure is not of a completely
wrong format.
> Further in txt_init() you use less-than checks; why more relaxed there
> and more strict here?
Those are different kinds of checks: here the code checks that an entry
in the SLRT matches Xen's structure in size, while txt_init() verifies
that a section of the TXT heap is large enough to fit the data we expect
to find there.
find there.
> > --- a/xen/arch/x86/include/asm/intel-txt.h
> > +++ b/xen/arch/x86/include/asm/intel-txt.h
> > @@ -292,6 +292,22 @@ static inline void *txt_sinit_mle_data_start(const void *heap)
> >             sizeof(uint64_t);
> >  }
> >
> > +static inline void *txt_init(void)
>
> __init ?
Then it won't work in the early code.
> > +/*
> > + * Retrieves pointer to SLRT.  Checks table's validity and maps it as necessary.
> > + */
> > +struct slr_table *slaunch_get_slrt(void);
>
> There's no definition of this here, nor a use. Why is this living in this
> patch? Misra objects to declarations without definitions, and you want to
> be prepared that such a large series may go in piece by piece. Hence there
> may not be new Misra violations at any patch boundary.
The reason is that a comment mentions this function.  I'll change the
comment to not do that until the function is introduced.
> > +#include <xen/types.h>
>
> Looks like all you need here is xen/stdint.h?
Right, although <xen/types.h> will be necessary for NULL in patch #6.
> > +#include <asm/slaunch.h>
>
> We try to move to there being blanks lines between groups of #include-s,
> e.g. all xen/ ones separated from all asm/ ones.
Will add a blank line.
> > +/*
> > + * These variables are assigned to by the code near Xen's entry point.
> > + *
> > + * slaunch_active is not __initdata to allow checking for an active Secure
> > + * Launch boot.
> > + */
> > +bool slaunch_active;
>
> Not using __initdata is quite plausible, but why not __ro_after_init?
I haven't tried it and likely didn't even see it (it's in a separate
header); will try changing.
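I.e. presumably just:
    bool __ro_after_init slaunch_active;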
> > +uint32_t __initdata slaunch_slrt; /* physical address */
> > +
> > +/* Using slaunch_active in head.S assumes it's a single byte in size, so enforce
> > + * this assumption. */
>
> Please follow comment style as per ./CODING_STYLE.
>
> Jan
Will adjust.
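I.e.:
    /*
     * Using slaunch_active in head.S assumes it's a single byte in size,
     * so enforce this assumption.
     */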
Regards

On 5/30/25 6:17 AM, Sergii Dmytruk wrote:
> Make head.S invoke a C function to retrieve MBI and SLRT addresses in a
> platform-specific way.  This is also the place to perform sanity checks
> of DRTM.
>
> [...]
>
> diff --git a/xen/arch/x86/boot/slaunch-early.c b/xen/arch/x86/boot/slaunch-early.c
> new file mode 100644
> index 0000000000..c9d364bcd5
> --- /dev/null
> +++ b/xen/arch/x86/boot/slaunch-early.c
> @@ -0,0 +1,41 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + *
> + * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
> + */
> +
> +#include <xen/slr-table.h>
> +#include <xen/types.h>
> +#include <asm/intel-txt.h>
> +
> +struct early_init_results
> +{
> +    uint32_t mbi_pa;
> +    uint32_t slrt_pa;
> +} __packed;
> +
> +void asmlinkage slaunch_early_init(uint32_t load_base_addr,
> +                                   uint32_t tgt_base_addr,
> +                                   uint32_t tgt_end_addr,
> +                                   struct early_init_results *result)
> +{
> +    void *txt_heap;
> +    const struct txt_os_mle_data *os_mle;
> +    const struct slr_table *slrt;
> +    const struct slr_entry_intel_info *intel_info;
> +
> +    txt_heap = txt_init();
> +    os_mle = txt_os_mle_data_start(txt_heap);
> +
> +    result->slrt_pa = os_mle->slrt;
> +    result->mbi_pa = 0;
> +
> +    slrt = (const struct slr_table *)(uintptr_t)os_mle->slrt;
> +
> +    intel_info = (const struct slr_entry_intel_info *)
> +        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);
> +    if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
> +        return;
Since these are the x86/TXT bits, it seems at this point, not finding 
the TXT info structure would be fatal, no?
> +
> +    result->mbi_pa = intel_info->boot_params_base;
> +}
> diff --git a/xen/arch/x86/include/asm/intel-txt.h b/xen/arch/x86/include/asm/intel-txt.h
> index cc2d312f4d..7658457e9d 100644
> --- a/xen/arch/x86/include/asm/intel-txt.h
> +++ b/xen/arch/x86/include/asm/intel-txt.h
> @@ -292,6 +292,22 @@ static inline void *txt_sinit_mle_data_start(const void *heap)
>              sizeof(uint64_t);
>   }
>   
> +static inline void *txt_init(void)
> +{
> +    void *txt_heap;
> +
> +    /* Clear the TXT error register for a clean start of the day. */
> +    txt_write(TXTCR_ERRORCODE, 0);
> +
> +    txt_heap = _p(txt_read(TXTCR_HEAP_BASE));
> +
> +    if ( txt_os_mle_data_size(txt_heap) < sizeof(struct txt_os_mle_data) ||
> +         txt_os_sinit_data_size(txt_heap) < sizeof(struct txt_os_sinit_data) )
> +        txt_reset(SLAUNCH_ERROR_GENERIC);
I know the list of error codes pulled in is from the patches for Linux
Secure Launch, which seems right.  The Xen work is free to add more
specific error codes, e.g. somewhere like here.  We could even consider
using regions in the vendor error code space for different things, like
generic errors vs architecture-specific ones, etc.
Thanks
Ross

On Tue, Jun 03, 2025 at 09:17:29AM -0700, ross.philipson@oracle.com wrote:
> > +void asmlinkage slaunch_early_init(uint32_t load_base_addr,
> > +                                   uint32_t tgt_base_addr,
> > +                                   uint32_t tgt_end_addr,
> > +                                   struct early_init_results *result)
> > +{
> > +    void *txt_heap;
> > +    const struct txt_os_mle_data *os_mle;
> > +    const struct slr_table *slrt;
> > +    const struct slr_entry_intel_info *intel_info;
> > +
> > +    txt_heap = txt_init();
> > +    os_mle = txt_os_mle_data_start(txt_heap);
> > +
> > +    result->slrt_pa = os_mle->slrt;
> > +    result->mbi_pa = 0;
> > +
> > +    slrt = (const struct slr_table *)(uintptr_t)os_mle->slrt;
> > +
> > +    intel_info = (const struct slr_entry_intel_info *)
> > +        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);
> > +    if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
> > +        return;
>
> Since these are the x86/TXT bits, it seems at this point, not finding the
> TXT info structure would be fatal, no?
It is fatal, but the early code doesn't have means for reporting errors
nicely, so it just continues.  Do you think it's better to reboot right
away?
> > +static inline void *txt_init(void)
> > +{
> > +    void *txt_heap;
> > +
> > +    /* Clear the TXT error register for a clean start of the day. */
> > +    txt_write(TXTCR_ERRORCODE, 0);
> > +
> > +    txt_heap = _p(txt_read(TXTCR_HEAP_BASE));
> > +
> > +    if ( txt_os_mle_data_size(txt_heap) < sizeof(struct txt_os_mle_data) ||
> > +         txt_os_sinit_data_size(txt_heap) < sizeof(struct txt_os_sinit_data) )
> > +        txt_reset(SLAUNCH_ERROR_GENERIC);
>
> I know the list of error codes pulled in is from the patches for Linux
> Secure Launch, which seems right.  The Xen work is free to add more
> specific error codes, e.g. somewhere like here.  We could even consider
> using regions in the vendor error code space for different things, like
> generic errors vs architecture-specific ones, etc.
>
> Thanks
> Ross
I think some codes were already added, and this is the only place where
SLAUNCH_ERROR_GENERIC is used; not really sure why.  I will add a couple
more.  By the way, the new errors were inserted in the middle, putting
about half of the errors out of sync with Linux.  Should Xen and Linux
be in sync?
Not sure about the usefulness of error regions: the error codes are only
for TXT, and major/minor errors in TXT.ERRORCODE weren't helpful in
debugging.
Regards

On 6/11/25 3:14 PM, Sergii Dmytruk wrote:
> On Tue, Jun 03, 2025 at 09:17:29AM -0700, ross.philipson@oracle.com wrote:
>>> +void asmlinkage slaunch_early_init(uint32_t load_base_addr,
>>> +                                   uint32_t tgt_base_addr,
>>> +                                   uint32_t tgt_end_addr,
>>> +                                   struct early_init_results *result)
>>> +{
>>> +    void *txt_heap;
>>> +    const struct txt_os_mle_data *os_mle;
>>> +    const struct slr_table *slrt;
>>> +    const struct slr_entry_intel_info *intel_info;
>>> +
>>> +    txt_heap = txt_init();
>>> +    os_mle = txt_os_mle_data_start(txt_heap);
>>> +
>>> +    result->slrt_pa = os_mle->slrt;
>>> +    result->mbi_pa = 0;
>>> +
>>> +    slrt = (const struct slr_table *)(uintptr_t)os_mle->slrt;
>>> +
>>> +    intel_info = (const struct slr_entry_intel_info *)
>>> +        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);
>>> +    if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
>>> +        return;
>>
>> Since these are the x86/TXT bits, it seems at this point, not finding the
>> TXT info structure would be fatal, no?
> 
> It is fatal, but the early code doesn't have means for reporting errors
> nicely, so it just continues.  Do you think it's better to reboot right
> away?
I was trying to track down where you make the first determination that a
TXT secure launch is supposed to be done.  I think it is before this
point, so if that is the case, I think this should be fatal here.
> 
>>> +static inline void *txt_init(void)
>>> +{
>>> +    void *txt_heap;
>>> +
>>> +    /* Clear the TXT error register for a clean start of the day. */
>>> +    txt_write(TXTCR_ERRORCODE, 0);
>>> +
>>> +    txt_heap = _p(txt_read(TXTCR_HEAP_BASE));
>>> +
>>> +    if ( txt_os_mle_data_size(txt_heap) < sizeof(struct txt_os_mle_data) ||
>>> +         txt_os_sinit_data_size(txt_heap) < sizeof(struct txt_os_sinit_data) )
>>> +        txt_reset(SLAUNCH_ERROR_GENERIC);
>>
>> I know the list of error codes pulled in are from the patches for Linux
>> Secure Launch which seems right. The Xen work is free to add more specific
>> error codes e.g. somewhere like here. We could even consider using regions
>> in the vendor error code space for different things like generic errors vs
>> architecture specific ones vs etc.
>>
>> Thanks
>> Ross
> 
> I think some codes were already added, and this is the only place where
> SLAUNCH_ERROR_GENERIC is used; not really sure why.  I will add a couple
> more.  By the way, the new errors were inserted in the middle, putting
> about half of the errors out of sync with Linux.  Should Xen and Linux
> be in sync?
> 
> Not sure about the usefulness of error regions: the error codes are only
> for TXT, and major/minor errors in TXT.ERRORCODE weren't helpful in
> debugging.
Yeah, all I really meant was that the area in the error code space below
0x400 is ours to chop up how we want.
Thanks
Ross

On Thu, Jun 12, 2025 at 09:30:55AM -0700, ross.philipson@oracle.com wrote:
> On 6/11/25 3:14 PM, Sergii Dmytruk wrote:
> > On Tue, Jun 03, 2025 at 09:17:29AM -0700, ross.philipson@oracle.com wrote:
> > > > +void asmlinkage slaunch_early_init(uint32_t load_base_addr,
> > > > +                                   uint32_t tgt_base_addr,
> > > > +                                   uint32_t tgt_end_addr,
> > > > +                                   struct early_init_results *result)
> > > > +{
> > > > +    void *txt_heap;
> > > > +    const struct txt_os_mle_data *os_mle;
> > > > +    const struct slr_table *slrt;
> > > > +    const struct slr_entry_intel_info *intel_info;
> > > > +
> > > > +    txt_heap = txt_init();
> > > > +    os_mle = txt_os_mle_data_start(txt_heap);
> > > > +
> > > > +    result->slrt_pa = os_mle->slrt;
> > > > +    result->mbi_pa = 0;
> > > > +
> > > > +    slrt = (const struct slr_table *)(uintptr_t)os_mle->slrt;
> > > > +
> > > > +    intel_info = (const struct slr_entry_intel_info *)
> > > > +        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);
> > > > +    if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
> > > > +        return;
> > > 
> > > Since these are the x86/TXT bits, it seems at this point, not finding the
> > > TXT info structure would be fatal, no?
> > 
> > It is fatal, but the early code doesn't have means for reporting errors
> > nicely, so it just continues.  Do you think it's better to reboot right
> > away?
> 
> I was trying to track down where you make the first determination that a TXT
> secure launch is supposed to be done.  I think it is before this point, so if
> that is the case, I think this should be fatal here.
Yes, this code is invoked only if slaunch is in progress (Xen was
invoked through the slaunch-specific entry point).  Will add rebooting.
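I.e. something like (the exact error code is still to be settled per
the other subthread):
    if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
        txt_reset(SLAUNCH_ERROR_GENERIC);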
Regards

On 12.06.2025 00:14, Sergii Dmytruk wrote:
> On Tue, Jun 03, 2025 at 09:17:29AM -0700, ross.philipson@oracle.com wrote:
>>> +static inline void *txt_init(void)
>>> +{
>>> +    void *txt_heap;
>>> +
>>> +    /* Clear the TXT error register for a clean start of the day. */
>>> +    txt_write(TXTCR_ERRORCODE, 0);
>>> +
>>> +    txt_heap = _p(txt_read(TXTCR_HEAP_BASE));
>>> +
>>> +    if ( txt_os_mle_data_size(txt_heap) < sizeof(struct txt_os_mle_data) ||
>>> +         txt_os_sinit_data_size(txt_heap) < sizeof(struct txt_os_sinit_data) )
>>> +        txt_reset(SLAUNCH_ERROR_GENERIC);
>>
>> I know the list of error codes pulled in is from the patches for Linux
>> Secure Launch, which seems right.  The Xen work is free to add more
>> specific error codes, e.g. somewhere like here.  We could even consider
>> using regions in the vendor error code space for different things, like
>> generic errors vs architecture-specific ones, etc.
> 
> I think some codes were already added, and this is the only place where
> SLAUNCH_ERROR_GENERIC is used; not really sure why.  I will add a couple
> more.  By the way, the new errors were inserted in the middle, putting
> about half of the errors out of sync with Linux.  Should Xen and Linux
> be in sync?
Are the uses isolated to Xen and Linux respectively, or are the values
propagated between the two in some way? In the former case there's no
need for them to stay in sync, I think. Whereas in the latter case them
staying in sync would want enforcing somehow, if at all possible.
Jan

On Thu, Jun 12, 2025 at 10:02:33AM +0200, Jan Beulich wrote:
> On 12.06.2025 00:14, Sergii Dmytruk wrote:
> > On Tue, Jun 03, 2025 at 09:17:29AM -0700, ross.philipson@oracle.com wrote:
> >>> +static inline void *txt_init(void)
> >>> +{
> >>> +    void *txt_heap;
> >>> +
> >>> +    /* Clear the TXT error register for a clean start of the day. */
> >>> +    txt_write(TXTCR_ERRORCODE, 0);
> >>> +
> >>> +    txt_heap = _p(txt_read(TXTCR_HEAP_BASE));
> >>> +
> >>> +    if ( txt_os_mle_data_size(txt_heap) < sizeof(struct txt_os_mle_data) ||
> >>> +         txt_os_sinit_data_size(txt_heap) < sizeof(struct txt_os_sinit_data) )
> >>> +        txt_reset(SLAUNCH_ERROR_GENERIC);
> >>
> >> I know the list of error codes pulled in is from the patches for Linux
> >> Secure Launch, which seems right.  The Xen work is free to add more
> >> specific error codes, e.g. somewhere like here.  We could even consider
> >> using regions in the vendor error code space for different things, like
> >> generic errors vs architecture-specific ones, etc.
> >
> > I think some codes were already added, and this is the only place where
> > SLAUNCH_ERROR_GENERIC is used; not really sure why.  I will add a couple
> > more.  By the way, the new errors were inserted in the middle, putting
> > about half of the errors out of sync with Linux.  Should Xen and Linux
> > be in sync?
>
> Are the uses isolated to Xen and Linux respectively, or are the values
> propagated between the two in some way? In the former case there's no
> need for them to stay in sync, I think. Whereas in the latter case them
> staying in sync would want enforcing somehow, if at all possible.
>
> Jan
The uses are independent; the error list was probably copied because the
error conditions are similar.  I'll remove the errors unused in Xen.
Regards