[PATCH v3 06/22] xen/arch/x86: reserve TXT memory during Slaunch

Posted by Sergii Dmytruk 5 months ago
From: Kacper Stojek <kacper.stojek@3mdeb.com>

The TXT heap, SINIT memory and TXT private space are marked as reserved
or unusable in the e820 map to protect them from unintended use.

Signed-off-by: Kacper Stojek <kacper.stojek@3mdeb.com>
Signed-off-by: Krystian Hebel <krystian.hebel@3mdeb.com>
Signed-off-by: Michał Żygowski <michal.zygowski@3mdeb.com>
Signed-off-by: Sergii Dmytruk <sergii.dmytruk@3mdeb.com>
---
 xen/arch/x86/Makefile                |   1 +
 xen/arch/x86/include/asm/intel-txt.h |   6 ++
 xen/arch/x86/include/asm/mm.h        |   3 +
 xen/arch/x86/include/asm/slaunch.h   |  44 +++++++++++
 xen/arch/x86/intel-txt.c             | 113 +++++++++++++++++++++++++++
 xen/arch/x86/setup.c                 |  10 ++-
 xen/arch/x86/slaunch.c               |  98 ++++++++++++++++++++++-
 7 files changed, 271 insertions(+), 4 deletions(-)
 create mode 100644 xen/arch/x86/intel-txt.c

diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index aa20eb42b5..5788898166 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_GDBSX) += gdbsx.o
 obj-y += hypercall.o
 obj-y += i387.o
 obj-y += i8259.o
+obj-y += intel-txt.o
 obj-y += io_apic.o
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
 obj-y += msi.o
diff --git a/xen/arch/x86/include/asm/intel-txt.h b/xen/arch/x86/include/asm/intel-txt.h
index 122b4293ea..ad3c41d86c 100644
--- a/xen/arch/x86/include/asm/intel-txt.h
+++ b/xen/arch/x86/include/asm/intel-txt.h
@@ -420,6 +420,12 @@ static inline void txt_verify_pmr_ranges(
     */
 }
 
+/* Prepares for accesses to TXT-specific memory. */
+void txt_map_mem_regions(void);
+
+/* Marks TXT-specific memory as used to avoid its corruption. */
+void txt_reserve_mem_regions(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* X86_INTEL_TXT_H */
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index d6e80db71c..91fa95cd90 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -106,6 +106,9 @@
 #define _PGC_need_scrub   _PGC_allocated
 #define PGC_need_scrub    PGC_allocated
 
+/* How much of the directmap is prebuilt at compile time. */
+#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)
+
 #ifndef CONFIG_BIGMEM
 /*
  * This definition is solely for the use in struct page_info (and
diff --git a/xen/arch/x86/include/asm/slaunch.h b/xen/arch/x86/include/asm/slaunch.h
index df42defd92..7891d60035 100644
--- a/xen/arch/x86/include/asm/slaunch.h
+++ b/xen/arch/x86/include/asm/slaunch.h
@@ -7,6 +7,7 @@
 #ifndef X86_SLAUNCH_H
 #define X86_SLAUNCH_H
 
+#include <xen/slr-table.h>
 #include <xen/types.h>
 
 /* Indicates an active Secure Launch boot. */
@@ -18,9 +19,52 @@ extern bool slaunch_active;
  */
 extern uint32_t slaunch_slrt;
 
+/*
+ * evt_log is assigned a physical address and the caller must map it to
+ * virtual, if needed.
+ */
+static inline void find_evt_log(const struct slr_table *slrt, void **evt_log,
+                                uint32_t *evt_log_size)
+{
+    const struct slr_entry_log_info *log_info;
+
+    log_info = (const struct slr_entry_log_info *)
+        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_LOG_INFO);
+    if ( log_info != NULL )
+    {
+        *evt_log = _p(log_info->addr);
+        *evt_log_size = log_info->size;
+    }
+    else
+    {
+        *evt_log = NULL;
+        *evt_log_size = 0;
+    }
+}
+
 /*
  * Retrieves pointer to SLRT.  Checks table's validity and maps it as necessary.
  */
 struct slr_table *slaunch_get_slrt(void);
 
+/*
+ * Prepares for accesses to essential data structures set up by the boot environment.
+ */
+void slaunch_map_mem_regions(void);
+
+/* Marks regions of memory as used to avoid their corruption. */
+void slaunch_reserve_mem_regions(void);
+
+/*
+ * This helper function is used to map memory using L2 page tables by aligning
+ * mapped regions to 2MB. This way the page allocator (which at this point
+ * isn't yet initialized) isn't needed for creating new L1 mappings. The
+ * function also checks and skips memory already mapped by the prebuilt tables.
+ *
+ * There is no unmap_l2() because the function is meant to be used by the code
+ * that accesses DRTM-related memory soon after which Xen rebuilds memory maps,
+ * effectively dropping all existing mappings.
+ */
+int slaunch_map_l2(unsigned long paddr, unsigned long size);
+
 #endif /* X86_SLAUNCH_H */
diff --git a/xen/arch/x86/intel-txt.c b/xen/arch/x86/intel-txt.c
new file mode 100644
index 0000000000..163383b262
--- /dev/null
+++ b/xen/arch/x86/intel-txt.c
@@ -0,0 +1,113 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
+ */
+
+#include <xen/bug.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/types.h>
+#include <asm/e820.h>
+#include <asm/intel-txt.h>
+#include <asm/slaunch.h>
+
+static uint64_t __initdata txt_heap_base, txt_heap_size;
+
+void __init txt_map_mem_regions(void)
+{
+    int rc;
+
+    rc = slaunch_map_l2(TXT_PRIV_CONFIG_REGS_BASE, TXT_CONFIG_SPACE_SIZE);
+    BUG_ON(rc != 0);
+
+    txt_heap_base = txt_read(TXTCR_HEAP_BASE);
+    BUG_ON(txt_heap_base == 0);
+
+    txt_heap_size = txt_read(TXTCR_HEAP_SIZE);
+    BUG_ON(txt_heap_size == 0);
+
+    rc = slaunch_map_l2(txt_heap_base, txt_heap_size);
+    BUG_ON(rc != 0);
+}
+
+/* Mark RAM region as RESERVED if it isn't marked that way already. */
+static int __init mark_ram_as(struct e820map *map, uint64_t start,
+                              uint64_t end, uint32_t type)
+{
+    unsigned int i;
+    uint32_t from_type = E820_RAM;
+
+    for ( i = 0; i < map->nr_map; i++ )
+    {
+        uint64_t rs = map->map[i].addr;
+        uint64_t re = rs + map->map[i].size;
+
+        /* The entry includes the range. */
+        if ( start >= rs && end <= re )
+            break;
+
+        /* The entry intersects the range. */
+        if ( end > rs && start < re )
+        {
+            /* Fatal failure. */
+            return 0;
+        }
+    }
+
+    /*
+     * If the range is not included by any entry and no entry intersects it,
+     * then it's not listed in the memory map.  Consider this case as a success
+     * since we're only preventing RAM from being used, and an unlisted range
+     * should not be used.
+     */
+    if ( i == map->nr_map )
+        return 1;
+
+    /*
+     * e820_change_range_type() fails if the range is already marked with the
+     * desired type.  Don't consider it an error if firmware has done it for us.
+     */
+    if ( map->map[i].type == type )
+        return 1;
+
+    /* E820_ACPI or E820_NVS are really unexpected, but others are fine. */
+    if ( map->map[i].type == E820_RESERVED ||
+         map->map[i].type == E820_UNUSABLE )
+        from_type = map->map[i].type;
+
+    return e820_change_range_type(map, start, end, from_type, type);
+}
+
+void __init txt_reserve_mem_regions(void)
+{
+    int rc;
+    uint64_t sinit_base, sinit_size;
+
+    /* TXT Heap */
+    BUG_ON(txt_heap_base == 0);
+    printk("SLAUNCH: reserving TXT heap (%#lx - %#lx)\n", txt_heap_base,
+           txt_heap_base + txt_heap_size);
+    rc = mark_ram_as(&e820_raw, txt_heap_base, txt_heap_base + txt_heap_size,
+                     E820_RESERVED);
+    BUG_ON(rc == 0);
+
+    sinit_base = txt_read(TXTCR_SINIT_BASE);
+    BUG_ON(sinit_base == 0);
+
+    sinit_size = txt_read(TXTCR_SINIT_SIZE);
+    BUG_ON(sinit_size == 0);
+
+    /* SINIT */
+    printk("SLAUNCH: reserving SINIT memory (%#lx - %#lx)\n", sinit_base,
+           sinit_base + sinit_size);
+    rc = mark_ram_as(&e820_raw, sinit_base, sinit_base + sinit_size,
+                     E820_RESERVED);
+    BUG_ON(rc == 0);
+
+    /* TXT Private Space */
+    rc = mark_ram_as(&e820_raw, TXT_PRIV_CONFIG_REGS_BASE,
+                     TXT_PRIV_CONFIG_REGS_BASE + TXT_CONFIG_SPACE_SIZE,
+                     E820_UNUSABLE);
+    BUG_ON(rc == 0);
+}
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 1f5cb67bd0..e4638acd12 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -53,6 +53,7 @@
 #include <asm/prot-key.h>
 #include <asm/pv/domain.h>
 #include <asm/setup.h>
+#include <asm/slaunch.h>
 #include <asm/smp.h>
 #include <asm/spec_ctrl.h>
 #include <asm/stubs.h>
@@ -1087,9 +1088,6 @@ static struct domain *__init create_dom0(struct boot_info *bi)
     return d;
 }
 
-/* How much of the directmap is prebuilt at compile time. */
-#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)
-
 void asmlinkage __init noreturn __start_xen(void)
 {
     const char *memmap_type = NULL;
@@ -1425,6 +1423,12 @@ void asmlinkage __init noreturn __start_xen(void)
 #endif
     }
 
+    if ( slaunch_active )
+    {
+        slaunch_map_mem_regions();
+        slaunch_reserve_mem_regions();
+    }
+
     /* Sanitise the raw E820 map to produce a final clean version. */
     max_page = raw_max_page = init_e820(memmap_type, &e820_raw);
 
diff --git a/xen/arch/x86/slaunch.c b/xen/arch/x86/slaunch.c
index a3e6ab8d71..ac3b43942b 100644
--- a/xen/arch/x86/slaunch.c
+++ b/xen/arch/x86/slaunch.c
@@ -7,14 +7,18 @@
 #include <xen/compiler.h>
 #include <xen/init.h>
 #include <xen/macros.h>
+#include <xen/mm.h>
 #include <xen/types.h>
+#include <asm/e820.h>
+#include <asm/intel-txt.h>
+#include <asm/page.h>
 #include <asm/slaunch.h>
 
 /*
  * These variables are assigned to by the code near Xen's entry point.
  *
  * slaunch_active is not __initdata to allow checking for an active Secure
- * Launch boot.
+ * Launch boot at any point.
  */
 bool slaunch_active;
 uint32_t __initdata slaunch_slrt; /* physical address */
@@ -25,3 +29,95 @@ static void __maybe_unused compile_time_checks(void)
 {
     BUILD_BUG_ON(sizeof(slaunch_active) != 1);
 }
+
+struct slr_table *__init slaunch_get_slrt(void)
+{
+    static struct slr_table *slrt;
+
+    if (slrt == NULL) {
+        int rc;
+
+        slrt = __va(slaunch_slrt);
+
+        rc = slaunch_map_l2(slaunch_slrt, PAGE_SIZE);
+        BUG_ON(rc != 0);
+
+        if ( slrt->magic != SLR_TABLE_MAGIC )
+            panic("SLRT has invalid magic value: %#08x!\n", slrt->magic);
+        /* XXX: are newer revisions allowed? */
+        if ( slrt->revision != SLR_TABLE_REVISION )
+            panic("SLRT is of unsupported revision: %#04x!\n", slrt->revision);
+        if ( slrt->architecture != SLR_INTEL_TXT )
+            panic("SLRT is for unexpected architecture: %#04x!\n",
+                  slrt->architecture);
+        if ( slrt->size > slrt->max_size )
+            panic("SLRT is larger than its max size: %#08x > %#08x!\n",
+                  slrt->size, slrt->max_size);
+
+        if ( slrt->size > PAGE_SIZE )
+        {
+            rc = slaunch_map_l2(slaunch_slrt, slrt->size);
+            BUG_ON(rc != 0);
+        }
+    }
+
+    return slrt;
+}
+
+void __init slaunch_map_mem_regions(void)
+{
+    void *evt_log_addr;
+    uint32_t evt_log_size;
+
+    /* Vendor-specific part. */
+    txt_map_mem_regions();
+
+    find_evt_log(slaunch_get_slrt(), &evt_log_addr, &evt_log_size);
+    if ( evt_log_addr != NULL )
+    {
+        int rc = slaunch_map_l2((uintptr_t)evt_log_addr, evt_log_size);
+        BUG_ON(rc != 0);
+    }
+}
+
+void __init slaunch_reserve_mem_regions(void)
+{
+    int rc;
+
+    void *evt_log_addr;
+    uint32_t evt_log_size;
+
+    /* Vendor-specific part. */
+    txt_reserve_mem_regions();
+
+    find_evt_log(slaunch_get_slrt(), &evt_log_addr, &evt_log_size);
+    if ( evt_log_addr != NULL )
+    {
+        printk("SLAUNCH: reserving event log (%#lx - %#lx)\n",
+               (uint64_t)evt_log_addr,
+               (uint64_t)evt_log_addr + evt_log_size);
+        rc = reserve_e820_ram(&e820_raw, (uint64_t)evt_log_addr,
+                              (uint64_t)evt_log_addr + evt_log_size);
+        BUG_ON(rc == 0);
+    }
+}
+
+int __init slaunch_map_l2(unsigned long paddr, unsigned long size)
+{
+    unsigned long aligned_paddr = paddr & ~((1ULL << L2_PAGETABLE_SHIFT) - 1);
+    unsigned long pages = ((paddr + size) - aligned_paddr);
+    pages = ROUNDUP(pages, 1ULL << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
+
+    if ( aligned_paddr + pages * PAGE_SIZE <= PREBUILT_MAP_LIMIT )
+        return 0;
+
+    if ( aligned_paddr < PREBUILT_MAP_LIMIT )
+    {
+        pages -= (PREBUILT_MAP_LIMIT - aligned_paddr) >> PAGE_SHIFT;
+        aligned_paddr = PREBUILT_MAP_LIMIT;
+    }
+
+    return map_pages_to_xen((uintptr_t)__va(aligned_paddr),
+                            maddr_to_mfn(aligned_paddr),
+                            pages, PAGE_HYPERVISOR);
+}
-- 
2.49.0


Re: [PATCH v3 06/22] xen/arch/x86: reserve TXT memory during Slaunch
Posted by Jan Beulich 3 months, 3 weeks ago
On 30.05.2025 15:17, Sergii Dmytruk wrote:
> --- a/xen/arch/x86/include/asm/mm.h
> +++ b/xen/arch/x86/include/asm/mm.h
> @@ -106,6 +106,9 @@
>  #define _PGC_need_scrub   _PGC_allocated
>  #define PGC_need_scrub    PGC_allocated
>  
> +/* How much of the directmap is prebuilt at compile time. */
> +#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)

Oh, also - I don't think mm.h is a good place for this. Please consider
putting into setup.h.

Jan
Re: [PATCH v3 06/22] xen/arch/x86: reserve TXT memory during Slaunch
Posted by Jan Beulich 3 months, 3 weeks ago
On 30.05.2025 15:17, Sergii Dmytruk wrote:
> --- a/xen/arch/x86/include/asm/mm.h
> +++ b/xen/arch/x86/include/asm/mm.h
> @@ -106,6 +106,9 @@
>  #define _PGC_need_scrub   _PGC_allocated
>  #define PGC_need_scrub    PGC_allocated
>  
> +/* How much of the directmap is prebuilt at compile time. */
> +#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)

Better 1U or even 1UL?

> --- a/xen/arch/x86/include/asm/slaunch.h
> +++ b/xen/arch/x86/include/asm/slaunch.h
> @@ -7,6 +7,7 @@
>  #ifndef X86_SLAUNCH_H
>  #define X86_SLAUNCH_H
>  
> +#include <xen/slr-table.h>
>  #include <xen/types.h>
>  
>  /* Indicates an active Secure Launch boot. */
> @@ -18,9 +19,52 @@ extern bool slaunch_active;
>   */
>  extern uint32_t slaunch_slrt;
>  
> +/*
> + * evt_log is assigned a physical address and the caller must map it to
> + * virtual, if needed.

In which case you want to use paddr_t, not void *.

> + */
> +static inline void find_evt_log(const struct slr_table *slrt, void **evt_log,
> +                                uint32_t *evt_log_size)
> +{
> +    const struct slr_entry_log_info *log_info;
> +
> +    log_info = (const struct slr_entry_log_info *)
> +        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_LOG_INFO);

In situations like this please use the less type-unsafe container_of().
(Apparently applies also to at least one earlier patch.)
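E.g. something along these lines (an untested sketch; it assumes
slr_next_entry_by_tag() returns a struct slr_entry_hdr * and that
struct slr_entry_log_info embeds the generic header in a field named
"hdr" - the actual field name in slr-table.h may differ):

    struct slr_entry_hdr *hdr =
        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_LOG_INFO);
    const struct slr_entry_log_info *log_info =
        hdr ? container_of(hdr, struct slr_entry_log_info, hdr) : NULL;

The NULL check is still wanted: container_of() would turn a NULL header
pointer into a bogus non-NULL struct pointer whenever the member isn't
the first one.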

> +    if ( log_info != NULL )
> +    {
> +        *evt_log = _p(log_info->addr);
> +        *evt_log_size = log_info->size;
> +    }
> +    else
> +    {
> +        *evt_log = NULL;
> +        *evt_log_size = 0;
> +    }
> +}
> +
>  /*
>   * Retrieves pointer to SLRT.  Checks table's validity and maps it as necessary.
>   */
>  struct slr_table *slaunch_get_slrt(void);
>  
> +/*
> + * Prepares for accesses to essential data structures set up by the boot environment.
> + */
> +void slaunch_map_mem_regions(void);
> +
> +/* Marks regions of memory as used to avoid their corruption. */
> +void slaunch_reserve_mem_regions(void);
> +
> +/*
> + * This helper function is used to map memory using L2 page tables by aligning
> + * mapped regions to 2MB. This way the page allocator (which at this point
> + * isn't yet initialized) isn't needed for creating new L1 mappings. The
> + * function also checks and skips memory already mapped by the prebuilt tables.
> + *
> + * There is no unmap_l2() because the function is meant to be used by the code
> + * that accesses DRTM-related memory soon after which Xen rebuilds memory maps,
> + * effectively dropping all existing mappings.
> + */
> +int slaunch_map_l2(unsigned long paddr, unsigned long size);

While largely benign on x86-64, maybe better paddr_t and size_t. And then ...

> --- /dev/null
> +++ b/xen/arch/x86/intel-txt.c
> @@ -0,0 +1,113 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + *
> + * Copyright (c) 2022-2025 3mdeb Sp. z o.o. All rights reserved.
> + */
> +
> +#include <xen/bug.h>
> +#include <xen/init.h>
> +#include <xen/lib.h>
> +#include <xen/types.h>
> +#include <asm/e820.h>
> +#include <asm/intel-txt.h>
> +#include <asm/slaunch.h>
> +
> +static uint64_t __initdata txt_heap_base, txt_heap_size;

... why suddenly uint64_t here (and then elsewhere below)?

> +/* Mark RAM region as RESERVED if it isn't marked that way already. */
> +static int __init mark_ram_as(struct e820map *map, uint64_t start,
> +                              uint64_t end, uint32_t type)
> +{
> +    unsigned int i;
> +    uint32_t from_type = E820_RAM;
> +
> +    for ( i = 0; i < map->nr_map; i++ )
> +    {
> +        uint64_t rs = map->map[i].addr;
> +        uint64_t re = rs + map->map[i].size;
> +
> +        /* The entry includes the range. */
> +        if ( start >= rs && end <= re )
> +            break;
> +
> +        /* The entry intersects the range. */
> +        if ( end > rs && start < re )
> +        {
> +            /* Fatal failure. */
> +            return 0;
> +        }
> +    }
> +
> +    /*
> +     * If the range is not included by any entry and no entry intersects it,
> +     * then it's not listed in the memory map.  Consider this case as a success
> +     * since we're only preventing RAM from being used, and an unlisted range
> +     * should not be used.
> +     */
> +    if ( i == map->nr_map )
> +        return 1;
> +
> +    /*
> +     * e820_change_range_type() fails if the range is already marked with the
> +     * desired type.  Don't consider it an error if firmware has done it for us.
> +     */
> +    if ( map->map[i].type == type )
> +        return 1;
> +
> +    /* E820_ACPI or E820_NVS are really unexpected, but others are fine. */
> +    if ( map->map[i].type == E820_RESERVED ||
> +         map->map[i].type == E820_UNUSABLE )

Are you sure about permitting UNUSABLE here?

> +        from_type = map->map[i].type;
> +
> +    return e820_change_range_type(map, start, end, from_type, type);

Even if this function, for historic reasons, also returns int/0/1, please make
new code with boolean results return bool/false/true.
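I.e. something like (sketch only):

    static bool __init mark_ram_as(struct e820map *map, uint64_t start,
                                   uint64_t end, uint32_t type);

with call sites then reading naturally as

    if ( !mark_ram_as(&e820_raw, txt_heap_base,
                      txt_heap_base + txt_heap_size, E820_RESERVED) )
        BUG();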

> +void __init txt_reserve_mem_regions(void)
> +{
> +    int rc;
> +    uint64_t sinit_base, sinit_size;
> +
> +    /* TXT Heap */
> +    BUG_ON(txt_heap_base == 0);
> +    printk("SLAUNCH: reserving TXT heap (%#lx - %#lx)\n", txt_heap_base,
> +           txt_heap_base + txt_heap_size);

Please log ranges in a way that makes it unambiguous whether they're exclusive
or inclusive (especially at the upper end).

> +    rc = mark_ram_as(&e820_raw, txt_heap_base, txt_heap_base + txt_heap_size,
> +                     E820_RESERVED);
> +    BUG_ON(rc == 0);

As to the boolean remark above - constructs like this look particularly odd:
Typically a return code (stored in a variable named "rc") of 0 means "success".

> +    sinit_base = txt_read(TXTCR_SINIT_BASE);
> +    BUG_ON(sinit_base == 0);
> +
> +    sinit_size = txt_read(TXTCR_SINIT_SIZE);
> +    BUG_ON(sinit_size == 0);
> +
> +    /* SINIT */
> +    printk("SLAUNCH: reserving SINIT memory (%#lx - %#lx)\n", sinit_base,
> +           sinit_base + sinit_size);
> +    rc = mark_ram_as(&e820_raw, sinit_base, sinit_base + sinit_size,
> +                     E820_RESERVED);
> +    BUG_ON(rc == 0);
> +
> +    /* TXT Private Space */
> +    rc = mark_ram_as(&e820_raw, TXT_PRIV_CONFIG_REGS_BASE,
> +                     TXT_PRIV_CONFIG_REGS_BASE + TXT_CONFIG_SPACE_SIZE,
> +                     E820_UNUSABLE);

Why UNUSABLE? Then, if all callers used RESERVED, this wouldn't need to be
a function argument anymore, and you also wouldn't need to change RESERVED
ranges.

> --- a/xen/arch/x86/slaunch.c
> +++ b/xen/arch/x86/slaunch.c
> @@ -7,14 +7,18 @@
>  #include <xen/compiler.h>
>  #include <xen/init.h>
>  #include <xen/macros.h>
> +#include <xen/mm.h>
>  #include <xen/types.h>
> +#include <asm/e820.h>
> +#include <asm/intel-txt.h>
> +#include <asm/page.h>
>  #include <asm/slaunch.h>
>  
>  /*
>   * These variables are assigned to by the code near Xen's entry point.
>   *
>   * slaunch_active is not __initdata to allow checking for an active Secure
> - * Launch boot.
> + * Launch boot at any point.

This comment adjustment should probably move to where the comment is being
introduced.

> @@ -25,3 +29,95 @@ static void __maybe_unused compile_time_checks(void)
>  {
>      BUILD_BUG_ON(sizeof(slaunch_active) != 1);
>  }
> +
> +struct slr_table *__init slaunch_get_slrt(void)
> +{
> +    static struct slr_table *slrt;

__initdata?

> +    if (slrt == NULL) {

Nit: Style.

> +        int rc;
> +
> +        slrt = __va(slaunch_slrt);
> +
> +        rc = slaunch_map_l2(slaunch_slrt, PAGE_SIZE);
> +        BUG_ON(rc != 0);
> +
> +        if ( slrt->magic != SLR_TABLE_MAGIC )
> +            panic("SLRT has invalid magic value: %#08x!\n", slrt->magic);

While %#x is indeed the preferred form to use, in particular when padding it's
not normally helpful, as the 0x prefix is included in the character count. And
the value zero also ends up odd in that case, I think.
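For illustration (standard printf semantics):

    printk("%#08x\n", 0x123); /* "0x000123" - the prefix eats 2 of the
                                 8 columns */
    printk("%#08x\n", 0);     /* "00000000" - '#' emits no prefix for a
                                 zero value */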

> +int __init slaunch_map_l2(unsigned long paddr, unsigned long size)
> +{
> +    unsigned long aligned_paddr = paddr & ~((1ULL << L2_PAGETABLE_SHIFT) - 1);
> +    unsigned long pages = ((paddr + size) - aligned_paddr);
> +    pages = ROUNDUP(pages, 1ULL << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;

Nit: Blank line please between declaration(s) and statement(s).

> +    if ( aligned_paddr + pages * PAGE_SIZE <= PREBUILT_MAP_LIMIT )
> +        return 0;
> +
> +    if ( aligned_paddr < PREBUILT_MAP_LIMIT )
> +    {
> +        pages -= (PREBUILT_MAP_LIMIT - aligned_paddr) >> PAGE_SHIFT;
> +        aligned_paddr = PREBUILT_MAP_LIMIT;
> +    }
> +
> +    return map_pages_to_xen((uintptr_t)__va(aligned_paddr),
> +                            maddr_to_mfn(aligned_paddr),
> +                            pages, PAGE_HYPERVISOR);
> +}

What is being mapped here is (silently?) assumed to be below 4 GiB? The
function could anyway do with a brief comment saying what it's intended
to do, and what assumptions it makes.

It further looks as if you may be doing the same mapping multiple times,
as you don't record what was already mapped.

Jan
Re: [PATCH v3 06/22] xen/arch/x86: reserve TXT memory during Slaunch
Posted by Sergii Dmytruk 1 month, 1 week ago
On Thu, Jul 10, 2025 at 03:00:07PM +0200, Jan Beulich wrote:
> On 30.05.2025 15:17, Sergii Dmytruk wrote:
> > --- a/xen/arch/x86/include/asm/mm.h
> > +++ b/xen/arch/x86/include/asm/mm.h
> > @@ -106,6 +106,9 @@
> >  #define _PGC_need_scrub   _PGC_allocated
> >  #define PGC_need_scrub    PGC_allocated
> >
> > +/* How much of the directmap is prebuilt at compile time. */
> > +#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)
>
> Better 1U or even 1UL?

Will change to 1UL.  L2_PAGETABLE_SHIFT is 21, so all variants are
essentially the same.

From another email:
> Oh, also - I don't think mm.h is a good place for this. Please
> consider putting into setup.h.

Sure, mm.h just had a more suggestive name.
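I.e. roughly (sketch of the plan):

    /* In xen/arch/x86/include/asm/setup.h: */

    /* How much of the directmap is prebuilt at compile time. */
    #define PREBUILT_MAP_LIMIT (1UL << L2_PAGETABLE_SHIFT)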

> > +/*
> > + * evt_log is assigned a physical address and the caller must map it to
> > + * virtual, if needed.
>
> In which case you want to use paddr_t, not void *.

Will change.

> > + */
> > +static inline void find_evt_log(const struct slr_table *slrt, void **evt_log,
> > +                                uint32_t *evt_log_size)
> > +{
> > +    const struct slr_entry_log_info *log_info;
> > +
> > +    log_info = (const struct slr_entry_log_info *)
> > +        slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_LOG_INFO);
>
> In situations like this please use the less type-unsafe container_of().
> (Apparently applies also to at least one earlier patch.)

Will update all places.

> > +int slaunch_map_l2(unsigned long paddr, unsigned long size);
>
> While largely benign on x86-64, maybe better paddr_t and size_t. And then ...

OK.

> > +static uint64_t __initdata txt_heap_base, txt_heap_size;
>
> ... why suddenly uint64_t here (and then elsewhere below)?

These have 64 bits allocated for them in TXT register space.
The spec only talks about bits 31:0 (unless it's a typo), so I'll add a
comment about that and use `uint32_t`.
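Roughly (sketch of the planned change):

    /*
     * The register space allocates 64 bits for these, but the spec
     * describes only bits 31:0, hence uint32_t.
     */
    static uint32_t __initdata txt_heap_base, txt_heap_size;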

> > +/* Mark RAM region as RESERVED if it isn't marked that way already. */
> > +static int __init mark_ram_as(struct e820map *map, uint64_t start,
> > +                              uint64_t end, uint32_t type)
> > +{
> > ...
> > +
> > +    /* E820_ACPI or E820_NVS are really unexpected, but others are fine. */
> > +    if ( map->map[i].type == E820_RESERVED ||
> > +         map->map[i].type == E820_UNUSABLE )
>
> Are you sure about permitting UNUSABLE here?

Not really, I'll drop it.

> > +        from_type = map->map[i].type;
> > +
> > +    return e820_change_range_type(map, start, end, from_type, type);
>
> Even if this function, for historic reasons, also returns int/0/1, please make
> new code with boolean results return bool/false/true.

OK.

> > +void __init txt_reserve_mem_regions(void)
> > +{
> > +    int rc;
> > +    uint64_t sinit_base, sinit_size;
> > +
> > +    /* TXT Heap */
> > +    BUG_ON(txt_heap_base == 0);
> > +    printk("SLAUNCH: reserving TXT heap (%#lx - %#lx)\n", txt_heap_base,
> > +           txt_heap_base + txt_heap_size);
>
> Please log ranges in a way that makes it unambiguous whether they're exclusive
> or inclusive (especially at the upper end).

I'll use start:end notation which I think suggests inclusive bounds.

> > +    /* TXT Private Space */
> > +    rc = mark_ram_as(&e820_raw, TXT_PRIV_CONFIG_REGS_BASE,
> > +                     TXT_PRIV_CONFIG_REGS_BASE + TXT_CONFIG_SPACE_SIZE,
> > +                     E820_UNUSABLE);
>
> Why UNUSABLE? Then, if all callers used RESERVED, this wouldn't need to be
> a function argument anymore, and you also wouldn't need to change RESERVED
> ranges.

I think it was suggested during some review as a way to prevent an OS
from using a range (reserved ones can still get used), but I'm not sure
it even works, so I'll revert to always reserving memory.
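Roughly (sketch only; the name is illustrative):

    /* Mark a RAM region as E820_RESERVED if it isn't already. */
    static bool __init mark_ram_reserved(struct e820map *map,
                                         uint64_t start, uint64_t end);

which also lets the type parameter and the E820_UNUSABLE handling go
away.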

> > - * Launch boot.
> > + * Launch boot at any point.
>
> This comment adjustment should probably move to where the comment is being
> introduced.

Thanks, I probably forgot to do it.

> > +struct slr_table *__init slaunch_get_slrt(void)
> > +{
> > +    static struct slr_table *slrt;
>
> __initdata?

Right, will add.

> > +    if (slrt == NULL) {
>
> Nit: Style.

Will fix.

> > +            panic("SLRT has invalid magic value: %#08x!\n", slrt->magic);
>
> While %#x is indeed the preferred form to use, in particular when padding it's
> not normally helpful, as the 0x prefix is included in the character count. And
> the value zero also ends up odd in that case, I think.

I constantly forget about the prefix being included.  I won't specify a width
here; it's not particularly useful in these cases.
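I.e. just (sketch):

    panic("SLRT has invalid magic value: %#x!\n", slrt->magic);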

> > +int __init slaunch_map_l2(unsigned long paddr, unsigned long size)
> > +{
> > +    unsigned long aligned_paddr = paddr & ~((1ULL << L2_PAGETABLE_SHIFT) - 1);
> > +    unsigned long pages = ((paddr + size) - aligned_paddr);
> > +    pages = ROUNDUP(pages, 1ULL << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
>
> Nit: Blank line please between declaration(s) and statement(s).

OK.

> > +    if ( aligned_paddr + pages * PAGE_SIZE <= PREBUILT_MAP_LIMIT )
> > +        return 0;
> > +
> > +    if ( aligned_paddr < PREBUILT_MAP_LIMIT )
> > +    {
> > +        pages -= (PREBUILT_MAP_LIMIT - aligned_paddr) >> PAGE_SHIFT;
> > +        aligned_paddr = PREBUILT_MAP_LIMIT;
> > +    }
> > +
> > +    return map_pages_to_xen((uintptr_t)__va(aligned_paddr),
> > +                            maddr_to_mfn(aligned_paddr),
> > +                            pages, PAGE_HYPERVISOR);
> > +}
>
> What is being mapped here is (silently?) assumed to be below 4 GiB? The
> function could anyway do with a brief comment saying what it's intended
> to do, and what assumptions it makes.
>
> It further looks as if you may be doing the same mapping multiple times,
> as you don't record what was already mapped.
>
> Jan

There is a large comment in slaunch.h which explains that we don't care
about unmapping because the memory maps are rebuilt after this function
is used.  There's no need to track what was already mapped for the same
reason.

DRTM data is below 4 GiB; I'll add BUG_ON() calls and update the comment
to state that.
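Roughly (sketch; assuming the GB() helper is usable here):

    /* DRTM-related data is expected to live below 4 GiB. */
    BUG_ON(paddr + size > GB(4));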

Regards
Re: [PATCH v3 06/22] xen/arch/x86: reserve TXT memory during Slaunch
Posted by Jan Beulich 1 month, 1 week ago
On 22.09.2025 23:35, Sergii Dmytruk wrote:
> On Thu, Jul 10, 2025 at 03:00:07PM +0200, Jan Beulich wrote:
>> On 30.05.2025 15:17, Sergii Dmytruk wrote:
>>> +void __init txt_reserve_mem_regions(void)
>>> +{
>>> +    int rc;
>>> +    uint64_t sinit_base, sinit_size;
>>> +
>>> +    /* TXT Heap */
>>> +    BUG_ON(txt_heap_base == 0);
>>> +    printk("SLAUNCH: reserving TXT heap (%#lx - %#lx)\n", txt_heap_base,
>>> +           txt_heap_base + txt_heap_size);
>>
>> Please log ranges in a way that makes it unambiguous whether they're exclusive
>> or inclusive (especially at the upper end).
> 
> I'll use start:end notation which I think suggests inclusive bounds.

Please use mathematical notation when logging ranges, i.e. [a,b) or [a,b].
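E.g. (sketch):

    printk("SLAUNCH: reserving TXT heap [%#lx, %#lx)\n",
           txt_heap_base, txt_heap_base + txt_heap_size);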

Jan
Re: [PATCH v3 06/22] xen/arch/x86: reserve TXT memory during Slaunch
Posted by Sergii Dmytruk 1 month, 1 week ago
On Tue, Sep 23, 2025 at 12:48:55AM +0200, Jan Beulich wrote:
> On 22.09.2025 23:35, Sergii Dmytruk wrote:
> > On Thu, Jul 10, 2025 at 03:00:07PM +0200, Jan Beulich wrote:
> >> On 30.05.2025 15:17, Sergii Dmytruk wrote:
> >>> +void __init txt_reserve_mem_regions(void)
> >>> +{
> >>> +    int rc;
> >>> +    uint64_t sinit_base, sinit_size;
> >>> +
> >>> +    /* TXT Heap */
> >>> +    BUG_ON(txt_heap_base == 0);
> >>> +    printk("SLAUNCH: reserving TXT heap (%#lx - %#lx)\n", txt_heap_base,
> >>> +           txt_heap_base + txt_heap_size);
> >>
> >> Please log ranges in a way that makes it unambiguous whether they're exclusive
> >> or inclusive (especially at the upper end).
> >
> > I'll use start:end notation which I think suggests inclusive bounds.
>
> Please use mathematical notation when logging ranges, i.e. [a,b) or [a,b].
>
> Jan

OK, I initially thought it would be too uncommon in log output.

Regards