[PATCH v3 05/22] x86/boot/slaunch-early: early TXT checks and boot data retrieval

Posted by Sergii Dmytruk 5 months ago
From: Krystian Hebel <krystian.hebel@3mdeb.com>

The tests validate that important parts of memory are protected against
DMA attacks, including Xen and the MBI. Modules can be tested later, when
it is possible to report issues to the user before invoking TXT reset.

TPM event log validation is temporarily disabled due to an issue with
its allocation by the bootloader (GRUB), which will need to be modified
to address this. Ultimately, the event log will also have to be validated
early, as it is used immediately after these tests to hold MBI
measurements. See the larger comment in txt_verify_pmr_ranges().

Signed-off-by: Krystian Hebel <krystian.hebel@3mdeb.com>
Signed-off-by: Sergii Dmytruk <sergii.dmytruk@3mdeb.com>
---
 xen/arch/x86/boot/slaunch-early.c    |   6 ++
 xen/arch/x86/include/asm/intel-txt.h | 112 +++++++++++++++++++++++++++
 2 files changed, 118 insertions(+)

diff --git a/xen/arch/x86/boot/slaunch-early.c b/xen/arch/x86/boot/slaunch-early.c
index c9d364bcd5..662144e42f 100644
--- a/xen/arch/x86/boot/slaunch-early.c
+++ b/xen/arch/x86/boot/slaunch-early.c
@@ -22,10 +22,13 @@ void asmlinkage slaunch_early_init(uint32_t load_base_addr,
     void *txt_heap;
     const struct txt_os_mle_data *os_mle;
     const struct slr_table *slrt;
+    const struct txt_os_sinit_data *os_sinit;
     const struct slr_entry_intel_info *intel_info;
+    uint32_t size = tgt_end_addr - tgt_base_addr;
 
     txt_heap = txt_init();
     os_mle = txt_os_mle_data_start(txt_heap);
+    os_sinit = txt_os_sinit_data_start(txt_heap);
 
     result->slrt_pa = os_mle->slrt;
     result->mbi_pa = 0;
@@ -38,4 +41,7 @@ void asmlinkage slaunch_early_init(uint32_t load_base_addr,
         return;
 
     result->mbi_pa = intel_info->boot_params_base;
+
+    txt_verify_pmr_ranges(os_mle, os_sinit, intel_info,
+                          load_base_addr, tgt_base_addr, size);
 }
diff --git a/xen/arch/x86/include/asm/intel-txt.h b/xen/arch/x86/include/asm/intel-txt.h
index 7658457e9d..122b4293ea 100644
--- a/xen/arch/x86/include/asm/intel-txt.h
+++ b/xen/arch/x86/include/asm/intel-txt.h
@@ -93,6 +93,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <xen/slr-table.h>
+
 /* Need to differentiate between pre- and post paging enabled. */
 #ifdef __EARLY_SLAUNCH__
 #include <xen/macros.h>
@@ -308,6 +310,116 @@ static inline void *txt_init(void)
     return txt_heap;
 }
 
+static inline int is_in_pmr(const struct txt_os_sinit_data *os_sinit,
+                            uint64_t base, uint32_t size, int check_high)
+{
+    /* Check for size overflow. */
+    if ( base + size < base )
+        txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
+
+    /* Low range always starts at 0, so its size is also end address. */
+    if ( base >= os_sinit->vtd_pmr_lo_base &&
+         base + size <= os_sinit->vtd_pmr_lo_size )
+        return 1;
+
+    if ( check_high && os_sinit->vtd_pmr_hi_size != 0 )
+    {
+        if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
+             os_sinit->vtd_pmr_hi_size )
+            txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
+        if ( base >= os_sinit->vtd_pmr_hi_base &&
+             base + size <= os_sinit->vtd_pmr_hi_base +
+                            os_sinit->vtd_pmr_hi_size )
+            return 1;
+    }
+
+    return 0;
+}
+
+static inline void txt_verify_pmr_ranges(
+    const struct txt_os_mle_data *os_mle,
+    const struct txt_os_sinit_data *os_sinit,
+    const struct slr_entry_intel_info *info,
+    uint32_t load_base_addr,
+    uint32_t tgt_base_addr,
+    uint32_t xen_size)
+{
+    int check_high_pmr = 0;
+
+    /* Verify the value of the low PMR base. It should always be 0. */
+    if ( os_sinit->vtd_pmr_lo_base != 0 )
+        txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);
+
+    /*
+     * Low PMR size should not be 0 on current platforms. There is an ongoing
+     * transition to TPR-based DMA protection instead of PMR-based; this is not
+     * yet supported by the code.
+     */
+    if ( os_sinit->vtd_pmr_lo_size == 0 )
+        txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE);
+
+    /* Check if regions overlap. Treat regions with no hole between as error. */
+    if ( os_sinit->vtd_pmr_hi_size != 0 &&
+         os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size )
+        txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);
+
+    /* All regions accessed by 32b code must be below 4G. */
+    if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
+         0x100000000ULL )
+        check_high_pmr = 1;
+
+    /*
+     * ACM checks that TXT heap and MLE memory is protected against DMA. We have
+     * to check if MBI and whole Xen memory is protected. The latter is done in
+     * case bootloader failed to set whole image as MLE and to make sure that
+     * both pre- and post-relocation code is protected.
+     */
+
+    /* Check if all of Xen before relocation is covered by PMR. */
+    if ( !is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr) )
+        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
+
+    /* Check if all of Xen after relocation is covered by PMR. */
+    if ( load_base_addr != tgt_base_addr &&
+         !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr) )
+        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
+
+    /*
+     * If present, check that MBI is covered by PMR. MBI starts with 'uint32_t
+     * total_size'.
+     */
+    if ( info->boot_params_base != 0 &&
+         !is_in_pmr(os_sinit, info->boot_params_base,
+                    *(uint32_t *)(uintptr_t)info->boot_params_base,
+                    check_high_pmr) )
+        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
+
+    /* Check if TPM event log (if present) is covered by PMR. */
+    /*
+     * FIXME: currently commented out as GRUB allocates it in a hole between
+     * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other
+     * easy-to-use DMA protection mechanisms that would allow to protect that
+     * part of memory. TPR (TXT DMA Protection Range) gives 1MB resolution, but
+     * it still wouldn't be enough.
+     *
+     * One possible solution would be for GRUB to allocate log at lower address,
+     * but this would further increase memory space fragmentation. Another
+     * option is to align PMR up instead of down, making PMR cover part of
+     * reserved region, but it is unclear what the consequences may be.
+     *
+     * In tboot this issue was resolved by reserving leftover chunks of memory
+     * in e820 and/or UEFI memory map. This is also a valid solution, but would
+     * require more changes to GRUB than the ones listed above, as event log is
+     * allocated much earlier than PMRs.
+     */
+    /*
+    if ( os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
+         !is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
+                    check_high_pmr) )
+        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
+    */
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* X86_INTEL_TXT_H */
-- 
2.49.0
Re: [PATCH v3 05/22] x86/boot/slaunch-early: early TXT checks and boot data retrieval
Posted by Jan Beulich 3 months, 3 weeks ago
On 30.05.2025 15:17, Sergii Dmytruk wrote:
> --- a/xen/arch/x86/include/asm/intel-txt.h
> +++ b/xen/arch/x86/include/asm/intel-txt.h
> @@ -93,6 +93,8 @@
>  
>  #ifndef __ASSEMBLY__
>  
> +#include <xen/slr-table.h>
> +
>  /* Need to differentiate between pre- and post paging enabled. */
>  #ifdef __EARLY_SLAUNCH__
>  #include <xen/macros.h>
> @@ -308,6 +310,116 @@ static inline void *txt_init(void)
>      return txt_heap;
>  }
>  
> +static inline int is_in_pmr(const struct txt_os_sinit_data *os_sinit,
> +                            uint64_t base, uint32_t size, int check_high)
> +{
> +    /* Check for size overflow. */
> +    if ( base + size < base )
> +        txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
> +
> +    /* Low range always starts at 0, so its size is also end address. */
> +    if ( base >= os_sinit->vtd_pmr_lo_base &&
> +         base + size <= os_sinit->vtd_pmr_lo_size )

If you leverage what the comment says in the 2nd comparison, why not also
in the first (which means that could be dropped altogether)? If the start
is always zero, why does the field exist in the first place?

> +        return 1;
> +
> +    if ( check_high && os_sinit->vtd_pmr_hi_size != 0 )
> +    {
> +        if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
> +             os_sinit->vtd_pmr_hi_size )
> +            txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
> +        if ( base >= os_sinit->vtd_pmr_hi_base &&
> +             base + size <= os_sinit->vtd_pmr_hi_base +
> +                            os_sinit->vtd_pmr_hi_size )
> +            return 1;
> +    }
> +
> +    return 0;
> +}
> +
> +static inline void txt_verify_pmr_ranges(
> +    const struct txt_os_mle_data *os_mle,
> +    const struct txt_os_sinit_data *os_sinit,
> +    const struct slr_entry_intel_info *info,
> +    uint32_t load_base_addr,
> +    uint32_t tgt_base_addr,
> +    uint32_t xen_size)
> +{
> +    int check_high_pmr = 0;

Just like Ross mentioned for the return value of is_in_pmr(), this one also
looks as if it wanted to be bool.

> +    /* Verify the value of the low PMR base. It should always be 0. */
> +    if ( os_sinit->vtd_pmr_lo_base != 0 )
> +        txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);
> +
> +    /*
> +     * Low PMR size should not be 0 on current platforms. There is an ongoing
> +     * transition to TPR-based DMA protection instead of PMR-based; this is not
> +     * yet supported by the code.
> +     */
> +    if ( os_sinit->vtd_pmr_lo_size == 0 )
> +        txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE);
> +
> +    /* Check if regions overlap. Treat regions with no hole between as error. */
> +    if ( os_sinit->vtd_pmr_hi_size != 0 &&
> +         os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size )
> +        txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);
> +
> +    /* All regions accessed by 32b code must be below 4G. */
> +    if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
> +         0x100000000ULL )
> +        check_high_pmr = 1;

The addition overflowing is only checked later, and that check may be bypassed
based on the result here. Is that not a possible problem?

> +    /*
> +     * ACM checks that TXT heap and MLE memory is protected against DMA. We have
> +     * to check if MBI and whole Xen memory is protected. The latter is done in
> +     * case bootloader failed to set whole image as MLE and to make sure that
> +     * both pre- and post-relocation code is protected.
> +     */
> +
> +    /* Check if all of Xen before relocation is covered by PMR. */
> +    if ( !is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr) )
> +        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
> +
> +    /* Check if all of Xen after relocation is covered by PMR. */
> +    if ( load_base_addr != tgt_base_addr &&
> +         !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr) )
> +        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
> +
> +    /*
> +     * If present, check that MBI is covered by PMR. MBI starts with 'uint32_t
> +     * total_size'.
> +     */
> +    if ( info->boot_params_base != 0 &&
> +         !is_in_pmr(os_sinit, info->boot_params_base,
> +                    *(uint32_t *)(uintptr_t)info->boot_params_base,

What is this "MBI" which "starts with 'uint32_t total_size'"? Do you perhaps
mean multiboot2_fixed_t? If you really can't use a proper structure ref here,
please at least mention whatever type that is in our code base, so the use
can be found by e.g. grep.

> +                    check_high_pmr) )
> +        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
> +
> +    /* Check if TPM event log (if present) is covered by PMR. */
> +    /*
> +     * FIXME: currently commented out as GRUB allocates it in a hole between
> +     * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other
> +     * easy-to-use DMA protection mechanisms that would allow to protect that
> +     * part of memory. TPR (TXT DMA Protection Range) gives 1MB resolution, but
> +     * it still wouldn't be enough.
> +     *
> +     * One possible solution would be for GRUB to allocate log at lower address,
> +     * but this would further increase memory space fragmentation. Another
> +     * option is to align PMR up instead of down, making PMR cover part of
> +     * reserved region, but it is unclear what the consequences may be.
> +     *
> +     * In tboot this issue was resolved by reserving leftover chunks of memory
> +     * in e820 and/or UEFI memory map. This is also a valid solution, but would
> +     * require more changes to GRUB than the ones listed above, as event log is
> +     * allocated much earlier than PMRs.
> +     */
> +    /*
> +    if ( os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
> +         !is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
> +                    check_high_pmr) )
> +        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
> +    */
> +}
> +
>  #endif /* __ASSEMBLY__ */

These inline functions are pretty large. Why do they need to be inline ones?

Jan
Re: [PATCH v3 05/22] x86/boot/slaunch-early: early TXT checks and boot data retrieval
Posted by Sergii Dmytruk 1 month, 1 week ago
On Tue, Jul 08, 2025 at 06:00:13PM +0200, Jan Beulich wrote:
> > +static inline int is_in_pmr(const struct txt_os_sinit_data *os_sinit,
> > +                            uint64_t base, uint32_t size, int check_high)
> > +{
> > +    /* Check for size overflow. */
> > +    if ( base + size < base )
> > +        txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
> > +
> > +    /* Low range always starts at 0, so its size is also end address. */
> > +    if ( base >= os_sinit->vtd_pmr_lo_base &&
> > +         base + size <= os_sinit->vtd_pmr_lo_size )
>
> If you leverage what the comment says in the 2nd comparsion, why not also
> in the first (which means that could be dropped altogether)? If the start
> is always zero, why does the field exist in the first place?

The range always starts at 0 here because txt_verify_pmr_ranges() reboots
earlier if this assumption doesn't hold.  The field can have a non-zero
value, but I guess the more memory is protected, the better.  I'll remove
the first part of the check as useless and clarify the comment.
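
Roughly this (untested sketch of the planned change; the final comment
wording may differ):

    /*
     * The low PMR covers [0, vtd_pmr_lo_size) and vtd_pmr_lo_base was
     * already verified to be 0, so only the end address needs checking.
     */
    if ( base + size <= os_sinit->vtd_pmr_lo_size )
        return true;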

> > +static inline void txt_verify_pmr_ranges(
> > +    const struct txt_os_mle_data *os_mle,
> > +    const struct txt_os_sinit_data *os_sinit,
> > +    const struct slr_entry_intel_info *info,
> > +    uint32_t load_base_addr,
> > +    uint32_t tgt_base_addr,
> > +    uint32_t xen_size)
> > +{
> > +    int check_high_pmr = 0;
>
> Just like Ross mentioned for the return value of is_in_pmr(), this one also
> looks as if it wanted to be bool.

Will update.

> > +    /* All regions accessed by 32b code must be below 4G. */
> > +    if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
> > +         0x100000000ULL )
> > +        check_high_pmr = 1;
>
> The addition overflowing is only checked later, and that check may be bypassed
> based on the result here. Is that not a possible problem?

Thanks, that looks like a problem to me.  Moved the overflow check from
is_in_pmr() before this check.
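
I.e. something along these lines in txt_verify_pmr_ranges() (just a
sketch, assuming the switch to bool mentioned above):

    /* Check for overflow before deciding whether the high PMR is usable. */
    if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
         os_sinit->vtd_pmr_hi_size )
        txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);

    /* All regions accessed by 32b code must be below 4G. */
    if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
         0x100000000ULL )
        check_high_pmr = true;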

> > +    /*
> > +     * If present, check that MBI is covered by PMR. MBI starts with 'uint32_t
> > +     * total_size'.
> > +     */
> > +    if ( info->boot_params_base != 0 &&
> > +         !is_in_pmr(os_sinit, info->boot_params_base,
> > +                    *(uint32_t *)(uintptr_t)info->boot_params_base,
>
> What is this "MBI" which "starts with 'uint32_t total_size'"? Do you perhaps
> mean multiboot2_fixed_t? If you really can't use a proper structure ref here,
> please at least mention whatever type that is in our code base, so the use
> can be found by e.g. grep.

Yes, it's MultiBoot2 Info.  Nothing precludes using multiboot2_fixed_t,
will update.
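
Something like this (sketch, assuming the multiboot2_fixed_t definition
is reachable from this header):

    /* If present, check that the MBI (MultiBoot2 info) is covered by PMR. */
    if ( info->boot_params_base != 0 )
    {
        const multiboot2_fixed_t *mbi =
            (const multiboot2_fixed_t *)(uintptr_t)info->boot_params_base;

        if ( !is_in_pmr(os_sinit, info->boot_params_base, mbi->total_size,
                        check_high_pmr) )
            txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
    }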

> These inline functions are pretty large. Why do they need to be inline ones?
>
> Jan

The functions are run at entry points, one of which is in 32-bit early
code and another in 64-bit EFI.  Having this in the header is simpler
than compiling the code twice.  Despite having many lines, it's just a
sequence of checks, so it didn't seem like too much for a header.

Regards
Re: [PATCH v3 05/22] x86/boot/slaunch-early: early TXT checks and boot data retrieval
Posted by Jan Beulich 1 month, 1 week ago
On 23.09.2025 10:39, Sergii Dmytruk wrote:
> On Tue, Jul 08, 2025 at 06:00:13PM +0200, Jan Beulich wrote:
>> These inline functions are pretty large. Why do they need to be inline ones?
> 
> The functions are run at entry points, one of which is in 32-bit early
> code and another in 64-bit EFI.  Having this in the header is simpler
> than compiling the code twice.  Despite having many lines, it's just a
> sequence of checks, so it didn't seem like too much for a header.

Otoh especially the fact that this gets compiled both as 32-bit and as
64-bit can end up being a pitfall - one wouldn't necessarily expect this
when editing a header file.
A dual-built C file would be different in this regard, imo.

Jan
Re: [PATCH v3 05/22] x86/boot/slaunch-early: early TXT checks and boot data retrieval
Posted by ross.philipson@oracle.com 4 months, 4 weeks ago
On 5/30/25 6:17 AM, Sergii Dmytruk wrote:
> From: Krystian Hebel <krystian.hebel@3mdeb.com>
> 
> The tests validate that important parts of memory are protected against
> DMA attacks, including Xen and MBI. Modules can be tested later, when it
> is possible to report issues to a user before invoking TXT reset.
> 
> TPM event log validation is temporarily disabled due to an issue with
> its allocation by bootloader (GRUB) which will need to be modified to
> address this. Ultimately event log will also have to be validated early
> as it is used immediately after these tests to hold MBI measurements.
> See larger comment in txt_verify_pmr_ranges().
> 
> Signed-off-by: Krystian Hebel <krystian.hebel@3mdeb.com>
> Signed-off-by: Sergii Dmytruk <sergii.dmytruk@3mdeb.com>
> ---
>   xen/arch/x86/boot/slaunch-early.c    |   6 ++
>   xen/arch/x86/include/asm/intel-txt.h | 112 +++++++++++++++++++++++++++
>   2 files changed, 118 insertions(+)
> 
> diff --git a/xen/arch/x86/boot/slaunch-early.c b/xen/arch/x86/boot/slaunch-early.c
> index c9d364bcd5..662144e42f 100644
> --- a/xen/arch/x86/boot/slaunch-early.c
> +++ b/xen/arch/x86/boot/slaunch-early.c
> @@ -22,10 +22,13 @@ void asmlinkage slaunch_early_init(uint32_t load_base_addr,
>       void *txt_heap;
>       const struct txt_os_mle_data *os_mle;
>       const struct slr_table *slrt;
> +    const struct txt_os_sinit_data *os_sinit;
>       const struct slr_entry_intel_info *intel_info;
> +    uint32_t size = tgt_end_addr - tgt_base_addr;
>   
>       txt_heap = txt_init();
>       os_mle = txt_os_mle_data_start(txt_heap);
> +    os_sinit = txt_os_sinit_data_start(txt_heap);
>   
>       result->slrt_pa = os_mle->slrt;
>       result->mbi_pa = 0;
> @@ -38,4 +41,7 @@ void asmlinkage slaunch_early_init(uint32_t load_base_addr,
>           return;
>   
>       result->mbi_pa = intel_info->boot_params_base;
> +
> +    txt_verify_pmr_ranges(os_mle, os_sinit, intel_info,
> +                          load_base_addr, tgt_base_addr, size);
>   }
> diff --git a/xen/arch/x86/include/asm/intel-txt.h b/xen/arch/x86/include/asm/intel-txt.h
> index 7658457e9d..122b4293ea 100644
> --- a/xen/arch/x86/include/asm/intel-txt.h
> +++ b/xen/arch/x86/include/asm/intel-txt.h
> @@ -93,6 +93,8 @@
>   
>   #ifndef __ASSEMBLY__
>   
> +#include <xen/slr-table.h>
> +
>   /* Need to differentiate between pre- and post paging enabled. */
>   #ifdef __EARLY_SLAUNCH__
>   #include <xen/macros.h>
> @@ -308,6 +310,116 @@ static inline void *txt_init(void)
>       return txt_heap;
>   }
>   
> +static inline int is_in_pmr(const struct txt_os_sinit_data *os_sinit,
> +                            uint64_t base, uint32_t size, int check_high)

bool return val?

> +{
> +    /* Check for size overflow. */
> +    if ( base + size < base )
> +        txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
> +
> +    /* Low range always starts at 0, so its size is also end address. */
> +    if ( base >= os_sinit->vtd_pmr_lo_base &&
> +         base + size <= os_sinit->vtd_pmr_lo_size )
> +        return 1;
> +
> +    if ( check_high && os_sinit->vtd_pmr_hi_size != 0 )
> +    {
> +        if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
> +             os_sinit->vtd_pmr_hi_size )
> +            txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
> +        if ( base >= os_sinit->vtd_pmr_hi_base &&
> +             base + size <= os_sinit->vtd_pmr_hi_base +
> +                            os_sinit->vtd_pmr_hi_size )
> +            return 1;
> +    }
> +
> +    return 0;
> +}
> +
> +static inline void txt_verify_pmr_ranges(
> +    const struct txt_os_mle_data *os_mle,
> +    const struct txt_os_sinit_data *os_sinit,
> +    const struct slr_entry_intel_info *info,
> +    uint32_t load_base_addr,
> +    uint32_t tgt_base_addr,
> +    uint32_t xen_size)
> +{
> +    int check_high_pmr = 0;
> +
> +    /* Verify the value of the low PMR base. It should always be 0. */
> +    if ( os_sinit->vtd_pmr_lo_base != 0 )
> +        txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);
> +
> +    /*
> +     * Low PMR size should not be 0 on current platforms. There is an ongoing
> +     * transition to TPR-based DMA protection instead of PMR-based; this is not
> +     * yet supported by the code.
> +     */
> +    if ( os_sinit->vtd_pmr_lo_size == 0 )
> +        txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE);
> +
> +    /* Check if regions overlap. Treat regions with no hole between as error. */
> +    if ( os_sinit->vtd_pmr_hi_size != 0 &&
> +         os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size )
> +        txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);
> +
> +    /* All regions accessed by 32b code must be below 4G. */
> +    if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
> +         0x100000000ULL )
> +        check_high_pmr = 1;
> +
> +    /*
> +     * ACM checks that TXT heap and MLE memory is protected against DMA. We have
> +     * to check if MBI and whole Xen memory is protected. The latter is done in
> +     * case bootloader failed to set whole image as MLE and to make sure that
> +     * both pre- and post-relocation code is protected.
> +     */
> +

Is this the full list of entities that should be covered by PMRs? I am 
thinking of entries in the SLR policy that should be covered. E.g. with 
Linux we ensure setup_data entry blobs are covered before measuring.

> +    /* Check if all of Xen before relocation is covered by PMR. */
> +    if ( !is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr) )
> +        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
> +
> +    /* Check if all of Xen after relocation is covered by PMR. */
> +    if ( load_base_addr != tgt_base_addr &&
> +         !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr) )
> +        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
> +
> +    /*
> +     * If present, check that MBI is covered by PMR. MBI starts with 'uint32_t
> +     * total_size'.
> +     */
> +    if ( info->boot_params_base != 0 &&
> +         !is_in_pmr(os_sinit, info->boot_params_base,
> +                    *(uint32_t *)(uintptr_t)info->boot_params_base,
> +                    check_high_pmr) )
> +        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
> +
> +    /* Check if TPM event log (if present) is covered by PMR. */
> +    /*
> +     * FIXME: currently commented out as GRUB allocates it in a hole between
> +     * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other
> +     * easy-to-use DMA protection mechanisms that would allow to protect that
> +     * part of memory. TPR (TXT DMA Protection Range) gives 1MB resolution, but
> +     * it still wouldn't be enough.
> +     *
> +     * One possible solution would be for GRUB to allocate log at lower address,
> +     * but this would further increase memory space fragmentation. Another
> +     * option is to align PMR up instead of down, making PMR cover part of
> +     * reserved region, but it is unclear what the consequences may be.

The consequences depend on the firmware. The failure mode we used to see
was that, on some systems, if the PMR covered certain areas marked as
reserved, the system would hang at boot. In this particular case, firmware
was trying to use an xHCI controller to reach the attached keyboard at
boot time. When DMA to the host controller was blocked, the firmware was
unhappy. We have not seen this issue in a while, and the current logic in
the prologue code just sets the upper bound to the highest RAM area below
4GB, which is optimal.

The most correct solution for PMRs is to read the VTd RMRR structures. 
These can tell you what reserved regions should not be blocked like this 
(if any). This will give more control over the best configuration for 
the PMRs and what to avoid. This needs to be done in the prologue code 
and validated in the DLME.
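
Roughly what I mean (a sketch based on the VT-d spec layout; the struct,
field and function names below are made up, not Xen's existing dmar.c
types, and fixed-width/bool types are assumed to come from xen/types.h):

    struct dmar_entry {       /* common remapping structure header */
        uint16_t type;        /* 1 == RMRR in the VT-d spec */
        uint16_t length;
    };

    struct dmar_rmrr {        /* Reserved Memory Region Reporting */
        struct dmar_entry hdr;
        uint16_t reserved;
        uint16_t segment;
        uint64_t base;        /* first byte of the reserved region */
        uint64_t limit;       /* last byte of the reserved region */
        /* device scope entries follow */
    };

    /*
     * Does [0, pmr_lo_end) overlap any RMRR?  If so, DMA that firmware or
     * devices rely on would be blocked by the low PMR.  Remapping
     * structures start after the 48-byte fixed DMAR header.
     */
    static bool pmr_overlaps_rmrr(const uint8_t *dmar, uint32_t dmar_length,
                                  uint64_t pmr_lo_end)
    {
        uint32_t off;

        for ( off = 48; off + sizeof(struct dmar_entry) <= dmar_length; )
        {
            const struct dmar_entry *entry =
                (const struct dmar_entry *)(dmar + off);

            if ( entry->length < sizeof(*entry) )
                break;    /* malformed table, stop walking */

            if ( entry->type == 1 /* RMRR */ )
            {
                const struct dmar_rmrr *rmrr =
                    (const struct dmar_rmrr *)entry;

                /* DMA to [base, limit] must remain possible. */
                if ( rmrr->base < pmr_lo_end )
                    return true;
            }

            off += entry->length;
        }

        return false;
    }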

And yea, TPR support too where available.

Thanks
Ross

> +     *
> +     * In tboot this issue was resolved by reserving leftover chunks of memory
> +     * in e820 and/or UEFI memory map. This is also a valid solution, but would
> +     * require more changes to GRUB than the ones listed above, as event log is
> +     * allocated much earlier than PMRs.
> +     */
> +    /*
> +    if ( os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
> +         !is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
> +                    check_high_pmr) )
> +        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
> +    */
> +}
> +
>   #endif /* __ASSEMBLY__ */
>   
>   #endif /* X86_INTEL_TXT_H */
Re: [PATCH v3 05/22] x86/boot/slaunch-early: early TXT checks and boot data retrieval
Posted by Sergii Dmytruk 4 months, 3 weeks ago
On Tue, Jun 03, 2025 at 10:03:31AM -0700, ross.philipson@oracle.com wrote:
> > From: Krystian Hebel <krystian.hebel@3mdeb.com>
> > 
> > The tests validate that important parts of memory are protected against
> > DMA attacks, including Xen and MBI. Modules can be tested later, when it
> > is possible to report issues to a user before invoking TXT reset.
> > 
> > TPM event log validation is temporarily disabled due to an issue with
> > its allocation by bootloader (GRUB) which will need to be modified to
> > address this. Ultimately event log will also have to be validated early
> > as it is used immediately after these tests to hold MBI measurements.
> > See larger comment in txt_verify_pmr_ranges().
> > 
> > Signed-off-by: Krystian Hebel <krystian.hebel@3mdeb.com>
> > Signed-off-by: Sergii Dmytruk <sergii.dmytruk@3mdeb.com>
> > ---

> > +static inline int is_in_pmr(const struct txt_os_sinit_data *os_sinit,
> > +                            uint64_t base, uint32_t size, int check_high)
>
> bool return val?

Will change.

> > +    /*
> > +     * ACM checks that TXT heap and MLE memory is protected against DMA. We have
> > +     * to check if MBI and whole Xen memory is protected. The latter is done in
> > +     * case bootloader failed to set whole image as MLE and to make sure that
> > +     * both pre- and post-relocation code is protected.
> > +     */
> > +
>
> Is this the full list of entities that should be covered by PMRs? I am
> thinking of entries in the SLR policy that should be covered. E.g. with
> Linux we ensure setup_data entry blobs are covered before measuring.

Xen's equivalent of setup_data is MBI which is checked below. Command-lines
of Xen and modules are part of MBI as well.

> > +    /* Check if all of Xen before relocation is covered by PMR. */
> > +    if ( !is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr) )
> > +        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
> > +
> > +    /* Check if all of Xen after relocation is covered by PMR. */
> > +    if ( load_base_addr != tgt_base_addr &&
> > +         !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr) )
> > +        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
> > +
> > +    /*
> > +     * If present, check that MBI is covered by PMR. MBI starts with 'uint32_t
> > +     * total_size'.
> > +     */
> > +    if ( info->boot_params_base != 0 &&
> > +         !is_in_pmr(os_sinit, info->boot_params_base,
> > +                    *(uint32_t *)(uintptr_t)info->boot_params_base,
> > +                    check_high_pmr) )
> > +        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);

> > +    /* Check if TPM event log (if present) is covered by PMR. */
> > +    /*
> > +     * FIXME: currently commented out as GRUB allocates it in a hole between
> > +     * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other
> > +     * easy-to-use DMA protection mechanisms that would allow to protect that
> > +     * part of memory. TPR (TXT DMA Protection Range) gives 1MB resolution, but
> > +     * it still wouldn't be enough.
> > +     *
> > +     * One possible solution would be for GRUB to allocate log at lower address,
> > +     * but this would further increase memory space fragmentation. Another
> > +     * option is to align PMR up instead of down, making PMR cover part of
> > +     * reserved region, but it is unclear what the consequences may be.
>
> The consequences depend on the firmware. The failure mode we used to see was
> on some systems if the PMR covered certain areas marked as reserved, the
> system will hang at boot. In this particular case, firmware was trying to
> use an xHCI controller to get to the kb attached to use it at boot time.
> When DMA to the host controller was blocked, the firmware was unhappy. We
> have not seen this issue in a while and the current logic in the prologue
> code just sets the upper bound to the highest RAM area below 4Gb which is
> optimal.
>
> The most correct solution for PMRs is to read the VTd RMRR structures. These
> can tell you what reserved regions should not be blocked like this (if any).
> This will give more control over the best configuration for the PMRs and
> what to avoid. This needs to be done in the prologue code and validated in
> the DLME.
>
> And yea, TPR support too where available.
>
> Thanks
> Ross

I guess this needs to be re-checked to see whether it's still an issue.

Regards