Currently, we rely on the logic in DXE IPL to create new page tables
from scratch when executing in X64 mode, which means that we run with
the initial page tables all throughout PEI, and never enable protections
such as the CPU stack guard, even though the logic is already in place
for IA32.
So let's enable the existing logic for X64 as well. This will permit us
to apply stricter memory permissions to code and data allocations, as
well as the stack, when executing in PEI. It also makes the DxeIpl logic
redundant, and should allow us to make the PcdDxeIplBuildPageTables
feature PCD limited to IA32 DxeIpl loading the x64 DXE core.
When running in long mode, use the same logic that DxeIpl uses to
determine the size of the address space, whether or not to use 1 GB leaf
entries and whether or not to use 5 level paging. Note that in long
mode, PEI is entered with paging enabled, and given that switching
between 4 and 5 levels of paging is not currently supported without
dropping out of 64-bit mode temporarily, all we can do is carry on
without changing the number of levels.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
UefiCpuPkg/CpuMpPei/CpuMpPei.inf | 2 +
UefiCpuPkg/CpuMpPei/CpuPaging.c | 163 ++++++++++++++++----
2 files changed, 139 insertions(+), 26 deletions(-)
diff --git a/UefiCpuPkg/CpuMpPei/CpuMpPei.inf b/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
index 865be5627e8551ee..77eecaa0ea035b38 100644
--- a/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
+++ b/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
@@ -65,6 +65,8 @@ [Ppis]
[Pcd]
gEfiMdeModulePkgTokenSpaceGuid.PcdPteMemoryEncryptionAddressOrMask ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdCpuStackGuard ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdUse1GPageTable ## SOMETIMES_CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdUse5LevelPageTable ## SOMETIMES_CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuStackSwitchExceptionList ## SOMETIMES_CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuKnownGoodStackSize ## SOMETIMES_CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuApStackSize ## SOMETIMES_CONSUMES
diff --git a/UefiCpuPkg/CpuMpPei/CpuPaging.c b/UefiCpuPkg/CpuMpPei/CpuPaging.c
index 175e47ccd737a0c1..2a901e44253434c2 100644
--- a/UefiCpuPkg/CpuMpPei/CpuPaging.c
+++ b/UefiCpuPkg/CpuMpPei/CpuPaging.c
@@ -302,7 +302,7 @@ ConvertMemoryPageAttributes (
return RETURN_INVALID_PARAMETER;
}
- MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
+ MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_ADDRESS;
if ((BaseAddress > MaximumAddress) ||
(Length > MaximumAddress) ||
(BaseAddress > MaximumAddress - (Length - 1)))
@@ -350,16 +350,91 @@ ConvertMemoryPageAttributes (
return RETURN_SUCCESS;
}
+/*
+ * Get physical address bits supported.
+ *
+ * @return The number of supported physical address bits.
+ */
+STATIC
+UINT8
+GetPhysicalAddressBits (
+ VOID
+ )
+{
+ EFI_HOB_CPU *Hob;
+ UINT32 RegEax;
+
+ Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
+ if (Hob != NULL) {
+ return Hob->SizeOfMemorySpace;
+ }
+
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000008) {
+ AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
+ return (UINT8)RegEax;
+ }
+
+ return 36;
+}
+
+/*
+ * Determine and return the paging mode to be used in long mode, based on PCD
+ * configuration and CPU support for 1G leaf descriptors and 5 level paging.
+ *
+ * @return The paging mode
+ */
+STATIC
+PAGING_MODE
+GetPagingMode (
+ VOID
+ )
+{
+ BOOLEAN Page5LevelSupport;
+ BOOLEAN Page1GSupport;
+ UINT32 RegEax;
+ UINT32 RegEdx;
+ IA32_CR4 Cr4;
+
+ Cr4.UintN = AsmReadCr4 ();
+ Page5LevelSupport = (Cr4.Bits.LA57 != 0);
+ ASSERT (PcdGetBool (PcdUse5LevelPageTable) == Page5LevelSupport);
+
+ Page1GSupport = FALSE;
+ if (PcdGetBool (PcdUse1GPageTable)) {
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000001) {
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & BIT26) != 0) {
+ Page1GSupport = TRUE;
+ }
+ }
+ }
+
+ if (Page5LevelSupport && Page1GSupport) {
+ return Paging5Level1GB;
+ } else if (Page5LevelSupport) {
+ return Paging5Level;
+ } else if (Page1GSupport) {
+ return Paging4Level1GB;
+ } else {
+ return Paging4Level;
+ }
+}
+
/**
- Enable PAE Page Table.
+ Enable Page Table.
- @retval EFI_SUCCESS The PAE Page Table was enabled successfully.
- @retval EFI_OUT_OF_RESOURCES The PAE Page Table could not be enabled due to lack of available memory.
+ @param LongMode Whether the execution mode is 64 bit
+
+ @retval EFI_SUCCESS The Page Table was enabled successfully.
+ @retval EFI_OUT_OF_RESOURCES The Page Table could not be enabled due to lack of available memory.
**/
+STATIC
EFI_STATUS
-EnablePaePageTable (
- VOID
+EnablePageTable (
+ IN BOOLEAN LongMode
)
{
EFI_STATUS Status;
@@ -369,6 +444,8 @@ EnablePaePageTable (
UINTN BufferSize;
IA32_MAP_ATTRIBUTE MapAttribute;
IA32_MAP_ATTRIBUTE MapMask;
+ PAGING_MODE PagingMode;
+ UINT64 Length;
PageTable = 0;
Buffer = NULL;
@@ -378,10 +455,28 @@ EnablePaePageTable (
MapAttribute.Bits.Present = 1;
MapAttribute.Bits.ReadWrite = 1;
- //
- // 1:1 map 4GB in 32bit mode
- //
- Status = PageTableMap (&PageTable, PagingPae, 0, &BufferSize, 0, SIZE_4GB, &MapAttribute, &MapMask, NULL);
+ if (!LongMode) {
+ //
+ // 1:1 map 4GB in 32bit mode
+ //
+ PagingMode = PagingPae;
+ Length = SIZE_4GB;
+ } else {
+ PagingMode = GetPagingMode ();
+ Length = LShiftU64 (1, GetPhysicalAddressBits ());
+ }
+
+ Status = PageTableMap (
+ &PageTable,
+ PagingMode,
+ 0,
+ &BufferSize,
+ 0,
+ Length,
+ &MapAttribute,
+ &MapMask,
+ NULL
+ );
ASSERT (Status == EFI_BUFFER_TOO_SMALL);
if (Status != EFI_BUFFER_TOO_SMALL) {
return Status;
@@ -398,12 +493,23 @@ EnablePaePageTable (
DEBUG ((
DEBUG_INFO,
- "EnablePaePageTable: Created PageTable = 0x%x, BufferSize = %x\n",
+ "%a: Created PageTable = 0x%x, BufferSize = %x\n",
+ __func__,
PageTable,
BufferSize
));
- Status = PageTableMap (&PageTable, PagingPae, Buffer, &BufferSize, 0, SIZE_4GB, &MapAttribute, &MapMask, NULL);
+ Status = PageTableMap (
+ &PageTable,
+ PagingMode,
+ Buffer,
+ &BufferSize,
+ 0,
+ Length,
+ &MapAttribute,
+ &MapMask,
+ NULL
+ );
ASSERT_EFI_ERROR (Status);
if (EFI_ERROR (Status) || (PageTable == 0)) {
return EFI_OUT_OF_RESOURCES;
@@ -414,15 +520,17 @@ EnablePaePageTable (
//
AsmWriteCr3 (PageTable);
- //
- // Enable CR4.PAE
- //
- AsmWriteCr4 (AsmReadCr4 () | BIT5);
+ if (!LongMode) {
+ //
+ // Enable CR4.PAE
+ //
+ AsmWriteCr4 (AsmReadCr4 () | BIT5);
- //
- // Enable CR0.PG
- //
- AsmWriteCr0 (AsmReadCr0 () | BIT31);
+ //
+ // Enable CR0.PG
+ //
+ AsmWriteCr0 (AsmReadCr0 () | BIT31);
+ }
return Status;
}
@@ -557,6 +665,9 @@ MemoryDiscoveredPpiNotifyCallback (
EDKII_MIGRATED_FV_INFO *MigratedFvInfo;
EFI_PEI_HOB_POINTERS Hob;
IA32_CR0 Cr0;
+ BOOLEAN LongMode;
+
+ LongMode = (sizeof (UINTN) == sizeof (UINT64));
//
// Paging must be setup first. Otherwise the exception TSS setup during MP
@@ -565,7 +676,7 @@ MemoryDiscoveredPpiNotifyCallback (
//
InitStackGuard = FALSE;
Hob.Raw = NULL;
- if (IsIa32PaeSupported ()) {
+ if (LongMode || IsIa32PaeSupported ()) {
Hob.Raw = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
InitStackGuard = PcdGetBool (PcdCpuStackGuard);
}
@@ -575,12 +686,12 @@ MemoryDiscoveredPpiNotifyCallback (
// is to enable paging if it is not enabled (only in 32bit mode).
//
Cr0.UintN = AsmReadCr0 ();
- if ((Cr0.Bits.PG == 0) && (InitStackGuard || (Hob.Raw != NULL))) {
- ASSERT (sizeof (UINTN) == sizeof (UINT32));
-
- Status = EnablePaePageTable ();
+ if (LongMode ||
+ ((Cr0.Bits.PG == 0) && (InitStackGuard || (Hob.Raw != NULL))))
+ {
+ Status = EnablePageTable (LongMode);
if (EFI_ERROR (Status)) {
- DEBUG ((DEBUG_ERROR, "MemoryDiscoveredPpiNotifyCallback: Failed to enable PAE page table: %r.\n", Status));
+ DEBUG ((DEBUG_ERROR, "%a: Failed to enable page table: %r.\n", __func__, Status));
CpuDeadLoop ();
}
}
--
2.39.2
-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#105943): https://edk2.groups.io/g/devel/message/105943
Mute This Topic: https://groups.io/mt/99411875/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
Ard,
https://github.com/tianocore/edk2/blob/8314a85893f5b75baa0031a5138028188a626243/UefiCpuPkg/SecCore/SecMain.c#L637 is changed by @Wu, Jiaxin to create page table in permanent memory.
(I didn't check your patch in detail. But it sounds like you try to do the same thing that above code has already done.)
Thanks,
Ray
> -----Original Message-----
> From: Ard Biesheuvel <ardb@kernel.org>
> Sent: Friday, June 9, 2023 1:23 AM
> To: devel@edk2.groups.io
> Cc: Ard Biesheuvel <ardb@kernel.org>; Ni, Ray <ray.ni@intel.com>; Yao,
> Jiewen <jiewen.yao@intel.com>; Gerd Hoffmann <kraxel@redhat.com>; Taylor
> Beebe <t@taylorbeebe.com>; Oliver Smith-Denny <osd@smith-denny.com>;
> Bi, Dandan <dandan.bi@intel.com>; Tan, Dun <dun.tan@intel.com>; Gao,
> Liming <gaoliming@byosoft.com.cn>; Kinney, Michael D
> <michael.d.kinney@intel.com>; Michael Kubacki
> <mikuback@linux.microsoft.com>; Dong, Eric <eric.dong@intel.com>; Kumar,
> Rahul R <rahul.r.kumar@intel.com>; Kun Qin <kuqin12@gmail.com>
> Subject: [PATCH 2/2] UefiCpuPkg/CpuMpPei X64: Reallocate page tables in
> permanent DRAM
>
> Currently, we rely on the logic in DXE IPL to create new page tables
> from scratch when executing in X64 mode, which means that we run with
> the initial page tables all throughout PEI, and never enable protections
> such as the CPU stack guard, even though the logic is already in place
> for IA32.
>
> So let's enable the existing logic for X64 as well. This will permit us
> to apply stricter memory permissions to code and data allocations, as
> well as the stack, when executing in PEI. It also makes the DxeIpl logic
> redundant, and should allow us to make the PcdDxeIplBuildPageTables
> feature PCD limited to IA32 DxeIpl loading the x64 DXE core.
>
> When running in long mode, use the same logic that DxeIpl uses to
> determine the size of the address space, whether or not to use 1 GB leaf
> entries and whether or not to use 5 level paging. Note that in long
> mode, PEI is entered with paging enabled, and given that switching
> between 4 and 5 levels of paging is not currently supported without
> dropping out of 64-bit mode temporarily, all we can do is carry on
> without changing the number of levels.
>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> UefiCpuPkg/CpuMpPei/CpuMpPei.inf | 2 +
> UefiCpuPkg/CpuMpPei/CpuPaging.c | 163 ++++++++++++++++----
> 2 files changed, 139 insertions(+), 26 deletions(-)
>
> diff --git a/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
> b/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
> index 865be5627e8551ee..77eecaa0ea035b38 100644
> --- a/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
> +++ b/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
> @@ -65,6 +65,8 @@ [Ppis]
> [Pcd]
>
>
> gEfiMdeModulePkgTokenSpaceGuid.PcdPteMemoryEncryptionAddressOrMas
> k ## CONSUMES
>
> gEfiMdeModulePkgTokenSpaceGuid.PcdCpuStackGuard ##
> CONSUMES
>
> + gEfiMdeModulePkgTokenSpaceGuid.PcdUse1GPageTable ##
> SOMETIMES_CONSUMES
>
> + gEfiMdeModulePkgTokenSpaceGuid.PcdUse5LevelPageTable ##
> SOMETIMES_CONSUMES
>
> gUefiCpuPkgTokenSpaceGuid.PcdCpuStackSwitchExceptionList ##
> SOMETIMES_CONSUMES
>
> gUefiCpuPkgTokenSpaceGuid.PcdCpuKnownGoodStackSize ##
> SOMETIMES_CONSUMES
>
> gUefiCpuPkgTokenSpaceGuid.PcdCpuApStackSize ##
> SOMETIMES_CONSUMES
>
> diff --git a/UefiCpuPkg/CpuMpPei/CpuPaging.c
> b/UefiCpuPkg/CpuMpPei/CpuPaging.c
> index 175e47ccd737a0c1..2a901e44253434c2 100644
> --- a/UefiCpuPkg/CpuMpPei/CpuPaging.c
> +++ b/UefiCpuPkg/CpuMpPei/CpuPaging.c
> @@ -302,7 +302,7 @@ ConvertMemoryPageAttributes (
> return RETURN_INVALID_PARAMETER;
>
> }
>
>
>
> - MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
>
> + MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_ADDRESS;
>
> if ((BaseAddress > MaximumAddress) ||
>
> (Length > MaximumAddress) ||
>
> (BaseAddress > MaximumAddress - (Length - 1)))
>
> @@ -350,16 +350,91 @@ ConvertMemoryPageAttributes (
> return RETURN_SUCCESS;
>
> }
>
>
>
> +/*
>
> + * Get physical address bits supported.
>
> + *
>
> + * @return The number of supported physical address bits.
>
> + */
>
> +STATIC
>
> +UINT8
>
> +GetPhysicalAddressBits (
>
> + VOID
>
> + )
>
> +{
>
> + EFI_HOB_CPU *Hob;
>
> + UINT32 RegEax;
>
> +
>
> + Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
>
> + if (Hob != NULL) {
>
> + return Hob->SizeOfMemorySpace;
>
> + }
>
> +
>
> + AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
>
> + if (RegEax >= 0x80000008) {
>
> + AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
>
> + return (UINT8)RegEax;
>
> + }
>
> +
>
> + return 36;
>
> +}
>
> +
>
> +/*
>
> + * Determine and return the paging mode to be used in long mode, based on
> PCD
>
> + * configuration and CPU support for 1G leaf descriptors and 5 level paging.
>
> + *
>
> + * @return The paging mode
>
> + */
>
> +STATIC
>
> +PAGING_MODE
>
> +GetPagingMode (
>
> + VOID
>
> + )
>
> +{
>
> + BOOLEAN Page5LevelSupport;
>
> + BOOLEAN Page1GSupport;
>
> + UINT32 RegEax;
>
> + UINT32 RegEdx;
>
> + IA32_CR4 Cr4;
>
> +
>
> + Cr4.UintN = AsmReadCr4 ();
>
> + Page5LevelSupport = (Cr4.Bits.LA57 != 0);
>
> + ASSERT (PcdGetBool (PcdUse5LevelPageTable) == Page5LevelSupport);
>
> +
>
> + Page1GSupport = FALSE;
>
> + if (PcdGetBool (PcdUse1GPageTable)) {
>
> + AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
>
> + if (RegEax >= 0x80000001) {
>
> + AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
>
> + if ((RegEdx & BIT26) != 0) {
>
> + Page1GSupport = TRUE;
>
> + }
>
> + }
>
> + }
>
> +
>
> + if (Page5LevelSupport && Page1GSupport) {
>
> + return Paging5Level1GB;
>
> + } else if (Page5LevelSupport) {
>
> + return Paging5Level;
>
> + } else if (Page1GSupport) {
>
> + return Paging4Level1GB;
>
> + } else {
>
> + return Paging4Level;
>
> + }
>
> +}
>
> +
>
> /**
>
> - Enable PAE Page Table.
>
> + Enable Page Table.
>
>
>
> - @retval EFI_SUCCESS The PAE Page Table was enabled successfully.
>
> - @retval EFI_OUT_OF_RESOURCES The PAE Page Table could not be enabled
> due to lack of available memory.
>
> + @param LongMode Whether the execution mode is 64 bit
>
> +
>
> + @retval EFI_SUCCESS The Page Table was enabled successfully.
>
> + @retval EFI_OUT_OF_RESOURCES The Page Table could not be enabled
> due to lack of available memory.
>
>
>
> **/
>
> +STATIC
>
> EFI_STATUS
>
> -EnablePaePageTable (
>
> - VOID
>
> +EnablePageTable (
>
> + IN BOOLEAN LongMode
>
> )
>
> {
>
> EFI_STATUS Status;
>
> @@ -369,6 +444,8 @@ EnablePaePageTable (
> UINTN BufferSize;
>
> IA32_MAP_ATTRIBUTE MapAttribute;
>
> IA32_MAP_ATTRIBUTE MapMask;
>
> + PAGING_MODE PagingMode;
>
> + UINT64 Length;
>
>
>
> PageTable = 0;
>
> Buffer = NULL;
>
> @@ -378,10 +455,28 @@ EnablePaePageTable (
> MapAttribute.Bits.Present = 1;
>
> MapAttribute.Bits.ReadWrite = 1;
>
>
>
> - //
>
> - // 1:1 map 4GB in 32bit mode
>
> - //
>
> - Status = PageTableMap (&PageTable, PagingPae, 0, &BufferSize, 0, SIZE_4GB,
> &MapAttribute, &MapMask, NULL);
>
> + if (!LongMode) {
>
> + //
>
> + // 1:1 map 4GB in 32bit mode
>
> + //
>
> + PagingMode = PagingPae;
>
> + Length = SIZE_4GB;
>
> + } else {
>
> + PagingMode = GetPagingMode ();
>
> + Length = LShiftU64 (1, GetPhysicalAddressBits ());
>
> + }
>
> +
>
> + Status = PageTableMap (
>
> + &PageTable,
>
> + PagingMode,
>
> + 0,
>
> + &BufferSize,
>
> + 0,
>
> + Length,
>
> + &MapAttribute,
>
> + &MapMask,
>
> + NULL
>
> + );
>
> ASSERT (Status == EFI_BUFFER_TOO_SMALL);
>
> if (Status != EFI_BUFFER_TOO_SMALL) {
>
> return Status;
>
> @@ -398,12 +493,23 @@ EnablePaePageTable (
>
>
> DEBUG ((
>
> DEBUG_INFO,
>
> - "EnablePaePageTable: Created PageTable = 0x%x, BufferSize = %x\n",
>
> + "%a: Created PageTable = 0x%x, BufferSize = %x\n",
>
> + __func__,
>
> PageTable,
>
> BufferSize
>
> ));
>
>
>
> - Status = PageTableMap (&PageTable, PagingPae, Buffer, &BufferSize, 0,
> SIZE_4GB, &MapAttribute, &MapMask, NULL);
>
> + Status = PageTableMap (
>
> + &PageTable,
>
> + PagingMode,
>
> + Buffer,
>
> + &BufferSize,
>
> + 0,
>
> + Length,
>
> + &MapAttribute,
>
> + &MapMask,
>
> + NULL
>
> + );
>
> ASSERT_EFI_ERROR (Status);
>
> if (EFI_ERROR (Status) || (PageTable == 0)) {
>
> return EFI_OUT_OF_RESOURCES;
>
> @@ -414,15 +520,17 @@ EnablePaePageTable (
> //
>
> AsmWriteCr3 (PageTable);
>
>
>
> - //
>
> - // Enable CR4.PAE
>
> - //
>
> - AsmWriteCr4 (AsmReadCr4 () | BIT5);
>
> + if (!LongMode) {
>
> + //
>
> + // Enable CR4.PAE
>
> + //
>
> + AsmWriteCr4 (AsmReadCr4 () | BIT5);
>
>
>
> - //
>
> - // Enable CR0.PG
>
> - //
>
> - AsmWriteCr0 (AsmReadCr0 () | BIT31);
>
> + //
>
> + // Enable CR0.PG
>
> + //
>
> + AsmWriteCr0 (AsmReadCr0 () | BIT31);
>
> + }
>
>
>
> return Status;
>
> }
>
> @@ -557,6 +665,9 @@ MemoryDiscoveredPpiNotifyCallback (
> EDKII_MIGRATED_FV_INFO *MigratedFvInfo;
>
> EFI_PEI_HOB_POINTERS Hob;
>
> IA32_CR0 Cr0;
>
> + BOOLEAN LongMode;
>
> +
>
> + LongMode = (sizeof (UINTN) == sizeof (UINT64));
>
>
>
> //
>
> // Paging must be setup first. Otherwise the exception TSS setup during MP
>
> @@ -565,7 +676,7 @@ MemoryDiscoveredPpiNotifyCallback (
> //
>
> InitStackGuard = FALSE;
>
> Hob.Raw = NULL;
>
> - if (IsIa32PaeSupported ()) {
>
> + if (LongMode || IsIa32PaeSupported ()) {
>
> Hob.Raw = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
>
> InitStackGuard = PcdGetBool (PcdCpuStackGuard);
>
> }
>
> @@ -575,12 +686,12 @@ MemoryDiscoveredPpiNotifyCallback (
> // is to enable paging if it is not enabled (only in 32bit mode).
>
> //
>
> Cr0.UintN = AsmReadCr0 ();
>
> - if ((Cr0.Bits.PG == 0) && (InitStackGuard || (Hob.Raw != NULL))) {
>
> - ASSERT (sizeof (UINTN) == sizeof (UINT32));
>
> -
>
> - Status = EnablePaePageTable ();
>
> + if (LongMode ||
>
> + ((Cr0.Bits.PG == 0) && (InitStackGuard || (Hob.Raw != NULL))))
>
> + {
>
> + Status = EnablePageTable (LongMode);
>
> if (EFI_ERROR (Status)) {
>
> - DEBUG ((DEBUG_ERROR, "MemoryDiscoveredPpiNotifyCallback: Failed to
> enable PAE page table: %r.\n", Status));
>
> + DEBUG ((DEBUG_ERROR, "%a: Failed to enable page table: %r.\n",
> __func__, Status));
>
> CpuDeadLoop ();
>
> }
>
> }
>
> --
> 2.39.2
-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#105960): https://edk2.groups.io/g/devel/message/105960
Mute This Topic: https://groups.io/mt/99411875/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/leave/3901457/1787277/102458076/xyzzy [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
On 6/8/2023 1:23 PM, Ard Biesheuvel wrote:
> Currently, we rely on the logic in DXE IPL to create new page tables
> from scratch when executing in X64 mode, which means that we run with
> the initial page tables all throughout PEI, and never enable protections
> such as the CPU stack guard, even though the logic is already in place
> for IA32.
>
> So let's enable the existing logic for X64 as well. This will permit us
> to apply stricter memory permissions to code and data allocations, as
> well as the stack, when executing in PEI. It also makes the DxeIpl logic
> redundant, and should allow us to make the PcdDxeIplBuildPageTables
> feature PCD limited to IA32 DxeIpl loading the x64 DXE core.
>
> When running in long mode, use the same logic that DxeIpl uses to
> determine the size of the address space, whether or not to use 1 GB leaf
> entries and whether or not to use 5 level paging. Note that in long
> mode, PEI is entered with paging enabled, and given that switching
> between 4 and 5 levels of paging is not currently supported without
> dropping out of 64-bit mode temporarily, all we can do is carry on
> without changing the number of levels.
>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> UefiCpuPkg/CpuMpPei/CpuMpPei.inf | 2 +
> UefiCpuPkg/CpuMpPei/CpuPaging.c | 163 ++++++++++++++++----
> 2 files changed, 139 insertions(+), 26 deletions(-)
>
> diff --git a/UefiCpuPkg/CpuMpPei/CpuMpPei.inf b/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
> index 865be5627e8551ee..77eecaa0ea035b38 100644
> --- a/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
> +++ b/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
> @@ -65,6 +65,8 @@ [Ppis]
> [Pcd]
>
> gEfiMdeModulePkgTokenSpaceGuid.PcdPteMemoryEncryptionAddressOrMask ## CONSUMES
>
> gEfiMdeModulePkgTokenSpaceGuid.PcdCpuStackGuard ## CONSUMES
>
> + gEfiMdeModulePkgTokenSpaceGuid.PcdUse1GPageTable ## SOMETIMES_CONSUMES
>
> + gEfiMdeModulePkgTokenSpaceGuid.PcdUse5LevelPageTable ## SOMETIMES_CONSUMES
>
> gUefiCpuPkgTokenSpaceGuid.PcdCpuStackSwitchExceptionList ## SOMETIMES_CONSUMES
>
> gUefiCpuPkgTokenSpaceGuid.PcdCpuKnownGoodStackSize ## SOMETIMES_CONSUMES
>
> gUefiCpuPkgTokenSpaceGuid.PcdCpuApStackSize ## SOMETIMES_CONSUMES
>
> diff --git a/UefiCpuPkg/CpuMpPei/CpuPaging.c b/UefiCpuPkg/CpuMpPei/CpuPaging.c
> index 175e47ccd737a0c1..2a901e44253434c2 100644
> --- a/UefiCpuPkg/CpuMpPei/CpuPaging.c
> +++ b/UefiCpuPkg/CpuMpPei/CpuPaging.c
> @@ -302,7 +302,7 @@ ConvertMemoryPageAttributes (
> return RETURN_INVALID_PARAMETER;
>
> }
>
>
>
> - MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
>
> + MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_ADDRESS;
>
> if ((BaseAddress > MaximumAddress) ||
>
> (Length > MaximumAddress) ||
>
> (BaseAddress > MaximumAddress - (Length - 1)))
>
> @@ -350,16 +350,91 @@ ConvertMemoryPageAttributes (
> return RETURN_SUCCESS;
>
> }
>
>
>
> +/*
>
> + * Get physical address bits supported.
>
> + *
>
> + * @return The number of supported physical address bits.
>
> + */
>
> +STATIC
>
> +UINT8
>
> +GetPhysicalAddressBits (
>
> + VOID
>
> + )
>
> +{
>
> + EFI_HOB_CPU *Hob;
>
> + UINT32 RegEax;
>
> +
>
> + Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
>
> + if (Hob != NULL) {
>
> + return Hob->SizeOfMemorySpace;
>
> + }
>
> +
>
> + AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
>
> + if (RegEax >= 0x80000008) {
>
> + AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
>
> + return (UINT8)RegEax;
>
> + }
>
> +
>
> + return 36;
>
> +}
>
> +
I saw a similar patch come through earlier today adding
GetMaxPlatformAddressBits() to CpuLib -
https://edk2.groups.io/g/devel/message/105909.
>
> +/*
>
> + * Determine and return the paging mode to be used in long mode, based on PCD
>
> + * configuration and CPU support for 1G leaf descriptors and 5 level paging.
>
> + *
>
> + * @return The paging mode
>
> + */
>
> +STATIC
>
> +PAGING_MODE
>
> +GetPagingMode (
>
> + VOID
>
> + )
>
> +{
>
> + BOOLEAN Page5LevelSupport;
>
> + BOOLEAN Page1GSupport;
>
> + UINT32 RegEax;
>
> + UINT32 RegEdx;
>
> + IA32_CR4 Cr4;
>
> +
>
> + Cr4.UintN = AsmReadCr4 ();
>
> + Page5LevelSupport = (Cr4.Bits.LA57 != 0);
>
> + ASSERT (PcdGetBool (PcdUse5LevelPageTable) == Page5LevelSupport);
>
> +
>
> + Page1GSupport = FALSE;
>
> + if (PcdGetBool (PcdUse1GPageTable)) {
>
> + AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
>
> + if (RegEax >= 0x80000001) {
>
> + AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
>
> + if ((RegEdx & BIT26) != 0) {
>
> + Page1GSupport = TRUE;
>
> + }
>
> + }
>
> + }
>
> +
>
> + if (Page5LevelSupport && Page1GSupport) {
>
> + return Paging5Level1GB;
>
> + } else if (Page5LevelSupport) {
>
> + return Paging5Level;
>
> + } else if (Page1GSupport) {
>
> + return Paging4Level1GB;
>
> + } else {
>
> + return Paging4Level;
>
> + }
>
> +}
>
> +
>
> /**
>
> - Enable PAE Page Table.
>
> + Enable Page Table.
>
>
>
> - @retval EFI_SUCCESS The PAE Page Table was enabled successfully.
>
> - @retval EFI_OUT_OF_RESOURCES The PAE Page Table could not be enabled due to lack of available memory.
>
> + @param LongMode Whether the execution mode is 64 bit
>
> +
>
> + @retval EFI_SUCCESS The Page Table was enabled successfully.
>
> + @retval EFI_OUT_OF_RESOURCES The Page Table could not be enabled due to lack of available memory.
>
>
>
> **/
>
> +STATIC
>
> EFI_STATUS
>
> -EnablePaePageTable (
>
> - VOID
>
> +EnablePageTable (
>
> + IN BOOLEAN LongMode
>
> )
>
> {
>
> EFI_STATUS Status;
>
> @@ -369,6 +444,8 @@ EnablePaePageTable (
> UINTN BufferSize;
>
> IA32_MAP_ATTRIBUTE MapAttribute;
>
> IA32_MAP_ATTRIBUTE MapMask;
>
> + PAGING_MODE PagingMode;
>
> + UINT64 Length;
>
>
>
> PageTable = 0;
>
> Buffer = NULL;
>
> @@ -378,10 +455,28 @@ EnablePaePageTable (
> MapAttribute.Bits.Present = 1;
>
> MapAttribute.Bits.ReadWrite = 1;
>
>
>
> - //
>
> - // 1:1 map 4GB in 32bit mode
>
> - //
>
> - Status = PageTableMap (&PageTable, PagingPae, 0, &BufferSize, 0, SIZE_4GB, &MapAttribute, &MapMask, NULL);
>
> + if (!LongMode) {
>
> + //
>
> + // 1:1 map 4GB in 32bit mode
>
> + //
>
> + PagingMode = PagingPae;
>
> + Length = SIZE_4GB;
>
> + } else {
>
> + PagingMode = GetPagingMode ();
>
> + Length = LShiftU64 (1, GetPhysicalAddressBits ());
>
> + }
>
> +
>
> + Status = PageTableMap (
>
> + &PageTable,
>
> + PagingMode,
>
> + 0,
>
> + &BufferSize,
>
> + 0,
>
> + Length,
>
> + &MapAttribute,
>
> + &MapMask,
>
> + NULL
>
> + );
>
> ASSERT (Status == EFI_BUFFER_TOO_SMALL);
>
> if (Status != EFI_BUFFER_TOO_SMALL) {
>
> return Status;
>
> @@ -398,12 +493,23 @@ EnablePaePageTable (
>
>
> DEBUG ((
>
> DEBUG_INFO,
>
> - "EnablePaePageTable: Created PageTable = 0x%x, BufferSize = %x\n",
>
> + "%a: Created PageTable = 0x%x, BufferSize = %x\n",
>
> + __func__,
>
> PageTable,
>
> BufferSize
>
> ));
>
>
>
> - Status = PageTableMap (&PageTable, PagingPae, Buffer, &BufferSize, 0, SIZE_4GB, &MapAttribute, &MapMask, NULL);
>
> + Status = PageTableMap (
>
> + &PageTable,
>
> + PagingMode,
>
> + Buffer,
>
> + &BufferSize,
>
> + 0,
>
> + Length,
>
> + &MapAttribute,
>
> + &MapMask,
>
> + NULL
>
> + );
>
> ASSERT_EFI_ERROR (Status);
>
> if (EFI_ERROR (Status) || (PageTable == 0)) {
>
> return EFI_OUT_OF_RESOURCES;
>
> @@ -414,15 +520,17 @@ EnablePaePageTable (
> //
>
> AsmWriteCr3 (PageTable);
>
>
>
> - //
>
> - // Enable CR4.PAE
>
> - //
>
> - AsmWriteCr4 (AsmReadCr4 () | BIT5);
>
> + if (!LongMode) {
>
> + //
>
> + // Enable CR4.PAE
>
> + //
>
> + AsmWriteCr4 (AsmReadCr4 () | BIT5);
>
>
>
> - //
>
> - // Enable CR0.PG
>
> - //
>
> - AsmWriteCr0 (AsmReadCr0 () | BIT31);
>
> + //
>
> + // Enable CR0.PG
>
> + //
>
> + AsmWriteCr0 (AsmReadCr0 () | BIT31);
>
> + }
>
>
>
> return Status;
>
> }
>
> @@ -557,6 +665,9 @@ MemoryDiscoveredPpiNotifyCallback (
> EDKII_MIGRATED_FV_INFO *MigratedFvInfo;
>
> EFI_PEI_HOB_POINTERS Hob;
>
> IA32_CR0 Cr0;
>
> + BOOLEAN LongMode;
>
> +
>
> + LongMode = (sizeof (UINTN) == sizeof (UINT64));
>
>
>
> //
>
> // Paging must be setup first. Otherwise the exception TSS setup during MP
>
> @@ -565,7 +676,7 @@ MemoryDiscoveredPpiNotifyCallback (
> //
>
> InitStackGuard = FALSE;
>
> Hob.Raw = NULL;
>
> - if (IsIa32PaeSupported ()) {
>
> + if (LongMode || IsIa32PaeSupported ()) {
>
> Hob.Raw = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
>
> InitStackGuard = PcdGetBool (PcdCpuStackGuard);
>
> }
>
> @@ -575,12 +686,12 @@ MemoryDiscoveredPpiNotifyCallback (
> // is to enable paging if it is not enabled (only in 32bit mode).
>
> //
>
> Cr0.UintN = AsmReadCr0 ();
>
> - if ((Cr0.Bits.PG == 0) && (InitStackGuard || (Hob.Raw != NULL))) {
>
> - ASSERT (sizeof (UINTN) == sizeof (UINT32));
>
> -
>
> - Status = EnablePaePageTable ();
>
> + if (LongMode ||
>
> + ((Cr0.Bits.PG == 0) && (InitStackGuard || (Hob.Raw != NULL))))
>
> + {
>
> + Status = EnablePageTable (LongMode);
>
> if (EFI_ERROR (Status)) {
>
> - DEBUG ((DEBUG_ERROR, "MemoryDiscoveredPpiNotifyCallback: Failed to enable PAE page table: %r.\n", Status));
>
> + DEBUG ((DEBUG_ERROR, "%a: Failed to enable page table: %r.\n", __func__, Status));
>
> CpuDeadLoop ();
>
> }
>
> }
>
-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#105953): https://edk2.groups.io/g/devel/message/105953
Mute This Topic: https://groups.io/mt/99411875/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
On 6/8/2023 10:23 AM, Ard Biesheuvel wrote: > Currently, we rely on the logic in DXE IPL to create new page tables > from scratch when executing in X64 mode, which means that we run with > the initial page tables all throughout PEI, and never enable protections > such as the CPU stack guard, even though the logic is already in place > for IA32. > > So let's enable the existing logic for X64 as well. This will permit us > to apply stricter memory permissions to code and data allocations, as > well as the stack, when executing in PEI. It also makes the DxeIpl logic > redundant, and should allow us to make the PcdDxeIplBuildPageTables > feature PCD limited to IA32 DxeIpl loading the x64 DXE core. > > When running in long mode, use the same logic that DxeIpl uses to > determine the size of the address space, whether or not to use 1 GB leaf > entries and whether or not to use 5 level paging. Note that in long > mode, PEI is entered with paging enabled, and given that switching > between 4 and 5 levels of paging is not currently supported without > dropping out of 64-bit mode temporarily, all we can do is carry on > without changing the number of levels. > I certainly agree with extending the ability to have memory protections in PEI (and trying to unify across x86 and ARM (and beyond :)). A few things I am trying to understand: Does ARM today rebuild the page table in DxeIpl? Or is it using an earlier built page table? If I understand your proposal correctly, with the addition of this patch, you are suggesting we can drop creating new page tables in DxeIpl and use only one page table throughout. Again, I like the idea of having mapped memory protections that continue through, but do you have concerns that we may end up with garbage from PEI in DXE in the page table? For OEMs, they may not control PEI and therefore be at the whim of another's PEI page table. 
Would you envision the GCD gets built from the existing page table or that the GCD gets built according to resource descriptor HOBs and DxeCore ensures that the page table reflects what the HOBs indicated? Thanks, Oliver -=-=-=-=-=-=-=-=-=-=-=- Groups.io Links: You receive all messages sent to this group. View/Reply Online (#105944): https://edk2.groups.io/g/devel/message/105944 Mute This Topic: https://groups.io/mt/99411875/1787277 Group Owner: devel+owner@edk2.groups.io Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org] -=-=-=-=-=-=-=-=-=-=-=-
On Thu, 8 Jun 2023 at 19:39, Oliver Smith-Denny <osde@linux.microsoft.com> wrote: > > On 6/8/2023 10:23 AM, Ard Biesheuvel wrote: > > Currently, we rely on the logic in DXE IPL to create new page tables > > from scratch when executing in X64 mode, which means that we run with > > the initial page tables all throughout PEI, and never enable protections > > such as the CPU stack guard, even though the logic is already in place > > for IA32. > > > > So let's enable the existing logic for X64 as well. This will permit us > > to apply stricter memory permissions to code and data allocations, as > > well as the stack, when executing in PEI. It also makes the DxeIpl logic > > redundant, and should allow us to make the PcdDxeIplBuildPageTables > > feature PCD limited to IA32 DxeIpl loading the x64 DXE core. > > > > When running in long mode, use the same logic that DxeIpl uses to > > determine the size of the address space, whether or not to use 1 GB leaf > > entries and whether or not to use 5 level paging. Note that in long > > mode, PEI is entered with paging enabled, and given that switching > > between 4 and 5 levels of paging is not currently supported without > > dropping out of 64-bit mode temporarily, all we can do is carry on > > without changing the number of levels. > > > > I certainly agree with extending the ability to have memory protections > in PEI (and trying to unify across x86 and ARM (and beyond :)). > > A few things I am trying to understand: > > Does ARM today rebuild the page table in DxeIpl? Or is it using an > earlier built page table? > No. Most platforms run without any page tables until the permanent memory is installed, at which point it essentially maps what the platform describes as device memory and as normal memory. > If I understand your proposal correctly, with the addition of this > patch, you are suggesting we can drop creating new page tables in DxeIpl > and use only one page table throughout. Yes. 
> Again, I like the idea of having > mapped memory protections that continue through, but do you have > concerns that we may end up with garbage from PEI in DXE in the page > table? For OEMs, they may not control PEI and therefore be at the whim > of another's PEI page table. Would you envision the GCD gets built from > the existing page table or that the GCD gets built according to resource > descriptor HOBs and DxeCore ensures that the page table reflects what > the HOBs indicated? > If there is a reason to start with a clean slate when DxeIpl hands over to DXE core, I'd prefer that to be a conscious decision rather than a consequence of the X64 vs IA32 legacy. I think you can make a case for priming the GCD map based on resource descriptors rather than current mappings, with the exception of DXE core itself and the DXE mode stack. But I'd like to understand better what we think might be wrong with the page tables as PEI leaves them. -=-=-=-=-=-=-=-=-=-=-=- Groups.io Links: You receive all messages sent to this group. View/Reply Online (#105945): https://edk2.groups.io/g/devel/message/105945 Mute This Topic: https://groups.io/mt/99411875/1787277 Group Owner: devel+owner@edk2.groups.io Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org] -=-=-=-=-=-=-=-=-=-=-=-
On 6/8/2023 10:48 AM, Ard Biesheuvel wrote: > On Thu, 8 Jun 2023 at 19:39, Oliver Smith-Denny > <osde@linux.microsoft.com> wrote: >> On 6/8/2023 10:23 AM, Ard Biesheuvel wrote: >>> Currently, we rely on the logic in DXE IPL to create new page tables >>> from scratch when executing in X64 mode, which means that we run with >>> the initial page tables all throughout PEI, and never enable protections >>> such as the CPU stack guard, even though the logic is already in place >>> for IA32. >>> >>> So let's enable the existing logic for X64 as well. This will permit us >>> to apply stricter memory permissions to code and data allocations, as >>> well as the stack, when executing in PEI. It also makes the DxeIpl logic >>> redundant, and should allow us to make the PcdDxeIplBuildPageTables >>> feature PCD limited to IA32 DxeIpl loading the x64 DXE core. >>> >>> When running in long mode, use the same logic that DxeIpl uses to >>> determine the size of the address space, whether or not to use 1 GB leaf >>> entries and whether or not to use 5 level paging. Note that in long >>> mode, PEI is entered with paging enabled, and given that switching >>> between 4 and 5 levels of paging is not currently supported without >>> dropping out of 64-bit mode temporarily, all we can do is carry on >>> without changing the number of levels. >>> >> I certainly agree with extending the ability to have memory protections >> in PEI (and trying to unify across x86 and ARM (and beyond :)). >> >> A few things I am trying to understand: >> >> Does ARM today rebuild the page table in DxeIpl? Or is it using an >> earlier built page table? >> > No. Most platforms run without any page tables until the permanent > memory is installed, at which point it essentially maps what the > platform describes as device memory and as normal memory. 
> > >> If I understand your proposal correctly, with the addition of this >> patch, you are suggesting we can drop creating new page tables in DxeIpl >> and use only one page table throughout. > Yes. > >> Again, I like the idea of having >> mapped memory protections that continue through, but do you have >> concerns that we may end up with garbage from PEI in DXE in the page >> table? For OEMs, they may not control PEI and therefore be at the whim >> of another's PEI page table. Would you envision the GCD gets built from >> the existing page table or that the GCD gets built according to resource >> descriptor HOBs and DxeCore ensures that the page table reflects what >> the HOBs indicated? >> > If there is a reason to start with a clean slate when DxeIpl hands > over to DXE core, I'd prefer that to be a conscious decision rather > than a consequence of the X64 vs IA32 legacy. > > I think you can make a case for priming the GCD map based on resource > descriptors rather than current mappings, with the exception of DXE > core itself and the DXE mode stack. But I'd like to understand better > what we think might be wrong with the page tables as PEI leaves them. On many platforms there are different "owners" for these different parts of firmware code. The PEI phase is a place where the Silicon vendor and Platform teams must work together. The Dxe Phase may have a different set of partners. Industry trends definitely show more silicon vendor driven diversity in the PEI phase of the boot process and with this diversity it is much harder to make solid assumptions about the execution environment. We have also discussed in the past meeting that PEI should be configurable using different solutions given it isn't a place where unknown 3rd party compatibility is critical. This means that PEI might have different requirements than DXE and thus the configuration inherited from PEI may not be compliant. 
Additionally, the code and driver mappings from PEI phase should not be relevant in DXE. Even with the same architecture being used these are different execution phases with different constructs. Keeping the PEI code mapped will only lead to additional security and correctness challenges. Finally, as an overarching theme of this project we have suggested we should not be coupling the various phases, their requirements, and their assumptions together. You could just as easily apply this to DXE and SMM/MM. These are all independent execution environments and the more we can provide simplification and consistency the better our chances are of getting correct implementations across the ecosystem. > > > > > -=-=-=-=-=-=-=-=-=-=-=- Groups.io Links: You receive all messages sent to this group. View/Reply Online (#105950): https://edk2.groups.io/g/devel/message/105950 Mute This Topic: https://groups.io/mt/99411875/1787277 Group Owner: devel+owner@edk2.groups.io Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org] -=-=-=-=-=-=-=-=-=-=-=-
I think Sean's point aligns more closely with traditional PI boot phase separation goals. Btw, in the project we discussed, this issue was meant to capture the DXE memory protection init dependencies - https://github.com/tianocore/edk2/issues/4502 if someone would like to update that at some point. Thanks, Michael On 6/8/2023 2:27 PM, Sean wrote: > On 6/8/2023 10:48 AM, Ard Biesheuvel wrote: >> On Thu, 8 Jun 2023 at 19:39, Oliver Smith-Denny >> <osde@linux.microsoft.com> wrote: >>> On 6/8/2023 10:23 AM, Ard Biesheuvel wrote: >>>> Currently, we rely on the logic in DXE IPL to create new page tables >>>> from scratch when executing in X64 mode, which means that we run with >>>> the initial page tables all throughout PEI, and never enable >>>> protections >>>> such as the CPU stack guard, even though the logic is already in place >>>> for IA32. >>>> >>>> So let's enable the existing logic for X64 as well. This will permit us >>>> to apply stricter memory permissions to code and data allocations, as >>>> well as the stack, when executing in PEI. It also makes the DxeIpl >>>> logic >>>> redundant, and should allow us to make the PcdDxeIplBuildPageTables >>>> feature PCD limited to IA32 DxeIpl loading the x64 DXE core. >>>> >>>> When running in long mode, use the same logic that DxeIpl uses to >>>> determine the size of the address space, whether or not to use 1 GB >>>> leaf >>>> entries and whether or not to use 5 level paging. Note that in long >>>> mode, PEI is entered with paging enabled, and given that switching >>>> between 4 and 5 levels of paging is not currently supported without >>>> dropping out of 64-bit mode temporarily, all we can do is carry on >>>> without changing the number of levels. >>>> >>> I certainly agree with extending the ability to have memory protections >>> in PEI (and trying to unify across x86 and ARM (and beyond :)). >>> >>> A few things I am trying to understand: >>> >>> Does ARM today rebuild the page table in DxeIpl? 
Or is it using an >>> earlier built page table? >>> >> No. Most platforms run without any page tables until the permanent >> memory is installed, at which point it essentially maps what the >> platform describes as device memory and as normal memory. >> >> >>> If I understand your proposal correctly, with the addition of this >>> patch, you are suggesting we can drop creating new page tables in DxeIpl >>> and use only one page table throughout. >> Yes. >> >>> Again, I like the idea of having >>> mapped memory protections that continue through, but do you have >>> concerns that we may end up with garbage from PEI in DXE in the page >>> table? For OEMs, they may not control PEI and therefore be at the whim >>> of another's PEI page table. Would you envision the GCD gets built from >>> the existing page table or that the GCD gets built according to resource >>> descriptor HOBs and DxeCore ensures that the page table reflects what >>> the HOBs indicated? >>> >> If there is a reason to start with a clean slate when DxeIpl hands >> over to DXE core, I'd prefer that to be a conscious decision rather >> than a consequence of the X64 vs IA32 legacy. >> >> I think you can make a case for priming the GCD map based on resource >> descriptors rather than current mappings, with the exception of DXE >> core itself and the DXE mode stack. But I'd like to understand better >> what we think might be wrong with the page tables as PEI leaves them. > > > On many platforms there are different "owners" for these different parts > of firmware code. The PEI phase is a place where the Silicon vendor and > Platform teams must work together. The Dxe Phase may have a different > set of partners. Industry trends definitely show more silicon vendor > driven diversity in the PEI phase of the boot process and with this > diversity it is much harder to make solid assumptions about the > execution environment. 
We have also discussed in the past meeting that > PEI should be configurable using different solutions given it isn't a > place where unknown 3rd party compatibility is critical. This means > that PEI might have different requirements than DXE and thus the > configuration inherited from PEI may not be compliant. Additionally, the > code and driver mappings from PEI phase should not be relevant in DXE. > Even with the same architecture being used these are different execution > phases with different constructs. Keeping the PEI code mapped will only > lead to additional security and correctness challenges. Finally, as an > overarching theme of this project we have suggested we should not be > coupling the various phases, their requirements, and their assumptions > together. You could just as easily apply this to DXE and SMM/MM. These > are all independent execution environments and the more we can provide > simplification and consistency the better our chances are of getting > correct implementations across the ecosystem. > >> >> >> >> >> > > > > -=-=-=-=-=-=-=-=-=-=-=- Groups.io Links: You receive all messages sent to this group. View/Reply Online (#105952): https://edk2.groups.io/g/devel/message/105952 Mute This Topic: https://groups.io/mt/99411875/1787277 Group Owner: devel+owner@edk2.groups.io Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org] -=-=-=-=-=-=-=-=-=-=-=-
On Thu, 8 Jun 2023 at 21:32, Michael Kubacki <mikuback@linux.microsoft.com> wrote: > > I think Sean's point aligns more closely with traditional PI boot phase > separation goals. Btw, in the project we discussed, this issue was meant > to capture the DXE memory protection init dependencies - > https://github.com/tianocore/edk2/issues/4502 if someone would like to > update that at some point. > Yeah, I think that makes sense. But the current status quo (on X64) is to remove *all* protections when handing over to DXE, and so dropping that and running with whatever PEI left behind is hardly going to be worse. (and this is what we do on ARM) But I agree that it makes sense for these manipulations to be scoped. So we might manage a separate set of shadow page tables in the CPU PEIM that also produces the memattr PPI, and manipulations can apply to the active set only or to both sets (for, e.g., the DXE core, DXE IPL and DXE mode stack). That would at least permit us to drop the current kludge in DxeIpl, which only knows how to create an unrestricted 1:1 mapping of the entire address space, with 1x a NX mapping (for the stack) and 1x a non-encrypted mapping (for the GHCB page on confidential VMs). It also provides a better basis for architectures to carry their own specific logic for this, instead of having a subdirectory for each arch under DxeIpl/ > On 6/8/2023 2:27 PM, Sean wrote: > > On 6/8/2023 10:48 AM, Ard Biesheuvel wrote: > >> On Thu, 8 Jun 2023 at 19:39, Oliver Smith-Denny > >> <osde@linux.microsoft.com> wrote: > >>> On 6/8/2023 10:23 AM, Ard Biesheuvel wrote: > >>>> Currently, we rely on the logic in DXE IPL to create new page tables > >>>> from scratch when executing in X64 mode, which means that we run with > >>>> the initial page tables all throughout PEI, and never enable > >>>> protections > >>>> such as the CPU stack guard, even though the logic is already in place > >>>> for IA32. > >>>> > >>>> So let's enable the existing logic for X64 as well.
This will permit us > >>>> to apply stricter memory permissions to code and data allocations, as > >>>> well as the stack, when executing in PEI. It also makes the DxeIpl > >>>> logic > >>>> redundant, and should allow us to make the PcdDxeIplBuildPageTables > >>>> feature PCD limited to IA32 DxeIpl loading the x64 DXE core. > >>>> > >>>> When running in long mode, use the same logic that DxeIpl uses to > >>>> determine the size of the address space, whether or not to use 1 GB > >>>> leaf > >>>> entries and whether or not to use 5 level paging. Note that in long > >>>> mode, PEI is entered with paging enabled, and given that switching > >>>> between 4 and 5 levels of paging is not currently supported without > >>>> dropping out of 64-bit mode temporarily, all we can do is carry on > >>>> without changing the number of levels. > >>>> > >>> I certainly agree with extending the ability to have memory protections > >>> in PEI (and trying to unify across x86 and ARM (and beyond :)). > >>> > >>> A few things I am trying to understand: > >>> > >>> Does ARM today rebuild the page table in DxeIpl? Or is it using an > >>> earlier built page table? > >>> > >> No. Most platforms run without any page tables until the permanent > >> memory is installed, at which point it essentially maps what the > >> platform describes as device memory and as normal memory. > >> > >> > >>> If I understand your proposal correctly, with the addition of this > >>> patch, you are suggesting we can drop creating new page tables in DxeIpl > >>> and use only one page table throughout. > >> Yes. > >> > >>> Again, I like the idea of having > >>> mapped memory protections that continue through, but do you have > >>> concerns that we may end up with garbage from PEI in DXE in the page > >>> table? For OEMs, they may not control PEI and therefore be at the whim > >>> of another's PEI page table. 
Would you envision the GCD gets built from > >>> the existing page table or that the GCD gets built according to resource > >>> descriptor HOBs and DxeCore ensures that the page table reflects what > >>> the HOBs indicated? > >>> > >> If there is a reason to start with a clean slate when DxeIpl hands > >> over to DXE core, I'd prefer that to be a conscious decision rather > >> than a consequence of the X64 vs IA32 legacy. > >> > >> I think you can make a case for priming the GCD map based on resource > >> descriptors rather than current mappings, with the exception of DXE > >> core itself and the DXE mode stack. But I'd like to understand better > >> what we think might be wrong with the page tables as PEI leaves them. > > > > > > On many platforms there are different "owners" for these different parts > > of firmware code. The PEI phase is a place where the Silicon vendor and > > Platform teams must work together. The Dxe Phase may have a different > > set of partners. Industry trends definitely show more silicon vendor > > driven diversity in the PEI phase of the boot process and with this > > diversity it is much harder to make solid assumptions about the > > execution environment. We have also discussed in the past meeting that > > PEI should be configurable using different solutions given it isn't a > > place where unknown 3rd party compatibility is critical. This means > > that PEI might have different requirements than DXE and thus the > > configuration inherited from PEI may not be compliant. Additionally, the > > code and driver mappings from PEI phase should not be relevant in DXE. > > Even with the same architecture being used these are different execution > > phases with different constructs. Keeping the PEI code mapped will only > > lead to additional security and correctness challenges. 
Finally, as an > > overarching theme of this project we have suggested we should not be > > coupling the various phases, their requirements, and their assumptions > > together. You could just as easily apply this to DXE and SMM/MM. These > > are all independent execution environments and the more we can provide > > simplification and consistency the better our chances are of getting > > correct implementations across the ecosystem. > > > >> > >> > >> > >> > >> > > > > > > > > -=-=-=-=-=-=-=-=-=-=-=- Groups.io Links: You receive all messages sent to this group. View/Reply Online (#105959): https://edk2.groups.io/g/devel/message/105959 Mute This Topic: https://groups.io/mt/99411875/1787277 Group Owner: devel+owner@edk2.groups.io Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org] -=-=-=-=-=-=-=-=-=-=-=-
© 2016 - 2026 Red Hat, Inc.