[edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table

duntan posted 15 patches 1 year, 3 months ago
There is a newer version of this series
[edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table
Posted by duntan 1 year, 3 months ago
This commit is a code refinement to the current SMM page table
generation code. Add a new GenSmmPageTable() API to create the SMM
page table based on the PageTableMap() API in CpuPageTableLib.
Callers only need to specify the paging mode and the
PhysicalAddressBits to map. This function can be used to create page
tables for both IA32 PAE paging and X64 5-level or 4-level paging.

Signed-off-by: Dun Tan <dun.tan@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
---
 UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c           |   2 +-
 UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h         |  15 +++++++++++++++
 UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c |  65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c            | 220 ++++++++++++++++++++++++++--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 4 files changed, 107 insertions(+), 195 deletions(-)

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
index 9c8107080a..b11264ce4a 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -63,7 +63,7 @@ SmmInitPageTable (
     InitializeIDTSmmStackGuard ();
   }
 
-  return Gen4GPageTable (TRUE);
+  return GenSmmPageTable (PagingPae, mPhysicalAddressBits);
 }
 
 /**
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index a7da9673a5..5399659bc0 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -553,6 +553,21 @@ Gen4GPageTable (
   IN      BOOLEAN  Is32BitPageTable
   );
 
+/**
+  Create page table based on input PagingMode and PhysicalAddressBits in smm.
+
+  @param[in]      PagingMode           The paging mode.
+  @param[in]      PhysicalAddressBits  The bits of physical address to map.
+
+  @retval         PageTable Address
+
+**/
+UINTN
+GenSmmPageTable (
+  IN PAGING_MODE  PagingMode,
+  IN UINT8        PhysicalAddressBits
+  );
+
 /**
   Initialize global data for MP synchronization.
 
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
index ef0ba9a355..138ff43c9d 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
@@ -1642,6 +1642,71 @@ EdkiiSmmClearMemoryAttributes (
   return SmmClearMemoryAttributes (BaseAddress, Length, Attributes);
 }
 
+/**
+  Create page table based on input PagingMode and PhysicalAddressBits in smm.
+
+  @param[in]      PagingMode           The paging mode.
+  @param[in]      PhysicalAddressBits  The bits of physical address to map.
+
+  @retval         PageTable Address
+
+**/
+UINTN
+GenSmmPageTable (
+  IN PAGING_MODE  PagingMode,
+  IN UINT8        PhysicalAddressBits
+  )
+{
+  UINTN               PageTableBufferSize;
+  UINTN               PageTable;
+  VOID                *PageTableBuffer;
+  IA32_MAP_ATTRIBUTE  MapAttribute;
+  IA32_MAP_ATTRIBUTE  MapMask;
+  RETURN_STATUS       Status;
+  UINTN               GuardPage;
+  UINTN               Index;
+  UINT64              Length;
+
+  Length                           = LShiftU64 (1, PhysicalAddressBits);
+  PageTable                        = 0;
+  PageTableBufferSize              = 0;
+  MapMask.Uint64                   = MAX_UINT64;
+  MapAttribute.Uint64              = mAddressEncMask;
+  MapAttribute.Bits.Present        = 1;
+  MapAttribute.Bits.ReadWrite      = 1;
+  MapAttribute.Bits.UserSupervisor = 1;
+  MapAttribute.Bits.Accessed       = 1;
+  MapAttribute.Bits.Dirty          = 1;
+
+  Status = PageTableMap (&PageTable, PagingMode, NULL, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
+  ASSERT (Status == RETURN_BUFFER_TOO_SMALL);
+  DEBUG ((DEBUG_INFO, "GenSMMPageTable: 0x%x bytes needed for initial SMM page table\n", PageTableBufferSize));
+  PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES (PageTableBufferSize));
+  ASSERT (PageTableBuffer != NULL);
+  Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
+  ASSERT (Status == RETURN_SUCCESS);
+  ASSERT (PageTableBufferSize == 0);
+
+  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+    //
+    // Mark the 4KB guard page between known good stack and smm stack as non-present
+    //
+    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+      GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * (mSmmStackSize + mSmmShadowStackSize);
+      Status    = ConvertMemoryPageAttributes (PageTable, PagingMode, GuardPage, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
+    }
+  }
+
+  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
+    //
+    // Mark [0, 4k] as non-present
+    //
+    Status = ConvertMemoryPageAttributes (PageTable, PagingMode, 0, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
+  }
+
+  return (UINTN)PageTable;
+}
+
 /**
   This function retrieves the attributes of the memory region specified by
   BaseAddress and Length. If different attributes are got from different part
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 25ced50955..060e6dc147 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -167,160 +167,6 @@ CalculateMaximumSupportAddress (
   return PhysicalAddressBits;
 }
 
-/**
-  Set static page table.
-
-  @param[in] PageTable              Address of page table.
-  @param[in] PhysicalAddressBits    The maximum physical address bits supported.
-**/
-VOID
-SetStaticPageTable (
-  IN UINTN  PageTable,
-  IN UINT8  PhysicalAddressBits
-  )
-{
-  UINT64  PageAddress;
-  UINTN   NumberOfPml5EntriesNeeded;
-  UINTN   NumberOfPml4EntriesNeeded;
-  UINTN   NumberOfPdpEntriesNeeded;
-  UINTN   IndexOfPml5Entries;
-  UINTN   IndexOfPml4Entries;
-  UINTN   IndexOfPdpEntries;
-  UINTN   IndexOfPageDirectoryEntries;
-  UINT64  *PageMapLevel5Entry;
-  UINT64  *PageMapLevel4Entry;
-  UINT64  *PageMap;
-  UINT64  *PageDirectoryPointerEntry;
-  UINT64  *PageDirectory1GEntry;
-  UINT64  *PageDirectoryEntry;
-
-  //
-  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
-  //  when 5-Level Paging is disabled.
-  //
-  ASSERT (PhysicalAddressBits <= 52);
-  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
-    PhysicalAddressBits = 48;
-  }
-
-  NumberOfPml5EntriesNeeded = 1;
-  if (PhysicalAddressBits > 48) {
-    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
-    PhysicalAddressBits       = 48;
-  }
-
-  NumberOfPml4EntriesNeeded = 1;
-  if (PhysicalAddressBits > 39) {
-    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
-    PhysicalAddressBits       = 39;
-  }
-
-  NumberOfPdpEntriesNeeded = 1;
-  ASSERT (PhysicalAddressBits > 30);
-  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);
-
-  //
-  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
-  //
-  PageMap = (VOID *)PageTable;
-
-  PageMapLevel4Entry = PageMap;
-  PageMapLevel5Entry = NULL;
-  if (m5LevelPagingNeeded) {
-    //
-    // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
-    //
-    PageMapLevel5Entry = PageMap;
-  }
-
-  PageAddress = 0;
-
-  for ( IndexOfPml5Entries = 0
-        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
-        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
-  {
-    //
-    // Each PML5 entry points to a page of PML4 entires.
-    // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
-    // When 5-Level Paging is disabled, below allocation happens only once.
-    //
-    if (m5LevelPagingNeeded) {
-      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
-      if (PageMapLevel4Entry == NULL) {
-        PageMapLevel4Entry = AllocatePageTableMemory (1);
-        ASSERT (PageMapLevel4Entry != NULL);
-        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));
-
-        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
-      }
-    }
-
-    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
-      //
-      // Each PML4 entry points to a page of Page Directory Pointer entries.
-      //
-      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
-      if (PageDirectoryPointerEntry == NULL) {
-        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
-        ASSERT (PageDirectoryPointerEntry != NULL);
-        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));
-
-        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
-      }
-
-      if (m1GPageTableSupport) {
-        PageDirectory1GEntry = PageDirectoryPointerEntry;
-        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
-          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
-            //
-            // Skip the < 4G entries
-            //
-            continue;
-          }
-
-          //
-          // Fill in the Page Directory entries
-          //
-          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
-        }
-      } else {
-        PageAddress = BASE_4GB;
-        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
-          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
-            //
-            // Skip the < 4G entries
-            //
-            continue;
-          }
-
-          //
-          // Each Directory Pointer entries points to a page of Page Directory entires.
-          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
-          //
-          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
-          if (PageDirectoryEntry == NULL) {
-            PageDirectoryEntry = AllocatePageTableMemory (1);
-            ASSERT (PageDirectoryEntry != NULL);
-            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
-
-            //
-            // Fill in a Page Directory Pointer Entries
-            //
-            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
-          }
-
-          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
-            //
-            // Fill in the Page Directory entries
-            //
-            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
-          }
-        }
-      }
-    }
-  }
-}
-
 /**
   Create PageTable for SMM use.
 
@@ -332,15 +178,16 @@ SmmInitPageTable (
   VOID
   )
 {
-  EFI_PHYSICAL_ADDRESS      Pages;
-  UINT64                    *PTEntry;
+  UINTN                     PageTable;
   LIST_ENTRY                *FreePage;
   UINTN                     Index;
   UINTN                     PageFaultHandlerHookAddress;
   IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
   EFI_STATUS                Status;
+  UINT64                    *PdptEntry;
   UINT64                    *Pml4Entry;
   UINT64                    *Pml5Entry;
+  UINT8                     PhysicalAddressBits;
 
   //
   // Initialize spin lock
@@ -357,59 +204,44 @@ SmmInitPageTable (
   } else {
     mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
   }
+
   DEBUG ((DEBUG_INFO, "5LevelPaging Needed             - %d\n", m5LevelPagingNeeded));
   DEBUG ((DEBUG_INFO, "1GPageTable Support             - %d\n", m1GPageTableSupport));
   DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
   DEBUG ((DEBUG_INFO, "PhysicalAddressBits             - %d\n", mPhysicalAddressBits));
-  //
-  // Generate PAE page table for the first 4GB memory space
-  //
-  Pages = Gen4GPageTable (FALSE);
 
   //
-  // Set IA32_PG_PMNT bit to mask this entry
+  // Generate initial SMM page table.
+  // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
   //
-  PTEntry = (UINT64 *)(UINTN)Pages;
-  for (Index = 0; Index < 4; Index++) {
-    PTEntry[Index] |= IA32_PG_PMNT;
-  }
-
-  //
-  // Fill Page-Table-Level4 (PML4) entry
-  //
-  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
-  ASSERT (Pml4Entry != NULL);
-  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
-  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
-
-  //
-  // Set sub-entries number
-  //
-  SetSubEntriesNum (Pml4Entry, 3);
-  PTEntry = Pml4Entry;
+  PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ? mPhysicalAddressBits : 32;
+  PageTable           = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
 
   if (m5LevelPagingNeeded) {
+    Pml5Entry = (UINT64 *)PageTable;
     //
-    // Fill PML5 entry
-    //
-    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
-    ASSERT (Pml5Entry != NULL);
-    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
-    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
-    //
-    // Set sub-entries number
+    // Set Pml5Entry sub-entries number for smm PF handler usage.
     //
     SetSubEntriesNum (Pml5Entry, 1);
-    PTEntry = Pml5Entry;
+    Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
+  } else {
+    Pml4Entry = (UINT64 *)PageTable;
+  }
+
+  //
+  // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
+  //
+  PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
+  for (Index = 0; Index < 4; Index++) {
+    PdptEntry[Index] |= IA32_PG_PMNT;
   }
 
-  if (mCpuSmmRestrictedMemoryAccess) {
+  if (!mCpuSmmRestrictedMemoryAccess) {
     //
-    // When access to non-SMRAM memory is restricted, create page table
-    // that covers all memory space.
+    // Set Pml4Entry sub-entries number for smm PF handler usage.
     //
-    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
-  } else {
+    SetSubEntriesNum (Pml4Entry, 3);
+
     //
     // Add pages to page pool
     //
@@ -466,7 +298,7 @@ SmmInitPageTable (
   //
   // Return the address of PML4/PML5 (to set CR3)
   //
-  return (UINT32)(UINTN)PTEntry;
+  return (UINT32)PageTable;
 }
 
 /**
-- 
2.31.1.windows.1



-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#104930): https://edk2.groups.io/g/devel/message/104930
Mute This Topic: https://groups.io/mt/98922936/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
Re: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table
Posted by Ni, Ray 1 year, 3 months ago
      //
      // SMM Stack Guard Enabled
      // Append Shadow Stack after normal stack
      //   2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.
      //
      // |= Stacks
      // +--------------------------------------------------+---------------------------------------------------------------+
      // | Known Good Stack | Guard Page |    SMM Stack     | Known Good Shadow Stack | Guard Page |    SMM Shadow Stack    |
      // +--------------------------------------------------+---------------------------------------------------------------+
      // |         4K       |    4K      |PcdCpuSmmStackSize|            4K           |    4K      |PcdCpuSmmShadowStackSize|
      // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
      // |                                                                                                                  |
      // |<-------------------------------------------- Processor N ------------------------------------------------------->|
      //

GenSmmPageTable() only marks the "Guard page" in the "mSmmStackSize range" as non-present.
But the "Guard page" in the "mSmmShadowStackSize range" is not marked as non-present.
Why?

Thanks,
Ray

> -----Original Message-----
> From: devel@edk2.groups.io <devel@edk2.groups.io> On Behalf Of duntan
> Sent: Tuesday, May 16, 2023 5:59 PM
> To: devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Ni, Ray <ray.ni@intel.com>; Kumar, Rahul
> R <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
> Subject: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to
> create smm page table
> 
> This commit is code refinement to current smm pagetable generation
> code. Add a new GenSmmPageTable() API to create smm page table
> based on the PageTableMap() API in CpuPageTableLib. Caller only
> needs to specify the paging mode and the PhysicalAddressBits to map.
> This function can be used to create both IA32 pae paging and X64
> 5level, 4level paging.
> 
> Signed-off-by: Dun Tan <dun.tan@intel.com>
> Cc: Eric Dong <eric.dong@intel.com>
> Cc: Ray Ni <ray.ni@intel.com>
> Cc: Rahul Kumar <rahul1.kumar@intel.com>
> Cc: Gerd Hoffmann <kraxel@redhat.com>
> ---
>  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c           |   2 +-
>  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h         |  15
> +++++++++++++++
>  UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c |  65
> +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c            | 220
> ++++++++++++++++++++++++++-----------------------------------------------------------
> --------------------------------------------------------------------------------------------------
> -------------------------------------
>  4 files changed, 107 insertions(+), 195 deletions(-)
> 
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> index 9c8107080a..b11264ce4a 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> @@ -63,7 +63,7 @@ SmmInitPageTable (
>      InitializeIDTSmmStackGuard ();
>    }
> 
> -  return Gen4GPageTable (TRUE);
> +  return GenSmmPageTable (PagingPae, mPhysicalAddressBits);
>  }
> 
>  /**
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> index a7da9673a5..5399659bc0 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> @@ -553,6 +553,21 @@ Gen4GPageTable (
>    IN      BOOLEAN  Is32BitPageTable
>    );
> 
> +/**
> +  Create page table based on input PagingMode and PhysicalAddressBits in smm.
> +
> +  @param[in]      PagingMode           The paging mode.
> +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> +
> +  @retval         PageTable Address
> +
> +**/
> +UINTN
> +GenSmmPageTable (
> +  IN PAGING_MODE  PagingMode,
> +  IN UINT8        PhysicalAddressBits
> +  );
> +
>  /**
>    Initialize global data for MP synchronization.
> 
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> index ef0ba9a355..138ff43c9d 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> @@ -1642,6 +1642,71 @@ EdkiiSmmClearMemoryAttributes (
>    return SmmClearMemoryAttributes (BaseAddress, Length, Attributes);
>  }
> 
> +/**
> +  Create page table based on input PagingMode and PhysicalAddressBits in smm.
> +
> +  @param[in]      PagingMode           The paging mode.
> +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> +
> +  @retval         PageTable Address
> +
> +**/
> +UINTN
> +GenSmmPageTable (
> +  IN PAGING_MODE  PagingMode,
> +  IN UINT8        PhysicalAddressBits
> +  )
> +{
> +  UINTN               PageTableBufferSize;
> +  UINTN               PageTable;
> +  VOID                *PageTableBuffer;
> +  IA32_MAP_ATTRIBUTE  MapAttribute;
> +  IA32_MAP_ATTRIBUTE  MapMask;
> +  RETURN_STATUS       Status;
> +  UINTN               GuardPage;
> +  UINTN               Index;
> +  UINT64              Length;
> +
> +  Length                           = LShiftU64 (1, PhysicalAddressBits);
> +  PageTable                        = 0;
> +  PageTableBufferSize              = 0;
> +  MapMask.Uint64                   = MAX_UINT64;
> +  MapAttribute.Uint64              = mAddressEncMask;
> +  MapAttribute.Bits.Present        = 1;
> +  MapAttribute.Bits.ReadWrite      = 1;
> +  MapAttribute.Bits.UserSupervisor = 1;
> +  MapAttribute.Bits.Accessed       = 1;
> +  MapAttribute.Bits.Dirty          = 1;
> +
> +  Status = PageTableMap (&PageTable, PagingMode, NULL,
> &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> +  ASSERT (Status == RETURN_BUFFER_TOO_SMALL);
> +  DEBUG ((DEBUG_INFO, "GenSMMPageTable: 0x%x bytes needed for initial
> SMM page table\n", PageTableBufferSize));
> +  PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES
> (PageTableBufferSize));
> +  ASSERT (PageTableBuffer != NULL);
> +  Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer,
> &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> +  ASSERT (Status == RETURN_SUCCESS);
> +  ASSERT (PageTableBufferSize == 0);
> +
> +  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
> +    //
> +    // Mark the 4KB guard page between known good stack and smm stack as
> non-present
> +    //
> +    for (Index = 0; Index < gSmmCpuPrivate-
> >SmmCoreEntryContext.NumberOfCpus; Index++) {
> +      GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE + Index *
> (mSmmStackSize + mSmmShadowStackSize);
> +      Status    = ConvertMemoryPageAttributes (PageTable, PagingMode,
> GuardPage, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
> +    }
> +  }
> +
> +  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
> +    //
> +    // Mark [0, 4k] as non-present
> +    //
> +    Status = ConvertMemoryPageAttributes (PageTable, PagingMode, 0, SIZE_4KB,
> EFI_MEMORY_RP, TRUE, NULL);
> +  }
> +
> +  return (UINTN)PageTable;
> +}
> +
>  /**
>    This function retrieves the attributes of the memory region specified by
>    BaseAddress and Length. If different attributes are got from different part
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> index 25ced50955..060e6dc147 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> @@ -167,160 +167,6 @@ CalculateMaximumSupportAddress (
>    return PhysicalAddressBits;
>  }
> 
> -/**
> -  Set static page table.
> -
> -  @param[in] PageTable              Address of page table.
> -  @param[in] PhysicalAddressBits    The maximum physical address bits
> supported.
> -**/
> -VOID
> -SetStaticPageTable (
> -  IN UINTN  PageTable,
> -  IN UINT8  PhysicalAddressBits
> -  )
> -{
> -  UINT64  PageAddress;
> -  UINTN   NumberOfPml5EntriesNeeded;
> -  UINTN   NumberOfPml4EntriesNeeded;
> -  UINTN   NumberOfPdpEntriesNeeded;
> -  UINTN   IndexOfPml5Entries;
> -  UINTN   IndexOfPml4Entries;
> -  UINTN   IndexOfPdpEntries;
> -  UINTN   IndexOfPageDirectoryEntries;
> -  UINT64  *PageMapLevel5Entry;
> -  UINT64  *PageMapLevel4Entry;
> -  UINT64  *PageMap;
> -  UINT64  *PageDirectoryPointerEntry;
> -  UINT64  *PageDirectory1GEntry;
> -  UINT64  *PageDirectoryEntry;
> -
> -  //
> -  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
> -  //  when 5-Level Paging is disabled.
> -  //
> -  ASSERT (PhysicalAddressBits <= 52);
> -  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
> -    PhysicalAddressBits = 48;
> -  }
> -
> -  NumberOfPml5EntriesNeeded = 1;
> -  if (PhysicalAddressBits > 48) {
> -    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> 48);
> -    PhysicalAddressBits       = 48;
> -  }
> -
> -  NumberOfPml4EntriesNeeded = 1;
> -  if (PhysicalAddressBits > 39) {
> -    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> 39);
> -    PhysicalAddressBits       = 39;
> -  }
> -
> -  NumberOfPdpEntriesNeeded = 1;
> -  ASSERT (PhysicalAddressBits > 30);
> -  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);
> -
> -  //
> -  // By architecture only one PageMapLevel4 exists - so lets allocate storage for
> it.
> -  //
> -  PageMap = (VOID *)PageTable;
> -
> -  PageMapLevel4Entry = PageMap;
> -  PageMapLevel5Entry = NULL;
> -  if (m5LevelPagingNeeded) {
> -    //
> -    // By architecture only one PageMapLevel5 exists - so lets allocate storage for
> it.
> -    //
> -    PageMapLevel5Entry = PageMap;
> -  }
> -
> -  PageAddress = 0;
> -
> -  for ( IndexOfPml5Entries = 0
> -        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
> -        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
> -  {
> -    //
> -    // Each PML5 entry points to a page of PML4 entires.
> -    // So lets allocate space for them and fill them in in the IndexOfPml4Entries
> loop.
> -    // When 5-Level Paging is disabled, below allocation happens only once.
> -    //
> -    if (m5LevelPagingNeeded) {
> -      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) &
> ~mAddressEncMask & gPhyMask);
> -      if (PageMapLevel4Entry == NULL) {
> -        PageMapLevel4Entry = AllocatePageTableMemory (1);
> -        ASSERT (PageMapLevel4Entry != NULL);
> -        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));
> -
> -        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -      }
> -    }
> -
> -    for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512);
> IndexOfPml4Entries++, PageMapLevel4Entry++) {
> -      //
> -      // Each PML4 entry points to a page of Page Directory Pointer entries.
> -      //
> -      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) &
> ~mAddressEncMask & gPhyMask);
> -      if (PageDirectoryPointerEntry == NULL) {
> -        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
> -        ASSERT (PageDirectoryPointerEntry != NULL);
> -        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));
> -
> -        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -      }
> -
> -      if (m1GPageTableSupport) {
> -        PageDirectory1GEntry = PageDirectoryPointerEntry;
> -        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress +=
> SIZE_1GB) {
> -          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
> -            //
> -            // Skip the < 4G entries
> -            //
> -            continue;
> -          }
> -
> -          //
> -          // Fill in the Page Directory entries
> -          //
> -          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS
> | PAGE_ATTRIBUTE_BITS;
> -        }
> -      } else {
> -        PageAddress = BASE_4GB;
> -        for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512);
> IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
> -          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
> -            //
> -            // Skip the < 4G entries
> -            //
> -            continue;
> -          }
> -
> -          //
> -          // Each Directory Pointer entries points to a page of Page Directory entires.
> -          // So allocate space for them and fill them in in the
> IndexOfPageDirectoryEntries loop.
> -          //
> -          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) &
> ~mAddressEncMask & gPhyMask);
> -          if (PageDirectoryEntry == NULL) {
> -            PageDirectoryEntry = AllocatePageTableMemory (1);
> -            ASSERT (PageDirectoryEntry != NULL);
> -            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
> -
> -            //
> -            // Fill in a Page Directory Pointer Entries
> -            //
> -            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -          }
> -
> -          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress +=
> SIZE_2MB) {
> -            //
> -            // Fill in the Page Directory entries
> -            //
> -            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS |
> PAGE_ATTRIBUTE_BITS;
> -          }
> -        }
> -      }
> -    }
> -  }
> -}
> -
>  /**
>    Create PageTable for SMM use.
> 
> @@ -332,15 +178,16 @@ SmmInitPageTable (
>    VOID
>    )
>  {
> -  EFI_PHYSICAL_ADDRESS      Pages;
> -  UINT64                    *PTEntry;
> +  UINTN                     PageTable;
>    LIST_ENTRY                *FreePage;
>    UINTN                     Index;
>    UINTN                     PageFaultHandlerHookAddress;
>    IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
>    EFI_STATUS                Status;
> +  UINT64                    *PdptEntry;
>    UINT64                    *Pml4Entry;
>    UINT64                    *Pml5Entry;
> +  UINT8                     PhysicalAddressBits;
> 
>    //
>    // Initialize spin lock
> @@ -357,59 +204,44 @@ SmmInitPageTable (
>    } else {
>      mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
>    }
> +
>    DEBUG ((DEBUG_INFO, "5LevelPaging Needed             - %d\n",
> m5LevelPagingNeeded));
>    DEBUG ((DEBUG_INFO, "1GPageTable Support             - %d\n",
> m1GPageTableSupport));
>    DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n",
> mCpuSmmRestrictedMemoryAccess));
>    DEBUG ((DEBUG_INFO, "PhysicalAddressBits             - %d\n",
> mPhysicalAddressBits));
> -  //
> -  // Generate PAE page table for the first 4GB memory space
> -  //
> -  Pages = Gen4GPageTable (FALSE);
> 
>    //
> -  // Set IA32_PG_PMNT bit to mask this entry
> +  // Generate initial SMM page table.
> +  // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
>    //
> -  PTEntry = (UINT64 *)(UINTN)Pages;
> -  for (Index = 0; Index < 4; Index++) {
> -    PTEntry[Index] |= IA32_PG_PMNT;
> -  }
> -
> -  //
> -  // Fill Page-Table-Level4 (PML4) entry
> -  //
> -  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
> -  ASSERT (Pml4Entry != NULL);
> -  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
> -
> -  //
> -  // Set sub-entries number
> -  //
> -  SetSubEntriesNum (Pml4Entry, 3);
> -  PTEntry = Pml4Entry;
> +  PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ?
> mPhysicalAddressBits : 32;
> +  PageTable           = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
> 
>    if (m5LevelPagingNeeded) {
> +    Pml5Entry = (UINT64 *)PageTable;
>      //
> -    // Fill PML5 entry
> -    //
> -    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
> -    ASSERT (Pml5Entry != NULL);
> -    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> -    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
> -    //
> -    // Set sub-entries number
> +    // Set Pml5Entry sub-entries number for smm PF handler usage.
>      //
>      SetSubEntriesNum (Pml5Entry, 1);
> -    PTEntry = Pml5Entry;
> +    Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
> +  } else {
> +    Pml4Entry = (UINT64 *)PageTable;
> +  }
> +
> +  //
> +  // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
> +  //
> +  PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
> +  for (Index = 0; Index < 4; Index++) {
> +    PdptEntry[Index] |= IA32_PG_PMNT;
>    }
> 
> -  if (mCpuSmmRestrictedMemoryAccess) {
> +  if (!mCpuSmmRestrictedMemoryAccess) {
>      //
> -    // When access to non-SMRAM memory is restricted, create page table
> -    // that covers all memory space.
> +    // Set Pml4Entry sub-entries number for smm PF handler usage.
>      //
> -    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
> -  } else {
> +    SetSubEntriesNum (Pml4Entry, 3);
> +
>      //
>      // Add pages to page pool
>      //
> @@ -466,7 +298,7 @@ SmmInitPageTable (
>    //
>    // Return the address of PML4/PML5 (to set CR3)
>    //
> -  return (UINT32)(UINTN)PTEntry;
> +  return (UINT32)PageTable;
>  }
> 
>  /**
> --
> 2.31.1.windows.1
> 
> 
> 
> 
> 



-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#105622): https://edk2.groups.io/g/devel/message/105622
Mute This Topic: https://groups.io/mt/98922936/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/leave/3901457/1787277/102458076/xyzzy [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
Re: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table
Posted by duntan 1 year, 3 months ago
The reason GenSmmPageTable() doesn't mark the "Guard page" in the "mSmmShadowStackSize range" is to align with the old behavior.
GenSmmPageTable() is also used to create SmmS3Cr3, and the "Guard page" in the "mSmmShadowStackSize range" is not marked as non-present in SmmS3Cr3.
In the old code logic, the "Guard page" in the "mSmmShadowStackSize range" is marked as non-present only after InitializeMpServiceData() creates the initial smm page table.

-----Original Message-----
From: Ni, Ray <ray.ni@intel.com> 
Sent: Friday, June 2, 2023 11:23 AM
To: devel@edk2.groups.io; Tan, Dun <dun.tan@intel.com>
Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table


      //
      // SMM Stack Guard Enabled
      // Append Shadow Stack after normal stack
      //   2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.
      //
      // |= Stacks
      // +--------------------------------------------------+---------------------------------------------------------------+
      // | Known Good Stack | Guard Page |    SMM Stack     | Known Good Shadow Stack | Guard Page |    SMM Shadow Stack    |
      // +--------------------------------------------------+---------------------------------------------------------------+
      // |         4K       |    4K      |PcdCpuSmmStackSize|            4K           |    4K      |PcdCpuSmmShadowStackSize|
      // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
      // |                                                                                                                  |
      // |<-------------------------------------------- Processor N ------------------------------------------------------->|
      //

GenSmmPageTable() only sets the "Guard page" in "mSmmStackSize range" as not-present.
But the "Guard page" in "mSmmShadowStackSize range" is not marked as not-present.
Why?

Thanks,
Ray

> -----Original Message-----
> From: devel@edk2.groups.io <devel@edk2.groups.io> On Behalf Of duntan
> Sent: Tuesday, May 16, 2023 5:59 PM
> To: devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Ni, Ray <ray.ni@intel.com>; 
> Kumar, Rahul R <rahul.r.kumar@intel.com>; Gerd Hoffmann 
> <kraxel@redhat.com>
> Subject: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add 
> GenSmmPageTable() to create smm page table
> 
> This commit is code refinement to current smm pagetable generation 
> code. Add a new GenSmmPageTable() API to create smm page table based 
> on the PageTableMap() API in CpuPageTableLib. Caller only needs to 
> specify the paging mode and the PhysicalAddressBits to map.
> This function can be used to create both IA32 pae paging and X64 
> 5level, 4level paging.
> 
> Signed-off-by: Dun Tan <dun.tan@intel.com>
> Cc: Eric Dong <eric.dong@intel.com>
> Cc: Ray Ni <ray.ni@intel.com>
> Cc: Rahul Kumar <rahul1.kumar@intel.com>
> Cc: Gerd Hoffmann <kraxel@redhat.com>
> ---
>  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c           |   2 +-
>  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h         |  15
> +++++++++++++++
>  UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c |  65
> +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c            | 220
> ++++++++++++++++++++++++++--------------------------------------------
> ++++++++++++++++++++++++++---------------
> ----------------------------------------------------------------------
> ----------------------------
> -------------------------------------
>  4 files changed, 107 insertions(+), 195 deletions(-)
> 
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> index 9c8107080a..b11264ce4a 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> @@ -63,7 +63,7 @@ SmmInitPageTable (
>      InitializeIDTSmmStackGuard ();
>    }
> 
> -  return Gen4GPageTable (TRUE);
> +  return GenSmmPageTable (PagingPae, mPhysicalAddressBits);
>  }
> 
>  /**
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> index a7da9673a5..5399659bc0 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> @@ -553,6 +553,21 @@ Gen4GPageTable (
>    IN      BOOLEAN  Is32BitPageTable
>    );
> 
> +/**
> +  Create page table based on input PagingMode and PhysicalAddressBits in smm.
> +
> +  @param[in]      PagingMode           The paging mode.
> +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> +
> +  @retval         PageTable Address
> +
> +**/
> +UINTN
> +GenSmmPageTable (
> +  IN PAGING_MODE  PagingMode,
> +  IN UINT8        PhysicalAddressBits
> +  );
> +
>  /**
>    Initialize global data for MP synchronization.
> 
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> index ef0ba9a355..138ff43c9d 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> @@ -1642,6 +1642,71 @@ EdkiiSmmClearMemoryAttributes (
>    return SmmClearMemoryAttributes (BaseAddress, Length, Attributes);  
> }
> 
> +/**
> +  Create page table based on input PagingMode and PhysicalAddressBits in smm.
> +
> +  @param[in]      PagingMode           The paging mode.
> +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> +
> +  @retval         PageTable Address
> +
> +**/
> +UINTN
> +GenSmmPageTable (
> +  IN PAGING_MODE  PagingMode,
> +  IN UINT8        PhysicalAddressBits
> +  )
> +{
> +  UINTN               PageTableBufferSize;
> +  UINTN               PageTable;
> +  VOID                *PageTableBuffer;
> +  IA32_MAP_ATTRIBUTE  MapAttribute;
> +  IA32_MAP_ATTRIBUTE  MapMask;
> +  RETURN_STATUS       Status;
> +  UINTN               GuardPage;
> +  UINTN               Index;
> +  UINT64              Length;
> +
> +  Length                           = LShiftU64 (1, PhysicalAddressBits);
> +  PageTable                        = 0;
> +  PageTableBufferSize              = 0;
> +  MapMask.Uint64                   = MAX_UINT64;
> +  MapAttribute.Uint64              = mAddressEncMask;
> +  MapAttribute.Bits.Present        = 1;
> +  MapAttribute.Bits.ReadWrite      = 1;
> +  MapAttribute.Bits.UserSupervisor = 1;
> +  MapAttribute.Bits.Accessed       = 1;
> +  MapAttribute.Bits.Dirty          = 1;
> +
> +  Status = PageTableMap (&PageTable, PagingMode, NULL,
> &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> +  ASSERT (Status == RETURN_BUFFER_TOO_SMALL);  DEBUG ((DEBUG_INFO, 
> + "GenSMMPageTable: 0x%x bytes needed for initial
> SMM page table\n", PageTableBufferSize));
> +  PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES
> (PageTableBufferSize));
> +  ASSERT (PageTableBuffer != NULL);
> +  Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer,
> &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> +  ASSERT (Status == RETURN_SUCCESS);
> +  ASSERT (PageTableBufferSize == 0);
> +
> +  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
> +    //
> +    // Mark the 4KB guard page between known good stack and smm stack 
> + as
> non-present
> +    //
> +    for (Index = 0; Index < gSmmCpuPrivate-
> >SmmCoreEntryContext.NumberOfCpus; Index++) {
> +      GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE + Index *
> (mSmmStackSize + mSmmShadowStackSize);
> +      Status    = ConvertMemoryPageAttributes (PageTable, PagingMode,
> GuardPage, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
> +    }
> +  }
> +
> +  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
> +    //
> +    // Mark [0, 4k] as non-present
> +    //
> +    Status = ConvertMemoryPageAttributes (PageTable, PagingMode, 0, 
> + SIZE_4KB,
> EFI_MEMORY_RP, TRUE, NULL);
> +  }
> +
> +  return (UINTN)PageTable;
> +}
> +
>  /**
>    This function retrieves the attributes of the memory region specified by
>    BaseAddress and Length. If different attributes are got from 
> different part diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> index 25ced50955..060e6dc147 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> @@ -167,160 +167,6 @@ CalculateMaximumSupportAddress (
>    return PhysicalAddressBits;
>  }
> 
> -/**
> -  Set static page table.
> -
> -  @param[in] PageTable              Address of page table.
> -  @param[in] PhysicalAddressBits    The maximum physical address bits
> supported.
> -**/
> -VOID
> -SetStaticPageTable (
> -  IN UINTN  PageTable,
> -  IN UINT8  PhysicalAddressBits
> -  )
> -{
> -  UINT64  PageAddress;
> -  UINTN   NumberOfPml5EntriesNeeded;
> -  UINTN   NumberOfPml4EntriesNeeded;
> -  UINTN   NumberOfPdpEntriesNeeded;
> -  UINTN   IndexOfPml5Entries;
> -  UINTN   IndexOfPml4Entries;
> -  UINTN   IndexOfPdpEntries;
> -  UINTN   IndexOfPageDirectoryEntries;
> -  UINT64  *PageMapLevel5Entry;
> -  UINT64  *PageMapLevel4Entry;
> -  UINT64  *PageMap;
> -  UINT64  *PageDirectoryPointerEntry;
> -  UINT64  *PageDirectory1GEntry;
> -  UINT64  *PageDirectoryEntry;
> -
> -  //
> -  // IA-32e paging translates 48-bit linear addresses to 52-bit 
> physical addresses
> -  //  when 5-Level Paging is disabled.
> -  //
> -  ASSERT (PhysicalAddressBits <= 52);
> -  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
> -    PhysicalAddressBits = 48;
> -  }
> -
> -  NumberOfPml5EntriesNeeded = 1;
> -  if (PhysicalAddressBits > 48) {
> -    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> 48);
> -    PhysicalAddressBits       = 48;
> -  }
> -
> -  NumberOfPml4EntriesNeeded = 1;
> -  if (PhysicalAddressBits > 39) {
> -    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> 39);
> -    PhysicalAddressBits       = 39;
> -  }
> -
> -  NumberOfPdpEntriesNeeded = 1;
> -  ASSERT (PhysicalAddressBits > 30);
> -  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits 
> - 30);
> -
> -  //
> -  // By architecture only one PageMapLevel4 exists - so lets allocate 
> storage for it.
> -  //
> -  PageMap = (VOID *)PageTable;
> -
> -  PageMapLevel4Entry = PageMap;
> -  PageMapLevel5Entry = NULL;
> -  if (m5LevelPagingNeeded) {
> -    //
> -    // By architecture only one PageMapLevel5 exists - so lets allocate storage for
> it.
> -    //
> -    PageMapLevel5Entry = PageMap;
> -  }
> -
> -  PageAddress = 0;
> -
> -  for ( IndexOfPml5Entries = 0
> -        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
> -        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
> -  {
> -    //
> -    // Each PML5 entry points to a page of PML4 entires.
> -    // So lets allocate space for them and fill them in in the IndexOfPml4Entries
> loop.
> -    // When 5-Level Paging is disabled, below allocation happens only once.
> -    //
> -    if (m5LevelPagingNeeded) {
> -      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) &
> ~mAddressEncMask & gPhyMask);
> -      if (PageMapLevel4Entry == NULL) {
> -        PageMapLevel4Entry = AllocatePageTableMemory (1);
> -        ASSERT (PageMapLevel4Entry != NULL);
> -        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));
> -
> -        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -      }
> -    }
> -
> -    for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512);
> IndexOfPml4Entries++, PageMapLevel4Entry++) {
> -      //
> -      // Each PML4 entry points to a page of Page Directory Pointer entries.
> -      //
> -      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) &
> ~mAddressEncMask & gPhyMask);
> -      if (PageDirectoryPointerEntry == NULL) {
> -        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
> -        ASSERT (PageDirectoryPointerEntry != NULL);
> -        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));
> -
> -        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -      }
> -
> -      if (m1GPageTableSupport) {
> -        PageDirectory1GEntry = PageDirectoryPointerEntry;
> -        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress +=
> SIZE_1GB) {
> -          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
> -            //
> -            // Skip the < 4G entries
> -            //
> -            continue;
> -          }
> -
> -          //
> -          // Fill in the Page Directory entries
> -          //
> -          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS
> | PAGE_ATTRIBUTE_BITS;
> -        }
> -      } else {
> -        PageAddress = BASE_4GB;
> -        for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512);
> IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
> -          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
> -            //
> -            // Skip the < 4G entries
> -            //
> -            continue;
> -          }
> -
> -          //
> -          // Each Directory Pointer entries points to a page of Page Directory entires.
> -          // So allocate space for them and fill them in in the
> IndexOfPageDirectoryEntries loop.
> -          //
> -          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) &
> ~mAddressEncMask & gPhyMask);
> -          if (PageDirectoryEntry == NULL) {
> -            PageDirectoryEntry = AllocatePageTableMemory (1);
> -            ASSERT (PageDirectoryEntry != NULL);
> -            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
> -
> -            //
> -            // Fill in a Page Directory Pointer Entries
> -            //
> -            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -          }
> -
> -          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress +=
> SIZE_2MB) {
> -            //
> -            // Fill in the Page Directory entries
> -            //
> -            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS |
> PAGE_ATTRIBUTE_BITS;
> -          }
> -        }
> -      }
> -    }
> -  }
> -}
> -
>  /**
>    Create PageTable for SMM use.
> 
> @@ -332,15 +178,16 @@ SmmInitPageTable (
>    VOID
>    )
>  {
> -  EFI_PHYSICAL_ADDRESS      Pages;
> -  UINT64                    *PTEntry;
> +  UINTN                     PageTable;
>    LIST_ENTRY                *FreePage;
>    UINTN                     Index;
>    UINTN                     PageFaultHandlerHookAddress;
>    IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
>    EFI_STATUS                Status;
> +  UINT64                    *PdptEntry;
>    UINT64                    *Pml4Entry;
>    UINT64                    *Pml5Entry;
> +  UINT8                     PhysicalAddressBits;
> 
>    //
>    // Initialize spin lock
> @@ -357,59 +204,44 @@ SmmInitPageTable (
>    } else {
>      mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
>    }
> +
>    DEBUG ((DEBUG_INFO, "5LevelPaging Needed             - %d\n",
> m5LevelPagingNeeded));
>    DEBUG ((DEBUG_INFO, "1GPageTable Support             - %d\n",
> m1GPageTableSupport));
>    DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", 
> mCpuSmmRestrictedMemoryAccess));
>    DEBUG ((DEBUG_INFO, "PhysicalAddressBits             - %d\n",
> mPhysicalAddressBits));
> -  //
> -  // Generate PAE page table for the first 4GB memory space
> -  //
> -  Pages = Gen4GPageTable (FALSE);
> 
>    //
> -  // Set IA32_PG_PMNT bit to mask this entry
> +  // Generate initial SMM page table.
> +  // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
>    //
> -  PTEntry = (UINT64 *)(UINTN)Pages;
> -  for (Index = 0; Index < 4; Index++) {
> -    PTEntry[Index] |= IA32_PG_PMNT;
> -  }
> -
> -  //
> -  // Fill Page-Table-Level4 (PML4) entry
> -  //
> -  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
> -  ASSERT (Pml4Entry != NULL);
> -  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
> -
> -  //
> -  // Set sub-entries number
> -  //
> -  SetSubEntriesNum (Pml4Entry, 3);
> -  PTEntry = Pml4Entry;
> +  PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ?
> mPhysicalAddressBits : 32;
> +  PageTable           = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
> 
>    if (m5LevelPagingNeeded) {
> +    Pml5Entry = (UINT64 *)PageTable;
>      //
> -    // Fill PML5 entry
> -    //
> -    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
> -    ASSERT (Pml5Entry != NULL);
> -    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> -    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
> -    //
> -    // Set sub-entries number
> +    // Set Pml5Entry sub-entries number for smm PF handler usage.
>      //
>      SetSubEntriesNum (Pml5Entry, 1);
> -    PTEntry = Pml5Entry;
> +    Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & 
> + gPhyMask);  } else {
> +    Pml4Entry = (UINT64 *)PageTable;
> +  }
> +
> +  //
> +  // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
> +  //
> +  PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);  
> + for (Index = 0; Index < 4; Index++) {
> +    PdptEntry[Index] |= IA32_PG_PMNT;
>    }
> 
> -  if (mCpuSmmRestrictedMemoryAccess) {
> +  if (!mCpuSmmRestrictedMemoryAccess) {
>      //
> -    // When access to non-SMRAM memory is restricted, create page table
> -    // that covers all memory space.
> +    // Set Pml4Entry sub-entries number for smm PF handler usage.
>      //
> -    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
> -  } else {
> +    SetSubEntriesNum (Pml4Entry, 3);
> +
>      //
>      // Add pages to page pool
>      //
> @@ -466,7 +298,7 @@ SmmInitPageTable (
>    //
>    // Return the address of PML4/PML5 (to set CR3)
>    //
> -  return (UINT32)(UINTN)PTEntry;
> +  return (UINT32)PageTable;
>  }
> 
>  /**
> --
> 2.31.1.windows.1
> 
> 
> 
> 
> 



-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#105627): https://edk2.groups.io/g/devel/message/105627
Mute This Topic: https://groups.io/mt/98922936/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
Re: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table
Posted by duntan 1 year, 3 months ago
Edited the reply to make it clearer. 

-----Original Message-----
From: Tan, Dun 
Sent: Friday, June 2, 2023 11:36 AM
To: Ni, Ray <ray.ni@intel.com>; devel@edk2.groups.io
Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table

The reason GenSmmPageTable() doesn't mark the "Guard page" in the "mSmmShadowStackSize range" is to align with the old behavior.
GenSmmPageTable() is also used to create SmmS3Cr3, and the "Guard page" in the "mSmmShadowStackSize range" is not marked as non-present in SmmS3Cr3.
In the code logic, the "Guard page" in the "mSmmShadowStackSize range" is marked as non-present after InitializeMpServiceData() creates the initial smm page table. This step is only performed for the smm runtime page table.

Thanks,
Dun
-----Original Message-----
From: Ni, Ray <ray.ni@intel.com> 
Sent: Friday, June 2, 2023 11:23 AM
To: devel@edk2.groups.io; Tan, Dun <dun.tan@intel.com>
Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table


      //
      // SMM Stack Guard Enabled
      // Append Shadow Stack after normal stack
      //   2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.
      //
      // |= Stacks
      // +--------------------------------------------------+---------------------------------------------------------------+
      // | Known Good Stack | Guard Page |    SMM Stack     | Known Good Shadow Stack | Guard Page |    SMM Shadow Stack    |
      // +--------------------------------------------------+---------------------------------------------------------------+
      // |         4K       |    4K      |PcdCpuSmmStackSize|            4K           |    4K      |PcdCpuSmmShadowStackSize|
      // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
      // |                                                                                                                  |
      // |<-------------------------------------------- Processor N ------------------------------------------------------->|
      //

GenSmmPageTable() only sets the "Guard page" in "mSmmStackSize range" as not-present.
But the "Guard page" in "mSmmShadowStackSize range" is not marked as not-present.
Why?

Thanks,
Ray

> -----Original Message-----
> From: devel@edk2.groups.io <devel@edk2.groups.io> On Behalf Of duntan
> Sent: Tuesday, May 16, 2023 5:59 PM
> To: devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Ni, Ray <ray.ni@intel.com>; 
> Kumar, Rahul R <rahul.r.kumar@intel.com>; Gerd Hoffmann 
> <kraxel@redhat.com>
> Subject: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add 
> GenSmmPageTable() to create smm page table
> 
> This commit is code refinement to current smm pagetable generation 
> code. Add a new GenSmmPageTable() API to create smm page table based 
> on the PageTableMap() API in CpuPageTableLib. Caller only needs to 
> specify the paging mode and the PhysicalAddressBits to map.
> This function can be used to create both IA32 pae paging and X64 
> 5level, 4level paging.
> 
> Signed-off-by: Dun Tan <dun.tan@intel.com>
> Cc: Eric Dong <eric.dong@intel.com>
> Cc: Ray Ni <ray.ni@intel.com>
> Cc: Rahul Kumar <rahul1.kumar@intel.com>
> Cc: Gerd Hoffmann <kraxel@redhat.com>
> ---
>  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c           |   2 +-
>  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h         |  15
> +++++++++++++++
>  UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c |  65
> +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c            | 220
> ++++++++++++++++++++++++++--------------------------------------------
> ++++++++++++++++++++++++++---------------
> ----------------------------------------------------------------------
> ----------------------------
> -------------------------------------
>  4 files changed, 107 insertions(+), 195 deletions(-)
> 
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> index 9c8107080a..b11264ce4a 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> @@ -63,7 +63,7 @@ SmmInitPageTable (
>      InitializeIDTSmmStackGuard ();
>    }
> 
> -  return Gen4GPageTable (TRUE);
> +  return GenSmmPageTable (PagingPae, mPhysicalAddressBits);
>  }
> 
>  /**
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> index a7da9673a5..5399659bc0 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> @@ -553,6 +553,21 @@ Gen4GPageTable (
>    IN      BOOLEAN  Is32BitPageTable
>    );
> 
> +/**
> +  Create page table based on input PagingMode and PhysicalAddressBits in smm.
> +
> +  @param[in]      PagingMode           The paging mode.
> +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> +
> +  @retval         PageTable Address
> +
> +**/
> +UINTN
> +GenSmmPageTable (
> +  IN PAGING_MODE  PagingMode,
> +  IN UINT8        PhysicalAddressBits
> +  );
> +
>  /**
>    Initialize global data for MP synchronization.
> 
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> index ef0ba9a355..138ff43c9d 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> @@ -1642,6 +1642,71 @@ EdkiiSmmClearMemoryAttributes (
>    return SmmClearMemoryAttributes (BaseAddress, Length, Attributes);  
> }
> 
> +/**
> +  Create page table based on input PagingMode and PhysicalAddressBits in smm.
> +
> +  @param[in]      PagingMode           The paging mode.
> +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> +
> +  @retval         PageTable Address
> +
> +**/
> +UINTN
> +GenSmmPageTable (
> +  IN PAGING_MODE  PagingMode,
> +  IN UINT8        PhysicalAddressBits
> +  )
> +{
> +  UINTN               PageTableBufferSize;
> +  UINTN               PageTable;
> +  VOID                *PageTableBuffer;
> +  IA32_MAP_ATTRIBUTE  MapAttribute;
> +  IA32_MAP_ATTRIBUTE  MapMask;
> +  RETURN_STATUS       Status;
> +  UINTN               GuardPage;
> +  UINTN               Index;
> +  UINT64              Length;
> +
> +  Length                           = LShiftU64 (1, PhysicalAddressBits);
> +  PageTable                        = 0;
> +  PageTableBufferSize              = 0;
> +  MapMask.Uint64                   = MAX_UINT64;
> +  MapAttribute.Uint64              = mAddressEncMask;
> +  MapAttribute.Bits.Present        = 1;
> +  MapAttribute.Bits.ReadWrite      = 1;
> +  MapAttribute.Bits.UserSupervisor = 1;
> +  MapAttribute.Bits.Accessed       = 1;
> +  MapAttribute.Bits.Dirty          = 1;
> +
> +  Status = PageTableMap (&PageTable, PagingMode, NULL,
> &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> +  ASSERT (Status == RETURN_BUFFER_TOO_SMALL);  DEBUG ((DEBUG_INFO, 
> + "GenSMMPageTable: 0x%x bytes needed for initial
> SMM page table\n", PageTableBufferSize));
> +  PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES
> (PageTableBufferSize));
> +  ASSERT (PageTableBuffer != NULL);
> +  Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer,
> &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> +  ASSERT (Status == RETURN_SUCCESS);
> +  ASSERT (PageTableBufferSize == 0);
> +
> +  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
> +    //
> +    // Mark the 4KB guard page between known good stack and smm stack 
> + as
> non-present
> +    //
> +    for (Index = 0; Index < gSmmCpuPrivate-
> >SmmCoreEntryContext.NumberOfCpus; Index++) {
> +      GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE + Index *
> (mSmmStackSize + mSmmShadowStackSize);
> +      Status    = ConvertMemoryPageAttributes (PageTable, PagingMode,
> GuardPage, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
> +    }
> +  }
> +
> +  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
> +    //
> +    // Mark [0, 4k] as non-present
> +    //
> +    Status = ConvertMemoryPageAttributes (PageTable, PagingMode, 0, 
> + SIZE_4KB,
> EFI_MEMORY_RP, TRUE, NULL);
> +  }
> +
> +  return (UINTN)PageTable;
> +}
> +
>  /**
>    This function retrieves the attributes of the memory region specified by
>    BaseAddress and Length. If different attributes are got from 
> different part diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> index 25ced50955..060e6dc147 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> @@ -167,160 +167,6 @@ CalculateMaximumSupportAddress (
>    return PhysicalAddressBits;
>  }
> 
> -/**
> -  Set static page table.
> -
> -  @param[in] PageTable              Address of page table.
> -  @param[in] PhysicalAddressBits    The maximum physical address bits
> supported.
> -**/
> -VOID
> -SetStaticPageTable (
> -  IN UINTN  PageTable,
> -  IN UINT8  PhysicalAddressBits
> -  )
> -{
> -  UINT64  PageAddress;
> -  UINTN   NumberOfPml5EntriesNeeded;
> -  UINTN   NumberOfPml4EntriesNeeded;
> -  UINTN   NumberOfPdpEntriesNeeded;
> -  UINTN   IndexOfPml5Entries;
> -  UINTN   IndexOfPml4Entries;
> -  UINTN   IndexOfPdpEntries;
> -  UINTN   IndexOfPageDirectoryEntries;
> -  UINT64  *PageMapLevel5Entry;
> -  UINT64  *PageMapLevel4Entry;
> -  UINT64  *PageMap;
> -  UINT64  *PageDirectoryPointerEntry;
> -  UINT64  *PageDirectory1GEntry;
> -  UINT64  *PageDirectoryEntry;
> -
> -  //
> -  // IA-32e paging translates 48-bit linear addresses to 52-bit 
> physical addresses
> -  //  when 5-Level Paging is disabled.
> -  //
> -  ASSERT (PhysicalAddressBits <= 52);
> -  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
> -    PhysicalAddressBits = 48;
> -  }
> -
> -  NumberOfPml5EntriesNeeded = 1;
> -  if (PhysicalAddressBits > 48) {
> -    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> 48);
> -    PhysicalAddressBits       = 48;
> -  }
> -
> -  NumberOfPml4EntriesNeeded = 1;
> -  if (PhysicalAddressBits > 39) {
> -    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> 39);
> -    PhysicalAddressBits       = 39;
> -  }
> -
> -  NumberOfPdpEntriesNeeded = 1;
> -  ASSERT (PhysicalAddressBits > 30);
> -  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits 
> - 30);
> -
> -  //
> -  // By architecture only one PageMapLevel4 exists - so lets allocate 
> storage for it.
> -  //
> -  PageMap = (VOID *)PageTable;
> -
> -  PageMapLevel4Entry = PageMap;
> -  PageMapLevel5Entry = NULL;
> -  if (m5LevelPagingNeeded) {
> -    //
> -    // By architecture only one PageMapLevel5 exists - so lets allocate storage for
> it.
> -    //
> -    PageMapLevel5Entry = PageMap;
> -  }
> -
> -  PageAddress = 0;
> -
> -  for ( IndexOfPml5Entries = 0
> -        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
> -        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
> -  {
> -    //
> -    // Each PML5 entry points to a page of PML4 entires.
> -    // So lets allocate space for them and fill them in in the IndexOfPml4Entries
> loop.
> -    // When 5-Level Paging is disabled, below allocation happens only once.
> -    //
> -    if (m5LevelPagingNeeded) {
> -      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) &
> ~mAddressEncMask & gPhyMask);
> -      if (PageMapLevel4Entry == NULL) {
> -        PageMapLevel4Entry = AllocatePageTableMemory (1);
> -        ASSERT (PageMapLevel4Entry != NULL);
> -        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));
> -
> -        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -      }
> -    }
> -
> -    for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512);
> IndexOfPml4Entries++, PageMapLevel4Entry++) {
> -      //
> -      // Each PML4 entry points to a page of Page Directory Pointer entries.
> -      //
> -      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) &
> ~mAddressEncMask & gPhyMask);
> -      if (PageDirectoryPointerEntry == NULL) {
> -        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
> -        ASSERT (PageDirectoryPointerEntry != NULL);
> -        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));
> -
> -        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -      }
> -
> -      if (m1GPageTableSupport) {
> -        PageDirectory1GEntry = PageDirectoryPointerEntry;
> -        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress +=
> SIZE_1GB) {
> -          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
> -            //
> -            // Skip the < 4G entries
> -            //
> -            continue;
> -          }
> -
> -          //
> -          // Fill in the Page Directory entries
> -          //
> -          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS
> | PAGE_ATTRIBUTE_BITS;
> -        }
> -      } else {
> -        PageAddress = BASE_4GB;
> -        for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512);
> IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
> -          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
> -            //
> -            // Skip the < 4G entries
> -            //
> -            continue;
> -          }
> -
> -          //
> -          // Each Directory Pointer entries points to a page of Page Directory entires.
> -          // So allocate space for them and fill them in in the
> IndexOfPageDirectoryEntries loop.
> -          //
> -          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) &
> ~mAddressEncMask & gPhyMask);
> -          if (PageDirectoryEntry == NULL) {
> -            PageDirectoryEntry = AllocatePageTableMemory (1);
> -            ASSERT (PageDirectoryEntry != NULL);
> -            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
> -
> -            //
> -            // Fill in a Page Directory Pointer Entries
> -            //
> -            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -          }
> -
> -          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress +=
> SIZE_2MB) {
> -            //
> -            // Fill in the Page Directory entries
> -            //
> -            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS |
> PAGE_ATTRIBUTE_BITS;
> -          }
> -        }
> -      }
> -    }
> -  }
> -}
> -
>  /**
>    Create PageTable for SMM use.
> 
> @@ -332,15 +178,16 @@ SmmInitPageTable (
>    VOID
>    )
>  {
> -  EFI_PHYSICAL_ADDRESS      Pages;
> -  UINT64                    *PTEntry;
> +  UINTN                     PageTable;
>    LIST_ENTRY                *FreePage;
>    UINTN                     Index;
>    UINTN                     PageFaultHandlerHookAddress;
>    IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
>    EFI_STATUS                Status;
> +  UINT64                    *PdptEntry;
>    UINT64                    *Pml4Entry;
>    UINT64                    *Pml5Entry;
> +  UINT8                     PhysicalAddressBits;
> 
>    //
>    // Initialize spin lock
> @@ -357,59 +204,44 @@ SmmInitPageTable (
>    } else {
>      mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
>    }
> +
>    DEBUG ((DEBUG_INFO, "5LevelPaging Needed             - %d\n",
> m5LevelPagingNeeded));
>    DEBUG ((DEBUG_INFO, "1GPageTable Support             - %d\n",
> m1GPageTableSupport));
>    DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", 
> mCpuSmmRestrictedMemoryAccess));
>    DEBUG ((DEBUG_INFO, "PhysicalAddressBits             - %d\n",
> mPhysicalAddressBits));
> -  //
> -  // Generate PAE page table for the first 4GB memory space
> -  //
> -  Pages = Gen4GPageTable (FALSE);
> 
>    //
> -  // Set IA32_PG_PMNT bit to mask this entry
> +  // Generate initial SMM page table.
> +  // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
>    //
> -  PTEntry = (UINT64 *)(UINTN)Pages;
> -  for (Index = 0; Index < 4; Index++) {
> -    PTEntry[Index] |= IA32_PG_PMNT;
> -  }
> -
> -  //
> -  // Fill Page-Table-Level4 (PML4) entry
> -  //
> -  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
> -  ASSERT (Pml4Entry != NULL);
> -  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
> -
> -  //
> -  // Set sub-entries number
> -  //
> -  SetSubEntriesNum (Pml4Entry, 3);
> -  PTEntry = Pml4Entry;
> +  PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ?
> mPhysicalAddressBits : 32;
> +  PageTable           = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
> 
>    if (m5LevelPagingNeeded) {
> +    Pml5Entry = (UINT64 *)PageTable;
>      //
> -    // Fill PML5 entry
> -    //
> -    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
> -    ASSERT (Pml5Entry != NULL);
> -    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> -    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
> -    //
> -    // Set sub-entries number
> +    // Set Pml5Entry sub-entries number for smm PF handler usage.
>      //
>      SetSubEntriesNum (Pml5Entry, 1);
> -    PTEntry = Pml5Entry;
> +    Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & 
> + gPhyMask);  } else {
> +    Pml4Entry = (UINT64 *)PageTable;
> +  }
> +
> +  //
> +  // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
> +  //
> +  PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);  
> + for (Index = 0; Index < 4; Index++) {
> +    PdptEntry[Index] |= IA32_PG_PMNT;
>    }
> 
> -  if (mCpuSmmRestrictedMemoryAccess) {
> +  if (!mCpuSmmRestrictedMemoryAccess) {
>      //
> -    // When access to non-SMRAM memory is restricted, create page table
> -    // that covers all memory space.
> +    // Set Pml4Entry sub-entries number for smm PF handler usage.
>      //
> -    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
> -  } else {
> +    SetSubEntriesNum (Pml4Entry, 3);
> +
>      //
>      // Add pages to page pool
>      //
> @@ -466,7 +298,7 @@ SmmInitPageTable (
>    //
>    // Return the address of PML4/PML5 (to set CR3)
>    //
> -  return (UINT32)(UINTN)PTEntry;
> +  return (UINT32)PageTable;
>  }
> 
>  /**
> --
> 2.31.1.windows.1
> 
> 
> 
> 
> 



-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#105631): https://edk2.groups.io/g/devel/message/105631
Mute This Topic: https://groups.io/mt/98922936/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
Re: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table
Posted by Ni, Ray 1 year, 3 months ago
I see.
The GuardPage in the normal stack is marked as not-present inside GenSmmPageTable().
The GuardPage in the shadow stack is marked as not-present after calling InitializeMpServiceData().

Do you think it would be clearer to group them together?

Thanks,
Ray

> -----Original Message-----
> From: Tan, Dun <dun.tan@intel.com>
> Sent: Friday, June 2, 2023 11:47 AM
> To: Ni, Ray <ray.ni@intel.com>; devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R
> <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
> Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable()
> to create smm page table
> 
> Edited the reply to make it clearer.
> 
> -----Original Message-----
> From: Tan, Dun
> Sent: Friday, June 2, 2023 11:36 AM
> To: Ni, Ray <ray.ni@intel.com>; devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R
> <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
> Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable()
> to create smm page table
> 
> GenSmmPageTable() doesn't mark the "Guard page" in "mSmmShadowStackSize
> range" is to align with old behavior.
> GenSmmPageTable() is also used to create SmmS3Cr3 and the "Guard page" in
> "mSmmShadowStackSize range" is not marked as non-present in SmmS3Cr3.
> In the code logic, the "Guard page" in "mSmmShadowStackSize range" is marked
> as not-present after InitializeMpServiceData() creates the initial smm page table.
> This process is only done for smm runtime page table.
> 
> Thanks,
> Dun
> -----Original Message-----
> From: Ni, Ray <ray.ni@intel.com>
> Sent: Friday, June 2, 2023 11:23 AM
> To: devel@edk2.groups.io; Tan, Dun <dun.tan@intel.com>
> Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R
> <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
> Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable()
> to create smm page table
> 
> 
>       //
>       // SMM Stack Guard Enabled
>       // Append Shadow Stack after normal stack
>       //   2 more pages is allocated for each processor, one is guard page and the
> other is known good shadow stack.
>       //
>       // |= Stacks
>       // +--------------------------------------------------+--------------------------------------
> -------------------------+
>       // | Known Good Stack | Guard Page |    SMM Stack     | Known Good Shadow
> Stack | Guard Page |    SMM Shadow Stack    |
>       // +--------------------------------------------------+--------------------------------------
> -------------------------+
>       // |         4K       |    4K      |PcdCpuSmmStackSize|            4K           |    4K
> |PcdCpuSmmShadowStackSize|
>       // |<---------------- mSmmStackSize ----------------->|<---------------------
> mSmmShadowStackSize ------------------->|
>       // |                                                                                                                  |
>       // |<-------------------------------------------- Processor N ----------------------------
> --------------------------->|
>       //
> 
> GenSmmPageTable() only sets the "Guard page" in "mSmmStackSize range" as
> not-present.
> But the "Guard page" in "mSmmShadowStackSize range" is not marked as not-
> present.
> Why?
> 
> Thanks,
> Ray
> 
> > -----Original Message-----
> > From: devel@edk2.groups.io <devel@edk2.groups.io> On Behalf Of duntan
> > Sent: Tuesday, May 16, 2023 5:59 PM
> > To: devel@edk2.groups.io
> > Cc: Dong, Eric <eric.dong@intel.com>; Ni, Ray <ray.ni@intel.com>;
> > Kumar, Rahul R <rahul.r.kumar@intel.com>; Gerd Hoffmann
> > <kraxel@redhat.com>
> > Subject: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add
> > GenSmmPageTable() to create smm page table
> >
> > This commit is code refinement to current smm pagetable generation
> > code. Add a new GenSmmPageTable() API to create smm page table based
> > on the PageTableMap() API in CpuPageTableLib. Caller only needs to
> > specify the paging mode and the PhysicalAddressBits to map.
> > This function can be used to create both IA32 pae paging and X64
> > 5level, 4level paging.
> >
> > Signed-off-by: Dun Tan <dun.tan@intel.com>
> > Cc: Eric Dong <eric.dong@intel.com>
> > Cc: Ray Ni <ray.ni@intel.com>
> > Cc: Rahul Kumar <rahul1.kumar@intel.com>
> > Cc: Gerd Hoffmann <kraxel@redhat.com>
> > ---
> >  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c           |   2 +-
> >  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h         |  15
> > +++++++++++++++
> >  UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c |  65
> >
> +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c            | 220
> > ++++++++++++++++++++++++++--------------------------------------------
> > ++++++++++++++++++++++++++---------------
> > ----------------------------------------------------------------------
> > ----------------------------
> > -------------------------------------
> >  4 files changed, 107 insertions(+), 195 deletions(-)
> >
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> > index 9c8107080a..b11264ce4a 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> > @@ -63,7 +63,7 @@ SmmInitPageTable (
> >      InitializeIDTSmmStackGuard ();
> >    }
> >
> > -  return Gen4GPageTable (TRUE);
> > +  return GenSmmPageTable (PagingPae, mPhysicalAddressBits);
> >  }
> >
> >  /**
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> > index a7da9673a5..5399659bc0 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> > @@ -553,6 +553,21 @@ Gen4GPageTable (
> >    IN      BOOLEAN  Is32BitPageTable
> >    );
> >
> > +/**
> > +  Create page table based on input PagingMode and PhysicalAddressBits in
> smm.
> > +
> > +  @param[in]      PagingMode           The paging mode.
> > +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> > +
> > +  @retval         PageTable Address
> > +
> > +**/
> > +UINTN
> > +GenSmmPageTable (
> > +  IN PAGING_MODE  PagingMode,
> > +  IN UINT8        PhysicalAddressBits
> > +  );
> > +
> >  /**
> >    Initialize global data for MP synchronization.
> >
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > index ef0ba9a355..138ff43c9d 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > @@ -1642,6 +1642,71 @@ EdkiiSmmClearMemoryAttributes (
> >    return SmmClearMemoryAttributes (BaseAddress, Length, Attributes);
> > }
> >
> > +/**
> > +  Create page table based on input PagingMode and PhysicalAddressBits in
> smm.
> > +
> > +  @param[in]      PagingMode           The paging mode.
> > +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> > +
> > +  @retval         PageTable Address
> > +
> > +**/
> > +UINTN
> > +GenSmmPageTable (
> > +  IN PAGING_MODE  PagingMode,
> > +  IN UINT8        PhysicalAddressBits
> > +  )
> > +{
> > +  UINTN               PageTableBufferSize;
> > +  UINTN               PageTable;
> > +  VOID                *PageTableBuffer;
> > +  IA32_MAP_ATTRIBUTE  MapAttribute;
> > +  IA32_MAP_ATTRIBUTE  MapMask;
> > +  RETURN_STATUS       Status;
> > +  UINTN               GuardPage;
> > +  UINTN               Index;
> > +  UINT64              Length;
> > +
> > +  Length                           = LShiftU64 (1, PhysicalAddressBits);
> > +  PageTable                        = 0;
> > +  PageTableBufferSize              = 0;
> > +  MapMask.Uint64                   = MAX_UINT64;
> > +  MapAttribute.Uint64              = mAddressEncMask;
> > +  MapAttribute.Bits.Present        = 1;
> > +  MapAttribute.Bits.ReadWrite      = 1;
> > +  MapAttribute.Bits.UserSupervisor = 1;
> > +  MapAttribute.Bits.Accessed       = 1;
> > +  MapAttribute.Bits.Dirty          = 1;
> > +
> > +  Status = PageTableMap (&PageTable, PagingMode, NULL,
> > &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> > +  ASSERT (Status == RETURN_BUFFER_TOO_SMALL);  DEBUG ((DEBUG_INFO,
> > + "GenSMMPageTable: 0x%x bytes needed for initial
> > SMM page table\n", PageTableBufferSize));
> > +  PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES
> > (PageTableBufferSize));
> > +  ASSERT (PageTableBuffer != NULL);
> > +  Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer,
> > &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> > +  ASSERT (Status == RETURN_SUCCESS);
> > +  ASSERT (PageTableBufferSize == 0);
> > +
> > +  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
> > +    //
> > +    // Mark the 4KB guard page between known good stack and smm stack
> > + as
> > non-present
> > +    //
> > +    for (Index = 0; Index < gSmmCpuPrivate-
> > >SmmCoreEntryContext.NumberOfCpus; Index++) {
> > +      GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE + Index *
> > (mSmmStackSize + mSmmShadowStackSize);
> > +      Status    = ConvertMemoryPageAttributes (PageTable, PagingMode,
> > GuardPage, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
> > +    }
> > +  }
> > +
> > +  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
> > +    //
> > +    // Mark [0, 4k] as non-present
> > +    //
> > +    Status = ConvertMemoryPageAttributes (PageTable, PagingMode, 0,
> > + SIZE_4KB,
> > EFI_MEMORY_RP, TRUE, NULL);
> > +  }
> > +
> > +  return (UINTN)PageTable;
> > +}
> > +
> >  /**
> >    This function retrieves the attributes of the memory region specified by
> >    BaseAddress and Length. If different attributes are got from
> > different part diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > index 25ced50955..060e6dc147 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > @@ -167,160 +167,6 @@ CalculateMaximumSupportAddress (
> >    return PhysicalAddressBits;
> >  }
> >
> > -/**
> > -  Set static page table.
> > -
> > -  @param[in] PageTable              Address of page table.
> > -  @param[in] PhysicalAddressBits    The maximum physical address bits
> > supported.
> > -**/
> > -VOID
> > -SetStaticPageTable (
> > -  IN UINTN  PageTable,
> > -  IN UINT8  PhysicalAddressBits
> > -  )
> > -{
> > -  UINT64  PageAddress;
> > -  UINTN   NumberOfPml5EntriesNeeded;
> > -  UINTN   NumberOfPml4EntriesNeeded;
> > -  UINTN   NumberOfPdpEntriesNeeded;
> > -  UINTN   IndexOfPml5Entries;
> > -  UINTN   IndexOfPml4Entries;
> > -  UINTN   IndexOfPdpEntries;
> > -  UINTN   IndexOfPageDirectoryEntries;
> > -  UINT64  *PageMapLevel5Entry;
> > -  UINT64  *PageMapLevel4Entry;
> > -  UINT64  *PageMap;
> > -  UINT64  *PageDirectoryPointerEntry;
> > -  UINT64  *PageDirectory1GEntry;
> > -  UINT64  *PageDirectoryEntry;
> > -
> > -  //
> > -  // IA-32e paging translates 48-bit linear addresses to 52-bit
> > physical addresses
> > -  //  when 5-Level Paging is disabled.
> > -  //
> > -  ASSERT (PhysicalAddressBits <= 52);
> > -  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
> > -    PhysicalAddressBits = 48;
> > -  }
> > -
> > -  NumberOfPml5EntriesNeeded = 1;
> > -  if (PhysicalAddressBits > 48) {
> > -    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> > 48);
> > -    PhysicalAddressBits       = 48;
> > -  }
> > -
> > -  NumberOfPml4EntriesNeeded = 1;
> > -  if (PhysicalAddressBits > 39) {
> > -    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> > 39);
> > -    PhysicalAddressBits       = 39;
> > -  }
> > -
> > -  NumberOfPdpEntriesNeeded = 1;
> > -  ASSERT (PhysicalAddressBits > 30);
> > -  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits
> > - 30);
> > -
> > -  //
> > -  // By architecture only one PageMapLevel4 exists - so lets allocate
> > storage for it.
> > -  //
> > -  PageMap = (VOID *)PageTable;
> > -
> > -  PageMapLevel4Entry = PageMap;
> > -  PageMapLevel5Entry = NULL;
> > -  if (m5LevelPagingNeeded) {
> > -    //
> > -    // By architecture only one PageMapLevel5 exists - so lets allocate storage
> for
> > it.
> > -    //
> > -    PageMapLevel5Entry = PageMap;
> > -  }
> > -
> > -  PageAddress = 0;
> > -
> > -  for ( IndexOfPml5Entries = 0
> > -        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
> > -        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
> > -  {
> > -    //
> > -    // Each PML5 entry points to a page of PML4 entires.
> > -    // So lets allocate space for them and fill them in in the IndexOfPml4Entries
> > loop.
> > -    // When 5-Level Paging is disabled, below allocation happens only once.
> > -    //
> > -    if (m5LevelPagingNeeded) {
> > -      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) &
> > ~mAddressEncMask & gPhyMask);
> > -      if (PageMapLevel4Entry == NULL) {
> > -        PageMapLevel4Entry = AllocatePageTableMemory (1);
> > -        ASSERT (PageMapLevel4Entry != NULL);
> > -        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));
> > -
> > -        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > -      }
> > -    }
> > -
> > -    for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> > (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512);
> > IndexOfPml4Entries++, PageMapLevel4Entry++) {
> > -      //
> > -      // Each PML4 entry points to a page of Page Directory Pointer entries.
> > -      //
> > -      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) &
> > ~mAddressEncMask & gPhyMask);
> > -      if (PageDirectoryPointerEntry == NULL) {
> > -        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
> > -        ASSERT (PageDirectoryPointerEntry != NULL);
> > -        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));
> > -
> > -        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > -      }
> > -
> > -      if (m1GPageTableSupport) {
> > -        PageDirectory1GEntry = PageDirectoryPointerEntry;
> > -        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> > IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress +=
> > SIZE_1GB) {
> > -          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
> > -            //
> > -            // Skip the < 4G entries
> > -            //
> > -            continue;
> > -          }
> > -
> > -          //
> > -          // Fill in the Page Directory entries
> > -          //
> > -          *PageDirectory1GEntry = PageAddress | mAddressEncMask |
> IA32_PG_PS
> > | PAGE_ATTRIBUTE_BITS;
> > -        }
> > -      } else {
> > -        PageAddress = BASE_4GB;
> > -        for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> > (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512);
> > IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
> > -          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
> > -            //
> > -            // Skip the < 4G entries
> > -            //
> > -            continue;
> > -          }
> > -
> > -          //
> > -          // Each Directory Pointer entries points to a page of Page Directory
> entires.
> > -          // So allocate space for them and fill them in in the
> > IndexOfPageDirectoryEntries loop.
> > -          //
> > -          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) &
> > ~mAddressEncMask & gPhyMask);
> > -          if (PageDirectoryEntry == NULL) {
> > -            PageDirectoryEntry = AllocatePageTableMemory (1);
> > -            ASSERT (PageDirectoryEntry != NULL);
> > -            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
> > -
> > -            //
> > -            // Fill in a Page Directory Pointer Entries
> > -            //
> > -            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > -          }
> > -
> > -          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries <
> 512;
> > IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress +=
> > SIZE_2MB) {
> > -            //
> > -            // Fill in the Page Directory entries
> > -            //
> > -            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS
> |
> > PAGE_ATTRIBUTE_BITS;
> > -          }
> > -        }
> > -      }
> > -    }
> > -  }
> > -}
> > -
> >  /**
> >    Create PageTable for SMM use.
> >
> > @@ -332,15 +178,16 @@ SmmInitPageTable (
> >    VOID
> >    )
> >  {
> > -  EFI_PHYSICAL_ADDRESS      Pages;
> > -  UINT64                    *PTEntry;
> > +  UINTN                     PageTable;
> >    LIST_ENTRY                *FreePage;
> >    UINTN                     Index;
> >    UINTN                     PageFaultHandlerHookAddress;
> >    IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
> >    EFI_STATUS                Status;
> > +  UINT64                    *PdptEntry;
> >    UINT64                    *Pml4Entry;
> >    UINT64                    *Pml5Entry;
> > +  UINT8                     PhysicalAddressBits;
> >
> >    //
> >    // Initialize spin lock
> > @@ -357,59 +204,44 @@ SmmInitPageTable (
> >    } else {
> >      mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
> >    }
> > +
> >    DEBUG ((DEBUG_INFO, "5LevelPaging Needed             - %d\n",
> > m5LevelPagingNeeded));
> >    DEBUG ((DEBUG_INFO, "1GPageTable Support             - %d\n",
> > m1GPageTableSupport));
> >    DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n",
> > mCpuSmmRestrictedMemoryAccess));
> >    DEBUG ((DEBUG_INFO, "PhysicalAddressBits             - %d\n",
> > mPhysicalAddressBits));
> > -  //
> > -  // Generate PAE page table for the first 4GB memory space
> > -  //
> > -  Pages = Gen4GPageTable (FALSE);
> >
> >    //
> > -  // Set IA32_PG_PMNT bit to mask this entry
> > +  // Generate initial SMM page table.
> > +  // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
> >    //
> > -  PTEntry = (UINT64 *)(UINTN)Pages;
> > -  for (Index = 0; Index < 4; Index++) {
> > -    PTEntry[Index] |= IA32_PG_PMNT;
> > -  }
> > -
> > -  //
> > -  // Fill Page-Table-Level4 (PML4) entry
> > -  //
> > -  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
> > -  ASSERT (Pml4Entry != NULL);
> > -  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > -  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
> > -
> > -  //
> > -  // Set sub-entries number
> > -  //
> > -  SetSubEntriesNum (Pml4Entry, 3);
> > -  PTEntry = Pml4Entry;
> > +  PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ?
> > mPhysicalAddressBits : 32;
> > +  PageTable           = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
> >
> >    if (m5LevelPagingNeeded) {
> > +    Pml5Entry = (UINT64 *)PageTable;
> >      //
> > -    // Fill PML5 entry
> > -    //
> > -    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
> > -    ASSERT (Pml5Entry != NULL);
> > -    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > -    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
> > -    //
> > -    // Set sub-entries number
> > +    // Set Pml5Entry sub-entries number for smm PF handler usage.
> >      //
> >      SetSubEntriesNum (Pml5Entry, 1);
> > -    PTEntry = Pml5Entry;
> > +    Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask &
> > + gPhyMask);  } else {
> > +    Pml4Entry = (UINT64 *)PageTable;
> > +  }
> > +
> > +  //
> > +  // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
> > +  //
> > +  PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
> > + for (Index = 0; Index < 4; Index++) {
> > +    PdptEntry[Index] |= IA32_PG_PMNT;
> >    }
> >
> > -  if (mCpuSmmRestrictedMemoryAccess) {
> > +  if (!mCpuSmmRestrictedMemoryAccess) {
> >      //
> > -    // When access to non-SMRAM memory is restricted, create page table
> > -    // that covers all memory space.
> > +    // Set Pml4Entry sub-entries number for smm PF handler usage.
> >      //
> > -    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
> > -  } else {
> > +    SetSubEntriesNum (Pml4Entry, 3);
> > +
> >      //
> >      // Add pages to page pool
> >      //
> > @@ -466,7 +298,7 @@ SmmInitPageTable (
> >    //
> >    // Return the address of PML4/PML5 (to set CR3)
> >    //
> > -  return (UINT32)(UINTN)PTEntry;
> > +  return (UINT32)PageTable;
> >  }
> >
> >  /**
> > --
> > 2.31.1.windows.1
> >
> >
> >
> > 
> >



-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#105636): https://edk2.groups.io/g/devel/message/105636
Mute This Topic: https://groups.io/mt/98922936/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/leave/3901457/1787277/102458076/xyzzy [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
Re: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table
Posted by duntan 1 year, 3 months ago
In the original code logic, the SMM S3 page table marks the GuardPage in the SMM normal stack as not-present, instead of the GuardPage in the SMM S3 stack.
A Bugzilla has been submitted to track this issue: https://bugzilla.tianocore.org/show_bug.cgi?id=4476 . The issue will be fixed in future patches.
So for now, the code remains in the state where the GuardPage in the normal stack and the GuardPage in the shadow stack are protected in different places. 

Thanks,
Dun

-----Original Message-----
From: Ni, Ray <ray.ni@intel.com> 
Sent: Friday, June 2, 2023 1:09 PM
To: Tan, Dun <dun.tan@intel.com>; devel@edk2.groups.io
Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add GenSmmPageTable() to create smm page table

I see.
The GuardPage in normal stack is marked as not-present inside GenSmmPageTable.
The GuardPage in shadow stack is marked as not-present after calling InitializeMpServiceData().

Do you think it would be clearer to group them together?

Thanks,
Ray

> -----Original Message-----
> From: Tan, Dun <dun.tan@intel.com>
> Sent: Friday, June 2, 2023 11:47 AM
> To: Ni, Ray <ray.ni@intel.com>; devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R 
> <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
> Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add 
> GenSmmPageTable() to create smm page table
> 
> Edited the reply to make it clearer.
> 
> -----Original Message-----
> From: Tan, Dun
> Sent: Friday, June 2, 2023 11:36 AM
> To: Ni, Ray <ray.ni@intel.com>; devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R 
> <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
> Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add 
> GenSmmPageTable() to create smm page table
> 
> GenSmmPageTable() doesn't mark the "Guard page" in 
> "mSmmShadowStackSize range" is to align with old behavior.
> GenSmmPageTable() is also used to create SmmS3Cr3 and the "Guard page" 
> in "mSmmShadowStackSize range" is not marked as non-present in SmmS3Cr3.
> In the code logic, the "Guard page" in "mSmmShadowStackSize range" is 
> marked as not-present after InitializeMpServiceData() creates the initial smm page table.
> This process is only done for smm runtime page table.
> 
> Thanks,
> Dun
> -----Original Message-----
> From: Ni, Ray <ray.ni@intel.com>
> Sent: Friday, June 2, 2023 11:23 AM
> To: devel@edk2.groups.io; Tan, Dun <dun.tan@intel.com>
> Cc: Dong, Eric <eric.dong@intel.com>; Kumar, Rahul R 
> <rahul.r.kumar@intel.com>; Gerd Hoffmann <kraxel@redhat.com>
> Subject: RE: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add 
> GenSmmPageTable() to create smm page table
> 
> 
>       //
>       // SMM Stack Guard Enabled
>       // Append Shadow Stack after normal stack
>       //   2 more pages is allocated for each processor, one is guard page and the
> other is known good shadow stack.
>       //
>       // |= Stacks
>       // +--------------------------------------------------+---------------------------------------------------------------+
>       // | Known Good Stack | Guard Page |    SMM Stack     | Known Good Shadow Stack | Guard Page |    SMM Shadow Stack    |
>       // +--------------------------------------------------+---------------------------------------------------------------+
>       // |         4K       |    4K      |PcdCpuSmmStackSize|            4K           |     4K     |PcdCpuSmmShadowStackSize|
>       // |<---------------- mSmmStackSize ----------------->|<-------------------- mSmmShadowStackSize -------------------->|
>       // |                                                                                                                  |
>       // |<--------------------------------------------- Processor N --------------------------------------------------->|
>       //
> 
> GenSmmPageTable() only sets the "Guard page" in "mSmmStackSize range" 
> as not-present.
> But the "Guard page" in "mSmmShadowStackSize range" is not marked as
> not-present.
> Why?
> 
> Thanks,
> Ray
> 
> > -----Original Message-----
> > From: devel@edk2.groups.io <devel@edk2.groups.io> On Behalf Of 
> > duntan
> > Sent: Tuesday, May 16, 2023 5:59 PM
> > To: devel@edk2.groups.io
> > Cc: Dong, Eric <eric.dong@intel.com>; Ni, Ray <ray.ni@intel.com>; 
> > Kumar, Rahul R <rahul.r.kumar@intel.com>; Gerd Hoffmann 
> > <kraxel@redhat.com>
> > Subject: [edk2-devel] [Patch V4 10/15] UefiCpuPkg: Add
> > GenSmmPageTable() to create smm page table
> >
> > This commit is code refinement to current smm pagetable generation 
> > code. Add a new GenSmmPageTable() API to create smm page table based 
> > on the PageTableMap() API in CpuPageTableLib. Caller only needs to 
> > specify the paging mode and the PhysicalAddressBits to map.
> > This function can be used to create both IA32 pae paging and X64 
> > 5level, 4level paging.
> >
> > Signed-off-by: Dun Tan <dun.tan@intel.com>
> > Cc: Eric Dong <eric.dong@intel.com>
> > Cc: Ray Ni <ray.ni@intel.com>
> > Cc: Rahul Kumar <rahul1.kumar@intel.com>
> > Cc: Gerd Hoffmann <kraxel@redhat.com>
> > ---
> >  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c           |   2 +-
> >  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h         |  15
> > +++++++++++++++
> >  UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c |  65
> >
> +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c            | 220
> > ++++++++++++++++++++++++++------------------------------------------
> > ++++++++++++++++++++++++++--
> > ++++++++++++++++++++++++++---------------
> > --------------------------------------------------------------------
> > --
> > ----------------------------
> > -------------------------------------
> >  4 files changed, 107 insertions(+), 195 deletions(-)
> >
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> > index 9c8107080a..b11264ce4a 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
> > @@ -63,7 +63,7 @@ SmmInitPageTable (
> >      InitializeIDTSmmStackGuard ();
> >    }
> >
> > -  return Gen4GPageTable (TRUE);
> > +  return GenSmmPageTable (PagingPae, mPhysicalAddressBits);
> >  }
> >
> >  /**
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> > index a7da9673a5..5399659bc0 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> > @@ -553,6 +553,21 @@ Gen4GPageTable (
> >    IN      BOOLEAN  Is32BitPageTable
> >    );
> >
> > +/**
> > +  Create page table based on input PagingMode and 
> > +PhysicalAddressBits in
> smm.
> > +
> > +  @param[in]      PagingMode           The paging mode.
> > +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> > +
> > +  @retval         PageTable Address
> > +
> > +**/
> > +UINTN
> > +GenSmmPageTable (
> > +  IN PAGING_MODE  PagingMode,
> > +  IN UINT8        PhysicalAddressBits
> > +  );
> > +
> >  /**
> >    Initialize global data for MP synchronization.
> >
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > index ef0ba9a355..138ff43c9d 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > @@ -1642,6 +1642,71 @@ EdkiiSmmClearMemoryAttributes (
> >    return SmmClearMemoryAttributes (BaseAddress, Length, 
> > Attributes); }
> >
> > +/**
> > +  Create page table based on input PagingMode and 
> > +PhysicalAddressBits in
> smm.
> > +
> > +  @param[in]      PagingMode           The paging mode.
> > +  @param[in]      PhysicalAddressBits  The bits of physical address to map.
> > +
> > +  @retval         PageTable Address
> > +
> > +**/
> > +UINTN
> > +GenSmmPageTable (
> > +  IN PAGING_MODE  PagingMode,
> > +  IN UINT8        PhysicalAddressBits
> > +  )
> > +{
> > +  UINTN               PageTableBufferSize;
> > +  UINTN               PageTable;
> > +  VOID                *PageTableBuffer;
> > +  IA32_MAP_ATTRIBUTE  MapAttribute;
> > +  IA32_MAP_ATTRIBUTE  MapMask;
> > +  RETURN_STATUS       Status;
> > +  UINTN               GuardPage;
> > +  UINTN               Index;
> > +  UINT64              Length;
> > +
> > +  Length                           = LShiftU64 (1, PhysicalAddressBits);
> > +  PageTable                        = 0;
> > +  PageTableBufferSize              = 0;
> > +  MapMask.Uint64                   = MAX_UINT64;
> > +  MapAttribute.Uint64              = mAddressEncMask;
> > +  MapAttribute.Bits.Present        = 1;
> > +  MapAttribute.Bits.ReadWrite      = 1;
> > +  MapAttribute.Bits.UserSupervisor = 1;
> > +  MapAttribute.Bits.Accessed       = 1;
> > +  MapAttribute.Bits.Dirty          = 1;
> > +
> > +  Status = PageTableMap (&PageTable, PagingMode, NULL,
> > &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> > +  ASSERT (Status == RETURN_BUFFER_TOO_SMALL);  DEBUG ((DEBUG_INFO,
> > + "GenSMMPageTable: 0x%x bytes needed for initial
> > SMM page table\n", PageTableBufferSize));
> > +  PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES
> > (PageTableBufferSize));
> > +  ASSERT (PageTableBuffer != NULL);  Status = PageTableMap 
> > + (&PageTable, PagingMode, PageTableBuffer,
> > &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
> > +  ASSERT (Status == RETURN_SUCCESS);  ASSERT (PageTableBufferSize 
> > + == 0);
> > +
> > +  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
> > +    //
> > +    // Mark the 4KB guard page between known good stack and smm 
> > + stack as
> > non-present
> > +    //
> > +    for (Index = 0; Index < gSmmCpuPrivate-
> > >SmmCoreEntryContext.NumberOfCpus; Index++) {
> > +      GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE + Index *
> > (mSmmStackSize + mSmmShadowStackSize);
> > +      Status    = ConvertMemoryPageAttributes (PageTable, PagingMode,
> > GuardPage, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
> > +    }
> > +  }
> > +
> > +  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
> > +    //
> > +    // Mark [0, 4k] as non-present
> > +    //
> > +    Status = ConvertMemoryPageAttributes (PageTable, PagingMode, 0, 
> > + SIZE_4KB,
> > EFI_MEMORY_RP, TRUE, NULL);
> > +  }
> > +
> > +  return (UINTN)PageTable;
> > +}
> > +
> >  /**
> >    This function retrieves the attributes of the memory region specified by
> >    BaseAddress and Length. If different attributes are got from 
> > different part diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > index 25ced50955..060e6dc147 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > @@ -167,160 +167,6 @@ CalculateMaximumSupportAddress (
> >    return PhysicalAddressBits;
> >  }
> >
> > -/**
> > -  Set static page table.
> > -
> > -  @param[in] PageTable              Address of page table.
> > -  @param[in] PhysicalAddressBits    The maximum physical address bits
> > supported.
> > -**/
> > -VOID
> > -SetStaticPageTable (
> > -  IN UINTN  PageTable,
> > -  IN UINT8  PhysicalAddressBits
> > -  )
> > -{
> > -  UINT64  PageAddress;
> > -  UINTN   NumberOfPml5EntriesNeeded;
> > -  UINTN   NumberOfPml4EntriesNeeded;
> > -  UINTN   NumberOfPdpEntriesNeeded;
> > -  UINTN   IndexOfPml5Entries;
> > -  UINTN   IndexOfPml4Entries;
> > -  UINTN   IndexOfPdpEntries;
> > -  UINTN   IndexOfPageDirectoryEntries;
> > -  UINT64  *PageMapLevel5Entry;
> > -  UINT64  *PageMapLevel4Entry;
> > -  UINT64  *PageMap;
> > -  UINT64  *PageDirectoryPointerEntry;
> > -  UINT64  *PageDirectory1GEntry;
> > -  UINT64  *PageDirectoryEntry;
> > -
> > -  //
> > -  // IA-32e paging translates 48-bit linear addresses to 52-bit 
> > physical addresses
> > -  //  when 5-Level Paging is disabled.
> > -  //
> > -  ASSERT (PhysicalAddressBits <= 52);
> > -  if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
> > -    PhysicalAddressBits = 48;
> > -  }
> > -
> > -  NumberOfPml5EntriesNeeded = 1;
> > -  if (PhysicalAddressBits > 48) {
> > -    NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> > 48);
> > -    PhysicalAddressBits       = 48;
> > -  }
> > -
> > -  NumberOfPml4EntriesNeeded = 1;
> > -  if (PhysicalAddressBits > 39) {
> > -    NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits -
> > 39);
> > -    PhysicalAddressBits       = 39;
> > -  }
> > -
> > -  NumberOfPdpEntriesNeeded = 1;
> > -  ASSERT (PhysicalAddressBits > 30);
> > -  NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, 
> > PhysicalAddressBits
> > - 30);
> > -
> > -  //
> > -  // By architecture only one PageMapLevel4 exists - so lets 
> > allocate storage for it.
> > -  //
> > -  PageMap = (VOID *)PageTable;
> > -
> > -  PageMapLevel4Entry = PageMap;
> > -  PageMapLevel5Entry = NULL;
> > -  if (m5LevelPagingNeeded) {
> > -    //
> > -    // By architecture only one PageMapLevel5 exists - so lets allocate storage
> for
> > it.
> > -    //
> > -    PageMapLevel5Entry = PageMap;
> > -  }
> > -
> > -  PageAddress = 0;
> > -
> > -  for ( IndexOfPml5Entries = 0
> > -        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
> > -        ; IndexOfPml5Entries++, PageMapLevel5Entry++)
> > -  {
> > -    //
> > -    // Each PML5 entry points to a page of PML4 entires.
> > -    // So lets allocate space for them and fill them in in the IndexOfPml4Entries
> > loop.
> > -    // When 5-Level Paging is disabled, below allocation happens only once.
> > -    //
> > -    if (m5LevelPagingNeeded) {
> > -      PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) &
> > ~mAddressEncMask & gPhyMask);
> > -      if (PageMapLevel4Entry == NULL) {
> > -        PageMapLevel4Entry = AllocatePageTableMemory (1);
> > -        ASSERT (PageMapLevel4Entry != NULL);
> > -        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));
> > -
> > -        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > -      }
> > -    }
> > -
> > -    for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> > (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512);
> > IndexOfPml4Entries++, PageMapLevel4Entry++) {
> > -      //
> > -      // Each PML4 entry points to a page of Page Directory Pointer entries.
> > -      //
> > -      PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) &
> > ~mAddressEncMask & gPhyMask);
> > -      if (PageDirectoryPointerEntry == NULL) {
> > -        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
> > -        ASSERT (PageDirectoryPointerEntry != NULL);
> > -        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));
> > -
> > -        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > -      }
> > -
> > -      if (m1GPageTableSupport) {
> > -        PageDirectory1GEntry = PageDirectoryPointerEntry;
> > -        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> > IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress +=
> > SIZE_1GB) {
> > -          if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
> > -            //
> > -            // Skip the < 4G entries
> > -            //
> > -            continue;
> > -          }
> > -
> > -          //
> > -          // Fill in the Page Directory entries
> > -          //
> > -          *PageDirectory1GEntry = PageAddress | mAddressEncMask |
> IA32_PG_PS
> > | PAGE_ATTRIBUTE_BITS;
> > -        }
> > -      } else {
> > -        PageAddress = BASE_4GB;
> > -        for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> > (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512);
> > IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
> > -          if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
> > -            //
> > -            // Skip the < 4G entries
> > -            //
> > -            continue;
> > -          }
> > -
> > -          //
> > -          // Each Directory Pointer entries points to a page of Page Directory
> entires.
> > -          // So allocate space for them and fill them in in the
> > IndexOfPageDirectoryEntries loop.
> > -          //
> > -          PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) &
> > ~mAddressEncMask & gPhyMask);
> > -          if (PageDirectoryEntry == NULL) {
> > -            PageDirectoryEntry = AllocatePageTableMemory (1);
> > -            ASSERT (PageDirectoryEntry != NULL);
> > -            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
> > -
> > -            //
> > -            // Fill in a Page Directory Pointer Entries
> > -            //
> > -            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > -          }
> > -
> > -          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries <
> 512;
> > IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress +=
> > SIZE_2MB) {
> > -            //
> > -            // Fill in the Page Directory entries
> > -            //
> > -            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS
> |
> > PAGE_ATTRIBUTE_BITS;
> > -          }
> > -        }
> > -      }
> > -    }
> > -  }
> > -}
> > -
> >  /**
> >    Create PageTable for SMM use.
> >
> > @@ -332,15 +178,16 @@ SmmInitPageTable (
> >    VOID
> >    )
> >  {
> > -  EFI_PHYSICAL_ADDRESS      Pages;
> > -  UINT64                    *PTEntry;
> > +  UINTN                     PageTable;
> >    LIST_ENTRY                *FreePage;
> >    UINTN                     Index;
> >    UINTN                     PageFaultHandlerHookAddress;
> >    IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
> >    EFI_STATUS                Status;
> > +  UINT64                    *PdptEntry;
> >    UINT64                    *Pml4Entry;
> >    UINT64                    *Pml5Entry;
> > +  UINT8                     PhysicalAddressBits;
> >
> >    //
> >    // Initialize spin lock
> > @@ -357,59 +204,44 @@ SmmInitPageTable (
> >    } else {
> >      mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
> >    }
> > +
> >    DEBUG ((DEBUG_INFO, "5LevelPaging Needed             - %d\n",
> > m5LevelPagingNeeded));
> >    DEBUG ((DEBUG_INFO, "1GPageTable Support             - %d\n",
> > m1GPageTableSupport));
> >    DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", 
> > mCpuSmmRestrictedMemoryAccess));
> >    DEBUG ((DEBUG_INFO, "PhysicalAddressBits             - %d\n",
> > mPhysicalAddressBits));
> > -  //
> > -  // Generate PAE page table for the first 4GB memory space
> > -  //
> > -  Pages = Gen4GPageTable (FALSE);
> >
> >    //
> > -  // Set IA32_PG_PMNT bit to mask this entry
> > +  // Generate initial SMM page table.
> > +  // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
> >    //
> > -  PTEntry = (UINT64 *)(UINTN)Pages;
> > -  for (Index = 0; Index < 4; Index++) {
> > -    PTEntry[Index] |= IA32_PG_PMNT;
> > -  }
> > -
> > -  //
> > -  // Fill Page-Table-Level4 (PML4) entry
> > -  //
> > -  Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
> > -  ASSERT (Pml4Entry != NULL);
> > -  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > -  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
> > -
> > -  //
> > -  // Set sub-entries number
> > -  //
> > -  SetSubEntriesNum (Pml4Entry, 3);
> > -  PTEntry = Pml4Entry;
> > +  PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ?
> > mPhysicalAddressBits : 32;
> > +  PageTable           = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
> >
> >    if (m5LevelPagingNeeded) {
> > +    Pml5Entry = (UINT64 *)PageTable;
> >      //
> > -    // Fill PML5 entry
> > -    //
> > -    Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
> > -    ASSERT (Pml5Entry != NULL);
> > -    *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > -    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
> > -    //
> > -    // Set sub-entries number
> > +    // Set Pml5Entry sub-entries number for smm PF handler usage.
> >      //
> >      SetSubEntriesNum (Pml5Entry, 1);
> > -    PTEntry = Pml5Entry;
> > +    Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & 
> > + gPhyMask);  } else {
> > +    Pml4Entry = (UINT64 *)PageTable;  }
> > +
> > +  //
> > +  // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
> > +  //
> > +  PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & 
> > + gPhyMask); for (Index = 0; Index < 4; Index++) {
> > +    PdptEntry[Index] |= IA32_PG_PMNT;
> >    }
> >
> > -  if (mCpuSmmRestrictedMemoryAccess) {
> > +  if (!mCpuSmmRestrictedMemoryAccess) {
> >      //
> > -    // When access to non-SMRAM memory is restricted, create page table
> > -    // that covers all memory space.
> > +    // Set Pml4Entry sub-entries number for smm PF handler usage.
> >      //
> > -    SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
> > -  } else {
> > +    SetSubEntriesNum (Pml4Entry, 3);
> > +
> >      //
> >      // Add pages to page pool
> >      //
> > @@ -466,7 +298,7 @@ SmmInitPageTable (
> >    //
> >    // Return the address of PML4/PML5 (to set CR3)
> >    //
> > -  return (UINT32)(UINTN)PTEntry;
> > +  return (UINT32)PageTable;
> >  }
> >
> >  /**
> > --
> > 2.31.1.windows.1
> >
> >
> >
> > 
> >



-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#105639): https://edk2.groups.io/g/devel/message/105639
Mute This Topic: https://groups.io/mt/98922936/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-