[edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu: Enable 5 level paging when CPU supports

Posted by Ni, Ray 6 years, 7 months ago
REF:https://bugzilla.tianocore.org/show_bug.cgi?id=1946

The patch changes the SMM environment to use 5-level paging when the
CPU supports it.
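
For reference, support is enumerated by CPUID.(EAX=07H,ECX=0):ECX[16], and at
run time the C code keys off CR4.LA57 (bit 12), which the SMI entry stub sets
only when that CPUID bit is reported. The sketch below summarizes that
detect/check pair; the function names are illustrative (the real logic lives in
Is5LevelPagingSupport() in X64/PageTbl.c and in the Cr4.Bits.LA57 checks added
throughout), and the include paths follow the current MdePkg layout, so they
may differ on older trees.

  #include <Library/BaseLib.h>        // AsmCpuidEx(), AsmReadCr4(), IA32_CR4
  #include <Register/Intel/Cpuid.h>   // CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_*

  //
  // TRUE when CPUID.(EAX=07H,ECX=0):ECX[16] reports 5-level paging support.
  //
  BOOLEAN
  CpuSupports5LevelPaging (
    VOID
    )
  {
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  EcxFlags;

    AsmCpuidEx (
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
      CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
      NULL,
      NULL,
      &EcxFlags.Uint32,
      NULL
      );
    return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
  }

  //
  // TRUE when 5-level paging is actually active, i.e. CR4.LA57 (bit 12) is set.
  //
  BOOLEAN
  Is5LevelPagingActive (
    VOID
    )
  {
    IA32_CR4  Cr4;

    Cr4.UintN = AsmReadCr4 ();
    return (BOOLEAN) (Cr4.Bits.LA57 == 1);
  }

The split matters because the paging mode is actually switched in the SMI entry
stub (SmiEntry.nasm below patches the CR4 image), so the C code in this patch
checks CR4 rather than re-issuing CPUID.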

Signed-off-by: Ray Ni <ray.ni@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
---
 .../PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c   |  20 +-
 UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c        | 272 ++++++----
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c       | 485 ++++++++++++------
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm   |  12 +
 .../PiSmmCpuDxeSmm/X64/SmmProfileArch.c       |  72 ++-
 5 files changed, 561 insertions(+), 300 deletions(-)
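
A note on the index arithmetic that recurs in the hunks below: with CR4.LA57
set, linear addresses are translated through five table levels, and the new top
level (PML5) is selected by address bits 56:48; hence the
BitFieldRead64 (PFAddress, 48, 56) calls and the 48-bit shifts masked with
PAGING_PAE_INDEX_MASK. The helper below is purely illustrative (it is not part
of the patch) and spells out the full decomposition with the same BaseLib
BitFieldRead64() primitive:

  //
  // Illustrative helper only: split a linear address into the table indexes
  // used by 5-level paging.  Each index selects one of 512 8-byte entries.
  //
  typedef struct {
    UINTN  Pml5Index;    // bits 56:48 - PML5 (new with LA57)
    UINTN  Pml4Index;    // bits 47:39 - PML4
    UINTN  PdptIndex;    // bits 38:30 - page-directory-pointer table
    UINTN  PdIndex;      // bits 29:21 - page directory
    UINTN  PtIndex;      // bits 20:12 - page table
    UINTN  PageOffset;   // bits 11:0  - offset into the 4KB page
  } LINEAR_ADDRESS_INDEXES;

  VOID
  DecomposeLinearAddress (
    IN  UINT64                  Address,
    OUT LINEAR_ADDRESS_INDEXES  *Indexes
    )
  {
    Indexes->Pml5Index  = (UINTN) BitFieldRead64 (Address, 48, 56);
    Indexes->Pml4Index  = (UINTN) BitFieldRead64 (Address, 39, 47);
    Indexes->PdptIndex  = (UINTN) BitFieldRead64 (Address, 30, 38);
    Indexes->PdIndex    = (UINTN) BitFieldRead64 (Address, 21, 29);
    Indexes->PtIndex    = (UINTN) BitFieldRead64 (Address, 12, 20);
    Indexes->PageOffset = (UINTN) BitFieldRead64 (Address, 0, 11);
  }

Running the same arithmetic in reverse is how InitPaging() below rebuilds the
2MB-aligned Address from its nested Pml5Index/Pml4Index/PdptIndex/PdIndex loop
counters.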

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
index 069be3aaa5..55090e9c3e 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
@@ -125,18 +125,36 @@ GetPageTableEntry (
   UINTN                 Index2;
   UINTN                 Index3;
   UINTN                 Index4;
+  UINTN                 Index5;
   UINT64                *L1PageTable;
   UINT64                *L2PageTable;
   UINT64                *L3PageTable;
   UINT64                *L4PageTable;
+  UINT64                *L5PageTable;
+  IA32_CR4              Cr4;
+  BOOLEAN               Enable5LevelPaging;
 
+  Index5 = ((UINTN)RShiftU64 (Address, 48)) & PAGING_PAE_INDEX_MASK;
   Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_PAE_INDEX_MASK;
   Index3 = ((UINTN)Address >> 30) & PAGING_PAE_INDEX_MASK;
   Index2 = ((UINTN)Address >> 21) & PAGING_PAE_INDEX_MASK;
   Index1 = ((UINTN)Address >> 12) & PAGING_PAE_INDEX_MASK;
 
+  Cr4.UintN = AsmReadCr4 ();
+  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+
   if (sizeof(UINTN) == sizeof(UINT64)) {
-    L4PageTable = (UINT64 *)GetPageTableBase ();
+    if (Enable5LevelPaging) {
+      L5PageTable = (UINT64 *)GetPageTableBase ();
+      if (L5PageTable[Index5] == 0) {
+        *PageAttribute = PageNone;
+        return NULL;
+      }
+
+      L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+    } else {
+      L4PageTable = (UINT64 *)GetPageTableBase ();
+    }
     if (L4PageTable[Index4] == 0) {
       *PageAttribute = PageNone;
       return NULL;
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
index e2b6a2d9b2..c5131526f0 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
@@ -534,43 +534,78 @@ InitPaging (
   VOID
   )
 {
+  UINT64                            Pml5Entry;
+  UINT64                            Pml4Entry;
+  UINT64                            *Pml5;
   UINT64                            *Pml4;
   UINT64                            *Pdpt;
   UINT64                            *Pd;
   UINT64                            *Pt;
   UINTN                             Address;
+  UINTN                             Pml5Index;
   UINTN                             Pml4Index;
   UINTN                             PdptIndex;
   UINTN                             PdIndex;
   UINTN                             PtIndex;
   UINTN                             NumberOfPdptEntries;
   UINTN                             NumberOfPml4Entries;
+  UINTN                             NumberOfPml5Entries;
   UINTN                             SizeOfMemorySpace;
   BOOLEAN                           Nx;
+  IA32_CR4                          Cr4;
+  BOOLEAN                           Enable5LevelPaging;
+
+  Cr4.UintN = AsmReadCr4 ();
+  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
 
   if (sizeof (UINTN) == sizeof (UINT64)) {
-    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
+    if (!Enable5LevelPaging) {
+      Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
+      Pml5 = &Pml5Entry;
+    } else {
+      Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
+    }
     SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
     //
     // Calculate the table entries of PML4E and PDPTE.
     //
-    if (SizeOfMemorySpace <= 39 ) {
-      NumberOfPml4Entries = 1;
-      NumberOfPdptEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));
-    } else {
-      NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));
-      NumberOfPdptEntries = 512;
+    NumberOfPml5Entries = 1;
+    if (SizeOfMemorySpace > 48) {
+      NumberOfPml5Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 48);
+      SizeOfMemorySpace = 48;
     }
-  } else {
+
     NumberOfPml4Entries = 1;
+    if (SizeOfMemorySpace > 39) {
+      NumberOfPml4Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 39);
+      SizeOfMemorySpace = 39;
+    }
+
+    NumberOfPdptEntries = 1;
+    ASSERT (SizeOfMemorySpace > 30);
+    NumberOfPdptEntries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 30);
+  } else {
+    Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
+    Pml4 = &Pml4Entry;
+    Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
+    Pml5 = &Pml5Entry;
+    NumberOfPml5Entries  = 1;
+    NumberOfPml4Entries  = 1;
     NumberOfPdptEntries  = 4;
   }
 
   //
   // Go through page table and change 2MB-page into 4KB-page.
   //
-  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
-    if (sizeof (UINTN) == sizeof (UINT64)) {
+  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
+    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
+      //
+      // If PML5 entry does not exist, skip it
+      //
+      continue;
+    }
+    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
+    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
       if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
         //
         // If PML4 entry does not exist, skip it
@@ -578,63 +613,76 @@ InitPaging (
         continue;
       }
       Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-    } else {
-      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
-    }
-    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
-      if ((*Pdpt & IA32_PG_P) == 0) {
-        //
-        // If PDPT entry does not exist, skip it
-        //
-        continue;
-      }
-      if ((*Pdpt & IA32_PG_PS) != 0) {
-        //
-        // This is 1G entry, skip it
-        //
-        continue;
-      }
-      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-      if (Pd == 0) {
-        continue;
-      }
-      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
-        if ((*Pd & IA32_PG_P) == 0) {
+      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
+        if ((*Pdpt & IA32_PG_P) == 0) {
+          //
+          // If PDPT entry does not exist, skip it
+          //
+          continue;
+        }
+        if ((*Pdpt & IA32_PG_PS) != 0) {
           //
-          // If PD entry does not exist, skip it
+          // This is 1G entry, skip it
           //
           continue;
         }
-        Address = (((PdptIndex << 9) + PdIndex) << 21);
+        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+        if (Pd == 0) {
+          continue;
+        }
+        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
+          if ((*Pd & IA32_PG_P) == 0) {
+            //
+            // If PD entry does not exist, skip it
+            //
+            continue;
+          }
+          Address = (UINTN) LShiftU64 (
+                              LShiftU64 (
+                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
+                                9
+                                ) + PdIndex,
+                                21
+                              );
 
-        //
-        // If it is 2M page, check IsAddressSplit()
-        //
-        if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
           //
-          // Based on current page table, create 4KB page table for split area.
+          // If it is 2M page, check IsAddressSplit()
           //
-          ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
+          if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
+            //
+            // Based on current page table, create 4KB page table for split area.
+            //
+            ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
+
+            Pt = AllocatePageTableMemory (1);
+            ASSERT (Pt != NULL);
 
-          Pt = AllocatePageTableMemory (1);
-          ASSERT (Pt != NULL);
+            *Pd = (UINTN) Pt | IA32_PG_RW | IA32_PG_P;
 
-          // Split it
-          for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) {
-            Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
-          } // end for PT
-          *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
-        } // end if IsAddressSplit
-      } // end for PD
-    } // end for PDPT
-  } // end for PML4
+            // Split it
+            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
+              *Pt = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
+            } // end for PT
+            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+          } // end if IsAddressSplit
+        } // end for PD
+      } // end for PDPT
+    } // end for PML4
+  } // end for PML5
 
   //
   // Go through page table and set several page table entries to absent or execute-disable.
   //
   DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
-  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
-    if (sizeof (UINTN) == sizeof (UINT64)) {
+  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
+    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
+      //
+      // If PML5 entry does not exist, skip it
+      //
+      continue;
+    }
+    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
+    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
       if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
         //
         // If PML4 entry does not exist, skip it
@@ -642,69 +690,73 @@ InitPaging (
         continue;
       }
       Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-    } else {
-      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
-    }
-    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
-      if ((*Pdpt & IA32_PG_P) == 0) {
-        //
-        // If PDPT entry does not exist, skip it
-        //
-        continue;
-      }
-      if ((*Pdpt & IA32_PG_PS) != 0) {
-        //
-        // This is 1G entry, set NX bit and skip it
-        //
-        if (mXdSupported) {
-          *Pdpt = *Pdpt | IA32_PG_NX;
+      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
+        if ((*Pdpt & IA32_PG_P) == 0) {
+          //
+          // If PDPT entry does not exist, skip it
+          //
+          continue;
         }
-        continue;
-      }
-      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-      if (Pd == 0) {
-        continue;
-      }
-      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
-        if ((*Pd & IA32_PG_P) == 0) {
+        if ((*Pdpt & IA32_PG_PS) != 0) {
           //
-          // If PD entry does not exist, skip it
+          // This is 1G entry, set NX bit and skip it
           //
+          if (mXdSupported) {
+            *Pdpt = *Pdpt | IA32_PG_NX;
+          }
           continue;
         }
-        Address = (((PdptIndex << 9) + PdIndex) << 21);
-
-        if ((*Pd & IA32_PG_PS) != 0) {
-          // 2MB page
-
-          if (!IsAddressValid (Address, &Nx)) {
+        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+        if (Pd == 0) {
+          continue;
+        }
+        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
+          if ((*Pd & IA32_PG_P) == 0) {
             //
-            // Patch to remove Present flag and RW flag
+            // If PD entry does not exist, skip it
             //
-            *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
-          }
-          if (Nx && mXdSupported) {
-            *Pd = *Pd | IA32_PG_NX;
-          }
-        } else {
-          // 4KB page
-          Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-          if (Pt == 0) {
             continue;
           }
-          for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
+          Address = (UINTN) LShiftU64 (
+                              LShiftU64 (
+                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
+                                9
+                                ) + PdIndex,
+                                21
+                              );
+
+          if ((*Pd & IA32_PG_PS) != 0) {
+            // 2MB page
+
             if (!IsAddressValid (Address, &Nx)) {
-              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
+              //
+              // Patch to remove Present flag and RW flag
+              //
+              *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
             }
             if (Nx && mXdSupported) {
-              *Pt = *Pt | IA32_PG_NX;
+              *Pd = *Pd | IA32_PG_NX;
+            }
+          } else {
+            // 4KB page
+            Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+            if (Pt == 0) {
+              continue;
             }
-            Address += SIZE_4KB;
-          } // end for PT
-        } // end if PS
-      } // end for PD
-    } // end for PDPT
-  } // end for PML4
+            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
+              if (!IsAddressValid (Address, &Nx)) {
+                *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
+              }
+              if (Nx && mXdSupported) {
+                *Pt = *Pt | IA32_PG_NX;
+              }
+              Address += SIZE_4KB;
+            } // end for PT
+          } // end if PS
+        } // end for PD
+      } // end for PDPT
+    } // end for PML4
+  } // end for PML5
 
   //
   // Flush TLB
@@ -1156,6 +1208,20 @@ RestorePageTableBelow4G (
 {
   UINTN         PTIndex;
   UINTN         PFIndex;
+  IA32_CR4      Cr4;
+  BOOLEAN       Enable5LevelPaging;
+
+  Cr4.UintN = AsmReadCr4 ();
+  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+
+  //
+  // PML5
+  //
+  if (Enable5LevelPaging) {
+    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
+    ASSERT (PageTable[PTIndex] != 0);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+  }
 
   //
   // PML4
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 3d5d663d99..c31160735a 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -16,6 +16,8 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
 LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
 BOOLEAN                             m1GPageTableSupport = FALSE;
 BOOLEAN                             mCpuSmmStaticPageTable;
+BOOLEAN                             m5LevelPagingSupport;
+X86_ASSEMBLY_PATCH_LABEL            gPatch5LevelPagingSupport;
 
 /**
   Disable CET.
@@ -60,6 +62,31 @@ Is1GPageSupport (
   return FALSE;
 }
 
+/**
+  Check if 5-level paging is supported by processor or not.
+
+  @retval TRUE   5-level paging is supported.
+  @retval FALSE  5-level paging is not supported.
+
+**/
+BOOLEAN
+Is5LevelPagingSupport (
+  VOID
+  )
+{
+  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
+
+  AsmCpuidEx (
+    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
+    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
+    NULL,
+    NULL,
+    &EcxFlags.Uint32,
+    NULL
+    );
+  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
+}
+
 /**
   Set sub-entries number in entry.
 
@@ -130,14 +157,6 @@ CalculateMaximumSupportAddress (
       PhysicalAddressBits = 36;
     }
   }
-
-  //
-  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
-  //
-  ASSERT (PhysicalAddressBits <= 52);
-  if (PhysicalAddressBits > 48) {
-    PhysicalAddressBits = 48;
-  }
   return PhysicalAddressBits;
 }
 
@@ -152,89 +171,137 @@ SetStaticPageTable (
   )
 {
   UINT64                                        PageAddress;
+  UINTN                                         NumberOfPml5EntriesNeeded;
   UINTN                                         NumberOfPml4EntriesNeeded;
   UINTN                                         NumberOfPdpEntriesNeeded;
+  UINTN                                         IndexOfPml5Entries;
   UINTN                                         IndexOfPml4Entries;
   UINTN                                         IndexOfPdpEntries;
   UINTN                                         IndexOfPageDirectoryEntries;
+  UINT64                                        *PageMapLevel5Entry;
   UINT64                                        *PageMapLevel4Entry;
   UINT64                                        *PageMap;
   UINT64                                        *PageDirectoryPointerEntry;
   UINT64                                        *PageDirectory1GEntry;
   UINT64                                        *PageDirectoryEntry;
 
-  if (mPhysicalAddressBits <= 39 ) {
-    NumberOfPml4EntriesNeeded = 1;
-    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
-  } else {
-    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
-    NumberOfPdpEntriesNeeded = 512;
+  //
+  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
+  //  when 5-Level Paging is disabled.
+  //
+  ASSERT (mPhysicalAddressBits <= 52);
+  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
+    mPhysicalAddressBits = 48;
+  }
+
+  NumberOfPml5EntriesNeeded = 1;
+  if (mPhysicalAddressBits > 48) {
+    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
+    mPhysicalAddressBits = 48;
+  }
+
+  NumberOfPml4EntriesNeeded = 1;
+  if (mPhysicalAddressBits > 39) {
+    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
+    mPhysicalAddressBits = 39;
   }
 
+  NumberOfPdpEntriesNeeded = 1;
+  ASSERT (mPhysicalAddressBits > 30);
+  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);
+
   //
   // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
   //
   PageMap         = (VOID *) PageTable;
 
   PageMapLevel4Entry = PageMap;
-  PageAddress        = 0;
-  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
+  PageMapLevel5Entry = NULL;
+  if (m5LevelPagingSupport) {
     //
-    // Each PML4 entry points to a page of Page Directory Pointer entries.
+    // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
     //
-    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
-    if (PageDirectoryPointerEntry == NULL) {
-      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
-      ASSERT(PageDirectoryPointerEntry != NULL);
-      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
+    PageMapLevel5Entry = PageMap;
+  }
+  PageAddress        = 0;
 
-      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+  for ( IndexOfPml5Entries = 0
+      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
+      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
+    //
+    // Each PML5 entry points to a page of PML4 entires.
+    // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
+    // When 5-Level Paging is disabled, below allocation happens only once.
+    //
+    if (m5LevelPagingSupport) {
+      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
+      if (PageMapLevel4Entry == NULL) {
+        PageMapLevel4Entry = AllocatePageTableMemory (1);
+        ASSERT(PageMapLevel4Entry != NULL);
+        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));
+
+        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+      }
     }
 
-    if (m1GPageTableSupport) {
-      PageDirectory1GEntry = PageDirectoryPointerEntry;
-      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
-        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
-          //
-          // Skip the < 4G entries
-          //
-          continue;
-        }
-        //
-        // Fill in the Page Directory entries
-        //
-        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
+      //
+      // Each PML4 entry points to a page of Page Directory Pointer entries.
+      //
+      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
+      if (PageDirectoryPointerEntry == NULL) {
+        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
+        ASSERT(PageDirectoryPointerEntry != NULL);
+        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
+
+        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
       }
-    } else {
-      PageAddress = BASE_4GB;
-      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
-        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
-          //
-          // Skip the < 4G entries
-          //
-          continue;
-        }
-        //
-        // Each Directory Pointer entries points to a page of Page Directory entires.
-        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
-        //
-        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
-        if (PageDirectoryEntry == NULL) {
-          PageDirectoryEntry = AllocatePageTableMemory (1);
-          ASSERT(PageDirectoryEntry != NULL);
-          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
 
+      if (m1GPageTableSupport) {
+        PageDirectory1GEntry = PageDirectoryPointerEntry;
+        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
+          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
+            //
+            // Skip the < 4G entries
+            //
+            continue;
+          }
           //
-          // Fill in a Page Directory Pointer Entries
+          // Fill in the Page Directory entries
           //
-          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
         }
-
-        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
+      } else {
+        PageAddress = BASE_4GB;
+        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
+          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
+            //
+            // Skip the < 4G entries
+            //
+            continue;
+          }
           //
-          // Fill in the Page Directory entries
+          // Each Directory Pointer entries points to a page of Page Directory entires.
+          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
           //
-          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
+          if (PageDirectoryEntry == NULL) {
+            PageDirectoryEntry = AllocatePageTableMemory (1);
+            ASSERT(PageDirectoryEntry != NULL);
+            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
+
+            //
+            // Fill in a Page Directory Pointer Entries
+            //
+            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+          }
+
+          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
+            //
+            // Fill in the Page Directory entries
+            //
+            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+          }
         }
       }
     }
@@ -259,6 +326,8 @@ SmmInitPageTable (
   UINTN                             PageFaultHandlerHookAddress;
   IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
   EFI_STATUS                        Status;
+  UINT64                            *Pml4Entry;
+  UINT64                            *Pml5Entry;
 
   //
   // Initialize spin lock
@@ -266,12 +335,14 @@ SmmInitPageTable (
   InitializeSpinLock (mPFLock);
 
   mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
-  m1GPageTableSupport = Is1GPageSupport ();
-  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
-  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));
-
-  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
-  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
+  m1GPageTableSupport    = Is1GPageSupport ();
+  m5LevelPagingSupport   = Is5LevelPagingSupport ();
+  mPhysicalAddressBits   = CalculateMaximumSupportAddress ();
+  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
+  DEBUG ((DEBUG_INFO, "5LevelPaging Support     - %d\n", m5LevelPagingSupport));
+  DEBUG ((DEBUG_INFO, "1GPageTable Support      - %d\n", m1GPageTableSupport));
+  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable));
+  DEBUG ((DEBUG_INFO, "PhysicalAddressBits      - %d\n", mPhysicalAddressBits));
   //
   // Generate PAE page table for the first 4GB memory space
   //
@@ -288,15 +359,30 @@ SmmInitPageTable (
   //
   // Fill Page-Table-Level4 (PML4) entry
   //
-  PTEntry = (UINT64*)AllocatePageTableMemory (1);
-  ASSERT (PTEntry != NULL);
-  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
-  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
+  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
+  ASSERT (Pml4Entry != NULL);
+  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
 
   //
   // Set sub-entries number
   //
-  SetSubEntriesNum (PTEntry, 3);
+  SetSubEntriesNum (Pml4Entry, 3);
+  PTEntry = Pml4Entry;
+
+  if (m5LevelPagingSupport) {
+    //
+    // Fill PML5 entry
+    //
+    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
+    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
+    //
+    // Set sub-entries number
+    //
+    SetSubEntriesNum (Pml5Entry, 1);
+    PTEntry = Pml5Entry;
+  }
 
   if (mCpuSmmStaticPageTable) {
     SetStaticPageTable ((UINTN)PTEntry);
@@ -344,7 +430,7 @@ SmmInitPageTable (
   }
 
   //
-  // Return the address of PML4 (to set CR3)
+  // Return the address of PML4/PML5 (to set CR3)
   //
   return (UINT32)(UINTN)PTEntry;
 }
@@ -436,12 +522,16 @@ ReclaimPages (
   VOID
   )
 {
+  UINT64                       Pml5Entry;
+  UINT64                       *Pml5;
   UINT64                       *Pml4;
   UINT64                       *Pdpt;
   UINT64                       *Pdt;
+  UINTN                        Pml5Index;
   UINTN                        Pml4Index;
   UINTN                        PdptIndex;
   UINTN                        PdtIndex;
+  UINTN                        MinPml5;
   UINTN                        MinPml4;
   UINTN                        MinPdpt;
   UINTN                        MinPdt;
@@ -451,120 +541,147 @@ ReclaimPages (
   BOOLEAN                      PML4EIgnore;
   BOOLEAN                      PDPTEIgnore;
   UINT64                       *ReleasePageAddress;
+  IA32_CR4                     Cr4;
+  BOOLEAN                      Enable5LevelPaging;
 
   Pml4 = NULL;
   Pdpt = NULL;
   Pdt  = NULL;
   MinAcc  = (UINT64)-1;
   MinPml4 = (UINTN)-1;
+  MinPml5 = (UINTN)-1;
   MinPdpt = (UINTN)-1;
   MinPdt  = (UINTN)-1;
   Acc     = 0;
   ReleasePageAddress = 0;
 
+  Cr4.UintN = AsmReadCr4 ();
+  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
+
+  if (!Enable5LevelPaging) {
+    //
+    // Create one fake PML5 entry for 4-Level Paging
+    // so that the page table parsing logic only handles 5-Level page structure.
+    //
+    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
+    Pml5 = &Pml5Entry;
+  }
+
   //
   // First, find the leaf entry has the smallest access record value
   //
-  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
-  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
-    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
+  for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
+    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
       //
-      // If the PML4 entry is not present or is masked, skip it
+      // If the PML5 entry is not present or is masked, skip it
       //
       continue;
     }
-    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
-    PML4EIgnore = FALSE;
-    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
-      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
+    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
+      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
         //
-        // If the PDPT entry is not present or is masked, skip it
+        // If the PML4 entry is not present or is masked, skip it
         //
-        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
-          //
-          // If the PDPT entry is masked, we will ignore checking the PML4 entry
-          //
-          PML4EIgnore = TRUE;
-        }
         continue;
       }
-      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
-        //
-        // It's not 1-GByte pages entry, it should be a PDPT entry,
-        // we will not check PML4 entry more
-        //
-        PML4EIgnore = TRUE;
-        Pdt =  (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
-        PDPTEIgnore = FALSE;
-        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
-          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
+      PML4EIgnore = FALSE;
+      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
+        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+          //
+          // If the PDPT entry is not present or is masked, skip it
+          //
+          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
             //
-            // If the PD entry is not present or is masked, skip it
+            // If the PDPT entry is masked, we will ignore checking the PML4 entry
             //
-            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+            PML4EIgnore = TRUE;
+          }
+          continue;
+        }
+        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
+          //
+          // It's not 1-GByte pages entry, it should be a PDPT entry,
+          // we will not check PML4 entry more
+          //
+          PML4EIgnore = TRUE;
+          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
+          PDPTEIgnore = FALSE;
+          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
+            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+              //
+              // If the PD entry is not present or is masked, skip it
+              //
+              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+                //
+                // If the PD entry is masked, we will not PDPT entry more
+                //
+                PDPTEIgnore = TRUE;
+              }
+              continue;
+            }
+            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
               //
-              // If the PD entry is masked, we will not PDPT entry more
+              // It's not 2 MByte page table entry, it should be PD entry
+              // we will find the entry has the smallest access record value
               //
               PDPTEIgnore = TRUE;
+              Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
+              if (Acc < MinAcc) {
+                //
+                // If the PD entry has the smallest access record value,
+                // save the Page address to be released
+                //
+                MinAcc  = Acc;
+                MinPml5 = Pml5Index;
+                MinPml4 = Pml4Index;
+                MinPdpt = PdptIndex;
+                MinPdt  = PdtIndex;
+                ReleasePageAddress = Pdt + PdtIndex;
+              }
             }
-            continue;
           }
-          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
+          if (!PDPTEIgnore) {
             //
-            // It's not 2 MByte page table entry, it should be PD entry
-            // we will find the entry has the smallest access record value
+            // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
+            // it should only has the entries point to 2 MByte Pages
             //
-            PDPTEIgnore = TRUE;
-            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
+            Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
             if (Acc < MinAcc) {
               //
-              // If the PD entry has the smallest access record value,
+              // If the PDPT entry has the smallest access record value,
               // save the Page address to be released
               //
               MinAcc  = Acc;
+              MinPml5 = Pml5Index;
               MinPml4 = Pml4Index;
               MinPdpt = PdptIndex;
-              MinPdt  = PdtIndex;
-              ReleasePageAddress = Pdt + PdtIndex;
+              MinPdt  = (UINTN)-1;
+              ReleasePageAddress = Pdpt + PdptIndex;
             }
           }
         }
-        if (!PDPTEIgnore) {
-          //
-          // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
-          // it should only has the entries point to 2 MByte Pages
-          //
-          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
-          if (Acc < MinAcc) {
-            //
-            // If the PDPT entry has the smallest access record value,
-            // save the Page address to be released
-            //
-            MinAcc  = Acc;
-            MinPml4 = Pml4Index;
-            MinPdpt = PdptIndex;
-            MinPdt  = (UINTN)-1;
-            ReleasePageAddress = Pdpt + PdptIndex;
-          }
-        }
       }
-    }
-    if (!PML4EIgnore) {
-      //
-      // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
-      // it should only has the entries point to 1 GByte Pages
-      //
-      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
-      if (Acc < MinAcc) {
+      if (!PML4EIgnore) {
         //
-        // If the PML4 entry has the smallest access record value,
-        // save the Page address to be released
+        // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
+        // it should only has the entries point to 1 GByte Pages
         //
-        MinAcc  = Acc;
-        MinPml4 = Pml4Index;
-        MinPdpt = (UINTN)-1;
-        MinPdt  = (UINTN)-1;
-        ReleasePageAddress = Pml4 + Pml4Index;
+        Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
+        if (Acc < MinAcc) {
+          //
+          // If the PML4 entry has the smallest access record value,
+          // save the Page address to be released
+          //
+          MinAcc  = Acc;
+          MinPml5 = Pml5Index;
+          MinPml4 = Pml4Index;
+          MinPdpt = (UINTN)-1;
+          MinPdt  = (UINTN)-1;
+          ReleasePageAddress = Pml4 + Pml4Index;
+        }
       }
     }
   }
@@ -588,6 +705,7 @@ ReclaimPages (
       //
       // If 4 KByte Page Table is released, check the PDPT entry
       //
+      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
       Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
       SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
       if (SubEntriesNum == 0) {
@@ -679,7 +797,7 @@ SmiDefaultPFHandler (
   )
 {
   UINT64                            *PageTable;
-  UINT64                            *Pml4;
+  UINT64                            *PageTableTop;
   UINT64                            PFAddress;
   UINTN                             StartBit;
   UINTN                             EndBit;
@@ -690,6 +808,8 @@ SmiDefaultPFHandler (
   UINTN                             PageAttribute;
   EFI_STATUS                        Status;
   UINT64                            *UpperEntry;
+  BOOLEAN                           Enable5LevelPaging;
+  IA32_CR4                          Cr4;
 
   //
   // Set default SMM page attribute
@@ -699,9 +819,12 @@ SmiDefaultPFHandler (
   PageAttribute = 0;
 
   EndBit = 0;
-  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
+  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
   PFAddress = AsmReadCr2 ();
 
+  Cr4.UintN = AsmReadCr4 ();
+  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
+
   Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
   //
   // If platform not support page table attribute, set default SMM page attribute
@@ -755,9 +878,9 @@ SmiDefaultPFHandler (
   }
 
   for (Index = 0; Index < NumOfPages; Index++) {
-    PageTable  = Pml4;
+    PageTable  = PageTableTop;
     UpperEntry = NULL;
-    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
+    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
       PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
       if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
         //
@@ -941,13 +1064,20 @@ SetPageTableAttributes (
   UINTN                 Index2;
   UINTN                 Index3;
   UINTN                 Index4;
+  UINTN                 Index5;
   UINT64                *L1PageTable;
   UINT64                *L2PageTable;
   UINT64                *L3PageTable;
   UINT64                *L4PageTable;
+  UINT64                *L5PageTable;
   BOOLEAN               IsSplitted;
   BOOLEAN               PageTableSplitted;
   BOOLEAN               CetEnabled;
+  IA32_CR4              Cr4;
+  BOOLEAN               Enable5LevelPaging;
+
+  Cr4.UintN = AsmReadCr4 ();
+  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
 
   //
   // Don't do this if
@@ -991,44 +1121,59 @@ SetPageTableAttributes (
   do {
     DEBUG ((DEBUG_INFO, "Start...\n"));
     PageTableSplitted = FALSE;
-
-    L4PageTable = (UINT64 *)GetPageTableBase ();
-    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
-    PageTableSplitted = (PageTableSplitted || IsSplitted);
-
-    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
-      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
-      if (L3PageTable == NULL) {
-        continue;
-      }
-
-      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+    L5PageTable = NULL;
+    if (Enable5LevelPaging) {
+      L5PageTable = (UINT64 *)GetPageTableBase ();
+      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
       PageTableSplitted = (PageTableSplitted || IsSplitted);
+    }
 
-      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
-        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
-          // 1G
+    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
+      if (Enable5LevelPaging) {
+        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+        if (L4PageTable == NULL) {
           continue;
         }
-        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
-        if (L2PageTable == NULL) {
+      } else {
+        L4PageTable = (UINT64 *)GetPageTableBase ();
+      }
+      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+      PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
+        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+        if (L3PageTable == NULL) {
           continue;
         }
 
-        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
         PageTableSplitted = (PageTableSplitted || IsSplitted);
 
-        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
-          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
-            // 2M
+        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
+          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
+            // 1G
             continue;
           }
-          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
-          if (L1PageTable == NULL) {
+          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+          if (L2PageTable == NULL) {
             continue;
           }
-          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+
+          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
           PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+              // 2M
+              continue;
+            }
+            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+            if (L1PageTable == NULL) {
+              continue;
+            }
+            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+            PageTableSplitted = (PageTableSplitted || IsSplitted);
+          }
         }
       }
     }
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
index 741e4b7da2..271492a9d7 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
@@ -69,6 +69,7 @@ extern ASM_PFX(mXdSupported)
 global ASM_PFX(gPatchXdSupported)
 global ASM_PFX(gPatchSmiStack)
 global ASM_PFX(gPatchSmiCr3)
+global ASM_PFX(gPatch5LevelPagingSupport)
 global ASM_PFX(gcSmiHandlerTemplate)
 global ASM_PFX(gcSmiHandlerSize)
 
@@ -124,6 +125,17 @@ ProtFlatMode:
 ASM_PFX(gPatchSmiCr3):
     mov     cr3, rax
     mov     eax, 0x668                   ; as cr4.PGE is not set here, refresh cr3
+
+    mov     cl, strict byte 0            ; source operand will be patched
+ASM_PFX(gPatch5LevelPagingSupport):
+    cmp     cl, 0
+    je      SkipEnable5LevelPaging
+    ;
+    ; Enable 5-Level Paging bit
+    ;
+    bts     eax, 12                     ; Set LA57 bit (bit #12)
+SkipEnable5LevelPaging:
+
     mov     cr4, rax                    ; in PreModifyMtrrs() to flush TLB.
 ; Load TSS
     sub     esp, 8                      ; reserve room in stack
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
index e7c78d36fc..63bae5a913 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -1,7 +1,7 @@
 /** @file
 X64 processor specific functions to enable SMM profile.
 
-Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
 
 SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -147,9 +147,14 @@ RestorePageTableAbove4G (
   BOOLEAN       Existed;
   UINTN         Index;
   UINTN         PFIndex;
+  IA32_CR4      Cr4;
+  BOOLEAN       Enable5LevelPaging;
 
   ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
 
+  Cr4.UintN = AsmReadCr4 ();
+  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+
   //
   // If page fault address is 4GB above.
   //
@@ -161,38 +166,48 @@ RestorePageTableAbove4G (
   //
   Existed = FALSE;
   PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
-  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
-  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
-    // PML4E
-    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+  PTIndex = 0;
+  if (Enable5LevelPaging) {
+    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
+  }
+  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
+    // PML5E
+    if (Enable5LevelPaging) {
+      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+    }
+    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
     if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
-      // PDPTE
+      // PML4E
       PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
-      // PD
-      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
-        //
-        // 2MB page
-        //
-        Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-        if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
-          Existed = TRUE;
-        }
-      } else {
-        //
-        // 4KB page
-        //
-        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
-        if (PageTable != 0) {
+      PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+        // PDPTE
+        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+        PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+        // PD
+        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
           //
-          // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
+          // 2MB page
           //
-          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
           Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-          if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+          if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
             Existed = TRUE;
           }
+        } else {
+          //
+          // 4KB page
+          //
+          PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
+          if (PageTable != 0) {
+            //
+            // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
+            //
+            PTIndex = BitFieldRead64 (PFAddress, 12, 20);
+            Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+            if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+              Existed = TRUE;
+            }
+          }
         }
       }
     }
@@ -221,6 +236,11 @@ RestorePageTableAbove4G (
     //
     PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
     PFAddress = AsmReadCr2 ();
+    // PML5E
+    if (Enable5LevelPaging) {
+      PTIndex = BitFieldRead64 (PFAddress, 48, 56);
+      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+    }
     // PML4E
     PTIndex = BitFieldRead64 (PFAddress, 39, 47);
     PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-- 
2.21.0.windows.1



Re: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu: Enable 5 level paging when CPU supports
Posted by Dong, Eric 6 years, 7 months ago
Reviewed-by: Eric Dong <eric.dong@intel.com>

> -----Original Message-----
> From: Ni, Ray
> Sent: Wednesday, July 3, 2019 2:54 PM
> To: devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Laszlo Ersek <lersek@redhat.com>
> Subject: [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu: Enable 5 level paging when
> CPU supports
> 
> REF:https://bugzilla.tianocore.org/show_bug.cgi?id=1946
> 
> The patch changes SMM environment to use 5 level paging when CPU
> supports it.
> 
> Signed-off-by: Ray Ni <ray.ni@intel.com>
> Cc: Eric Dong <eric.dong@intel.com>
> Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
> ---
>  .../PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c   |  20 +-
>  UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c        | 272 ++++++----
>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c       | 485 ++++++++++++-----
> -
>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm   |  12 +
>  .../PiSmmCpuDxeSmm/X64/SmmProfileArch.c       |  72 ++-
>  5 files changed, 561 insertions(+), 300 deletions(-)
> 
> diff --git
> a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> index 069be3aaa5..55090e9c3e 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> @@ -125,18 +125,36 @@ GetPageTableEntry (
>    UINTN                 Index2;
>    UINTN                 Index3;
>    UINTN                 Index4;
> +  UINTN                 Index5;
>    UINT64                *L1PageTable;
>    UINT64                *L2PageTable;
>    UINT64                *L3PageTable;
>    UINT64                *L4PageTable;
> +  UINT64                *L5PageTable;
> +  IA32_CR4              Cr4;
> +  BOOLEAN               Enable5LevelPaging;
> 
> +  Index5 = ((UINTN)RShiftU64 (Address, 48)) & PAGING_PAE_INDEX_MASK;
>    Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_PAE_INDEX_MASK;
>    Index3 = ((UINTN)Address >> 30) & PAGING_PAE_INDEX_MASK;
>    Index2 = ((UINTN)Address >> 21) & PAGING_PAE_INDEX_MASK;
>    Index1 = ((UINTN)Address >> 12) & PAGING_PAE_INDEX_MASK;
> 
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> +
>    if (sizeof(UINTN) == sizeof(UINT64)) {
> -    L4PageTable = (UINT64 *)GetPageTableBase ();
> +    if (Enable5LevelPaging) {
> +      L5PageTable = (UINT64 *)GetPageTableBase ();
> +      if (L5PageTable[Index5] == 0) {
> +        *PageAttribute = PageNone;
> +        return NULL;
> +      }
> +
> +      L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] &
> ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
> +    } else {
> +      L4PageTable = (UINT64 *)GetPageTableBase ();
> +    }
>      if (L4PageTable[Index4] == 0) {
>        *PageAttribute = PageNone;
>        return NULL;
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> index e2b6a2d9b2..c5131526f0 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> @@ -534,43 +534,78 @@ InitPaging (
>    VOID
>    )
>  {
> +  UINT64                            Pml5Entry;
> +  UINT64                            Pml4Entry;
> +  UINT64                            *Pml5;
>    UINT64                            *Pml4;
>    UINT64                            *Pdpt;
>    UINT64                            *Pd;
>    UINT64                            *Pt;
>    UINTN                             Address;
> +  UINTN                             Pml5Index;
>    UINTN                             Pml4Index;
>    UINTN                             PdptIndex;
>    UINTN                             PdIndex;
>    UINTN                             PtIndex;
>    UINTN                             NumberOfPdptEntries;
>    UINTN                             NumberOfPml4Entries;
> +  UINTN                             NumberOfPml5Entries;
>    UINTN                             SizeOfMemorySpace;
>    BOOLEAN                           Nx;
> +  IA32_CR4                          Cr4;
> +  BOOLEAN                           Enable5LevelPaging;
> +
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> 
>    if (sizeof (UINTN) == sizeof (UINT64)) {
> -    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
> +    if (!Enable5LevelPaging) {
> +      Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
> +      Pml5 = &Pml5Entry;
> +    } else {
> +      Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
> +    }
>      SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
>      //
>      // Calculate the table entries of PML4E and PDPTE.
>      //
> -    if (SizeOfMemorySpace <= 39 ) {
> -      NumberOfPml4Entries = 1;
> -      NumberOfPdptEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace -
> 30));
> -    } else {
> -      NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace -
> 39));
> -      NumberOfPdptEntries = 512;
> +    NumberOfPml5Entries = 1;
> +    if (SizeOfMemorySpace > 48) {
> +      NumberOfPml5Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 48);
> +      SizeOfMemorySpace = 48;
>      }
> -  } else {
> +
>      NumberOfPml4Entries = 1;
> +    if (SizeOfMemorySpace > 39) {
> +      NumberOfPml4Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 39);
> +      SizeOfMemorySpace = 39;
> +    }
> +
> +    NumberOfPdptEntries = 1;
> +    ASSERT (SizeOfMemorySpace > 30);
> +    NumberOfPdptEntries = (UINTN) LShiftU64 (1, SizeOfMemorySpace -
> + 30);  } else {
> +    Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
> +    Pml4 = &Pml4Entry;
> +    Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
> +    Pml5 = &Pml5Entry;
> +    NumberOfPml5Entries  = 1;
> +    NumberOfPml4Entries  = 1;
>      NumberOfPdptEntries  = 4;
>    }
> 
>    //
>    // Go through page table and change 2MB-page into 4KB-page.
>    //
> -  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
> -    if (sizeof (UINTN) == sizeof (UINT64)) {
> +  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
> +      //
> +      // If PML5 entry does not exist, skip it
> +      //
> +      continue;
> +    }
> +    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] &
> PHYSICAL_ADDRESS_MASK);
> +    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
>        if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
>          //
>          // If PML4 entry does not exist, skip it
> @@ -578,63 +613,76 @@ InitPaging (
>          continue;
>        }
>        Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -    } else {
> -      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
> -    }
> -    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++,
> Pdpt++) {
> -      if ((*Pdpt & IA32_PG_P) == 0) {
> -        //
> -        // If PDPT entry does not exist, skip it
> -        //
> -        continue;
> -      }
> -      if ((*Pdpt & IA32_PG_PS) != 0) {
> -        //
> -        // This is 1G entry, skip it
> -        //
> -        continue;
> -      }
> -      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -      if (Pd == 0) {
> -        continue;
> -      }
> -      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
> -        if ((*Pd & IA32_PG_P) == 0) {
> +      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++,
> Pdpt++) {
> +        if ((*Pdpt & IA32_PG_P) == 0) {
> +          //
> +          // If PDPT entry does not exist, skip it
> +          //
> +          continue;
> +        }
> +        if ((*Pdpt & IA32_PG_PS) != 0) {
>            //
> -          // If PD entry does not exist, skip it
> +          // This is 1G entry, skip it
>            //
>            continue;
>          }
> -        Address = (((PdptIndex << 9) + PdIndex) << 21);
> +        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> +        if (Pd == 0) {
> +          continue;
> +        }
> +        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
> +          if ((*Pd & IA32_PG_P) == 0) {
> +            //
> +            // If PD entry does not exist, skip it
> +            //
> +            continue;
> +          }
> +          Address = (UINTN) LShiftU64 (
> +                              LShiftU64 (
> +                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
> +                                9
> +                                ) + PdIndex,
> +                                21
> +                              );
> 
> -        //
> -        // If it is 2M page, check IsAddressSplit()
> -        //
> -        if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
>            //
> -          // Based on current page table, create 4KB page table for split area.
> +          // If it is 2M page, check IsAddressSplit()
>            //
> -          ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
> +          if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
> +            //
> +            // Based on current page table, create 4KB page table for split area.
> +            //
> +            ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
> +
> +            Pt = AllocatePageTableMemory (1);
> +            ASSERT (Pt != NULL);
> 
> -          Pt = AllocatePageTableMemory (1);
> -          ASSERT (Pt != NULL);
> +            *Pd = (UINTN) Pt | IA32_PG_RW | IA32_PG_P;
> 
> -          // Split it
> -          for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) {
> -            Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS);
> -          } // end for PT
> -          *Pd = (UINT64)(UINTN)Pt | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> -        } // end if IsAddressSplit
> -      } // end for PD
> -    } // end for PDPT
> -  } // end for PML4
> +            // Split it
> +            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
> +              *Pt = Address + ((PtIndex << 12) | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS);
> +            } // end for PT
> +            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> +          } // end if IsAddressSplit
> +        } // end for PD
> +      } // end for PDPT
> +    } // end for PML4
> +  } // end for PML5
> 
>    //
>    // Go through page table and set several page table entries to absent or
> execute-disable.
>    //
>    DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
> -  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
> -    if (sizeof (UINTN) == sizeof (UINT64)) {
> +  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
> +      //
> +      // If PML5 entry does not exist, skip it
> +      //
> +      continue;
> +    }
> +    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] &
> PHYSICAL_ADDRESS_MASK);
> +    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
>        if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
>          //
>          // If PML4 entry does not exist, skip it
> @@ -642,69 +690,73 @@ InitPaging (
>          continue;
>        }
>        Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -    } else {
> -      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
> -    }
> -    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++,
> Pdpt++) {
> -      if ((*Pdpt & IA32_PG_P) == 0) {
> -        //
> -        // If PDPT entry does not exist, skip it
> -        //
> -        continue;
> -      }
> -      if ((*Pdpt & IA32_PG_PS) != 0) {
> -        //
> -        // This is 1G entry, set NX bit and skip it
> -        //
> -        if (mXdSupported) {
> -          *Pdpt = *Pdpt | IA32_PG_NX;
> +      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++,
> Pdpt++) {
> +        if ((*Pdpt & IA32_PG_P) == 0) {
> +          //
> +          // If PDPT entry does not exist, skip it
> +          //
> +          continue;
>          }
> -        continue;
> -      }
> -      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -      if (Pd == 0) {
> -        continue;
> -      }
> -      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
> -        if ((*Pd & IA32_PG_P) == 0) {
> +        if ((*Pdpt & IA32_PG_PS) != 0) {
>            //
> -          // If PD entry does not exist, skip it
> +          // This is 1G entry, set NX bit and skip it
>            //
> +          if (mXdSupported) {
> +            *Pdpt = *Pdpt | IA32_PG_NX;
> +          }
>            continue;
>          }
> -        Address = (((PdptIndex << 9) + PdIndex) << 21);
> -
> -        if ((*Pd & IA32_PG_PS) != 0) {
> -          // 2MB page
> -
> -          if (!IsAddressValid (Address, &Nx)) {
> +        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> +        if (Pd == 0) {
> +          continue;
> +        }
> +        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
> +          if ((*Pd & IA32_PG_P) == 0) {
>              //
> -            // Patch to remove Present flag and RW flag
> +            // If PD entry does not exist, skip it
>              //
> -            *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> -          }
> -          if (Nx && mXdSupported) {
> -            *Pd = *Pd | IA32_PG_NX;
> -          }
> -        } else {
> -          // 4KB page
> -          Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -          if (Pt == 0) {
>              continue;
>            }
> -          for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
> +          Address = (UINTN) LShiftU64 (
> +                              LShiftU64 (
> +                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
> +                                9
> +                                ) + PdIndex,
> +                                21
> +                              );
> +
> +          if ((*Pd & IA32_PG_PS) != 0) {
> +            // 2MB page
> +
>              if (!IsAddressValid (Address, &Nx)) {
> -              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> +              //
> +              // Patch to remove Present flag and RW flag
> +              //
> +              *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
>              }
>              if (Nx && mXdSupported) {
> -              *Pt = *Pt | IA32_PG_NX;
> +              *Pd = *Pd | IA32_PG_NX;
> +            }
> +          } else {
> +            // 4KB page
> +            Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> +            if (Pt == 0) {
> +              continue;
>              }
> -            Address += SIZE_4KB;
> -          } // end for PT
> -        } // end if PS
> -      } // end for PD
> -    } // end for PDPT
> -  } // end for PML4
> +            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
> +              if (!IsAddressValid (Address, &Nx)) {
> +                *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> +              }
> +              if (Nx && mXdSupported) {
> +                *Pt = *Pt | IA32_PG_NX;
> +              }
> +              Address += SIZE_4KB;
> +            } // end for PT
> +          } // end if PS
> +        } // end for PD
> +      } // end for PDPT
> +    } // end for PML4
> +  } // end for PML5
> 
>    //
>    // Flush TLB
> @@ -1156,6 +1208,20 @@ RestorePageTableBelow4G (
>  {
>    UINTN         PTIndex;
>    UINTN         PFIndex;
> +  IA32_CR4      Cr4;
> +  BOOLEAN       Enable5LevelPaging;
> +
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> +
> +  //
> +  // PML5
> +  //
> +  if (Enable5LevelPaging) {
> +    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
> +    ASSERT (PageTable[PTIndex] != 0);
> +    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
> +  }
> 
>    //
>    // PML4
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> index 3d5d663d99..c31160735a 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> @@ -16,6 +16,8 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
>  LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE
> (mPagePool);
>  BOOLEAN                             m1GPageTableSupport = FALSE;
>  BOOLEAN                             mCpuSmmStaticPageTable;
> +BOOLEAN                             m5LevelPagingSupport;
> +X86_ASSEMBLY_PATCH_LABEL            gPatch5LevelPagingSupport;
> 
>  /**
>    Disable CET.
> @@ -60,6 +62,31 @@ Is1GPageSupport (
>    return FALSE;
>  }
> 
> +/**
> +  Check if 5-level paging is supported by processor or not.
> +
> +  @retval TRUE   5-level paging is supported.
> +  @retval FALSE  5-level paging is not supported.
> +
> +**/
> +BOOLEAN
> +Is5LevelPagingSupport (
> +  VOID
> +  )
> +{
> +  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
> +
> +  AsmCpuidEx (
> +    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
> +    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
> +    NULL,
> +    NULL,
> +    &EcxFlags.Uint32,
> +    NULL
> +    );
> +  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
> +}
> +
>  /**
>    Set sub-entries number in entry.
> 
> @@ -130,14 +157,6 @@ CalculateMaximumSupportAddress (
>        PhysicalAddressBits = 36;
>      }
>    }
> -
> -  //
> -  // IA-32e paging translates 48-bit linear addresses to 52-bit physical
> addresses.
> -  //
> -  ASSERT (PhysicalAddressBits <= 52);
> -  if (PhysicalAddressBits > 48) {
> -    PhysicalAddressBits = 48;
> -  }
>    return PhysicalAddressBits;
>  }
> 
> @@ -152,89 +171,137 @@ SetStaticPageTable (
>    )
>  {
>    UINT64                                        PageAddress;
> +  UINTN                                         NumberOfPml5EntriesNeeded;
>    UINTN                                         NumberOfPml4EntriesNeeded;
>    UINTN                                         NumberOfPdpEntriesNeeded;
> +  UINTN                                         IndexOfPml5Entries;
>    UINTN                                         IndexOfPml4Entries;
>    UINTN                                         IndexOfPdpEntries;
>    UINTN                                         IndexOfPageDirectoryEntries;
> +  UINT64                                        *PageMapLevel5Entry;
>    UINT64                                        *PageMapLevel4Entry;
>    UINT64                                        *PageMap;
>    UINT64                                        *PageDirectoryPointerEntry;
>    UINT64                                        *PageDirectory1GEntry;
>    UINT64                                        *PageDirectoryEntry;
> 
> -  if (mPhysicalAddressBits <= 39 ) {
> -    NumberOfPml4EntriesNeeded = 1;
> -    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1,
> (mPhysicalAddressBits - 30));
> -  } else {
> -    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1,
> (mPhysicalAddressBits - 39));
> -    NumberOfPdpEntriesNeeded = 512;
> +  //
> +  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
> +  //  when 5-Level Paging is disabled.
> +  //
> +  ASSERT (mPhysicalAddressBits <= 52);
> +  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
> +    mPhysicalAddressBits = 48;
> +  }
> +
> +  NumberOfPml5EntriesNeeded = 1;
> +  if (mPhysicalAddressBits > 48) {
> +    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1,
> mPhysicalAddressBits - 48);
> +    mPhysicalAddressBits = 48;
> +  }
> +
> +  NumberOfPml4EntriesNeeded = 1;
> +  if (mPhysicalAddressBits > 39) {
> +    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1,
> mPhysicalAddressBits - 39);
> +    mPhysicalAddressBits = 39;
>    }
> 
> +  NumberOfPdpEntriesNeeded = 1;
> +  ASSERT (mPhysicalAddressBits > 30);
> +  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);
> +
>    //
>    // By architecture only one PageMapLevel4 exists - so lets allocate storage
> for it.
>    //
>    PageMap         = (VOID *) PageTable;
> 
>    PageMapLevel4Entry = PageMap;
> -  PageAddress        = 0;
> -  for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> NumberOfPml4EntriesNeeded; IndexOfPml4Entries++,
> PageMapLevel4Entry++) {
> +  PageMapLevel5Entry = NULL;
> +  if (m5LevelPagingSupport) {
>      //
> -    // Each PML4 entry points to a page of Page Directory Pointer entries.
> +    // By architecture only one PageMapLevel5 exists - so lets allocate storage
> for it.
>      //
> -    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) &
> ~mAddressEncMask & gPhyMask);
> -    if (PageDirectoryPointerEntry == NULL) {
> -      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
> -      ASSERT(PageDirectoryPointerEntry != NULL);
> -      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
> +    PageMapLevel5Entry = PageMap;
> +  }
> +  PageAddress        = 0;
> 
> -      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> +  for ( IndexOfPml5Entries = 0
> +      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
> +      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
> +    //
> +    // Each PML5 entry points to a page of PML4 entires.
> +    // So lets allocate space for them and fill them in in the
> IndexOfPml4Entries loop.
> +    // When 5-Level Paging is disabled, below allocation happens only once.
> +    //
> +    if (m5LevelPagingSupport) {
> +      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) &
> ~mAddressEncMask & gPhyMask);
> +      if (PageMapLevel4Entry == NULL) {
> +        PageMapLevel4Entry = AllocatePageTableMemory (1);
> +        ASSERT(PageMapLevel4Entry != NULL);
> +        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));
> +
> +        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> +      }
>      }
> 
> -    if (m1GPageTableSupport) {
> -      PageDirectory1GEntry = PageDirectoryPointerEntry;
> -      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress +=
> SIZE_1GB) {
> -        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
> -          //
> -          // Skip the < 4G entries
> -          //
> -          continue;
> -        }
> -        //
> -        // Fill in the Page Directory entries
> -        //
> -        *PageDirectory1GEntry = PageAddress | mAddressEncMask |
> IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> +    for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512);
> IndexOfPml4Entries++, PageMapLevel4Entry++) {
> +      //
> +      // Each PML4 entry points to a page of Page Directory Pointer entries.
> +      //
> +      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) &
> ~mAddressEncMask & gPhyMask);
> +      if (PageDirectoryPointerEntry == NULL) {
> +        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
> +        ASSERT(PageDirectoryPointerEntry != NULL);
> +        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
> +
> +        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry
> + | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>        }
> -    } else {
> -      PageAddress = BASE_4GB;
> -      for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> NumberOfPdpEntriesNeeded; IndexOfPdpEntries++,
> PageDirectoryPointerEntry++) {
> -        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
> -          //
> -          // Skip the < 4G entries
> -          //
> -          continue;
> -        }
> -        //
> -        // Each Directory Pointer entries points to a page of Page Directory
> entires.
> -        // So allocate space for them and fill them in in the
> IndexOfPageDirectoryEntries loop.
> -        //
> -        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) &
> ~mAddressEncMask & gPhyMask);
> -        if (PageDirectoryEntry == NULL) {
> -          PageDirectoryEntry = AllocatePageTableMemory (1);
> -          ASSERT(PageDirectoryEntry != NULL);
> -          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
> 
> +      if (m1GPageTableSupport) {
> +        PageDirectory1GEntry = PageDirectoryPointerEntry;
> +        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries <
> 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress
> += SIZE_1GB) {
> +          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
> +            //
> +            // Skip the < 4G entries
> +            //
> +            continue;
> +          }
>            //
> -          // Fill in a Page Directory Pointer Entries
> +          // Fill in the Page Directory entries
>            //
> -          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> +          *PageDirectory1GEntry = PageAddress | mAddressEncMask |
> + IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
>          }
> -
> -        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries <
> 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress +=
> SIZE_2MB) {
> +      } else {
> +        PageAddress = BASE_4GB;
> +        for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512);
> IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
> +          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
> +            //
> +            // Skip the < 4G entries
> +            //
> +            continue;
> +          }
>            //
> -          // Fill in the Page Directory entries
> +          // Each Directory Pointer entries points to a page of Page Directory
> entires.
> +          // So allocate space for them and fill them in in the
> IndexOfPageDirectoryEntries loop.
>            //
> -          *PageDirectoryEntry = PageAddress | mAddressEncMask |
> IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> +          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) &
> ~mAddressEncMask & gPhyMask);
> +          if (PageDirectoryEntry == NULL) {
> +            PageDirectoryEntry = AllocatePageTableMemory (1);
> +            ASSERT(PageDirectoryEntry != NULL);
> +            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
> +
> +            //
> +            // Fill in a Page Directory Pointer Entries
> +            //
> +            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> +          }
> +
> +          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries <
> 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress +=
> SIZE_2MB) {
> +            //
> +            // Fill in the Page Directory entries
> +            //
> +            *PageDirectoryEntry = PageAddress | mAddressEncMask |
> IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> +          }
>          }
>        }
>      }
> @@ -259,6 +326,8 @@ SmmInitPageTable (
>    UINTN                             PageFaultHandlerHookAddress;
>    IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
>    EFI_STATUS                        Status;
> +  UINT64                            *Pml4Entry;
> +  UINT64                            *Pml5Entry;
> 
>    //
>    // Initialize spin lock
> @@ -266,12 +335,14 @@ SmmInitPageTable (
>    InitializeSpinLock (mPFLock);
> 
>    mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
> -  m1GPageTableSupport = Is1GPageSupport ();
> -  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n",
> m1GPageTableSupport));
> -  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n",
> mCpuSmmStaticPageTable));
> -
> -  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
> -  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n",
> mPhysicalAddressBits));
> +  m1GPageTableSupport    = Is1GPageSupport ();
> +  m5LevelPagingSupport   = Is5LevelPagingSupport ();
> +  mPhysicalAddressBits   = CalculateMaximumSupportAddress ();
> +  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport,
> 1);
> +  DEBUG ((DEBUG_INFO, "5LevelPaging Support     - %d\n",
> m5LevelPagingSupport));
> +  DEBUG ((DEBUG_INFO, "1GPageTable Support      - %d\n",
> m1GPageTableSupport));
> +  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n",
> mCpuSmmStaticPageTable));
> +  DEBUG ((DEBUG_INFO, "PhysicalAddressBits      - %d\n",
> mPhysicalAddressBits));
>    //
>    // Generate PAE page table for the first 4GB memory space
>    //
> @@ -288,15 +359,30 @@ SmmInitPageTable (
>    //
>    // Fill Page-Table-Level4 (PML4) entry
>    //
> -  PTEntry = (UINT64*)AllocatePageTableMemory (1);
> -  ASSERT (PTEntry != NULL);
> -  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> -  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
> +  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
> +  ASSERT (Pml4Entry != NULL);
> +  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> +  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
> 
>    //
>    // Set sub-entries number
>    //
> -  SetSubEntriesNum (PTEntry, 3);
> +  SetSubEntriesNum (Pml4Entry, 3);
> +  PTEntry = Pml4Entry;
> +
> +  if (m5LevelPagingSupport) {
> +    //
> +    // Fill PML5 entry
> +    //
> +    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
> +    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> +    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
> +    //
> +    // Set sub-entries number
> +    //
> +    SetSubEntriesNum (Pml5Entry, 1);
> +    PTEntry = Pml5Entry;
> +  }
> 
>    if (mCpuSmmStaticPageTable) {
>      SetStaticPageTable ((UINTN)PTEntry);
> @@ -344,7 +430,7 @@ SmmInitPageTable (
>    }
> 
>    //
> -  // Return the address of PML4 (to set CR3)
> +  // Return the address of PML4/PML5 (to set CR3)
>    //
>    return (UINT32)(UINTN)PTEntry;
>  }
> @@ -436,12 +522,16 @@ ReclaimPages (
>    VOID
>    )
>  {
> +  UINT64                       Pml5Entry;
> +  UINT64                       *Pml5;
>    UINT64                       *Pml4;
>    UINT64                       *Pdpt;
>    UINT64                       *Pdt;
> +  UINTN                        Pml5Index;
>    UINTN                        Pml4Index;
>    UINTN                        PdptIndex;
>    UINTN                        PdtIndex;
> +  UINTN                        MinPml5;
>    UINTN                        MinPml4;
>    UINTN                        MinPdpt;
>    UINTN                        MinPdt;
> @@ -451,120 +541,147 @@ ReclaimPages (
>    BOOLEAN                      PML4EIgnore;
>    BOOLEAN                      PDPTEIgnore;
>    UINT64                       *ReleasePageAddress;
> +  IA32_CR4                     Cr4;
> +  BOOLEAN                      Enable5LevelPaging;
> 
>    Pml4 = NULL;
>    Pdpt = NULL;
>    Pdt  = NULL;
>    MinAcc  = (UINT64)-1;
>    MinPml4 = (UINTN)-1;
> +  MinPml5 = (UINTN)-1;
>    MinPdpt = (UINTN)-1;
>    MinPdt  = (UINTN)-1;
>    Acc     = 0;
>    ReleasePageAddress = 0;
> 
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> +  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
> +
> +  if (!Enable5LevelPaging) {
> +    //
> +    // Create one fake PML5 entry for 4-Level Paging
> +    // so that the page table parsing logic only handles 5-Level page structure.
> +    //
> +    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
> +    Pml5 = &Pml5Entry;
> +  }
> +
>    //
>    // First, find the leaf entry has the smallest access record value
>    //
> -  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
> -  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4);
> Pml4Index++) {
> -    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] &
> IA32_PG_PMNT) != 0) {
> +  for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE /
> sizeof (*Pml4)) : 1; Pml5Index++) {
> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] &
> + IA32_PG_PMNT) != 0) {
>        //
> -      // If the PML4 entry is not present or is masked, skip it
> +      // If the PML5 entry is not present or is masked, skip it
>        //
>        continue;
>      }
> -    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask &
> gPhyMask);
> -    PML4EIgnore = FALSE;
> -    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt);
> PdptIndex++) {
> -      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] &
> IA32_PG_PMNT) != 0) {
> +    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
> +    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4);
> Pml4Index++) {
> +      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] &
> + IA32_PG_PMNT) != 0) {
>          //
> -        // If the PDPT entry is not present or is masked, skip it
> +        // If the PML4 entry is not present or is masked, skip it
>          //
> -        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
> -          //
> -          // If the PDPT entry is masked, we will ignore checking the PML4 entry
> -          //
> -          PML4EIgnore = TRUE;
> -        }
>          continue;
>        }
> -      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
> -        //
> -        // It's not 1-GByte pages entry, it should be a PDPT entry,
> -        // we will not check PML4 entry more
> -        //
> -        PML4EIgnore = TRUE;
> -        Pdt =  (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask &
> gPhyMask);
> -        PDPTEIgnore = FALSE;
> -        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++)
> {
> -          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] &
> IA32_PG_PMNT) != 0) {
> +      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask &
> gPhyMask);
> +      PML4EIgnore = FALSE;
> +      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt);
> PdptIndex++) {
> +        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] &
> IA32_PG_PMNT) != 0) {
> +          //
> +          // If the PDPT entry is not present or is masked, skip it
> +          //
> +          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
>              //
> -            // If the PD entry is not present or is masked, skip it
> +            // If the PDPT entry is masked, we will ignore checking the
> + PML4 entry
>              //
> -            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> +            PML4EIgnore = TRUE;
> +          }
> +          continue;
> +        }
> +        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
> +          //
> +          // It's not 1-GByte pages entry, it should be a PDPT entry,
> +          // we will not check PML4 entry more
> +          //
> +          PML4EIgnore = TRUE;
> +          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask &
> gPhyMask);
> +          PDPTEIgnore = FALSE;
> +          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt);
> PdtIndex++) {
> +            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] &
> IA32_PG_PMNT) != 0) {
> +              //
> +              // If the PD entry is not present or is masked, skip it
> +              //
> +              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> +                //
> +                // If the PD entry is masked, we will not PDPT entry more
> +                //
> +                PDPTEIgnore = TRUE;
> +              }
> +              continue;
> +            }
> +            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
>                //
> -              // If the PD entry is masked, we will not PDPT entry more
> +              // It's not 2 MByte page table entry, it should be PD entry
> +              // we will find the entry has the smallest access record
> + value
>                //
>                PDPTEIgnore = TRUE;
> +              Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
> +              if (Acc < MinAcc) {
> +                //
> +                // If the PD entry has the smallest access record value,
> +                // save the Page address to be released
> +                //
> +                MinAcc  = Acc;
> +                MinPml5 = Pml5Index;
> +                MinPml4 = Pml4Index;
> +                MinPdpt = PdptIndex;
> +                MinPdt  = PdtIndex;
> +                ReleasePageAddress = Pdt + PdtIndex;
> +              }
>              }
> -            continue;
>            }
> -          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
> +          if (!PDPTEIgnore) {
>              //
> -            // It's not 2 MByte page table entry, it should be PD entry
> -            // we will find the entry has the smallest access record value
> +            // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
> +            // it should only has the entries point to 2 MByte Pages
>              //
> -            PDPTEIgnore = TRUE;
> -            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
> +            Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
>              if (Acc < MinAcc) {
>                //
> -              // If the PD entry has the smallest access record value,
> +              // If the PDPT entry has the smallest access record
> + value,
>                // save the Page address to be released
>                //
>                MinAcc  = Acc;
> +              MinPml5 = Pml5Index;
>                MinPml4 = Pml4Index;
>                MinPdpt = PdptIndex;
> -              MinPdt  = PdtIndex;
> -              ReleasePageAddress = Pdt + PdtIndex;
> +              MinPdt  = (UINTN)-1;
> +              ReleasePageAddress = Pdpt + PdptIndex;
>              }
>            }
>          }
> -        if (!PDPTEIgnore) {
> -          //
> -          // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
> -          // it should only has the entries point to 2 MByte Pages
> -          //
> -          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
> -          if (Acc < MinAcc) {
> -            //
> -            // If the PDPT entry has the smallest access record value,
> -            // save the Page address to be released
> -            //
> -            MinAcc  = Acc;
> -            MinPml4 = Pml4Index;
> -            MinPdpt = PdptIndex;
> -            MinPdt  = (UINTN)-1;
> -            ReleasePageAddress = Pdpt + PdptIndex;
> -          }
> -        }
>        }
> -    }
> -    if (!PML4EIgnore) {
> -      //
> -      // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
> -      // it should only has the entries point to 1 GByte Pages
> -      //
> -      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
> -      if (Acc < MinAcc) {
> +      if (!PML4EIgnore) {
>          //
> -        // If the PML4 entry has the smallest access record value,
> -        // save the Page address to be released
> +        // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
> +        // it should only has the entries point to 1 GByte Pages
>          //
> -        MinAcc  = Acc;
> -        MinPml4 = Pml4Index;
> -        MinPdpt = (UINTN)-1;
> -        MinPdt  = (UINTN)-1;
> -        ReleasePageAddress = Pml4 + Pml4Index;
> +        Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
> +        if (Acc < MinAcc) {
> +          //
> +          // If the PML4 entry has the smallest access record value,
> +          // save the Page address to be released
> +          //
> +          MinAcc  = Acc;
> +          MinPml5 = Pml5Index;
> +          MinPml4 = Pml4Index;
> +          MinPdpt = (UINTN)-1;
> +          MinPdt  = (UINTN)-1;
> +          ReleasePageAddress = Pml4 + Pml4Index;
> +        }
>        }
>      }
>    }
> @@ -588,6 +705,7 @@ ReclaimPages (
>        //
>        // If 4 KByte Page Table is released, check the PDPT entry
>        //
> +      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
>        Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask &
> gPhyMask);
>        SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
>        if (SubEntriesNum == 0) {
> @@ -679,7 +797,7 @@ SmiDefaultPFHandler (
>    )
>  {
>    UINT64                            *PageTable;
> -  UINT64                            *Pml4;
> +  UINT64                            *PageTableTop;
>    UINT64                            PFAddress;
>    UINTN                             StartBit;
>    UINTN                             EndBit;
> @@ -690,6 +808,8 @@ SmiDefaultPFHandler (
>    UINTN                             PageAttribute;
>    EFI_STATUS                        Status;
>    UINT64                            *UpperEntry;
> +  BOOLEAN                           Enable5LevelPaging;
> +  IA32_CR4                          Cr4;
> 
>    //
>    // Set default SMM page attribute
> @@ -699,9 +819,12 @@ SmiDefaultPFHandler (
>    PageAttribute = 0;
> 
>    EndBit = 0;
> -  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
> +  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
>    PFAddress = AsmReadCr2 ();
> 
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
> +
>    Status = GetPlatformPageTableAttribute (PFAddress, &PageSize,
> &NumOfPages, &PageAttribute);
>    //
>    // If platform not support page table attribute, set default SMM page attribute
> @@ -755,9 +878,9 @@ SmiDefaultPFHandler (
>    }
> 
>    for (Index = 0; Index < NumOfPages; Index++) {
> -    PageTable  = Pml4;
> +    PageTable  = PageTableTop;
>      UpperEntry = NULL;
> -    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
> +    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit;
> + StartBit -= 9) {
>        PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
>        if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
>          //
> @@ -941,13 +1064,20 @@ SetPageTableAttributes (
>    UINTN                 Index2;
>    UINTN                 Index3;
>    UINTN                 Index4;
> +  UINTN                 Index5;
>    UINT64                *L1PageTable;
>    UINT64                *L2PageTable;
>    UINT64                *L3PageTable;
>    UINT64                *L4PageTable;
> +  UINT64                *L5PageTable;
>    BOOLEAN               IsSplitted;
>    BOOLEAN               PageTableSplitted;
>    BOOLEAN               CetEnabled;
> +  IA32_CR4              Cr4;
> +  BOOLEAN               Enable5LevelPaging;
> +
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> 
>    //
>    // Don't do this if
> @@ -991,44 +1121,59 @@ SetPageTableAttributes (
>    do {
>      DEBUG ((DEBUG_INFO, "Start...\n"));
>      PageTableSplitted = FALSE;
> -
> -    L4PageTable = (UINT64 *)GetPageTableBase ();
> -    SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO,
> &IsSplitted);
> -    PageTableSplitted = (PageTableSplitted || IsSplitted);
> -
> -    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
> -      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] &
> ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
> -      if (L3PageTable == NULL) {
> -        continue;
> -      }
> -
> -      SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO,
> &IsSplitted);
> +    L5PageTable = NULL;
> +    if (Enable5LevelPaging) {
> +      L5PageTable = (UINT64 *)GetPageTableBase ();
> +      SmmSetMemoryAttributesEx
> + ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB,
> EFI_MEMORY_RO,
> + &IsSplitted);
>        PageTableSplitted = (PageTableSplitted || IsSplitted);
> +    }
> 
> -      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
> -        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
> -          // 1G
> +    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) :
> 1); Index5++) {
> +      if (Enable5LevelPaging) {
> +        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] &
> ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
> +        if (L4PageTable == NULL) {
>            continue;
>          }
> -        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] &
> ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
> -        if (L2PageTable == NULL) {
> +      } else {
> +        L4PageTable = (UINT64 *)GetPageTableBase ();
> +      }
> +      SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO,
> &IsSplitted);
> +      PageTableSplitted = (PageTableSplitted || IsSplitted);
> +
> +      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
> +        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] &
> ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
> +        if (L3PageTable == NULL) {
>            continue;
>          }
> 
> -        SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO,
> &IsSplitted);
> +        SmmSetMemoryAttributesEx
> + ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB,
> EFI_MEMORY_RO,
> + &IsSplitted);
>          PageTableSplitted = (PageTableSplitted || IsSplitted);
> 
> -        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
> -          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
> -            // 2M
> +        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
> +          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
> +            // 1G
>              continue;
>            }
> -          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] &
> ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
> -          if (L1PageTable == NULL) {
> +          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] &
> ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
> +          if (L2PageTable == NULL) {
>              continue;
>            }
> -          SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO,
> &IsSplitted);
> +
> +          SmmSetMemoryAttributesEx
> + ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB,
> EFI_MEMORY_RO,
> + &IsSplitted);
>            PageTableSplitted = (PageTableSplitted || IsSplitted);
> +
> +          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
> +            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
> +              // 2M
> +              continue;
> +            }
> +            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] &
> ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
> +            if (L1PageTable == NULL) {
> +              continue;
> +            }
> +            SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO,
> &IsSplitted);
> +            PageTableSplitted = (PageTableSplitted || IsSplitted);
> +          }
>          }
>        }
>      }
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> index 741e4b7da2..271492a9d7 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> @@ -69,6 +69,7 @@ extern ASM_PFX(mXdSupported)
>  global ASM_PFX(gPatchXdSupported)
>  global ASM_PFX(gPatchSmiStack)
>  global ASM_PFX(gPatchSmiCr3)
> +global ASM_PFX(gPatch5LevelPagingSupport)
>  global ASM_PFX(gcSmiHandlerTemplate)
>  global ASM_PFX(gcSmiHandlerSize)
> 
> @@ -124,6 +125,17 @@ ProtFlatMode:
>  ASM_PFX(gPatchSmiCr3):
>      mov     cr3, rax
>      mov     eax, 0x668                   ; as cr4.PGE is not set here, refresh cr3
> +
> +    mov     cl, strict byte 0            ; source operand will be patched
> +ASM_PFX(gPatch5LevelPagingSupport):
> +    cmp     cl, 0
> +    je      SkipEnable5LevelPaging
> +    ;
> +    ; Enable 5-Level Paging bit
> +    ;
> +    bts     eax, 12                     ; Set LA57 bit (bit #12)
> +SkipEnable5LevelPaging:
> +
>      mov     cr4, rax                    ; in PreModifyMtrrs() to flush TLB.
>  ; Load TSS
>      sub     esp, 8                      ; reserve room in stack
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> index e7c78d36fc..63bae5a913 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> @@ -1,7 +1,7 @@
>  /** @file
>  X64 processor specific functions to enable SMM profile.
> 
> -Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
> +Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
>  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
> 
>  SPDX-License-Identifier: BSD-2-Clause-Patent
> @@ -147,9 +147,14 @@ RestorePageTableAbove4G (
>    BOOLEAN       Existed;
>    UINTN         Index;
>    UINTN         PFIndex;
> +  IA32_CR4      Cr4;
> +  BOOLEAN       Enable5LevelPaging;
> 
>    ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
> 
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> +
>    //
>    // If page fault address is 4GB above.
>    //
> @@ -161,38 +166,48 @@ RestorePageTableAbove4G (
>    //
>    Existed = FALSE;
>    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
> -  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
> -  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> -    // PML4E
> -    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> -    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
> +  PTIndex = 0;
> +  if (Enable5LevelPaging) {
> +    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
> +  }
> +  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
> +    // PML5E
> +    if (Enable5LevelPaging) {
> +      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> +    }
> +    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
>      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> -      // PDPTE
> +      // PML4E
>        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> -      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
> -      // PD
> -      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
> -        //
> -        // 2MB page
> -        //
> -        Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -        if ((Address & ~((1ull << 21) - 1)) == ((PFAddress &
> PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
> -          Existed = TRUE;
> -        }
> -      } else {
> -        //
> -        // 4KB page
> -        //
> -        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
> -        if (PageTable != 0) {
> +      PTIndex = BitFieldRead64 (PFAddress, 30, 38);
> +      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> +        // PDPTE
> +        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> +        PTIndex = BitFieldRead64 (PFAddress, 21, 29);
> +        // PD
> +        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
>            //
> -          // When there is a valid entry to map to 4KB page, need not create a
> new entry to map 2MB.
> +          // 2MB page
>            //
> -          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
>            Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -          if ((Address & ~((1ull << 12) - 1)) == (PFAddress &
> PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
> +          if ((Address & ~((1ull << 21) - 1)) == ((PFAddress &
> + PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
>              Existed = TRUE;
>            }
> +        } else {
> +          //
> +          // 4KB page
> +          //
> +          PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
> +          if (PageTable != 0) {
> +            //
> +            // When there is a valid entry to map to 4KB page, need not create a
> new entry to map 2MB.
> +            //
> +            PTIndex = BitFieldRead64 (PFAddress, 12, 20);
> +            Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> +            if ((Address & ~((1ull << 12) - 1)) == (PFAddress &
> PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
> +              Existed = TRUE;
> +            }
> +          }
>          }
>        }
>      }
> @@ -221,6 +236,11 @@ RestorePageTableAbove4G (
>      //
>      PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
>      PFAddress = AsmReadCr2 ();
> +    // PML5E
> +    if (Enable5LevelPaging) {
> +      PTIndex = BitFieldRead64 (PFAddress, 48, 56);
> +      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> +    }
>      // PML4E
>      PTIndex = BitFieldRead64 (PFAddress, 39, 47);
>      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> --
> 2.21.0.windows.1



Re: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu: Enable 5 level paging when CPU supports
Posted by Michael D Kinney 6 years, 7 months ago
Hi Ray,

I noticed a Linux/GCC build issue with this patch when using GCC version:

    gcc version 8.2.1 20181215 (Red Hat 8.2.1-6) (GCC)

edk2/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c: In function 'ReclaimPages':
edk2/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c:574:89: error: ?: using integer constants in boolean context, the expression will always evaluate to 'true' [-Werror=int-in-bool-context]
   for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {

I was able to get the build to pass by adding parentheses around the conditional expression:

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index c31160735a..a3b62f7787 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -571,7 +571,7 @@ ReclaimPages (
   //
   // First, find the leaf entry has the smallest access record value
   //
-  for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
+  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {^M
     if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
       //
       // If the PML5 entry is not present or is masked, skip it
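
For context, the warning comes from C operator precedence: '<' binds tighter
than '?:', so without the parentheses the loop condition parses as
(Pml5Index < Enable5LevelPaging) ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1,
which is never zero. Below is a minimal standalone sketch (hypothetical
names, plain C outside edk2) showing the difference the parentheses make:

    #include <stdio.h>

    int
    main (void)
    {
      int  Enable = 0;    // stand-in for Enable5LevelPaging
      int  Limit  = 512;  // stand-in for EFI_PAGE_SIZE / sizeof (*Pml4)
      int  Index;
      int  Count;

      //
      // '<' binds tighter than '?:', so this condition parses as
      // (Index < Enable) ? Limit : 1 -- always 1 or Limit, never zero,
      // so the loop would not terminate on its own.
      //
      Count = 0;
      for (Index = 0; Index < Enable ? Limit : 1; Index++) {
        if (++Count > 5) {
          break;          // guard against the runaway loop
        }
      }
      printf ("without (): %d iterations (runaway, stopped by guard)\n", Count);

      //
      // Parenthesizing the ternary gives the intended bound:
      // Limit iterations when Enable is set, otherwise one iteration.
      //
      Count = 0;
      for (Index = 0; Index < (Enable ? Limit : 1); Index++) {
        Count++;
      }
      printf ("with    (): %d iterations\n", Count);
      return 0;
    }

An alternative would be to hoist the bound into a local variable before the
loop, which reads the same way and avoids re-evaluating the ternary on every
iteration.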

Best regards,

Mike

> -----Original Message-----
> From: devel@edk2.groups.io [mailto:devel@edk2.groups.io]
> On Behalf Of Ni, Ray
> Sent: Tuesday, July 2, 2019 11:54 PM
> To: devel@edk2.groups.io
> Cc: Dong, Eric <eric.dong@intel.com>; Laszlo Ersek
> <lersek@redhat.com>
> Subject: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu:
> Enable 5 level paging when CPU supports
> 
> REF:https://bugzilla.tianocore.org/show_bug.cgi?id=1946
> 
> The patch changes SMM environment to use 5 level paging
> when CPU
> supports it.
> 
> Signed-off-by: Ray Ni <ray.ni@intel.com>
> Cc: Eric Dong <eric.dong@intel.com>
> Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
> ---
>  .../PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c   |  20 +-
>  UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c        | 272
> ++++++----
>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c       | 485
> ++++++++++++------
>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm   |  12 +
>  .../PiSmmCpuDxeSmm/X64/SmmProfileArch.c       |  72 ++-
>  5 files changed, 561 insertions(+), 300 deletions(-)
> 
> diff --git
> a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> index 069be3aaa5..55090e9c3e 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> @@ -125,18 +125,36 @@ GetPageTableEntry (
>    UINTN                 Index2;
>    UINTN                 Index3;
>    UINTN                 Index4;
> +  UINTN                 Index5;
>    UINT64                *L1PageTable;
>    UINT64                *L2PageTable;
>    UINT64                *L3PageTable;
>    UINT64                *L4PageTable;
> +  UINT64                *L5PageTable;
> +  IA32_CR4              Cr4;
> +  BOOLEAN               Enable5LevelPaging;
> 
> +  Index5 = ((UINTN)RShiftU64 (Address, 48)) &
> PAGING_PAE_INDEX_MASK;
>    Index4 = ((UINTN)RShiftU64 (Address, 39)) &
> PAGING_PAE_INDEX_MASK;
>    Index3 = ((UINTN)Address >> 30) &
> PAGING_PAE_INDEX_MASK;
>    Index2 = ((UINTN)Address >> 21) &
> PAGING_PAE_INDEX_MASK;
>    Index1 = ((UINTN)Address >> 12) &
> PAGING_PAE_INDEX_MASK;
> 
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> +
>    if (sizeof(UINTN) == sizeof(UINT64)) {
> -    L4PageTable = (UINT64 *)GetPageTableBase ();
> +    if (Enable5LevelPaging) {
> +      L5PageTable = (UINT64 *)GetPageTableBase ();
> +      if (L5PageTable[Index5] == 0) {
> +        *PageAttribute = PageNone;
> +        return NULL;
> +      }
> +
> +      L4PageTable = (UINT64
> *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask &
> PAGING_4K_ADDRESS_MASK_64);
> +    } else {
> +      L4PageTable = (UINT64 *)GetPageTableBase ();
> +    }
>      if (L4PageTable[Index4] == 0) {
>        *PageAttribute = PageNone;
>        return NULL;
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> index e2b6a2d9b2..c5131526f0 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> @@ -534,43 +534,78 @@ InitPaging (
>    VOID
>    )
>  {
> +  UINT64                            Pml5Entry;
> +  UINT64                            Pml4Entry;
> +  UINT64                            *Pml5;
>    UINT64                            *Pml4;
>    UINT64                            *Pdpt;
>    UINT64                            *Pd;
>    UINT64                            *Pt;
>    UINTN                             Address;
> +  UINTN                             Pml5Index;
>    UINTN                             Pml4Index;
>    UINTN                             PdptIndex;
>    UINTN                             PdIndex;
>    UINTN                             PtIndex;
>    UINTN                             NumberOfPdptEntries;
>    UINTN                             NumberOfPml4Entries;
> +  UINTN                             NumberOfPml5Entries;
>    UINTN                             SizeOfMemorySpace;
>    BOOLEAN                           Nx;
> +  IA32_CR4                          Cr4;
> +  BOOLEAN                           Enable5LevelPaging;
> +
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> 
>    if (sizeof (UINTN) == sizeof (UINT64)) {
> -    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
> +    if (!Enable5LevelPaging) {
> +      Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
> +      Pml5 = &Pml5Entry;
> +    } else {
> +      Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
> +    }
>      SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
>      //
>      // Calculate the table entries of PML4E and PDPTE.
>      //
> -    if (SizeOfMemorySpace <= 39 ) {
> -      NumberOfPml4Entries = 1;
> -      NumberOfPdptEntries = (UINT32)LShiftU64 (1,
> (SizeOfMemorySpace - 30));
> -    } else {
> -      NumberOfPml4Entries = (UINT32)LShiftU64 (1,
> (SizeOfMemorySpace - 39));
> -      NumberOfPdptEntries = 512;
> +    NumberOfPml5Entries = 1;
> +    if (SizeOfMemorySpace > 48) {
> +      NumberOfPml5Entries = (UINTN) LShiftU64 (1,
> SizeOfMemorySpace - 48);
> +      SizeOfMemorySpace = 48;
>      }
> -  } else {
> +
>      NumberOfPml4Entries = 1;
> +    if (SizeOfMemorySpace > 39) {
> +      NumberOfPml4Entries = (UINTN) LShiftU64 (1,
> SizeOfMemorySpace - 39);
> +      SizeOfMemorySpace = 39;
> +    }
> +
> +    NumberOfPdptEntries = 1;
> +    ASSERT (SizeOfMemorySpace > 30);
> +    NumberOfPdptEntries = (UINTN) LShiftU64 (1,
> SizeOfMemorySpace - 30);
> +  } else {
> +    Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
> +    Pml4 = &Pml4Entry;
> +    Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
> +    Pml5 = &Pml5Entry;
> +    NumberOfPml5Entries  = 1;
> +    NumberOfPml4Entries  = 1;
>      NumberOfPdptEntries  = 4;
>    }
> 
>    //
>    // Go through page table and change 2MB-page into 4KB-page.
>    //
> -  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
> Pml4Index++) {
> -    if (sizeof (UINTN) == sizeof (UINT64)) {
> +  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries;
> Pml5Index++) {
> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
> +      //
> +      // If PML5 entry does not exist, skip it
> +      //
> +      continue;
> +    }
> +    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] &
> PHYSICAL_ADDRESS_MASK);
> +    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
> Pml4Index++) {
>        if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
>          //
>          // If PML4 entry does not exist, skip it
> @@ -578,63 +613,76 @@ InitPaging (
>          continue;
>        }
>        Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> -    } else {
> -      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
> -    }
> -    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries;
> PdptIndex++, Pdpt++) {
> -      if ((*Pdpt & IA32_PG_P) == 0) {
> -        //
> -        // If PDPT entry does not exist, skip it
> -        //
> -        continue;
> -      }
> -      if ((*Pdpt & IA32_PG_PS) != 0) {
> -        //
> -        // This is 1G entry, skip it
> -        //
> -        continue;
> -      }
> -      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -      if (Pd == 0) {
> -        continue;
> -      }
> -      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
> (*Pd); PdIndex++, Pd++) {
> -        if ((*Pd & IA32_PG_P) == 0) {
> +      for (PdptIndex = 0; PdptIndex <
> NumberOfPdptEntries; PdptIndex++, Pdpt++) {
> +        if ((*Pdpt & IA32_PG_P) == 0) {
> +          //
> +          // If PDPT entry does not exist, skip it
> +          //
> +          continue;
> +        }
> +        if ((*Pdpt & IA32_PG_PS) != 0) {
>            //
> -          // If PD entry does not exist, skip it
> +          // This is 1G entry, skip it
>            //
>            continue;
>          }
> -        Address = (((PdptIndex << 9) + PdIndex) << 21);
> +        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask
> & PHYSICAL_ADDRESS_MASK);
> +        if (Pd == 0) {
> +          continue;
> +        }
> +        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
> (*Pd); PdIndex++, Pd++) {
> +          if ((*Pd & IA32_PG_P) == 0) {
> +            //
> +            // If PD entry does not exist, skip it
> +            //
> +            continue;
> +          }
> +          Address = (UINTN) LShiftU64 (
> +                              LShiftU64 (
> +                                LShiftU64 ((Pml5Index <<
> 9) + Pml4Index, 9) + PdptIndex,
> +                                9
> +                                ) + PdIndex,
> +                                21
> +                              );
> 
> -        //
> -        // If it is 2M page, check IsAddressSplit()
> -        //
> -        if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit
> (Address)) {
>            //
> -          // Based on current page table, create 4KB
> page table for split area.
> +          // If it is 2M page, check IsAddressSplit()
>            //
> -          ASSERT (Address == (*Pd &
> PHYSICAL_ADDRESS_MASK));
> +          if (((*Pd & IA32_PG_PS) != 0) &&
> IsAddressSplit (Address)) {
> +            //
> +            // Based on current page table, create 4KB page table for split area.
> +            //
> +            ASSERT (Address == (*Pd &
> PHYSICAL_ADDRESS_MASK));
> +
> +            Pt = AllocatePageTableMemory (1);
> +            ASSERT (Pt != NULL);
> 
> -          Pt = AllocatePageTableMemory (1);
> -          ASSERT (Pt != NULL);
> +            *Pd = (UINTN) Pt | IA32_PG_RW | IA32_PG_P;
> 
> -          // Split it
> -          for (PtIndex = 0; PtIndex < SIZE_4KB /
> sizeof(*Pt); PtIndex++) {
> -            Pt[PtIndex] = Address + ((PtIndex << 12) |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS);
> -          } // end for PT
> -          *Pd = (UINT64)(UINTN)Pt | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> -        } // end if IsAddressSplit
> -      } // end for PD
> -    } // end for PDPT
> -  } // end for PML4
> +            // Split it
> +            for (PtIndex = 0; PtIndex < SIZE_4KB /
> sizeof(*Pt); PtIndex++, Pt++) {
> +              *Pt = Address + ((PtIndex << 12) |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS);
> +            } // end for PT
> +            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> +          } // end if IsAddressSplit
> +        } // end for PD
> +      } // end for PDPT
> +    } // end for PML4
> +  } // end for PML5
> 
>    //
>    // Go through page table and set several page table
> entries to absent or execute-disable.
>    //
>    DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
> -  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
> Pml4Index++) {
> -    if (sizeof (UINTN) == sizeof (UINT64)) {
> +  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries;
> Pml5Index++) {
> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
> +      //
> +      // If PML5 entry does not exist, skip it
> +      //
> +      continue;
> +    }
> +    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] &
> PHYSICAL_ADDRESS_MASK);
> +    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
> Pml4Index++) {
>        if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
>          //
>          // If PML4 entry does not exist, skip it
> @@ -642,69 +690,73 @@ InitPaging (
>          continue;
>        }
>        Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> -    } else {
> -      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
> -    }
> -    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries;
> PdptIndex++, Pdpt++) {
> -      if ((*Pdpt & IA32_PG_P) == 0) {
> -        //
> -        // If PDPT entry does not exist, skip it
> -        //
> -        continue;
> -      }
> -      if ((*Pdpt & IA32_PG_PS) != 0) {
> -        //
> -        // This is 1G entry, set NX bit and skip it
> -        //
> -        if (mXdSupported) {
> -          *Pdpt = *Pdpt | IA32_PG_NX;
> +      for (PdptIndex = 0; PdptIndex <
> NumberOfPdptEntries; PdptIndex++, Pdpt++) {
> +        if ((*Pdpt & IA32_PG_P) == 0) {
> +          //
> +          // If PDPT entry does not exist, skip it
> +          //
> +          continue;
>          }
> -        continue;
> -      }
> -      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
> PHYSICAL_ADDRESS_MASK);
> -      if (Pd == 0) {
> -        continue;
> -      }
> -      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
> (*Pd); PdIndex++, Pd++) {
> -        if ((*Pd & IA32_PG_P) == 0) {
> +        if ((*Pdpt & IA32_PG_PS) != 0) {
>            //
> -          // If PD entry does not exist, skip it
> +          // This is 1G entry, set NX bit and skip it
>            //
> +          if (mXdSupported) {
> +            *Pdpt = *Pdpt | IA32_PG_NX;
> +          }
>            continue;
>          }
> -        Address = (((PdptIndex << 9) + PdIndex) << 21);
> -
> -        if ((*Pd & IA32_PG_PS) != 0) {
> -          // 2MB page
> -
> -          if (!IsAddressValid (Address, &Nx)) {
> +        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask
> & PHYSICAL_ADDRESS_MASK);
> +        if (Pd == 0) {
> +          continue;
> +        }
> +        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
> (*Pd); PdIndex++, Pd++) {
> +          if ((*Pd & IA32_PG_P) == 0) {
>              //
> -            // Patch to remove Present flag and RW flag
> +            // If PD entry does not exist, skip it
>              //
> -            *Pd = *Pd &
> (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> -          }
> -          if (Nx && mXdSupported) {
> -            *Pd = *Pd | IA32_PG_NX;
> -          }
> -        } else {
> -          // 4KB page
> -          Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask
> & PHYSICAL_ADDRESS_MASK);
> -          if (Pt == 0) {
>              continue;
>            }
> -          for (PtIndex = 0; PtIndex < SIZE_4KB /
> sizeof(*Pt); PtIndex++, Pt++) {
> +          Address = (UINTN) LShiftU64 (
> +                              LShiftU64 (
> +                                LShiftU64 ((Pml5Index <<
> 9) + Pml4Index, 9) + PdptIndex,
> +                                9
> +                                ) + PdIndex,
> +                                21
> +                              );
> +
> +          if ((*Pd & IA32_PG_PS) != 0) {
> +            // 2MB page
> +
>              if (!IsAddressValid (Address, &Nx)) {
> -              *Pt = *Pt &
> (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> +              //
> +              // Patch to remove Present flag and RW flag
> +              //
> +              *Pd = *Pd &
> (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
>              }
>              if (Nx && mXdSupported) {
> -              *Pt = *Pt | IA32_PG_NX;
> +              *Pd = *Pd | IA32_PG_NX;
> +            }
> +          } else {
> +            // 4KB page
> +            Pt = (UINT64 *)(UINTN)(*Pd &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> +            if (Pt == 0) {
> +              continue;
>              }
> -            Address += SIZE_4KB;
> -          } // end for PT
> -        } // end if PS
> -      } // end for PD
> -    } // end for PDPT
> -  } // end for PML4
> +            for (PtIndex = 0; PtIndex < SIZE_4KB /
> sizeof(*Pt); PtIndex++, Pt++) {
> +              if (!IsAddressValid (Address, &Nx)) {
> +                *Pt = *Pt &
> (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> +              }
> +              if (Nx && mXdSupported) {
> +                *Pt = *Pt | IA32_PG_NX;
> +              }
> +              Address += SIZE_4KB;
> +            } // end for PT
> +          } // end if PS
> +        } // end for PD
> +      } // end for PDPT
> +    } // end for PML4
> +  } // end for PML5
> 
>    //
>    // Flush TLB
> @@ -1156,6 +1208,20 @@ RestorePageTableBelow4G (
>  {
>    UINTN         PTIndex;
>    UINTN         PFIndex;
> +  IA32_CR4      Cr4;
> +  BOOLEAN       Enable5LevelPaging;
> +
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> +
> +  //
> +  // PML5
> +  //
> +  if (Enable5LevelPaging) {
> +    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
> +    ASSERT (PageTable[PTIndex] != 0);
> +    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> PHYSICAL_ADDRESS_MASK);
> +  }
> 
>    //
>    // PML4
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> index 3d5d663d99..c31160735a 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> @@ -16,6 +16,8 @@ SPDX-License-Identifier: BSD-2-Clause-
> Patent
>  LIST_ENTRY                          mPagePool =
> INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
>  BOOLEAN                             m1GPageTableSupport
> = FALSE;
>  BOOLEAN
> mCpuSmmStaticPageTable;
> +BOOLEAN
> m5LevelPagingSupport;
> +X86_ASSEMBLY_PATCH_LABEL
> gPatch5LevelPagingSupport;
> 
>  /**
>    Disable CET.
> @@ -60,6 +62,31 @@ Is1GPageSupport (
>    return FALSE;
>  }
> 
> +/**
> +  Check if 5-level paging is supported by the processor or not.
> +
> +  @retval TRUE   5-level paging is supported.
> +  @retval FALSE  5-level paging is not supported.
> +
> +**/
> +BOOLEAN
> +Is5LevelPagingSupport (
> +  VOID
> +  )
> +{
> +  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
> +
> +  AsmCpuidEx (
> +    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
> +
> CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
> +    NULL,
> +    NULL,
> +    &EcxFlags.Uint32,
> +    NULL
> +    );
> +  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
> +}
> +
>  /**
>    Set sub-entries number in entry.
> 
> @@ -130,14 +157,6 @@ CalculateMaximumSupportAddress (
>        PhysicalAddressBits = 36;
>      }
>    }
> -
> -  //
> -  // IA-32e paging translates 48-bit linear addresses to
> 52-bit physical addresses.
> -  //
> -  ASSERT (PhysicalAddressBits <= 52);
> -  if (PhysicalAddressBits > 48) {
> -    PhysicalAddressBits = 48;
> -  }
>    return PhysicalAddressBits;
>  }
> 
> @@ -152,89 +171,137 @@ SetStaticPageTable (
>    )
>  {
>    UINT64
> PageAddress;
> +  UINTN
> NumberOfPml5EntriesNeeded;
>    UINTN
> NumberOfPml4EntriesNeeded;
>    UINTN
> NumberOfPdpEntriesNeeded;
> +  UINTN
> IndexOfPml5Entries;
>    UINTN
> IndexOfPml4Entries;
>    UINTN
> IndexOfPdpEntries;
>    UINTN
> IndexOfPageDirectoryEntries;
> +  UINT64
> *PageMapLevel5Entry;
>    UINT64
> *PageMapLevel4Entry;
>    UINT64
> *PageMap;
>    UINT64
> *PageDirectoryPointerEntry;
>    UINT64
> *PageDirectory1GEntry;
>    UINT64
> *PageDirectoryEntry;
> 
> -  if (mPhysicalAddressBits <= 39 ) {
> -    NumberOfPml4EntriesNeeded = 1;
> -    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1,
> (mPhysicalAddressBits - 30));
> -  } else {
> -    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1,
> (mPhysicalAddressBits - 39));
> -    NumberOfPdpEntriesNeeded = 512;
> +  //
> +  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
> +  //  when 5-Level Paging is disabled.
> +  //
> +  ASSERT (mPhysicalAddressBits <= 52);
> +  if (!m5LevelPagingSupport && mPhysicalAddressBits >
> 48) {
> +    mPhysicalAddressBits = 48;
> +  }
> +
> +  NumberOfPml5EntriesNeeded = 1;
> +  if (mPhysicalAddressBits > 48) {
> +    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1,
> mPhysicalAddressBits - 48);
> +    mPhysicalAddressBits = 48;
> +  }
> +
> +  NumberOfPml4EntriesNeeded = 1;
> +  if (mPhysicalAddressBits > 39) {
> +    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1,
> mPhysicalAddressBits - 39);
> +    mPhysicalAddressBits = 39;
>    }
> 
> +  NumberOfPdpEntriesNeeded = 1;
> +  ASSERT (mPhysicalAddressBits > 30);
> +  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1,
> mPhysicalAddressBits - 30);
> +
>    //
>    // By architecture only one PageMapLevel4 exists - so
> lets allocate storage for it.
>    //
>    PageMap         = (VOID *) PageTable;
> 
>    PageMapLevel4Entry = PageMap;
> -  PageAddress        = 0;
> -  for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> NumberOfPml4EntriesNeeded; IndexOfPml4Entries++,
> PageMapLevel4Entry++) {
> +  PageMapLevel5Entry = NULL;
> +  if (m5LevelPagingSupport) {
>      //
> -    // Each PML4 entry points to a page of Page
> Directory Pointer entries.
> +    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
>      //
> -    PageDirectoryPointerEntry = (UINT64 *)
> ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
> -    if (PageDirectoryPointerEntry == NULL) {
> -      PageDirectoryPointerEntry =
> AllocatePageTableMemory (1);
> -      ASSERT(PageDirectoryPointerEntry != NULL);
> -      ZeroMem (PageDirectoryPointerEntry,
> EFI_PAGES_TO_SIZE(1));
> +    PageMapLevel5Entry = PageMap;
> +  }
> +  PageAddress        = 0;
> 
> -      *PageMapLevel4Entry =
> (UINT64)(UINTN)PageDirectoryPointerEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> +  for ( IndexOfPml5Entries = 0
> +      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
> +      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
> +    //
> +    // Each PML5 entry points to a page of PML4 entries.
> +    // So let's allocate space for them and fill them in in the IndexOfPml4Entries loop.
> +    // When 5-Level Paging is disabled, the allocation below happens only once.
> +    //
> +    if (m5LevelPagingSupport) {
> +      PageMapLevel4Entry = (UINT64 *)
> ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
> +      if (PageMapLevel4Entry == NULL) {
> +        PageMapLevel4Entry = AllocatePageTableMemory
> (1);
> +        ASSERT(PageMapLevel4Entry != NULL);
> +        ZeroMem (PageMapLevel4Entry,
> EFI_PAGES_TO_SIZE(1));
> +
> +        *PageMapLevel5Entry =
> (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> +      }
>      }
> 
> -    if (m1GPageTableSupport) {
> -      PageDirectory1GEntry = PageDirectoryPointerEntry;
> -      for (IndexOfPageDirectoryEntries = 0;
> IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectory1GEntry++,
> PageAddress += SIZE_1GB) {
> -        if (IndexOfPml4Entries == 0 &&
> IndexOfPageDirectoryEntries < 4) {
> -          //
> -          // Skip the < 4G entries
> -          //
> -          continue;
> -        }
> -        //
> -        // Fill in the Page Directory entries
> -        //
> -        *PageDirectory1GEntry = PageAddress |
> mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> +    for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> (NumberOfPml5EntriesNeeded == 1 ?
> NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++,
> PageMapLevel4Entry++) {
> +      //
> +      // Each PML4 entry points to a page of Page
> Directory Pointer entries.
> +      //
> +      PageDirectoryPointerEntry = (UINT64 *)
> ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
> +      if (PageDirectoryPointerEntry == NULL) {
> +        PageDirectoryPointerEntry =
> AllocatePageTableMemory (1);
> +        ASSERT(PageDirectoryPointerEntry != NULL);
> +        ZeroMem (PageDirectoryPointerEntry,
> EFI_PAGES_TO_SIZE(1));
> +
> +        *PageMapLevel4Entry =
> (UINT64)(UINTN)PageDirectoryPointerEntry |
> mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>        }
> -    } else {
> -      PageAddress = BASE_4GB;
> -      for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> NumberOfPdpEntriesNeeded; IndexOfPdpEntries++,
> PageDirectoryPointerEntry++) {
> -        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries
> < 4) {
> -          //
> -          // Skip the < 4G entries
> -          //
> -          continue;
> -        }
> -        //
> -        // Each Directory Pointer entries points to a
> page of Page Directory entires.
> -        // So allocate space for them and fill them in
> in the IndexOfPageDirectoryEntries loop.
> -        //
> -        PageDirectoryEntry = (UINT64 *)
> ((*PageDirectoryPointerEntry) & ~mAddressEncMask &
> gPhyMask);
> -        if (PageDirectoryEntry == NULL) {
> -          PageDirectoryEntry = AllocatePageTableMemory
> (1);
> -          ASSERT(PageDirectoryEntry != NULL);
> -          ZeroMem (PageDirectoryEntry,
> EFI_PAGES_TO_SIZE(1));
> 
> +      if (m1GPageTableSupport) {
> +        PageDirectory1GEntry =
> PageDirectoryPointerEntry;
> +        for (IndexOfPageDirectoryEntries = 0;
> IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectory1GEntry++,
> PageAddress += SIZE_1GB) {
> +          if (IndexOfPml4Entries == 0 &&
> IndexOfPageDirectoryEntries < 4) {
> +            //
> +            // Skip the < 4G entries
> +            //
> +            continue;
> +          }
>            //
> -          // Fill in a Page Directory Pointer Entries
> +          // Fill in the Page Directory entries
>            //
> -          *PageDirectoryPointerEntry =
> (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> +          *PageDirectory1GEntry = PageAddress |
> mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
>          }
> -
> -        for (IndexOfPageDirectoryEntries = 0;
> IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectoryEntry++,
> PageAddress += SIZE_2MB) {
> +      } else {
> +        PageAddress = BASE_4GB;
> +        for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> (NumberOfPml4EntriesNeeded == 1 ?
> NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++,
> PageDirectoryPointerEntry++) {
> +          if (IndexOfPml4Entries == 0 &&
> IndexOfPdpEntries < 4) {
> +            //
> +            // Skip the < 4G entries
> +            //
> +            continue;
> +          }
>            //
> -          // Fill in the Page Directory entries
> +          // Each Directory Pointer entry points to a page of Page Directory entries.
> +          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
>            //
> -          *PageDirectoryEntry = PageAddress |
> mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> +          PageDirectoryEntry = (UINT64 *)
> ((*PageDirectoryPointerEntry) & ~mAddressEncMask &
> gPhyMask);
> +          if (PageDirectoryEntry == NULL) {
> +            PageDirectoryEntry = AllocatePageTableMemory
> (1);
> +            ASSERT(PageDirectoryEntry != NULL);
> +            ZeroMem (PageDirectoryEntry,
> EFI_PAGES_TO_SIZE(1));
> +
> +            //
> +            // Fill in a Page Directory Pointer Entry
> +            //
> +            *PageDirectoryPointerEntry =
> (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> +          }
> +
> +          for (IndexOfPageDirectoryEntries = 0;
> IndexOfPageDirectoryEntries < 512;
> IndexOfPageDirectoryEntries++, PageDirectoryEntry++,
> PageAddress += SIZE_2MB) {
> +            //
> +            // Fill in the Page Directory entries
> +            //
> +            *PageDirectoryEntry = PageAddress |
> mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> +          }
>          }
>        }
>      }
> @@ -259,6 +326,8 @@ SmmInitPageTable (
>    UINTN
> PageFaultHandlerHookAddress;
>    IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
>    EFI_STATUS                        Status;
> +  UINT64                            *Pml4Entry;
> +  UINT64                            *Pml5Entry;
> 
>    //
>    // Initialize spin lock
> @@ -266,12 +335,14 @@ SmmInitPageTable (
>    InitializeSpinLock (mPFLock);
> 
>    mCpuSmmStaticPageTable = PcdGetBool
> (PcdCpuSmmStaticPageTable);
> -  m1GPageTableSupport = Is1GPageSupport ();
> -  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n",
> m1GPageTableSupport));
> -  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable -
> 0x%x\n", mCpuSmmStaticPageTable));
> -
> -  mPhysicalAddressBits = CalculateMaximumSupportAddress
> ();
> -  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n",
> mPhysicalAddressBits));
> +  m1GPageTableSupport    = Is1GPageSupport ();
> +  m5LevelPagingSupport   = Is5LevelPagingSupport ();
> +  mPhysicalAddressBits   =
> CalculateMaximumSupportAddress ();
> +  PatchInstructionX86 (gPatch5LevelPagingSupport,
> m5LevelPagingSupport, 1);
> +  DEBUG ((DEBUG_INFO, "5LevelPaging Support     - %d\n",
> m5LevelPagingSupport));
> +  DEBUG ((DEBUG_INFO, "1GPageTable Support      - %d\n",
> m1GPageTableSupport));
> +  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n",
> mCpuSmmStaticPageTable));
> +  DEBUG ((DEBUG_INFO, "PhysicalAddressBits      - %d\n",
> mPhysicalAddressBits));
>    //
>    // Generate PAE page table for the first 4GB memory
> space
>    //
> @@ -288,15 +359,30 @@ SmmInitPageTable (
>    //
>    // Fill Page-Table-Level4 (PML4) entry
>    //
> -  PTEntry = (UINT64*)AllocatePageTableMemory (1);
> -  ASSERT (PTEntry != NULL);
> -  *PTEntry = Pages | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> -  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof
> (*PTEntry));
> +  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
> +  ASSERT (Pml4Entry != NULL);
> +  *Pml4Entry = Pages | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> +  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof
> (*Pml4Entry));
> 
>    //
>    // Set sub-entries number
>    //
> -  SetSubEntriesNum (PTEntry, 3);
> +  SetSubEntriesNum (Pml4Entry, 3);
> +  PTEntry = Pml4Entry;
> +
> +  if (m5LevelPagingSupport) {
> +    //
> +    // Fill PML5 entry
> +    //
> +    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
> +    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask |
> PAGE_ATTRIBUTE_BITS;
> +    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof
> (*Pml5Entry));
> +    //
> +    // Set sub-entries number
> +    //
> +    SetSubEntriesNum (Pml5Entry, 1);
> +    PTEntry = Pml5Entry;
> +  }
> 
>    if (mCpuSmmStaticPageTable) {
>      SetStaticPageTable ((UINTN)PTEntry);
> @@ -344,7 +430,7 @@ SmmInitPageTable (
>    }
> 
>    //
> -  // Return the address of PML4 (to set CR3)
> +  // Return the address of PML4/PML5 (to set CR3)
>    //
>    return (UINT32)(UINTN)PTEntry;
>  }
> @@ -436,12 +522,16 @@ ReclaimPages (
>    VOID
>    )
>  {
> +  UINT64                       Pml5Entry;
> +  UINT64                       *Pml5;
>    UINT64                       *Pml4;
>    UINT64                       *Pdpt;
>    UINT64                       *Pdt;
> +  UINTN                        Pml5Index;
>    UINTN                        Pml4Index;
>    UINTN                        PdptIndex;
>    UINTN                        PdtIndex;
> +  UINTN                        MinPml5;
>    UINTN                        MinPml4;
>    UINTN                        MinPdpt;
>    UINTN                        MinPdt;
> @@ -451,120 +541,147 @@ ReclaimPages (
>    BOOLEAN                      PML4EIgnore;
>    BOOLEAN                      PDPTEIgnore;
>    UINT64                       *ReleasePageAddress;
> +  IA32_CR4                     Cr4;
> +  BOOLEAN                      Enable5LevelPaging;
> 
>    Pml4 = NULL;
>    Pdpt = NULL;
>    Pdt  = NULL;
>    MinAcc  = (UINT64)-1;
>    MinPml4 = (UINTN)-1;
> +  MinPml5 = (UINTN)-1;
>    MinPdpt = (UINTN)-1;
>    MinPdt  = (UINTN)-1;
>    Acc     = 0;
>    ReleasePageAddress = 0;
> 
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> +  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
> +
> +  if (!Enable5LevelPaging) {
> +    //
> +    // Create one fake PML5 entry for 4-Level Paging
> +    // so that the page table parsing logic only handles 5-Level page structure.
> +    //
> +    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
> +    Pml5 = &Pml5Entry;
> +  }
> +
>    //
>    // First, find the leaf entry has the smallest access
> record value
>    //
> -  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
> -  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof
> (*Pml4); Pml4Index++) {
> -    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 ||
> (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
> +  for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ?
> (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 ||
> (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
>        //
> -      // If the PML4 entry is not present or is masked,
> skip it
> +      // If the PML5 entry is not present or is masked,
> skip it
>        //
>        continue;
>      }
> -    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] &
> ~mAddressEncMask & gPhyMask);
> -    PML4EIgnore = FALSE;
> -    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE /
> sizeof (*Pdpt); PdptIndex++) {
> -      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 ||
> (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
> +    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
> +    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE /
> sizeof (*Pml4); Pml4Index++) {
> +      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 ||
> (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
>          //
> -        // If the PDPT entry is not present or is
> masked, skip it
> +        // If the PML4 entry is not present or is
> masked, skip it
>          //
> -        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
> -          //
> -          // If the PDPT entry is masked, we will ignore
> checking the PML4 entry
> -          //
> -          PML4EIgnore = TRUE;
> -        }
>          continue;
>        }
> -      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
> -        //
> -        // It's not 1-GByte pages entry, it should be a
> PDPT entry,
> -        // we will not check PML4 entry more
> -        //
> -        PML4EIgnore = TRUE;
> -        Pdt =  (UINT64*)(UINTN)(Pdpt[PdptIndex] &
> ~mAddressEncMask & gPhyMask);
> -        PDPTEIgnore = FALSE;
> -        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE /
> sizeof(*Pdt); PdtIndex++) {
> -          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 ||
> (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> +      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] &
> ~mAddressEncMask & gPhyMask);
> +      PML4EIgnore = FALSE;
> +      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE /
> sizeof (*Pdpt); PdptIndex++) {
> +        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 ||
> (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
> +          //
> +          // If the PDPT entry is not present or is
> masked, skip it
> +          //
> +          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
>              //
> -            // If the PD entry is not present or is
> masked, skip it
> +            // If the PDPT entry is masked, we will
> ignore checking the PML4 entry
>              //
> -            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> +            PML4EIgnore = TRUE;
> +          }
> +          continue;
> +        }
> +        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
> +          //
> +          // It's not a 1-GByte page entry; it should be a PDPT entry,
> +          // so we will not check the PML4 entry any more
> +          //
> +          PML4EIgnore = TRUE;
> +          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] &
> ~mAddressEncMask & gPhyMask);
> +          PDPTEIgnore = FALSE;
> +          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE /
> sizeof(*Pdt); PdtIndex++) {
> +            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 ||
> (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> +              //
> +              // If the PD entry is not present or is
> masked, skip it
> +              //
> +              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> +                //
> +                // If the PD entry is masked, we will not check the PDPT entry any more
> +                //
> +                PDPTEIgnore = TRUE;
> +              }
> +              continue;
> +            }
> +            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
>                //
> -              // If the PD entry is masked, we will not
> PDPT entry more
> +              // It's not a 2-MByte page table entry; it should be a PD entry,
> +              // so we will find the entry that has the smallest access record value
>                //
>                PDPTEIgnore = TRUE;
> +              Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
> +              if (Acc < MinAcc) {
> +                //
> +                // If the PD entry has the smallest
> access record value,
> +                // save the Page address to be released
> +                //
> +                MinAcc  = Acc;
> +                MinPml5 = Pml5Index;
> +                MinPml4 = Pml4Index;
> +                MinPdpt = PdptIndex;
> +                MinPdt  = PdtIndex;
> +                ReleasePageAddress = Pdt + PdtIndex;
> +              }
>              }
> -            continue;
>            }
> -          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
> +          if (!PDPTEIgnore) {
>              //
> -            // It's not 2 MByte page table entry, it
> should be PD entry
> -            // we will find the entry has the smallest
> access record value
> +            // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
> +            // it should only have entries pointing to 2 MByte pages
>              //
> -            PDPTEIgnore = TRUE;
> -            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
> +            Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
>              if (Acc < MinAcc) {
>                //
> -              // If the PD entry has the smallest access
> record value,
> +              // If the PDPT entry has the smallest
> access record value,
>                // save the Page address to be released
>                //
>                MinAcc  = Acc;
> +              MinPml5 = Pml5Index;
>                MinPml4 = Pml4Index;
>                MinPdpt = PdptIndex;
> -              MinPdt  = PdtIndex;
> -              ReleasePageAddress = Pdt + PdtIndex;
> +              MinPdt  = (UINTN)-1;
> +              ReleasePageAddress = Pdpt + PdptIndex;
>              }
>            }
>          }
> -        if (!PDPTEIgnore) {
> -          //
> -          // If this PDPT entry has no PDT entries
> pointer to 4 KByte pages,
> -          // it should only has the entries point to 2
> MByte Pages
> -          //
> -          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
> -          if (Acc < MinAcc) {
> -            //
> -            // If the PDPT entry has the smallest access
> record value,
> -            // save the Page address to be released
> -            //
> -            MinAcc  = Acc;
> -            MinPml4 = Pml4Index;
> -            MinPdpt = PdptIndex;
> -            MinPdt  = (UINTN)-1;
> -            ReleasePageAddress = Pdpt + PdptIndex;
> -          }
> -        }
>        }
> -    }
> -    if (!PML4EIgnore) {
> -      //
> -      // If PML4 entry has no the PDPT entry pointer to
> 2 MByte pages,
> -      // it should only has the entries point to 1 GByte
> Pages
> -      //
> -      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
> -      if (Acc < MinAcc) {
> +      if (!PML4EIgnore) {
>          //
> -        // If the PML4 entry has the smallest access
> record value,
> -        // save the Page address to be released
> +        // If the PML4 entry has no PDPT entries pointing to 2 MByte pages,
> +        // it should only have entries pointing to 1 GByte pages
>          //
> -        MinAcc  = Acc;
> -        MinPml4 = Pml4Index;
> -        MinPdpt = (UINTN)-1;
> -        MinPdt  = (UINTN)-1;
> -        ReleasePageAddress = Pml4 + Pml4Index;
> +        Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
> +        if (Acc < MinAcc) {
> +          //
> +          // If the PML4 entry has the smallest access
> record value,
> +          // save the Page address to be released
> +          //
> +          MinAcc  = Acc;
> +          MinPml5 = Pml5Index;
> +          MinPml4 = Pml4Index;
> +          MinPdpt = (UINTN)-1;
> +          MinPdt  = (UINTN)-1;
> +          ReleasePageAddress = Pml4 + Pml4Index;
> +        }
>        }
>      }
>    }
> @@ -588,6 +705,7 @@ ReclaimPages (
>        //
>        // If 4 KByte Page Table is released, check the
> PDPT entry
>        //
> +      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] &
> gPhyMask);
>        Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] &
> ~mAddressEncMask & gPhyMask);
>        SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
>        if (SubEntriesNum == 0) {
> @@ -679,7 +797,7 @@ SmiDefaultPFHandler (
>    )
>  {
>    UINT64                            *PageTable;
> -  UINT64                            *Pml4;
> +  UINT64                            *PageTableTop;
>    UINT64                            PFAddress;
>    UINTN                             StartBit;
>    UINTN                             EndBit;
> @@ -690,6 +808,8 @@ SmiDefaultPFHandler (
>    UINTN                             PageAttribute;
>    EFI_STATUS                        Status;
>    UINT64                            *UpperEntry;
> +  BOOLEAN                           Enable5LevelPaging;
> +  IA32_CR4                          Cr4;
> 
>    //
>    // Set default SMM page attribute
> @@ -699,9 +819,12 @@ SmiDefaultPFHandler (
>    PageAttribute = 0;
> 
>    EndBit = 0;
> -  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
> +  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
>    PFAddress = AsmReadCr2 ();
> 
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
> +
>    Status = GetPlatformPageTableAttribute (PFAddress,
> &PageSize, &NumOfPages, &PageAttribute);
>    //
>    // If platform not support page table attribute, set
> default SMM page attribute
> @@ -755,9 +878,9 @@ SmiDefaultPFHandler (
>    }
> 
>    for (Index = 0; Index < NumOfPages; Index++) {
> -    PageTable  = Pml4;
> +    PageTable  = PageTableTop;
>      UpperEntry = NULL;
> -    for (StartBit = 39; StartBit > EndBit; StartBit -=
> 9) {
> +    for (StartBit = Enable5LevelPaging ? 48 : 39;
> StartBit > EndBit; StartBit -= 9) {
>        PTIndex = BitFieldRead64 (PFAddress, StartBit,
> StartBit + 8);
>        if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
>          //
> @@ -941,13 +1064,20 @@ SetPageTableAttributes (
>    UINTN                 Index2;
>    UINTN                 Index3;
>    UINTN                 Index4;
> +  UINTN                 Index5;
>    UINT64                *L1PageTable;
>    UINT64                *L2PageTable;
>    UINT64                *L3PageTable;
>    UINT64                *L4PageTable;
> +  UINT64                *L5PageTable;
>    BOOLEAN               IsSplitted;
>    BOOLEAN               PageTableSplitted;
>    BOOLEAN               CetEnabled;
> +  IA32_CR4              Cr4;
> +  BOOLEAN               Enable5LevelPaging;
> +
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> 
>    //
>    // Don't do this if
> @@ -991,44 +1121,59 @@ SetPageTableAttributes (
>    do {
>      DEBUG ((DEBUG_INFO, "Start...\n"));
>      PageTableSplitted = FALSE;
> -
> -    L4PageTable = (UINT64 *)GetPageTableBase ();
> -    SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
> -    PageTableSplitted = (PageTableSplitted ||
> IsSplitted);
> -
> -    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64);
> Index4++) {
> -      L3PageTable = (UINT64
> *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask &
> PAGING_4K_ADDRESS_MASK_64);
> -      if (L3PageTable == NULL) {
> -        continue;
> -      }
> -
> -      SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
> +    L5PageTable = NULL;
> +    if (Enable5LevelPaging) {
> +      L5PageTable = (UINT64 *)GetPageTableBase ();
> +      SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
>        PageTableSplitted = (PageTableSplitted ||
> IsSplitted);
> +    }
> 
> -      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64);
> Index3++) {
> -        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
> -          // 1G
> +    for (Index5 = 0; Index5 < (Enable5LevelPaging ?
> SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
> +      if (Enable5LevelPaging) {
> +        L4PageTable = (UINT64
> *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask &
> PAGING_4K_ADDRESS_MASK_64);
> +        if (L4PageTable == NULL) {
>            continue;
>          }
> -        L2PageTable = (UINT64
> *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask &
> PAGING_4K_ADDRESS_MASK_64);
> -        if (L2PageTable == NULL) {
> +      } else {
> +        L4PageTable = (UINT64 *)GetPageTableBase ();
> +      }
> +      SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
> +      PageTableSplitted = (PageTableSplitted ||
> IsSplitted);
> +
> +      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64);
> Index4++) {
> +        L3PageTable = (UINT64
> *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask &
> PAGING_4K_ADDRESS_MASK_64);
> +        if (L3PageTable == NULL) {
>            continue;
>          }
> 
> -        SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
> +        SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
>          PageTableSplitted = (PageTableSplitted ||
> IsSplitted);
> 
> -        for (Index2 = 0; Index2 <
> SIZE_4KB/sizeof(UINT64); Index2++) {
> -          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
> -            // 2M
> +        for (Index3 = 0; Index3 <
> SIZE_4KB/sizeof(UINT64); Index3++) {
> +          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
> +            // 1G
>              continue;
>            }
> -          L1PageTable = (UINT64
> *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask &
> PAGING_4K_ADDRESS_MASK_64);
> -          if (L1PageTable == NULL) {
> +          L2PageTable = (UINT64
> *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask &
> PAGING_4K_ADDRESS_MASK_64);
> +          if (L2PageTable == NULL) {
>              continue;
>            }
> -          SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
> +
> +          SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
>            PageTableSplitted = (PageTableSplitted ||
> IsSplitted);
> +
> +          for (Index2 = 0; Index2 <
> SIZE_4KB/sizeof(UINT64); Index2++) {
> +            if ((L2PageTable[Index2] & IA32_PG_PS) != 0)
> {
> +              // 2M
> +              continue;
> +            }
> +            L1PageTable = (UINT64
> *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask &
> PAGING_4K_ADDRESS_MASK_64);
> +            if (L1PageTable == NULL) {
> +              continue;
> +            }
> +            SmmSetMemoryAttributesEx
> ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB,
> EFI_MEMORY_RO, &IsSplitted);
> +            PageTableSplitted = (PageTableSplitted ||
> IsSplitted);
> +          }
>          }
>        }
>      }
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> index 741e4b7da2..271492a9d7 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> @@ -69,6 +69,7 @@ extern ASM_PFX(mXdSupported)
>  global ASM_PFX(gPatchXdSupported)
>  global ASM_PFX(gPatchSmiStack)
>  global ASM_PFX(gPatchSmiCr3)
> +global ASM_PFX(gPatch5LevelPagingSupport)
>  global ASM_PFX(gcSmiHandlerTemplate)
>  global ASM_PFX(gcSmiHandlerSize)
> 
> @@ -124,6 +125,17 @@ ProtFlatMode:
>  ASM_PFX(gPatchSmiCr3):
>      mov     cr3, rax
>      mov     eax, 0x668                   ; as cr4.PGE is
> not set here, refresh cr3
> +
> +    mov     cl, strict byte 0            ; source operand will be patched
> +ASM_PFX(gPatch5LevelPagingSupport):
> +    cmp     cl, 0
> +    je      SkipEnable5LevelPaging
> +    ;
> +    ; Enable 5-Level Paging bit
> +    ;
> +    bts     eax, 12                     ; Set LA57 bit (bit #12)
> +SkipEnable5LevelPaging:
> +
>      mov     cr4, rax                    ; in
> PreModifyMtrrs() to flush TLB.
>  ; Load TSS
>      sub     esp, 8                      ; reserve room
> in stack
> diff --git
> a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> index e7c78d36fc..63bae5a913 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> @@ -1,7 +1,7 @@
>  /** @file
>  X64 processor specific functions to enable SMM profile.
> 
> -Copyright (c) 2012 - 2016, Intel Corporation. All rights
> reserved.<BR>
> +Copyright (c) 2012 - 2019, Intel Corporation. All rights
> reserved.<BR>
>  Copyright (c) 2017, AMD Incorporated. All rights
> reserved.<BR>
> 
>  SPDX-License-Identifier: BSD-2-Clause-Patent
> @@ -147,9 +147,14 @@ RestorePageTableAbove4G (
>    BOOLEAN       Existed;
>    UINTN         Index;
>    UINTN         PFIndex;
> +  IA32_CR4      Cr4;
> +  BOOLEAN       Enable5LevelPaging;
> 
>    ASSERT ((PageTable != NULL) && (IsValidPFAddress !=
> NULL));
> 
> +  Cr4.UintN = AsmReadCr4 ();
> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> +
>    //
>    // If page fault address is 4GB above.
>    //
> @@ -161,38 +166,48 @@ RestorePageTableAbove4G (
>    //
>    Existed = FALSE;
>    PageTable = (UINT64*)(AsmReadCr3 () &
> PHYSICAL_ADDRESS_MASK);
> -  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
> -  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> -    // PML4E
> -    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> -    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
> +  PTIndex = 0;
> +  if (Enable5LevelPaging) {
> +    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
> +  }
> +  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] &
> IA32_PG_P) != 0)) {
> +    // PML5E
> +    if (Enable5LevelPaging) {
> +      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> +    }
> +    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
>      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> -      // PDPTE
> +      // PML4E
>        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> -      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
> -      // PD
> -      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
> -        //
> -        // 2MB page
> -        //
> -        Address = (UINT64)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> -        if ((Address & ~((1ull << 21) - 1)) ==
> ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) -
> 1)))) {
> -          Existed = TRUE;
> -        }
> -      } else {
> -        //
> -        // 4KB page
> -        //
> -        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex]
> & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
> -        if (PageTable != 0) {
> +      PTIndex = BitFieldRead64 (PFAddress, 30, 38);
> +      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> +        // PDPTE
> +        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex]
> & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> +        PTIndex = BitFieldRead64 (PFAddress, 21, 29);
> +        // PD
> +        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
>            //
> -          // When there is a valid entry to map to 4KB
> page, need not create a new entry to map 2MB.
> +          // 2MB page
>            //
> -          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
>            Address = (UINT64)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> -          if ((Address & ~((1ull << 12) - 1)) ==
> (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) -
> 1))) {
> +          if ((Address & ~((1ull << 21) - 1)) ==
> ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) -
> 1)))) {
>              Existed = TRUE;
>            }
> +        } else {
> +          //
> +          // 4KB page
> +          //
> +          PageTable =
> (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask&
> PHYSICAL_ADDRESS_MASK);
> +          if (PageTable != 0) {
> +            //
> +            // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
> +            //
> +            PTIndex = BitFieldRead64 (PFAddress, 12,
> 20);
> +            Address = (UINT64)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> +            if ((Address & ~((1ull << 12) - 1)) ==
> (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) -
> 1))) {
> +              Existed = TRUE;
> +            }
> +          }
>          }
>        }
>      }
> @@ -221,6 +236,11 @@ RestorePageTableAbove4G (
>      //
>      PageTable = (UINT64*)(AsmReadCr3 () &
> PHYSICAL_ADDRESS_MASK);
>      PFAddress = AsmReadCr2 ();
> +    // PML5E
> +    if (Enable5LevelPaging) {
> +      PTIndex = BitFieldRead64 (PFAddress, 48, 56);
> +      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> +    }
>      // PML4E
>      PTIndex = BitFieldRead64 (PFAddress, 39, 47);
>      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> --
> 2.21.0.windows.1
> 
> 
> 



Re: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu: Enable 5 level paging when CPU supports
Posted by Ni, Ray 6 years, 7 months ago
Mike,
Thanks for raising this build failure.
I just tried it on my Ubuntu 18 (under Win10); even GCC 7 complains about this. My bad!
I just posted a fix.
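
For anyone skimming the thread, the warning is an operator-precedence issue: '<' binds tighter than '?:', so without parentheses the whole conditional expression becomes the loop condition and can never evaluate to zero, which is what GCC 8 flags. A minimal stand-alone C sketch of the difference (the PML5_ENTRIES constant and the sample values are made up for illustration, not taken from the patch):

#include <stdio.h>

#define PML5_ENTRIES  512   /* stands in for EFI_PAGE_SIZE / sizeof (*Pml4) */

int
main (void)
{
  int  Enable5LevelPaging;
  int  Pml5Index;

  Enable5LevelPaging = 0;   /* 4-level paging: the intended loop bound is 1 */
  Pml5Index          = 3;

  /* Parsed as (Pml5Index < Enable5LevelPaging) ? PML5_ENTRIES : 1, which is
     either 512 or 1, so as a loop condition it can never become false.     */
  printf ("unparenthesized condition: %d\n",
          (int) (Pml5Index < Enable5LevelPaging ? PML5_ENTRIES : 1));

  /* With parentheses the ternary selects the bound first, and the comparison
     against Pml5Index is what actually terminates the loop.                 */
  printf ("parenthesized condition:   %d\n",
          (int) (Pml5Index < (Enable5LevelPaging ? PML5_ENTRIES : 1)));

  return 0;
}

With these values the first expression prints 1 (still true) and the second prints 0, which is the difference the added parentheses make to the loop bound in ReclaimPages().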

Thanks,
Ray
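
Background for the page-fault handling hunks quoted below: with CR4.LA57 set, the handlers walk one extra paging level, taking bits 48-56 of the linear address as the PML5 index before the usual 39-47 / 30-38 / 21-29 / 12-20 splits. A small self-contained sketch of that decomposition (the helper name and the sample address are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: pull one 9-bit page-table index out of a linear
   address, mirroring BitFieldRead64 (Address, LowBit, LowBit + 8).      */
static uint64_t
PageIndex (
  uint64_t      Address,
  unsigned int  LowBit
  )
{
  return (Address >> LowBit) & 0x1FFULL;
}

int
main (void)
{
  uint64_t  Address;

  Address = 0x01FF8040201FF000ULL;   /* made-up 57-bit linear address */

  printf ("PML5 index: %llu\n", (unsigned long long) PageIndex (Address, 48));
  printf ("PML4 index: %llu\n", (unsigned long long) PageIndex (Address, 39));
  printf ("PDPT index: %llu\n", (unsigned long long) PageIndex (Address, 30));
  printf ("PD   index: %llu\n", (unsigned long long) PageIndex (Address, 21));
  printf ("PT   index: %llu\n", (unsigned long long) PageIndex (Address, 12));
  return 0;
}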

> -----Original Message-----
> From: Kinney, Michael D
> Sent: Thursday, July 11, 2019 4:06 AM
> To: devel@edk2.groups.io; Ni, Ray <ray.ni@intel.com>; Kinney, Michael D <michael.d.kinney@intel.com>
> Cc: Dong, Eric <eric.dong@intel.com>; Laszlo Ersek <lersek@redhat.com>
> Subject: RE: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu: Enable 5 level paging when CPU supports
> 
> Hi Ray,
> 
> I noticed a Linux/GCC build issue with this patch when using GCC version:
> 
>     gcc version 8.2.1 20181215 (Red Hat 8.2.1-6) (GCC)
> 
> edk2/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c: In function 'ReclaimPages':
> edk2/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c:574:89: error: ?: using integer constants in boolean context, the
> expression will always evaluate to 'true' [-Werror=int-in-bool-context]
>    for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
> 
> I was able to get the build to pass if I added ().
> 
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> index c31160735a..a3b62f7787 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> @@ -571,7 +571,7 @@ ReclaimPages (
>    //
>    // First, find the leaf entry has the smallest access record value
>    //
> -  for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
> +  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
>      if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
>        //
>        // If the PML5 entry is not present or is masked, skip it
> 
> Best regards,
> 
> Mike
> 
> > -----Original Message-----
> > From: devel@edk2.groups.io [mailto:devel@edk2.groups.io]
> > On Behalf Of Ni, Ray
> > Sent: Tuesday, July 2, 2019 11:54 PM
> > To: devel@edk2.groups.io
> > Cc: Dong, Eric <eric.dong@intel.com>; Laszlo Ersek
> > <lersek@redhat.com>
> > Subject: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu:
> > Enable 5 level paging when CPU supports
> >
> > REF:https://bugzilla.tianocore.org/show_bug.cgi?id=1946
> >
> > The patch changes SMM environment to use 5 level paging
> > when CPU
> > supports it.
> >
> > Signed-off-by: Ray Ni <ray.ni@intel.com>
> > Cc: Eric Dong <eric.dong@intel.com>
> > Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
> > ---
> >  .../PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c   |  20 +-
> >  UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c        | 272
> > ++++++----
> >  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c       | 485
> > ++++++++++++------
> >  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm   |  12 +
> >  .../PiSmmCpuDxeSmm/X64/SmmProfileArch.c       |  72 ++-
> >  5 files changed, 561 insertions(+), 300 deletions(-)
> >
> > diff --git
> > a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > index 069be3aaa5..55090e9c3e 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
> > @@ -125,18 +125,36 @@ GetPageTableEntry (
> >    UINTN                 Index2;
> >    UINTN                 Index3;
> >    UINTN                 Index4;
> > +  UINTN                 Index5;
> >    UINT64                *L1PageTable;
> >    UINT64                *L2PageTable;
> >    UINT64                *L3PageTable;
> >    UINT64                *L4PageTable;
> > +  UINT64                *L5PageTable;
> > +  IA32_CR4              Cr4;
> > +  BOOLEAN               Enable5LevelPaging;
> >
> > +  Index5 = ((UINTN)RShiftU64 (Address, 48)) &
> > PAGING_PAE_INDEX_MASK;
> >    Index4 = ((UINTN)RShiftU64 (Address, 39)) &
> > PAGING_PAE_INDEX_MASK;
> >    Index3 = ((UINTN)Address >> 30) &
> > PAGING_PAE_INDEX_MASK;
> >    Index2 = ((UINTN)Address >> 21) &
> > PAGING_PAE_INDEX_MASK;
> >    Index1 = ((UINTN)Address >> 12) &
> > PAGING_PAE_INDEX_MASK;
> >
> > +  Cr4.UintN = AsmReadCr4 ();
> > +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> > +
> >    if (sizeof(UINTN) == sizeof(UINT64)) {
> > -    L4PageTable = (UINT64 *)GetPageTableBase ();
> > +    if (Enable5LevelPaging) {
> > +      L5PageTable = (UINT64 *)GetPageTableBase ();
> > +      if (L5PageTable[Index5] == 0) {
> > +        *PageAttribute = PageNone;
> > +        return NULL;
> > +      }
> > +
> > +      L4PageTable = (UINT64
> > *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask &
> > PAGING_4K_ADDRESS_MASK_64);
> > +    } else {
> > +      L4PageTable = (UINT64 *)GetPageTableBase ();
> > +    }
> >      if (L4PageTable[Index4] == 0) {
> >        *PageAttribute = PageNone;
> >        return NULL;
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> > index e2b6a2d9b2..c5131526f0 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
> > @@ -534,43 +534,78 @@ InitPaging (
> >    VOID
> >    )
> >  {
> > +  UINT64                            Pml5Entry;
> > +  UINT64                            Pml4Entry;
> > +  UINT64                            *Pml5;
> >    UINT64                            *Pml4;
> >    UINT64                            *Pdpt;
> >    UINT64                            *Pd;
> >    UINT64                            *Pt;
> >    UINTN                             Address;
> > +  UINTN                             Pml5Index;
> >    UINTN                             Pml4Index;
> >    UINTN                             PdptIndex;
> >    UINTN                             PdIndex;
> >    UINTN                             PtIndex;
> >    UINTN                             NumberOfPdptEntries;
> >    UINTN                             NumberOfPml4Entries;
> > +  UINTN                             NumberOfPml5Entries;
> >    UINTN                             SizeOfMemorySpace;
> >    BOOLEAN                           Nx;
> > +  IA32_CR4                          Cr4;
> > +  BOOLEAN                           Enable5LevelPaging;
> > +
> > +  Cr4.UintN = AsmReadCr4 ();
> > +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> >
> >    if (sizeof (UINTN) == sizeof (UINT64)) {
> > -    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
> > +    if (!Enable5LevelPaging) {
> > +      Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
> > +      Pml5 = &Pml5Entry;
> > +    } else {
> > +      Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
> > +    }
> >      SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
> >      //
> >      // Calculate the table entries of PML4E and PDPTE.
> >      //
> > -    if (SizeOfMemorySpace <= 39 ) {
> > -      NumberOfPml4Entries = 1;
> > -      NumberOfPdptEntries = (UINT32)LShiftU64 (1,
> > (SizeOfMemorySpace - 30));
> > -    } else {
> > -      NumberOfPml4Entries = (UINT32)LShiftU64 (1,
> > (SizeOfMemorySpace - 39));
> > -      NumberOfPdptEntries = 512;
> > +    NumberOfPml5Entries = 1;
> > +    if (SizeOfMemorySpace > 48) {
> > +      NumberOfPml5Entries = (UINTN) LShiftU64 (1,
> > SizeOfMemorySpace - 48);
> > +      SizeOfMemorySpace = 48;
> >      }
> > -  } else {
> > +
> >      NumberOfPml4Entries = 1;
> > +    if (SizeOfMemorySpace > 39) {
> > +      NumberOfPml4Entries = (UINTN) LShiftU64 (1,
> > SizeOfMemorySpace - 39);
> > +      SizeOfMemorySpace = 39;
> > +    }
> > +
> > +    NumberOfPdptEntries = 1;
> > +    ASSERT (SizeOfMemorySpace > 30);
> > +    NumberOfPdptEntries = (UINTN) LShiftU64 (1,
> > SizeOfMemorySpace - 30);
> > +  } else {
> > +    Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
> > +    Pml4 = &Pml4Entry;
> > +    Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
> > +    Pml5 = &Pml5Entry;
> > +    NumberOfPml5Entries  = 1;
> > +    NumberOfPml4Entries  = 1;
> >      NumberOfPdptEntries  = 4;
> >    }
> >
> >    //
> >    // Go through page table and change 2MB-page into 4KB-
> > page.
> >    //
> > -  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
> > Pml4Index++) {
> > -    if (sizeof (UINTN) == sizeof (UINT64)) {
> > +  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries;
> > Pml5Index++) {
> > +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
> > +      //
> > +      // If PML5 entry does not exist, skip it
> > +      //
> > +      continue;
> > +    }
> > +    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] &
> > PHYSICAL_ADDRESS_MASK);
> > +    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
> > Pml4Index++) {
> >        if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
> >          //
> >          // If PML4 entry does not exist, skip it
> > @@ -578,63 +613,76 @@ InitPaging (
> >          continue;
> >        }
> >        Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > -    } else {
> > -      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
> > -    }
> > -    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries;
> > PdptIndex++, Pdpt++) {
> > -      if ((*Pdpt & IA32_PG_P) == 0) {
> > -        //
> > -        // If PDPT entry does not exist, skip it
> > -        //
> > -        continue;
> > -      }
> > -      if ((*Pdpt & IA32_PG_PS) != 0) {
> > -        //
> > -        // This is 1G entry, skip it
> > -        //
> > -        continue;
> > -      }
> > -      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
> > PHYSICAL_ADDRESS_MASK);
> > -      if (Pd == 0) {
> > -        continue;
> > -      }
> > -      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
> > (*Pd); PdIndex++, Pd++) {
> > -        if ((*Pd & IA32_PG_P) == 0) {
> > +      for (PdptIndex = 0; PdptIndex <
> > NumberOfPdptEntries; PdptIndex++, Pdpt++) {
> > +        if ((*Pdpt & IA32_PG_P) == 0) {
> > +          //
> > +          // If PDPT entry does not exist, skip it
> > +          //
> > +          continue;
> > +        }
> > +        if ((*Pdpt & IA32_PG_PS) != 0) {
> >            //
> > -          // If PD entry does not exist, skip it
> > +          // This is 1G entry, skip it
> >            //
> >            continue;
> >          }
> > -        Address = (((PdptIndex << 9) + PdIndex) << 21);
> > +        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask
> > & PHYSICAL_ADDRESS_MASK);
> > +        if (Pd == 0) {
> > +          continue;
> > +        }
> > +        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
> > (*Pd); PdIndex++, Pd++) {
> > +          if ((*Pd & IA32_PG_P) == 0) {
> > +            //
> > +            // If PD entry does not exist, skip it
> > +            //
> > +            continue;
> > +          }
> > +          Address = (UINTN) LShiftU64 (
> > +                              LShiftU64 (
> > +                                LShiftU64 ((Pml5Index <<
> > 9) + Pml4Index, 9) + PdptIndex,
> > +                                9
> > +                                ) + PdIndex,
> > +                                21
> > +                              );
> >
> > -        //
> > -        // If it is 2M page, check IsAddressSplit()
> > -        //
> > -        if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit
> > (Address)) {
> >            //
> > -          // Based on current page table, create 4KB
> > page table for split area.
> > +          // If it is 2M page, check IsAddressSplit()
> >            //
> > -          ASSERT (Address == (*Pd &
> > PHYSICAL_ADDRESS_MASK));
> > +          if (((*Pd & IA32_PG_PS) != 0) &&
> > IsAddressSplit (Address)) {
> > +            //
> > +            // Based on current page table, create 4KB
> > page table for split area.
> > +            //
> > +            ASSERT (Address == (*Pd &
> > PHYSICAL_ADDRESS_MASK));
> > +
> > +            Pt = AllocatePageTableMemory (1);
> > +            ASSERT (Pt != NULL);
> >
> > -          Pt = AllocatePageTableMemory (1);
> > -          ASSERT (Pt != NULL);
> > +            *Pd = (UINTN) Pt | IA32_PG_RW | IA32_PG_P;
> >
> > -          // Split it
> > -          for (PtIndex = 0; PtIndex < SIZE_4KB /
> > sizeof(*Pt); PtIndex++) {
> > -            Pt[PtIndex] = Address + ((PtIndex << 12) |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS);
> > -          } // end for PT
> > -          *Pd = (UINT64)(UINTN)Pt | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > -        } // end if IsAddressSplit
> > -      } // end for PD
> > -    } // end for PDPT
> > -  } // end for PML4
> > +            // Split it
> > +            for (PtIndex = 0; PtIndex < SIZE_4KB /
> > sizeof(*Pt); PtIndex++, Pt++) {
> > +              *Pt = Address + ((PtIndex << 12) |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS);
> > +            } // end for PT
> > +            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > +          } // end if IsAddressSplit
> > +        } // end for PD
> > +      } // end for PDPT
> > +    } // end for PML4
> > +  } // end for PML5
> >
> >    //
> >    // Go through page table and set several page table
> > entries to absent or execute-disable.
> >    //
> >    DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
> > -  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
> > Pml4Index++) {
> > -    if (sizeof (UINTN) == sizeof (UINT64)) {
> > +  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries;
> > Pml5Index++) {
> > +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
> > +      //
> > +      // If PML5 entry does not exist, skip it
> > +      //
> > +      continue;
> > +    }
> > +    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] &
> > PHYSICAL_ADDRESS_MASK);
> > +    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
> > Pml4Index++) {
> >        if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
> >          //
> >          // If PML4 entry does not exist, skip it
> > @@ -642,69 +690,73 @@ InitPaging (
> >          continue;
> >        }
> >        Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > -    } else {
> > -      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
> > -    }
> > -    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries;
> > PdptIndex++, Pdpt++) {
> > -      if ((*Pdpt & IA32_PG_P) == 0) {
> > -        //
> > -        // If PDPT entry does not exist, skip it
> > -        //
> > -        continue;
> > -      }
> > -      if ((*Pdpt & IA32_PG_PS) != 0) {
> > -        //
> > -        // This is 1G entry, set NX bit and skip it
> > -        //
> > -        if (mXdSupported) {
> > -          *Pdpt = *Pdpt | IA32_PG_NX;
> > +      for (PdptIndex = 0; PdptIndex <
> > NumberOfPdptEntries; PdptIndex++, Pdpt++) {
> > +        if ((*Pdpt & IA32_PG_P) == 0) {
> > +          //
> > +          // If PDPT entry does not exist, skip it
> > +          //
> > +          continue;
> >          }
> > -        continue;
> > -      }
> > -      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
> > PHYSICAL_ADDRESS_MASK);
> > -      if (Pd == 0) {
> > -        continue;
> > -      }
> > -      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
> > (*Pd); PdIndex++, Pd++) {
> > -        if ((*Pd & IA32_PG_P) == 0) {
> > +        if ((*Pdpt & IA32_PG_PS) != 0) {
> >            //
> > -          // If PD entry does not exist, skip it
> > +          // This is 1G entry, set NX bit and skip it
> >            //
> > +          if (mXdSupported) {
> > +            *Pdpt = *Pdpt | IA32_PG_NX;
> > +          }
> >            continue;
> >          }
> > -        Address = (((PdptIndex << 9) + PdIndex) << 21);
> > -
> > -        if ((*Pd & IA32_PG_PS) != 0) {
> > -          // 2MB page
> > -
> > -          if (!IsAddressValid (Address, &Nx)) {
> > +        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask
> > & PHYSICAL_ADDRESS_MASK);
> > +        if (Pd == 0) {
> > +          continue;
> > +        }
> > +        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
> > (*Pd); PdIndex++, Pd++) {
> > +          if ((*Pd & IA32_PG_P) == 0) {
> >              //
> > -            // Patch to remove Present flag and RW flag
> > +            // If PD entry does not exist, skip it
> >              //
> > -            *Pd = *Pd &
> > (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> > -          }
> > -          if (Nx && mXdSupported) {
> > -            *Pd = *Pd | IA32_PG_NX;
> > -          }
> > -        } else {
> > -          // 4KB page
> > -          Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask
> > & PHYSICAL_ADDRESS_MASK);
> > -          if (Pt == 0) {
> >              continue;
> >            }
> > -          for (PtIndex = 0; PtIndex < SIZE_4KB /
> > sizeof(*Pt); PtIndex++, Pt++) {
> > +          Address = (UINTN) LShiftU64 (
> > +                              LShiftU64 (
> > +                                LShiftU64 ((Pml5Index <<
> > 9) + Pml4Index, 9) + PdptIndex,
> > +                                9
> > +                                ) + PdIndex,
> > +                                21
> > +                              );
> > +
> > +          if ((*Pd & IA32_PG_PS) != 0) {
> > +            // 2MB page
> > +
> >              if (!IsAddressValid (Address, &Nx)) {
> > -              *Pt = *Pt &
> > (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> > +              //
> > +              // Patch to remove Present flag and RW
> > flag
> > +              //
> > +              *Pd = *Pd &
> > (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> >              }
> >              if (Nx && mXdSupported) {
> > -              *Pt = *Pt | IA32_PG_NX;
> > +              *Pd = *Pd | IA32_PG_NX;
> > +            }
> > +          } else {
> > +            // 4KB page
> > +            Pt = (UINT64 *)(UINTN)(*Pd &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > +            if (Pt == 0) {
> > +              continue;
> >              }
> > -            Address += SIZE_4KB;
> > -          } // end for PT
> > -        } // end if PS
> > -      } // end for PD
> > -    } // end for PDPT
> > -  } // end for PML4
> > +            for (PtIndex = 0; PtIndex < SIZE_4KB /
> > sizeof(*Pt); PtIndex++, Pt++) {
> > +              if (!IsAddressValid (Address, &Nx)) {
> > +                *Pt = *Pt &
> > (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
> > +              }
> > +              if (Nx && mXdSupported) {
> > +                *Pt = *Pt | IA32_PG_NX;
> > +              }
> > +              Address += SIZE_4KB;
> > +            } // end for PT
> > +          } // end if PS
> > +        } // end for PD
> > +      } // end for PDPT
> > +    } // end for PML4
> > +  } // end for PML5
> >
> >    //
> >    // Flush TLB
> > @@ -1156,6 +1208,20 @@ RestorePageTableBelow4G (
> >  {
> >    UINTN         PTIndex;
> >    UINTN         PFIndex;
> > +  IA32_CR4      Cr4;
> > +  BOOLEAN       Enable5LevelPaging;
> > +
> > +  Cr4.UintN = AsmReadCr4 ();
> > +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> > +
> > +  //
> > +  // PML5
> > +  //
> > +  if (Enable5LevelPaging) {
> > +    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
> > +    ASSERT (PageTable[PTIndex] != 0);
> > +    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> > PHYSICAL_ADDRESS_MASK);
> > +  }
> >
> >    //
> >    // PML4
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > index 3d5d663d99..c31160735a 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> > @@ -16,6 +16,8 @@ SPDX-License-Identifier: BSD-2-Clause-
> > Patent
> >  LIST_ENTRY                          mPagePool =
> > INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
> >  BOOLEAN                             m1GPageTableSupport
> > = FALSE;
> >  BOOLEAN
> > mCpuSmmStaticPageTable;
> > +BOOLEAN
> > m5LevelPagingSupport;
> > +X86_ASSEMBLY_PATCH_LABEL
> > gPatch5LevelPagingSupport;
> >
> >  /**
> >    Disable CET.
> > @@ -60,6 +62,31 @@ Is1GPageSupport (
> >    return FALSE;
> >  }
> >
> > +/**
> > +  Check if 5-level paging is supported by processor or
> > not.
> > +
> > +  @retval TRUE   5-level paging is supported.
> > +  @retval FALSE  5-level paging is not supported.
> > +
> > +**/
> > +BOOLEAN
> > +Is5LevelPagingSupport (
> > +  VOID
> > +  )
> > +{
> > +  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
> > +
> > +  AsmCpuidEx (
> > +    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
> > +
> > CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
> > +    NULL,
> > +    NULL,
> > +    &EcxFlags.Uint32,
> > +    NULL
> > +    );
> > +  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
> > +}
> > +
> >  /**
> >    Set sub-entries number in entry.
> >
> > @@ -130,14 +157,6 @@ CalculateMaximumSupportAddress (
> >        PhysicalAddressBits = 36;
> >      }
> >    }
> > -
> > -  //
> > -  // IA-32e paging translates 48-bit linear addresses to
> > 52-bit physical addresses.
> > -  //
> > -  ASSERT (PhysicalAddressBits <= 52);
> > -  if (PhysicalAddressBits > 48) {
> > -    PhysicalAddressBits = 48;
> > -  }
> >    return PhysicalAddressBits;
> >  }
> >
> > @@ -152,89 +171,137 @@ SetStaticPageTable (
> >    )
> >  {
> >    UINT64
> > PageAddress;
> > +  UINTN
> > NumberOfPml5EntriesNeeded;
> >    UINTN
> > NumberOfPml4EntriesNeeded;
> >    UINTN
> > NumberOfPdpEntriesNeeded;
> > +  UINTN
> > IndexOfPml5Entries;
> >    UINTN
> > IndexOfPml4Entries;
> >    UINTN
> > IndexOfPdpEntries;
> >    UINTN
> > IndexOfPageDirectoryEntries;
> > +  UINT64
> > *PageMapLevel5Entry;
> >    UINT64
> > *PageMapLevel4Entry;
> >    UINT64
> > *PageMap;
> >    UINT64
> > *PageDirectoryPointerEntry;
> >    UINT64
> > *PageDirectory1GEntry;
> >    UINT64
> > *PageDirectoryEntry;
> >
> > -  if (mPhysicalAddressBits <= 39 ) {
> > -    NumberOfPml4EntriesNeeded = 1;
> > -    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1,
> > (mPhysicalAddressBits - 30));
> > -  } else {
> > -    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1,
> > (mPhysicalAddressBits - 39));
> > -    NumberOfPdpEntriesNeeded = 512;
> > +  //
> > +  // IA-32e paging translates 48-bit linear addresses to
> > 52-bit physical addresses
> > +  //  when 5-Level Paging is disabled.
> > +  //
> > +  ASSERT (mPhysicalAddressBits <= 52);
> > +  if (!m5LevelPagingSupport && mPhysicalAddressBits >
> > 48) {
> > +    mPhysicalAddressBits = 48;
> > +  }
> > +
> > +  NumberOfPml5EntriesNeeded = 1;
> > +  if (mPhysicalAddressBits > 48) {
> > +    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1,
> > mPhysicalAddressBits - 48);
> > +    mPhysicalAddressBits = 48;
> > +  }
> > +
> > +  NumberOfPml4EntriesNeeded = 1;
> > +  if (mPhysicalAddressBits > 39) {
> > +    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1,
> > mPhysicalAddressBits - 39);
> > +    mPhysicalAddressBits = 39;
> >    }
> >
> > +  NumberOfPdpEntriesNeeded = 1;
> > +  ASSERT (mPhysicalAddressBits > 30);
> > +  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1,
> > mPhysicalAddressBits - 30);
> > +
> >    //
> >    // By architecture only one PageMapLevel4 exists - so
> > lets allocate storage for it.
> >    //
> >    PageMap         = (VOID *) PageTable;
> >
> >    PageMapLevel4Entry = PageMap;
> > -  PageAddress        = 0;
> > -  for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> > NumberOfPml4EntriesNeeded; IndexOfPml4Entries++,
> > PageMapLevel4Entry++) {
> > +  PageMapLevel5Entry = NULL;
> > +  if (m5LevelPagingSupport) {
> >      //
> > -    // Each PML4 entry points to a page of Page
> > Directory Pointer entries.
> > +    // By architecture only one PageMapLevel5 exists -
> > so lets allocate storage for it.
> >      //
> > -    PageDirectoryPointerEntry = (UINT64 *)
> > ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
> > -    if (PageDirectoryPointerEntry == NULL) {
> > -      PageDirectoryPointerEntry =
> > AllocatePageTableMemory (1);
> > -      ASSERT(PageDirectoryPointerEntry != NULL);
> > -      ZeroMem (PageDirectoryPointerEntry,
> > EFI_PAGES_TO_SIZE(1));
> > +    PageMapLevel5Entry = PageMap;
> > +  }
> > +  PageAddress        = 0;
> >
> > -      *PageMapLevel4Entry =
> > (UINT64)(UINTN)PageDirectoryPointerEntry |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> > +  for ( IndexOfPml5Entries = 0
> > +      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
> > +      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
> > +    //
> > +    // Each PML5 entry points to a page of PML4 entries.
> > +    // So lets allocate space for them and fill them in
> > in the IndexOfPml4Entries loop.
> > +    // When 5-Level Paging is disabled, the allocation
> > below happens only once.
> > +    //
> > +    if (m5LevelPagingSupport) {
> > +      PageMapLevel4Entry = (UINT64 *)
> > ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
> > +      if (PageMapLevel4Entry == NULL) {
> > +        PageMapLevel4Entry = AllocatePageTableMemory
> > (1);
> > +        ASSERT(PageMapLevel4Entry != NULL);
> > +        ZeroMem (PageMapLevel4Entry,
> > EFI_PAGES_TO_SIZE(1));
> > +
> > +        *PageMapLevel5Entry =
> > (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > +      }
> >      }
> >
> > -    if (m1GPageTableSupport) {
> > -      PageDirectory1GEntry = PageDirectoryPointerEntry;
> > -      for (IndexOfPageDirectoryEntries = 0;
> > IndexOfPageDirectoryEntries < 512;
> > IndexOfPageDirectoryEntries++, PageDirectory1GEntry++,
> > PageAddress += SIZE_1GB) {
> > -        if (IndexOfPml4Entries == 0 &&
> > IndexOfPageDirectoryEntries < 4) {
> > -          //
> > -          // Skip the < 4G entries
> > -          //
> > -          continue;
> > -        }
> > -        //
> > -        // Fill in the Page Directory entries
> > -        //
> > -        *PageDirectory1GEntry = PageAddress |
> > mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> > +    for (IndexOfPml4Entries = 0; IndexOfPml4Entries <
> > (NumberOfPml5EntriesNeeded == 1 ?
> > NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++,
> > PageMapLevel4Entry++) {
> > +      //
> > +      // Each PML4 entry points to a page of Page
> > Directory Pointer entries.
> > +      //
> > +      PageDirectoryPointerEntry = (UINT64 *)
> > ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
> > +      if (PageDirectoryPointerEntry == NULL) {
> > +        PageDirectoryPointerEntry =
> > AllocatePageTableMemory (1);
> > +        ASSERT(PageDirectoryPointerEntry != NULL);
> > +        ZeroMem (PageDirectoryPointerEntry,
> > EFI_PAGES_TO_SIZE(1));
> > +
> > +        *PageMapLevel4Entry =
> > (UINT64)(UINTN)PageDirectoryPointerEntry |
> > mAddressEncMask | PAGE_ATTRIBUTE_BITS;
> >        }
> > -    } else {
> > -      PageAddress = BASE_4GB;
> > -      for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> > NumberOfPdpEntriesNeeded; IndexOfPdpEntries++,
> > PageDirectoryPointerEntry++) {
> > -        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries
> > < 4) {
> > -          //
> > -          // Skip the < 4G entries
> > -          //
> > -          continue;
> > -        }
> > -        //
> > -        // Each Directory Pointer entries points to a
> > page of Page Directory entires.
> > -        // So allocate space for them and fill them in
> > in the IndexOfPageDirectoryEntries loop.
> > -        //
> > -        PageDirectoryEntry = (UINT64 *)
> > ((*PageDirectoryPointerEntry) & ~mAddressEncMask &
> > gPhyMask);
> > -        if (PageDirectoryEntry == NULL) {
> > -          PageDirectoryEntry = AllocatePageTableMemory
> > (1);
> > -          ASSERT(PageDirectoryEntry != NULL);
> > -          ZeroMem (PageDirectoryEntry,
> > EFI_PAGES_TO_SIZE(1));
> >
> > +      if (m1GPageTableSupport) {
> > +        PageDirectory1GEntry =
> > PageDirectoryPointerEntry;
> > +        for (IndexOfPageDirectoryEntries = 0;
> > IndexOfPageDirectoryEntries < 512;
> > IndexOfPageDirectoryEntries++, PageDirectory1GEntry++,
> > PageAddress += SIZE_1GB) {
> > +          if (IndexOfPml4Entries == 0 &&
> > IndexOfPageDirectoryEntries < 4) {
> > +            //
> > +            // Skip the < 4G entries
> > +            //
> > +            continue;
> > +          }
> >            //
> > -          // Fill in a Page Directory Pointer Entries
> > +          // Fill in the Page Directory entries
> >            //
> > -          *PageDirectoryPointerEntry =
> > (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > +          *PageDirectory1GEntry = PageAddress |
> > mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> >          }
> > -
> > -        for (IndexOfPageDirectoryEntries = 0;
> > IndexOfPageDirectoryEntries < 512;
> > IndexOfPageDirectoryEntries++, PageDirectoryEntry++,
> > PageAddress += SIZE_2MB) {
> > +      } else {
> > +        PageAddress = BASE_4GB;
> > +        for (IndexOfPdpEntries = 0; IndexOfPdpEntries <
> > (NumberOfPml4EntriesNeeded == 1 ?
> > NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++,
> > PageDirectoryPointerEntry++) {
> > +          if (IndexOfPml4Entries == 0 &&
> > IndexOfPdpEntries < 4) {
> > +            //
> > +            // Skip the < 4G entries
> > +            //
> > +            continue;
> > +          }
> >            //
> > -          // Fill in the Page Directory entries
> > +          // Each Directory Pointer entries points to a
> > page of Page Directory entires.
> > +          // So allocate space for them and fill them in
> > in the IndexOfPageDirectoryEntries loop.
> >            //
> > -          *PageDirectoryEntry = PageAddress |
> > mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> > +          PageDirectoryEntry = (UINT64 *)
> > ((*PageDirectoryPointerEntry) & ~mAddressEncMask &
> > gPhyMask);
> > +          if (PageDirectoryEntry == NULL) {
> > +            PageDirectoryEntry = AllocatePageTableMemory
> > (1);
> > +            ASSERT(PageDirectoryEntry != NULL);
> > +            ZeroMem (PageDirectoryEntry,
> > EFI_PAGES_TO_SIZE(1));
> > +
> > +            //
> > +            // Fill in a Page Directory Pointer Entries
> > +            //
> > +            *PageDirectoryPointerEntry =
> > (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > +          }
> > +
> > +          for (IndexOfPageDirectoryEntries = 0;
> > IndexOfPageDirectoryEntries < 512;
> > IndexOfPageDirectoryEntries++, PageDirectoryEntry++,
> > PageAddress += SIZE_2MB) {
> > +            //
> > +            // Fill in the Page Directory entries
> > +            //
> > +            *PageDirectoryEntry = PageAddress |
> > mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
> > +          }
> >          }
> >        }
> >      }
> > @@ -259,6 +326,8 @@ SmmInitPageTable (
> >    UINTN
> > PageFaultHandlerHookAddress;
> >    IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
> >    EFI_STATUS                        Status;
> > +  UINT64                            *Pml4Entry;
> > +  UINT64                            *Pml5Entry;
> >
> >    //
> >    // Initialize spin lock
> > @@ -266,12 +335,14 @@ SmmInitPageTable (
> >    InitializeSpinLock (mPFLock);
> >
> >    mCpuSmmStaticPageTable = PcdGetBool
> > (PcdCpuSmmStaticPageTable);
> > -  m1GPageTableSupport = Is1GPageSupport ();
> > -  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n",
> > m1GPageTableSupport));
> > -  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable -
> > 0x%x\n", mCpuSmmStaticPageTable));
> > -
> > -  mPhysicalAddressBits = CalculateMaximumSupportAddress
> > ();
> > -  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n",
> > mPhysicalAddressBits));
> > +  m1GPageTableSupport    = Is1GPageSupport ();
> > +  m5LevelPagingSupport   = Is5LevelPagingSupport ();
> > +  mPhysicalAddressBits   =
> > CalculateMaximumSupportAddress ();
> > +  PatchInstructionX86 (gPatch5LevelPagingSupport,
> > m5LevelPagingSupport, 1);
> > +  DEBUG ((DEBUG_INFO, "5LevelPaging Support     - %d\n",
> > m5LevelPagingSupport));
> > +  DEBUG ((DEBUG_INFO, "1GPageTable Support      - %d\n",
> > m1GPageTableSupport));
> > +  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n",
> > mCpuSmmStaticPageTable));
> > +  DEBUG ((DEBUG_INFO, "PhysicalAddressBits      - %d\n",
> > mPhysicalAddressBits));
> >    //
> >    // Generate PAE page table for the first 4GB memory
> > space
> >    //
> > @@ -288,15 +359,30 @@ SmmInitPageTable (
> >    //
> >    // Fill Page-Table-Level4 (PML4) entry
> >    //
> > -  PTEntry = (UINT64*)AllocatePageTableMemory (1);
> > -  ASSERT (PTEntry != NULL);
> > -  *PTEntry = Pages | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > -  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof
> > (*PTEntry));
> > +  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
> > +  ASSERT (Pml4Entry != NULL);
> > +  *Pml4Entry = Pages | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > +  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof
> > (*Pml4Entry));
> >
> >    //
> >    // Set sub-entries number
> >    //
> > -  SetSubEntriesNum (PTEntry, 3);
> > +  SetSubEntriesNum (Pml4Entry, 3);
> > +  PTEntry = Pml4Entry;
> > +
> > +  if (m5LevelPagingSupport) {
> > +    //
> > +    // Fill PML5 entry
> > +    //
> > +    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
> > +    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask |
> > PAGE_ATTRIBUTE_BITS;
> > +    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof
> > (*Pml5Entry));
> > +    //
> > +    // Set sub-entries number
> > +    //
> > +    SetSubEntriesNum (Pml5Entry, 1);
> > +    PTEntry = Pml5Entry;
> > +  }
> >
> >    if (mCpuSmmStaticPageTable) {
> >      SetStaticPageTable ((UINTN)PTEntry);
> > @@ -344,7 +430,7 @@ SmmInitPageTable (
> >    }
> >
> >    //
> > -  // Return the address of PML4 (to set CR3)
> > +  // Return the address of PML4/PML5 (to set CR3)
> >    //
> >    return (UINT32)(UINTN)PTEntry;
> >  }
> > @@ -436,12 +522,16 @@ ReclaimPages (
> >    VOID
> >    )
> >  {
> > +  UINT64                       Pml5Entry;
> > +  UINT64                       *Pml5;
> >    UINT64                       *Pml4;
> >    UINT64                       *Pdpt;
> >    UINT64                       *Pdt;
> > +  UINTN                        Pml5Index;
> >    UINTN                        Pml4Index;
> >    UINTN                        PdptIndex;
> >    UINTN                        PdtIndex;
> > +  UINTN                        MinPml5;
> >    UINTN                        MinPml4;
> >    UINTN                        MinPdpt;
> >    UINTN                        MinPdt;
> > @@ -451,120 +541,147 @@ ReclaimPages (
> >    BOOLEAN                      PML4EIgnore;
> >    BOOLEAN                      PDPTEIgnore;
> >    UINT64                       *ReleasePageAddress;
> > +  IA32_CR4                     Cr4;
> > +  BOOLEAN                      Enable5LevelPaging;
> >
> >    Pml4 = NULL;
> >    Pdpt = NULL;
> >    Pdt  = NULL;
> >    MinAcc  = (UINT64)-1;
> >    MinPml4 = (UINTN)-1;
> > +  MinPml5 = (UINTN)-1;
> >    MinPdpt = (UINTN)-1;
> >    MinPdt  = (UINTN)-1;
> >    Acc     = 0;
> >    ReleasePageAddress = 0;
> >
> > +  Cr4.UintN = AsmReadCr4 ();
> > +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> > +  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
> > +
> > +  if (!Enable5LevelPaging) {
> > +    //
> > +    // Create one fake PML5 entry for 4-Level Paging
> > +    // so that the page table parsing logic only handles
> > 5-Level page structure.
> > +    //
> > +    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
> > +    Pml5 = &Pml5Entry;
> > +  }
> > +
> >    //
> >    // First, find the leaf entry has the smallest access
> > record value
> >    //
> > -  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
> > -  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof
> > (*Pml4); Pml4Index++) {
> > -    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 ||
> > (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
> > +  for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ?
> > (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
> > +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 ||
> > (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
> >        //
> > -      // If the PML4 entry is not present or is masked,
> > skip it
> > +      // If the PML5 entry is not present or is masked,
> > skip it
> >        //
> >        continue;
> >      }
> > -    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] &
> > ~mAddressEncMask & gPhyMask);
> > -    PML4EIgnore = FALSE;
> > -    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE /
> > sizeof (*Pdpt); PdptIndex++) {
> > -      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 ||
> > (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
> > +    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
> > +    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE /
> > sizeof (*Pml4); Pml4Index++) {
> > +      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 ||
> > (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
> >          //
> > -        // If the PDPT entry is not present or is
> > masked, skip it
> > +        // If the PML4 entry is not present or is
> > masked, skip it
> >          //
> > -        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
> > -          //
> > -          // If the PDPT entry is masked, we will ignore
> > checking the PML4 entry
> > -          //
> > -          PML4EIgnore = TRUE;
> > -        }
> >          continue;
> >        }
> > -      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
> > -        //
> > -        // It's not 1-GByte pages entry, it should be a
> > PDPT entry,
> > -        // we will not check PML4 entry more
> > -        //
> > -        PML4EIgnore = TRUE;
> > -        Pdt =  (UINT64*)(UINTN)(Pdpt[PdptIndex] &
> > ~mAddressEncMask & gPhyMask);
> > -        PDPTEIgnore = FALSE;
> > -        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE /
> > sizeof(*Pdt); PdtIndex++) {
> > -          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 ||
> > (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> > +      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] &
> > ~mAddressEncMask & gPhyMask);
> > +      PML4EIgnore = FALSE;
> > +      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE /
> > sizeof (*Pdpt); PdptIndex++) {
> > +        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 ||
> > (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
> > +          //
> > +          // If the PDPT entry is not present or is
> > masked, skip it
> > +          //
> > +          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
> >              //
> > -            // If the PD entry is not present or is
> > masked, skip it
> > +            // If the PDPT entry is masked, we will
> > ignore checking the PML4 entry
> >              //
> > -            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> > +            PML4EIgnore = TRUE;
> > +          }
> > +          continue;
> > +        }
> > +        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
> > +          //
> > +          // It's not 1-GByte pages entry, it should be
> > a PDPT entry,
> > +          // we will not check PML4 entry more
> > +          //
> > +          PML4EIgnore = TRUE;
> > +          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] &
> > ~mAddressEncMask & gPhyMask);
> > +          PDPTEIgnore = FALSE;
> > +          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE /
> > sizeof(*Pdt); PdtIndex++) {
> > +            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 ||
> > (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> > +              //
> > +              // If the PD entry is not present or is
> > masked, skip it
> > +              //
> > +              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
> > +                //
> > +                // If the PD entry is masked, we will
> > not PDPT entry more
> > +                //
> > +                PDPTEIgnore = TRUE;
> > +              }
> > +              continue;
> > +            }
> > +            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
> >                //
> > -              // If the PD entry is masked, we will not
> > PDPT entry more
> > +              // It's not 2 MByte page table entry, it
> > should be PD entry
> > +              // we will find the entry has the smallest
> > access record value
> >                //
> >                PDPTEIgnore = TRUE;
> > +              Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
> > +              if (Acc < MinAcc) {
> > +                //
> > +                // If the PD entry has the smallest
> > access record value,
> > +                // save the Page address to be released
> > +                //
> > +                MinAcc  = Acc;
> > +                MinPml5 = Pml5Index;
> > +                MinPml4 = Pml4Index;
> > +                MinPdpt = PdptIndex;
> > +                MinPdt  = PdtIndex;
> > +                ReleasePageAddress = Pdt + PdtIndex;
> > +              }
> >              }
> > -            continue;
> >            }
> > -          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
> > +          if (!PDPTEIgnore) {
> >              //
> > -            // It's not 2 MByte page table entry, it
> > should be PD entry
> > -            // we will find the entry has the smallest
> > access record value
> > +            // If this PDPT entry has no PDT entries
> > pointer to 4 KByte pages,
> > +            // it should only has the entries point to 2
> > MByte Pages
> >              //
> > -            PDPTEIgnore = TRUE;
> > -            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
> > +            Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
> >              if (Acc < MinAcc) {
> >                //
> > -              // If the PD entry has the smallest access
> > record value,
> > +              // If the PDPT entry has the smallest
> > access record value,
> >                // save the Page address to be released
> >                //
> >                MinAcc  = Acc;
> > +              MinPml5 = Pml5Index;
> >                MinPml4 = Pml4Index;
> >                MinPdpt = PdptIndex;
> > -              MinPdt  = PdtIndex;
> > -              ReleasePageAddress = Pdt + PdtIndex;
> > +              MinPdt  = (UINTN)-1;
> > +              ReleasePageAddress = Pdpt + PdptIndex;
> >              }
> >            }
> >          }
> > -        if (!PDPTEIgnore) {
> > -          //
> > -          // If this PDPT entry has no PDT entries
> > pointer to 4 KByte pages,
> > -          // it should only has the entries point to 2
> > MByte Pages
> > -          //
> > -          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
> > -          if (Acc < MinAcc) {
> > -            //
> > -            // If the PDPT entry has the smallest access
> > record value,
> > -            // save the Page address to be released
> > -            //
> > -            MinAcc  = Acc;
> > -            MinPml4 = Pml4Index;
> > -            MinPdpt = PdptIndex;
> > -            MinPdt  = (UINTN)-1;
> > -            ReleasePageAddress = Pdpt + PdptIndex;
> > -          }
> > -        }
> >        }
> > -    }
> > -    if (!PML4EIgnore) {
> > -      //
> > -      // If PML4 entry has no the PDPT entry pointer to
> > 2 MByte pages,
> > -      // it should only has the entries point to 1 GByte
> > Pages
> > -      //
> > -      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
> > -      if (Acc < MinAcc) {
> > +      if (!PML4EIgnore) {
> >          //
> > -        // If the PML4 entry has the smallest access
> > record value,
> > -        // save the Page address to be released
> > +        // If PML4 entry has no the PDPT entry pointer
> > to 2 MByte pages,
> > +        // it should only has the entries point to 1
> > GByte Pages
> >          //
> > -        MinAcc  = Acc;
> > -        MinPml4 = Pml4Index;
> > -        MinPdpt = (UINTN)-1;
> > -        MinPdt  = (UINTN)-1;
> > -        ReleasePageAddress = Pml4 + Pml4Index;
> > +        Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
> > +        if (Acc < MinAcc) {
> > +          //
> > +          // If the PML4 entry has the smallest access
> > record value,
> > +          // save the Page address to be released
> > +          //
> > +          MinAcc  = Acc;
> > +          MinPml5 = Pml5Index;
> > +          MinPml4 = Pml4Index;
> > +          MinPdpt = (UINTN)-1;
> > +          MinPdt  = (UINTN)-1;
> > +          ReleasePageAddress = Pml4 + Pml4Index;
> > +        }
> >        }
> >      }
> >    }
> > @@ -588,6 +705,7 @@ ReclaimPages (
> >        //
> >        // If 4 KByte Page Table is released, check the
> > PDPT entry
> >        //
> > +      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] &
> > gPhyMask);
> >        Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] &
> > ~mAddressEncMask & gPhyMask);
> >        SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
> >        if (SubEntriesNum == 0) {
> > @@ -679,7 +797,7 @@ SmiDefaultPFHandler (
> >    )
> >  {
> >    UINT64                            *PageTable;
> > -  UINT64                            *Pml4;
> > +  UINT64                            *PageTableTop;
> >    UINT64                            PFAddress;
> >    UINTN                             StartBit;
> >    UINTN                             EndBit;
> > @@ -690,6 +808,8 @@ SmiDefaultPFHandler (
> >    UINTN                             PageAttribute;
> >    EFI_STATUS                        Status;
> >    UINT64                            *UpperEntry;
> > +  BOOLEAN                           Enable5LevelPaging;
> > +  IA32_CR4                          Cr4;
> >
> >    //
> >    // Set default SMM page attribute
> > @@ -699,9 +819,12 @@ SmiDefaultPFHandler (
> >    PageAttribute = 0;
> >
> >    EndBit = 0;
> > -  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
> > +  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
> >    PFAddress = AsmReadCr2 ();
> >
> > +  Cr4.UintN = AsmReadCr4 ();
> > +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
> > +
> >    Status = GetPlatformPageTableAttribute (PFAddress,
> > &PageSize, &NumOfPages, &PageAttribute);
> >    //
> >    // If platform not support page table attribute, set
> > default SMM page attribute
> > @@ -755,9 +878,9 @@ SmiDefaultPFHandler (
> >    }
> >
> >    for (Index = 0; Index < NumOfPages; Index++) {
> > -    PageTable  = Pml4;
> > +    PageTable  = PageTableTop;
> >      UpperEntry = NULL;
> > -    for (StartBit = 39; StartBit > EndBit; StartBit -=
> > 9) {
> > +    for (StartBit = Enable5LevelPaging ? 48 : 39;
> > StartBit > EndBit; StartBit -= 9) {
> >        PTIndex = BitFieldRead64 (PFAddress, StartBit,
> > StartBit + 8);
> >        if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
> >          //
> > @@ -941,13 +1064,20 @@ SetPageTableAttributes (
> >    UINTN                 Index2;
> >    UINTN                 Index3;
> >    UINTN                 Index4;
> > +  UINTN                 Index5;
> >    UINT64                *L1PageTable;
> >    UINT64                *L2PageTable;
> >    UINT64                *L3PageTable;
> >    UINT64                *L4PageTable;
> > +  UINT64                *L5PageTable;
> >    BOOLEAN               IsSplitted;
> >    BOOLEAN               PageTableSplitted;
> >    BOOLEAN               CetEnabled;
> > +  IA32_CR4              Cr4;
> > +  BOOLEAN               Enable5LevelPaging;
> > +
> > +  Cr4.UintN = AsmReadCr4 ();
> > +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> >
> >    //
> >    // Don't do this if
> > @@ -991,44 +1121,59 @@ SetPageTableAttributes (
> >    do {
> >      DEBUG ((DEBUG_INFO, "Start...\n"));
> >      PageTableSplitted = FALSE;
> > -
> > -    L4PageTable = (UINT64 *)GetPageTableBase ();
> > -    SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> > -    PageTableSplitted = (PageTableSplitted ||
> > IsSplitted);
> > -
> > -    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64);
> > Index4++) {
> > -      L3PageTable = (UINT64
> > *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask &
> > PAGING_4K_ADDRESS_MASK_64);
> > -      if (L3PageTable == NULL) {
> > -        continue;
> > -      }
> > -
> > -      SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> > +    L5PageTable = NULL;
> > +    if (Enable5LevelPaging) {
> > +      L5PageTable = (UINT64 *)GetPageTableBase ();
> > +      SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> >        PageTableSplitted = (PageTableSplitted ||
> > IsSplitted);
> > +    }
> >
> > -      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64);
> > Index3++) {
> > -        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
> > -          // 1G
> > +    for (Index5 = 0; Index5 < (Enable5LevelPaging ?
> > SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
> > +      if (Enable5LevelPaging) {
> > +        L4PageTable = (UINT64
> > *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask &
> > PAGING_4K_ADDRESS_MASK_64);
> > +        if (L4PageTable == NULL) {
> >            continue;
> >          }
> > -        L2PageTable = (UINT64
> > *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask &
> > PAGING_4K_ADDRESS_MASK_64);
> > -        if (L2PageTable == NULL) {
> > +      } else {
> > +        L4PageTable = (UINT64 *)GetPageTableBase ();
> > +      }
> > +      SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> > +      PageTableSplitted = (PageTableSplitted ||
> > IsSplitted);
> > +
> > +      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64);
> > Index4++) {
> > +        L3PageTable = (UINT64
> > *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask &
> > PAGING_4K_ADDRESS_MASK_64);
> > +        if (L3PageTable == NULL) {
> >            continue;
> >          }
> >
> > -        SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> > +        SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> >          PageTableSplitted = (PageTableSplitted ||
> > IsSplitted);
> >
> > -        for (Index2 = 0; Index2 <
> > SIZE_4KB/sizeof(UINT64); Index2++) {
> > -          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
> > -            // 2M
> > +        for (Index3 = 0; Index3 <
> > SIZE_4KB/sizeof(UINT64); Index3++) {
> > +          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
> > +            // 1G
> >              continue;
> >            }
> > -          L1PageTable = (UINT64
> > *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask &
> > PAGING_4K_ADDRESS_MASK_64);
> > -          if (L1PageTable == NULL) {
> > +          L2PageTable = (UINT64
> > *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask &
> > PAGING_4K_ADDRESS_MASK_64);
> > +          if (L2PageTable == NULL) {
> >              continue;
> >            }
> > -          SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> > +
> > +          SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> >            PageTableSplitted = (PageTableSplitted ||
> > IsSplitted);
> > +
> > +          for (Index2 = 0; Index2 <
> > SIZE_4KB/sizeof(UINT64); Index2++) {
> > +            if ((L2PageTable[Index2] & IA32_PG_PS) != 0)
> > {
> > +              // 2M
> > +              continue;
> > +            }
> > +            L1PageTable = (UINT64
> > *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask &
> > PAGING_4K_ADDRESS_MASK_64);
> > +            if (L1PageTable == NULL) {
> > +              continue;
> > +            }
> > +            SmmSetMemoryAttributesEx
> > ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB,
> > EFI_MEMORY_RO, &IsSplitted);
> > +            PageTableSplitted = (PageTableSplitted ||
> > IsSplitted);
> > +          }
> >          }
> >        }
> >      }
> > diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> > index 741e4b7da2..271492a9d7 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> > @@ -69,6 +69,7 @@ extern ASM_PFX(mXdSupported)
> >  global ASM_PFX(gPatchXdSupported)
> >  global ASM_PFX(gPatchSmiStack)
> >  global ASM_PFX(gPatchSmiCr3)
> > +global ASM_PFX(gPatch5LevelPagingSupport)
> >  global ASM_PFX(gcSmiHandlerTemplate)
> >  global ASM_PFX(gcSmiHandlerSize)
> >
> > @@ -124,6 +125,17 @@ ProtFlatMode:
> >  ASM_PFX(gPatchSmiCr3):
> >      mov     cr3, rax
> >      mov     eax, 0x668                   ; as cr4.PGE is
> > not set here, refresh cr3
> > +
> > +    mov     cl, strict byte 0            ; source
> > operand will be patched
> > +ASM_PFX(gPatch5LevelPagingSupport):
> > +    cmp     cl, 0
> > +    je      SkipEnable5LevelPaging
> > +    ;
> > +    ; Enable 5-Level Paging bit
> > +    ;
> > +    bts     eax, 12                     ; Set LA57 bit
> > (bit #12)
> > +SkipEnable5LevelPaging:
> > +
> >      mov     cr4, rax                    ; in
> > PreModifyMtrrs() to flush TLB.
> >  ; Load TSS
> >      sub     esp, 8                      ; reserve room
> > in stack
> > diff --git
> > a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> > b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> > index e7c78d36fc..63bae5a913 100644
> > --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> > +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
> > @@ -1,7 +1,7 @@
> >  /** @file
> >  X64 processor specific functions to enable SMM profile.
> >
> > -Copyright (c) 2012 - 2016, Intel Corporation. All rights
> > reserved.<BR>
> > +Copyright (c) 2012 - 2019, Intel Corporation. All rights
> > reserved.<BR>
> >  Copyright (c) 2017, AMD Incorporated. All rights
> > reserved.<BR>
> >
> >  SPDX-License-Identifier: BSD-2-Clause-Patent
> > @@ -147,9 +147,14 @@ RestorePageTableAbove4G (
> >    BOOLEAN       Existed;
> >    UINTN         Index;
> >    UINTN         PFIndex;
> > +  IA32_CR4      Cr4;
> > +  BOOLEAN       Enable5LevelPaging;
> >
> >    ASSERT ((PageTable != NULL) && (IsValidPFAddress !=
> > NULL));
> >
> > +  Cr4.UintN = AsmReadCr4 ();
> > +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
> > +
> >    //
> >    // If page fault address is 4GB above.
> >    //
> > @@ -161,38 +166,48 @@ RestorePageTableAbove4G (
> >    //
> >    Existed = FALSE;
> >    PageTable = (UINT64*)(AsmReadCr3 () &
> > PHYSICAL_ADDRESS_MASK);
> > -  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
> > -  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> > -    // PML4E
> > -    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > -    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
> > +  PTIndex = 0;
> > +  if (Enable5LevelPaging) {
> > +    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
> > +  }
> > +  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] &
> > IA32_PG_P) != 0)) {
> > +    // PML5E
> > +    if (Enable5LevelPaging) {
> > +      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > +    }
> > +    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
> >      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> > -      // PDPTE
> > +      // PML4E
> >        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > -      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
> > -      // PD
> > -      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
> > -        //
> > -        // 2MB page
> > -        //
> > -        Address = (UINT64)(PageTable[PTIndex] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > -        if ((Address & ~((1ull << 21) - 1)) ==
> > ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) -
> > 1)))) {
> > -          Existed = TRUE;
> > -        }
> > -      } else {
> > -        //
> > -        // 4KB page
> > -        //
> > -        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex]
> > & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
> > -        if (PageTable != 0) {
> > +      PTIndex = BitFieldRead64 (PFAddress, 30, 38);
> > +      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
> > +        // PDPTE
> > +        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex]
> > & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > +        PTIndex = BitFieldRead64 (PFAddress, 21, 29);
> > +        // PD
> > +        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
> >            //
> > -          // When there is a valid entry to map to 4KB
> > page, need not create a new entry to map 2MB.
> > +          // 2MB page
> >            //
> > -          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
> >            Address = (UINT64)(PageTable[PTIndex] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > -          if ((Address & ~((1ull << 12) - 1)) ==
> > (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) -
> > 1))) {
> > +          if ((Address & ~((1ull << 21) - 1)) ==
> > ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) -
> > 1)))) {
> >              Existed = TRUE;
> >            }
> > +        } else {
> > +          //
> > +          // 4KB page
> > +          //
> > +          PageTable =
> > (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask&
> > PHYSICAL_ADDRESS_MASK);
> > +          if (PageTable != 0) {
> > +            //
> > +            // When there is a valid entry to map to 4KB
> > page, need not create a new entry to map 2MB.
> > +            //
> > +            PTIndex = BitFieldRead64 (PFAddress, 12,
> > 20);
> > +            Address = (UINT64)(PageTable[PTIndex] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > +            if ((Address & ~((1ull << 12) - 1)) ==
> > (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) -
> > 1))) {
> > +              Existed = TRUE;
> > +            }
> > +          }
> >          }
> >        }
> >      }
> > @@ -221,6 +236,11 @@ RestorePageTableAbove4G (
> >      //
> >      PageTable = (UINT64*)(AsmReadCr3 () &
> > PHYSICAL_ADDRESS_MASK);
> >      PFAddress = AsmReadCr2 ();
> > +    // PML5E
> > +    if (Enable5LevelPaging) {
> > +      PTIndex = BitFieldRead64 (PFAddress, 48, 56);
> > +      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > +    }
> >      // PML4E
> >      PTIndex = BitFieldRead64 (PFAddress, 39, 47);
> >      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] &
> > ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
> > --
> > 2.21.0.windows.1
> >
> >
> > 
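
For orientation while reading the page-table hunks above: every level of the walk consumes 9 bits of the linear address, and the patch reads them with BitFieldRead64 as bits 48-56 for the PML5 index, 39-47 for PML4, 30-38 for PDPT, 21-29 for PD and 12-20 for PT. When CR4.LA57 is clear, ReclaimPages() and InitPaging() wrap the real 4-level root in one synthetic, present PML5 entry so a single walking loop covers both modes. The sketch below is a standalone illustration of those two ideas only; it is ordinary C, not edk2 code, and every name and constant in it is invented for the example.

#include <stdint.h>
#include <stdio.h>

#define PG_P  0x1ULL   /* "present" bit, mirroring IA32_PG_P */

/* 9-bit table index for a given paging level: level 5 uses bits 48-56,
   level 4 uses bits 39-47, ... level 1 uses bits 12-20. */
static unsigned
LevelIndex (uint64_t LinearAddress, unsigned Level)
{
  unsigned Shift = 12 + 9 * (Level - 1);
  return (unsigned)((LinearAddress >> Shift) & 0x1FF);
}

int
main (void)
{
  uint64_t  Cr3Root     = 0x180000;     /* illustrative page-table root */
  int       La57Enabled = 0;            /* pretend CR4.LA57 == 0        */
  uint64_t  FakePml5[1];
  uint64_t  *TopLevel;
  unsigned  TopEntries;

  if (La57Enabled) {
    TopLevel   = (uint64_t *)(uintptr_t)Cr3Root;  /* real PML5 page */
    TopEntries = 512;
  } else {
    /* One synthetic, present PML5 entry pointing at the real PML4,
       so the caller can always loop over "PML5 entries". */
    FakePml5[0] = Cr3Root | PG_P;
    TopLevel    = FakePml5;
    TopEntries  = 1;
  }

  uint64_t Addr = 0x0000123456789000ULL;
  printf ("top-level entries to scan: %u\n", TopEntries);
  for (unsigned Level = 5; Level >= 1; Level--) {
    printf ("level %u index = %u\n", Level, LevelIndex (Addr, Level));
  }
  (void)TopLevel;
  return 0;
}

The payoff of the fake-entry trick used in the hunks above is that the 4-level case degenerates into "scan exactly one always-present top-level entry", so no separate 4-level code path is needed.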



Re: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu: Enable 5 level paging when CPU supports
Posted by Laszlo Ersek 6 years, 7 months ago
On 07/11/19 03:19, Ni, Ray wrote:
> Mike,
> Thanks for raising this build failure.
> I just tried it on my Ubuntu 18 (under Win10). Even GCC 7 complains about this. My bad!
> I just posted a fix.

Thanks -- as I requested there, please do not push this new patch until
the revert+reapply completes.

Laszlo

>> -----Original Message-----
>> From: Kinney, Michael D
>> Sent: Thursday, July 11, 2019 4:06 AM
>> To: devel@edk2.groups.io; Ni, Ray <ray.ni@intel.com>; Kinney, Michael D <michael.d.kinney@intel.com>
>> Cc: Dong, Eric <eric.dong@intel.com>; Laszlo Ersek <lersek@redhat.com>
>> Subject: RE: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu: Enable 5 level paging when CPU supports
>>
>> Hi Ray,
>>
>> I noticed a Linux/GCC build issue with this patch when using GCC version:
>>
>>     gcc version 8.2.1 20181215 (Red Hat 8.2.1-6) (GCC)
>>
>> edk2/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c: In function 'ReclaimPages':
>> edk2/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c:574:89: error: '?:' using integer constants in boolean context, the
>> expression will always evaluate to 'true' [-Werror=int-in-bool-context]
>>    for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
>>
>> I was able to get the build to pass if I added ().
>>
>> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
>> index c31160735a..a3b62f7787 100644
>> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
>> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
>> @@ -571,7 +571,7 @@ ReclaimPages (
>>    //
>>    // First, find the leaf entry has the smallest access record value
>>    //
>> -  for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
>> +  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {^M
>>      if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
>>        //
>>        // If the PML5 entry is not present or is masked, skip it
>>
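
In C the relational operator < binds tighter than the conditional operator ?:, so the unparenthesized condition above is parsed as (Pml5Index < Enable5LevelPaging) ? 512 : 1, whose value is always nonzero, i.e. the loop condition is always true. The snippet below is a stripped-down, self-contained illustration of that parse; it is not code from the patch, and the names in it are invented for the example.

#include <stdio.h>

int
main (void)
{
  int Flag  = 0;   /* stands in for Enable5LevelPaging == FALSE */
  int Count = 0;

  /* Parses as (Index < Flag) ? 512 : 1, which is always nonzero, so
     this loop would never stop on its own; a guard keeps the demo finite. */
  for (int Index = 0; Index < Flag ? 512 : 1; Index++) {
    if (++Count > 1024) {
      break;
    }
  }
  printf ("without parentheses: %d iterations (runaway)\n", Count);

  Count = 0;
  /* Intended meaning: iterate 512 times if Flag is set, otherwise once. */
  for (int Index = 0; Index < (Flag ? 512 : 1); Index++) {
    Count++;
  }
  printf ("with parentheses:    %d iteration(s)\n", Count);
  return 0;
}

With the parentheses added, the loop in ReclaimPages() scans all 512 PML5 entries when 5-level paging is active and only the single fake entry otherwise, which is the intended behavior.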
>> Best regards,
>>
>> Mike
>>
>>> -----Original Message-----
>>> From: devel@edk2.groups.io [mailto:devel@edk2.groups.io]
>>> On Behalf Of Ni, Ray
>>> Sent: Tuesday, July 2, 2019 11:54 PM
>>> To: devel@edk2.groups.io
>>> Cc: Dong, Eric <eric.dong@intel.com>; Laszlo Ersek
>>> <lersek@redhat.com>
>>> Subject: [edk2-devel] [PATCH v2 3/3] UefiCpuPkg/PiSmmCpu:
>>> Enable 5 level paging when CPU supports
>>>
>>> REF:https://bugzilla.tianocore.org/show_bug.cgi?id=1946
>>>
>>> The patch changes SMM environment to use 5 level paging
>>> when CPU
>>> supports it.
>>>
>>> Signed-off-by: Ray Ni <ray.ni@intel.com>
>>> Cc: Eric Dong <eric.dong@intel.com>
>>> Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
>>> ---
>>>  .../PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c   |  20 +-
>>>  UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c        | 272
>>> ++++++----
>>>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c       | 485
>>> ++++++++++++------
>>>  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm   |  12 +
>>>  .../PiSmmCpuDxeSmm/X64/SmmProfileArch.c       |  72 ++-
>>>  5 files changed, 561 insertions(+), 300 deletions(-)
>>>
>>> diff --git
>>> a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
>>> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
>>> index 069be3aaa5..55090e9c3e 100644
>>> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
>>> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
>>> @@ -125,18 +125,36 @@ GetPageTableEntry (
>>>    UINTN                 Index2;
>>>    UINTN                 Index3;
>>>    UINTN                 Index4;
>>> +  UINTN                 Index5;
>>>    UINT64                *L1PageTable;
>>>    UINT64                *L2PageTable;
>>>    UINT64                *L3PageTable;
>>>    UINT64                *L4PageTable;
>>> +  UINT64                *L5PageTable;
>>> +  IA32_CR4              Cr4;
>>> +  BOOLEAN               Enable5LevelPaging;
>>>
>>> +  Index5 = ((UINTN)RShiftU64 (Address, 48)) &
>>> PAGING_PAE_INDEX_MASK;
>>>    Index4 = ((UINTN)RShiftU64 (Address, 39)) &
>>> PAGING_PAE_INDEX_MASK;
>>>    Index3 = ((UINTN)Address >> 30) &
>>> PAGING_PAE_INDEX_MASK;
>>>    Index2 = ((UINTN)Address >> 21) &
>>> PAGING_PAE_INDEX_MASK;
>>>    Index1 = ((UINTN)Address >> 12) &
>>> PAGING_PAE_INDEX_MASK;
>>>
>>> +  Cr4.UintN = AsmReadCr4 ();
>>> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
>>> +
>>>    if (sizeof(UINTN) == sizeof(UINT64)) {
>>> -    L4PageTable = (UINT64 *)GetPageTableBase ();
>>> +    if (Enable5LevelPaging) {
>>> +      L5PageTable = (UINT64 *)GetPageTableBase ();
>>> +      if (L5PageTable[Index5] == 0) {
>>> +        *PageAttribute = PageNone;
>>> +        return NULL;
>>> +      }
>>> +
>>> +      L4PageTable = (UINT64
>>> *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask &
>>> PAGING_4K_ADDRESS_MASK_64);
>>> +    } else {
>>> +      L4PageTable = (UINT64 *)GetPageTableBase ();
>>> +    }
>>>      if (L4PageTable[Index4] == 0) {
>>>        *PageAttribute = PageNone;
>>>        return NULL;
>>> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
>>> b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
>>> index e2b6a2d9b2..c5131526f0 100644
>>> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
>>> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
>>> @@ -534,43 +534,78 @@ InitPaging (
>>>    VOID
>>>    )
>>>  {
>>> +  UINT64                            Pml5Entry;
>>> +  UINT64                            Pml4Entry;
>>> +  UINT64                            *Pml5;
>>>    UINT64                            *Pml4;
>>>    UINT64                            *Pdpt;
>>>    UINT64                            *Pd;
>>>    UINT64                            *Pt;
>>>    UINTN                             Address;
>>> +  UINTN                             Pml5Index;
>>>    UINTN                             Pml4Index;
>>>    UINTN                             PdptIndex;
>>>    UINTN                             PdIndex;
>>>    UINTN                             PtIndex;
>>>    UINTN                             NumberOfPdptEntries;
>>>    UINTN                             NumberOfPml4Entries;
>>> +  UINTN                             NumberOfPml5Entries;
>>>    UINTN                             SizeOfMemorySpace;
>>>    BOOLEAN                           Nx;
>>> +  IA32_CR4                          Cr4;
>>> +  BOOLEAN                           Enable5LevelPaging;
>>> +
>>> +  Cr4.UintN = AsmReadCr4 ();
>>> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
>>>
>>>    if (sizeof (UINTN) == sizeof (UINT64)) {
>>> -    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
>>> +    if (!Enable5LevelPaging) {
>>> +      Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
>>> +      Pml5 = &Pml5Entry;
>>> +    } else {
>>> +      Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
>>> +    }
>>>      SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
>>>      //
>>>      // Calculate the table entries of PML4E and PDPTE.
>>>      //
>>> -    if (SizeOfMemorySpace <= 39 ) {
>>> -      NumberOfPml4Entries = 1;
>>> -      NumberOfPdptEntries = (UINT32)LShiftU64 (1,
>>> (SizeOfMemorySpace - 30));
>>> -    } else {
>>> -      NumberOfPml4Entries = (UINT32)LShiftU64 (1,
>>> (SizeOfMemorySpace - 39));
>>> -      NumberOfPdptEntries = 512;
>>> +    NumberOfPml5Entries = 1;
>>> +    if (SizeOfMemorySpace > 48) {
>>> +      NumberOfPml5Entries = (UINTN) LShiftU64 (1,
>>> SizeOfMemorySpace - 48);
>>> +      SizeOfMemorySpace = 48;
>>>      }
>>> -  } else {
>>> +
>>>      NumberOfPml4Entries = 1;
>>> +    if (SizeOfMemorySpace > 39) {
>>> +      NumberOfPml4Entries = (UINTN) LShiftU64 (1,
>>> SizeOfMemorySpace - 39);
>>> +      SizeOfMemorySpace = 39;
>>> +    }
>>> +
>>> +    NumberOfPdptEntries = 1;
>>> +    ASSERT (SizeOfMemorySpace > 30);
>>> +    NumberOfPdptEntries = (UINTN) LShiftU64 (1,
>>> SizeOfMemorySpace - 30);
>>> +  } else {
>>> +    Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
>>> +    Pml4 = &Pml4Entry;
>>> +    Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
>>> +    Pml5 = &Pml5Entry;
>>> +    NumberOfPml5Entries  = 1;
>>> +    NumberOfPml4Entries  = 1;
>>>      NumberOfPdptEntries  = 4;
>>>    }
>>>
>>>    //
>>>    // Go through page table and change 2MB-page into 4KB-
>>> page.
>>>    //
>>> -  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
>>> Pml4Index++) {
>>> -    if (sizeof (UINTN) == sizeof (UINT64)) {
>>> +  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries;
>>> Pml5Index++) {
>>> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
>>> +      //
>>> +      // If PML5 entry does not exist, skip it
>>> +      //
>>> +      continue;
>>> +    }
>>> +    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] &
>>> PHYSICAL_ADDRESS_MASK);
>>> +    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries;
>>> Pml4Index++) {
>>>        if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
>>>          //
>>>          // If PML4 entry does not exist, skip it
>>> @@ -578,63 +613,76 @@ InitPaging (
>>>          continue;
>>>        }
>>>        Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] &
>>> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> -    } else {
>>> -      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
>>> -    }
>>> -    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries;
>>> PdptIndex++, Pdpt++) {
>>> -      if ((*Pdpt & IA32_PG_P) == 0) {
>>> -        //
>>> -        // If PDPT entry does not exist, skip it
>>> -        //
>>> -        continue;
>>> -      }
>>> -      if ((*Pdpt & IA32_PG_PS) != 0) {
>>> -        //
>>> -        // This is 1G entry, skip it
>>> -        //
>>> -        continue;
>>> -      }
>>> -      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask &
>>> PHYSICAL_ADDRESS_MASK);
>>> -      if (Pd == 0) {
>>> -        continue;
>>> -      }
>>> -      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
>>> (*Pd); PdIndex++, Pd++) {
>>> -        if ((*Pd & IA32_PG_P) == 0) {
>>> +      for (PdptIndex = 0; PdptIndex <
>>> NumberOfPdptEntries; PdptIndex++, Pdpt++) {
>>> +        if ((*Pdpt & IA32_PG_P) == 0) {
>>> +          //
>>> +          // If PDPT entry does not exist, skip it
>>> +          //
>>> +          continue;
>>> +        }
>>> +        if ((*Pdpt & IA32_PG_PS) != 0) {
>>>            //
>>> -          // If PD entry does not exist, skip it
>>> +          // This is 1G entry, skip it
>>>            //
>>>            continue;
>>>          }
>>> -        Address = (((PdptIndex << 9) + PdIndex) << 21);
>>> +        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask
>>> & PHYSICAL_ADDRESS_MASK);
>>> +        if (Pd == 0) {
>>> +          continue;
>>> +        }
>>> +        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof
>>> (*Pd); PdIndex++, Pd++) {
>>> +          if ((*Pd & IA32_PG_P) == 0) {
>>> +            //
>>> +            // If PD entry does not exist, skip it
>>> +            //
>>> +            continue;
>>> +          }
>>> +          Address = (UINTN) LShiftU64 (
>>> +                              LShiftU64 (
>>> +                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
>>> +                                9
>>> +                                ) + PdIndex,
>>> +                                21
>>> +                              );
>>>
>>> -        //
>>> -        // If it is 2M page, check IsAddressSplit()
>>> -        //
>>> -        if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
>>>            //
>>> -          // Based on current page table, create 4KB page table for split area.
>>> +          // If it is 2M page, check IsAddressSplit()
>>>            //
>>> -          ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
>>> +          if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
>>> +            //
>>> +            // Based on current page table, create 4KB page table for split area.
>>> +            //
>>> +            ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
>>> +
>>> +            Pt = AllocatePageTableMemory (1);
>>> +            ASSERT (Pt != NULL);
>>>
>>> -          Pt = AllocatePageTableMemory (1);
>>> -          ASSERT (Pt != NULL);
>>> +            *Pd = (UINTN) Pt | IA32_PG_RW | IA32_PG_P;
>>>
>>> -          // Split it
>>> -          for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) {
>>> -            Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
>>> -          } // end for PT
>>> -          *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> -        } // end if IsAddressSplit
>>> -      } // end for PD
>>> -    } // end for PDPT
>>> -  } // end for PML4
>>> +            // Split it
>>> +            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
>>> +              *Pt = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
>>> +            } // end for PT
>>> +            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> +          } // end if IsAddressSplit
>>> +        } // end for PD
>>> +      } // end for PDPT
>>> +    } // end for PML4
>>> +  } // end for PML5
>>>
>>>    //
>>>    // Go through page table and set several page table entries to absent or execute-disable.
>>>    //
>>>    DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
>>> -  for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
>>> -    if (sizeof (UINTN) == sizeof (UINT64)) {
>>> +  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
>>> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
>>> +      //
>>> +      // If PML5 entry does not exist, skip it
>>> +      //
>>> +      continue;
>>> +    }
>>> +    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
>>> +    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
>>>        if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
>>>          //
>>>          // If PML4 entry does not exist, skip it
>>> @@ -642,69 +690,73 @@ InitPaging (
>>>          continue;
>>>        }
>>>        Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> -    } else {
>>> -      Pdpt = (UINT64*)(UINTN)mSmmProfileCr3;
>>> -    }
>>> -    for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
>>> -      if ((*Pdpt & IA32_PG_P) == 0) {
>>> -        //
>>> -        // If PDPT entry does not exist, skip it
>>> -        //
>>> -        continue;
>>> -      }
>>> -      if ((*Pdpt & IA32_PG_PS) != 0) {
>>> -        //
>>> -        // This is 1G entry, set NX bit and skip it
>>> -        //
>>> -        if (mXdSupported) {
>>> -          *Pdpt = *Pdpt | IA32_PG_NX;
>>> +      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
>>> +        if ((*Pdpt & IA32_PG_P) == 0) {
>>> +          //
>>> +          // If PDPT entry does not exist, skip it
>>> +          //
>>> +          continue;
>>>          }
>>> -        continue;
>>> -      }
>>> -      Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> -      if (Pd == 0) {
>>> -        continue;
>>> -      }
>>> -      for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
>>> -        if ((*Pd & IA32_PG_P) == 0) {
>>> +        if ((*Pdpt & IA32_PG_PS) != 0) {
>>>            //
>>> -          // If PD entry does not exist, skip it
>>> +          // This is 1G entry, set NX bit and skip it
>>>            //
>>> +          if (mXdSupported) {
>>> +            *Pdpt = *Pdpt | IA32_PG_NX;
>>> +          }
>>>            continue;
>>>          }
>>> -        Address = (((PdptIndex << 9) + PdIndex) << 21);
>>> -
>>> -        if ((*Pd & IA32_PG_PS) != 0) {
>>> -          // 2MB page
>>> -
>>> -          if (!IsAddressValid (Address, &Nx)) {
>>> +        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> +        if (Pd == 0) {
>>> +          continue;
>>> +        }
>>> +        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
>>> +          if ((*Pd & IA32_PG_P) == 0) {
>>>              //
>>> -            // Patch to remove Present flag and RW flag
>>> +            // If PD entry does not exist, skip it
>>>              //
>>> -            *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
>>> -          }
>>> -          if (Nx && mXdSupported) {
>>> -            *Pd = *Pd | IA32_PG_NX;
>>> -          }
>>> -        } else {
>>> -          // 4KB page
>>> -          Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> -          if (Pt == 0) {
>>>              continue;
>>>            }
>>> -          for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
>>> +          Address = (UINTN) LShiftU64 (
>>> +                              LShiftU64 (
>>> +                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
>>> +                                9
>>> +                                ) + PdIndex,
>>> +                                21
>>> +                              );
>>> +
>>> +          if ((*Pd & IA32_PG_PS) != 0) {
>>> +            // 2MB page
>>> +
>>>              if (!IsAddressValid (Address, &Nx)) {
>>> -              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
>>> +              //
>>> +              // Patch to remove Present flag and RW flag
>>> +              //
>>> +              *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
>>>              }
>>>              if (Nx && mXdSupported) {
>>> -              *Pt = *Pt | IA32_PG_NX;
>>> +              *Pd = *Pd | IA32_PG_NX;
>>> +            }
>>> +          } else {
>>> +            // 4KB page
>>> +            Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> +            if (Pt == 0) {
>>> +              continue;
>>>              }
>>> -            Address += SIZE_4KB;
>>> -          } // end for PT
>>> -        } // end if PS
>>> -      } // end for PD
>>> -    } // end for PDPT
>>> -  } // end for PML4
>>> +            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
>>> +              if (!IsAddressValid (Address, &Nx)) {
>>> +                *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
>>> +              }
>>> +              if (Nx && mXdSupported) {
>>> +                *Pt = *Pt | IA32_PG_NX;
>>> +              }
>>> +              Address += SIZE_4KB;
>>> +            } // end for PT
>>> +          } // end if PS
>>> +        } // end for PD
>>> +      } // end for PDPT
>>> +    } // end for PML4
>>> +  } // end for PML5
>>>
>>>    //
>>>    // Flush TLB
>>> @@ -1156,6 +1208,20 @@ RestorePageTableBelow4G (
>>>  {
>>>    UINTN         PTIndex;
>>>    UINTN         PFIndex;
>>> +  IA32_CR4      Cr4;
>>> +  BOOLEAN       Enable5LevelPaging;
>>> +
>>> +  Cr4.UintN = AsmReadCr4 ();
>>> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
>>> +
>>> +  //
>>> +  // PML5
>>> +  //
>>> +  if (Enable5LevelPaging) {
>>> +    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
>>> +    ASSERT (PageTable[PTIndex] != 0);
>>> +    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
>>> +  }
>>>
>>>    //
>>>    // PML4
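A quick reference for the SmmProfile.c hunks above: the new PML5 level is indexed exactly like the existing levels, by slicing the linear address into 9-bit fields. The sketch below only restates the BitFieldRead64()/shift arithmetic used in the quoted code; the helper name is illustrative and not part of the patch.

    //
    // Illustration only. Each paging level indexes 512 (2^9) entries.
    //
    UINTN
    PagingLevelIndex (
      IN UINT64  LinearAddress,
      IN UINTN   LowBit
      )
    {
      return ((UINTN) RShiftU64 (LinearAddress, LowBit)) & 0x1FF;
    }

    // Pml5Index = PagingLevelIndex (Address, 48);   // bits 48..56, 5-level paging only
    // Pml4Index = PagingLevelIndex (Address, 39);   // bits 39..47
    // PdptIndex = PagingLevelIndex (Address, 30);   // bits 30..38
    // PdIndex   = PagingLevelIndex (Address, 21);   // bits 21..29
    // PtIndex   = PagingLevelIndex (Address, 12);   // bits 12..20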
>>> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
>>> index 3d5d663d99..c31160735a 100644
>>> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
>>> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
>>> @@ -16,6 +16,8 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
>>>  LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
>>>  BOOLEAN                             m1GPageTableSupport = FALSE;
>>>  BOOLEAN                             mCpuSmmStaticPageTable;
>>> +BOOLEAN                             m5LevelPagingSupport;
>>> +X86_ASSEMBLY_PATCH_LABEL            gPatch5LevelPagingSupport;
>>>
>>>  /**
>>>    Disable CET.
>>> @@ -60,6 +62,31 @@ Is1GPageSupport (
>>>    return FALSE;
>>>  }
>>>
>>> +/**
>>> +  Check if 5-level paging is supported by processor or
>>> not.
>>> +
>>> +  @retval TRUE   5-level paging is supported.
>>> +  @retval FALSE  5-level paging is not supported.
>>> +
>>> +**/
>>> +BOOLEAN
>>> +Is5LevelPagingSupport (
>>> +  VOID
>>> +  )
>>> +{
>>> +  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
>>> +
>>> +  AsmCpuidEx (
>>> +    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
>>> +    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
>>> +    NULL,
>>> +    NULL,
>>> +    &EcxFlags.Uint32,
>>> +    NULL
>>> +    );
>>> +  return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
>>> +}
>>> +
>>>  /**
>>>    Set sub-entries number in entry.
>>>
>>> @@ -130,14 +157,6 @@ CalculateMaximumSupportAddress (
>>>        PhysicalAddressBits = 36;
>>>      }
>>>    }
>>> -
>>> -  //
>>> -  // IA-32e paging translates 48-bit linear addresses to
>>> 52-bit physical addresses.
>>> -  //
>>> -  ASSERT (PhysicalAddressBits <= 52);
>>> -  if (PhysicalAddressBits > 48) {
>>> -    PhysicalAddressBits = 48;
>>> -  }
>>>    return PhysicalAddressBits;
>>>  }
>>>
>>> @@ -152,89 +171,137 @@ SetStaticPageTable (
>>>    )
>>>  {
>>>    UINT64                            PageAddress;
>>> +  UINTN                             NumberOfPml5EntriesNeeded;
>>>    UINTN                             NumberOfPml4EntriesNeeded;
>>>    UINTN                             NumberOfPdpEntriesNeeded;
>>> +  UINTN                             IndexOfPml5Entries;
>>>    UINTN                             IndexOfPml4Entries;
>>>    UINTN                             IndexOfPdpEntries;
>>>    UINTN                             IndexOfPageDirectoryEntries;
>>> +  UINT64                            *PageMapLevel5Entry;
>>>    UINT64                            *PageMapLevel4Entry;
>>>    UINT64                            *PageMap;
>>>    UINT64                            *PageDirectoryPointerEntry;
>>>    UINT64                            *PageDirectory1GEntry;
>>>    UINT64                            *PageDirectoryEntry;
>>>
>>> -  if (mPhysicalAddressBits <= 39 ) {
>>> -    NumberOfPml4EntriesNeeded = 1;
>>> -    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
>>> -  } else {
>>> -    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
>>> -    NumberOfPdpEntriesNeeded = 512;
>>> +  //
>>> +  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
>>> +  //  when 5-Level Paging is disabled.
>>> +  //
>>> +  ASSERT (mPhysicalAddressBits <= 52);
>>> +  if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
>>> +    mPhysicalAddressBits = 48;
>>> +  }
>>> +
>>> +  NumberOfPml5EntriesNeeded = 1;
>>> +  if (mPhysicalAddressBits > 48) {
>>> +    NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);
>>> +    mPhysicalAddressBits = 48;
>>> +  }
>>> +
>>> +  NumberOfPml4EntriesNeeded = 1;
>>> +  if (mPhysicalAddressBits > 39) {
>>> +    NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);
>>> +    mPhysicalAddressBits = 39;
>>>    }
>>>
>>> +  NumberOfPdpEntriesNeeded = 1;
>>> +  ASSERT (mPhysicalAddressBits > 30);
>>> +  NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);
>>> +
>>>    //
>>>    // By architecture only one PageMapLevel4 exists - so
>>> lets allocate storage for it.
>>>    //
>>>    PageMap         = (VOID *) PageTable;
>>>
>>>    PageMapLevel4Entry = PageMap;
>>> -  PageAddress        = 0;
>>> -  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
>>> +  PageMapLevel5Entry = NULL;
>>> +  if (m5LevelPagingSupport) {
>>>      //
>>> -    // Each PML4 entry points to a page of Page Directory Pointer entries.
>>> +    // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
>>>      //
>>> -    PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
>>> -    if (PageDirectoryPointerEntry == NULL) {
>>> -      PageDirectoryPointerEntry = AllocatePageTableMemory (1);
>>> -      ASSERT(PageDirectoryPointerEntry != NULL);
>>> -      ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
>>> +    PageMapLevel5Entry = PageMap;
>>> +  }
>>> +  PageAddress        = 0;
>>>
>>> -      *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> +  for ( IndexOfPml5Entries = 0
>>> +      ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
>>> +      ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
>>> +    //
>>> +    // Each PML5 entry points to a page of PML4 entires.
>>> +    // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
>>> +    // When 5-Level Paging is disabled, below allocation happens only once.
>>> +    //
>>> +    if (m5LevelPagingSupport) {
>>> +      PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
>>> +      if (PageMapLevel4Entry == NULL) {
>>> +        PageMapLevel4Entry = AllocatePageTableMemory (1);
>>> +        ASSERT(PageMapLevel4Entry != NULL);
>>> +        ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));
>>> +
>>> +        *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> +      }
>>>      }
>>>
>>> -    if (m1GPageTableSupport) {
>>> -      PageDirectory1GEntry = PageDirectoryPointerEntry;
>>> -      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
>>> -        if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
>>> -          //
>>> -          // Skip the < 4G entries
>>> -          //
>>> -          continue;
>>> -        }
>>> -        //
>>> -        // Fill in the Page Directory entries
>>> -        //
>>> -        *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
>>> +    for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
>>> +      //
>>> +      // Each PML4 entry points to a page of Page Directory Pointer entries.
>>> +      //
>>> +      PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
>>> +      if (PageDirectoryPointerEntry == NULL) {
>>> +        PageDirectoryPointerEntry = AllocatePageTableMemory (1);
>>> +        ASSERT(PageDirectoryPointerEntry != NULL);
>>> +        ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
>>> +
>>> +        *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>>        }
>>> -    } else {
>>> -      PageAddress = BASE_4GB;
>>> -      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
>>> -        if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
>>> -          //
>>> -          // Skip the < 4G entries
>>> -          //
>>> -          continue;
>>> -        }
>>> -        //
>>> -        // Each Directory Pointer entries points to a page of Page Directory entires.
>>> -        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
>>> -        //
>>> -        PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
>>> -        if (PageDirectoryEntry == NULL) {
>>> -          PageDirectoryEntry = AllocatePageTableMemory (1);
>>> -          ASSERT(PageDirectoryEntry != NULL);
>>> -          ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
>>>
>>> +      if (m1GPageTableSupport) {
>>> +        PageDirectory1GEntry = PageDirectoryPointerEntry;
>>> +        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
>>> +          if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
>>> +            //
>>> +            // Skip the < 4G entries
>>> +            //
>>> +            continue;
>>> +          }
>>>            //
>>> -          // Fill in a Page Directory Pointer Entries
>>> +          // Fill in the Page Directory entries
>>>            //
>>> -          *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> +          *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
>>>          }
>>> -
>>> -        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
>>> +      } else {
>>> +        PageAddress = BASE_4GB;
>>> +        for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
>>> +          if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
>>> +            //
>>> +            // Skip the < 4G entries
>>> +            //
>>> +            continue;
>>> +          }
>>>            //
>>> -          // Fill in the Page Directory entries
>>> +          // Each Directory Pointer entries points to a page of Page Directory entires.
>>> +          // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
>>>            //
>>> -          *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
>>> +          PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
>>> +          if (PageDirectoryEntry == NULL) {
>>> +            PageDirectoryEntry = AllocatePageTableMemory (1);
>>> +            ASSERT(PageDirectoryEntry != NULL);
>>> +            ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
>>> +
>>> +            //
>>> +            // Fill in a Page Directory Pointer Entries
>>> +            //
>>> +            *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> +          }
>>> +
>>> +          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
>>> +            //
>>> +            // Fill in the Page Directory entries
>>> +            //
>>> +            *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
>>> +          }
>>>          }
>>>        }
>>>      }
>>> @@ -259,6 +326,8 @@ SmmInitPageTable (
>>>    UINTN                             PageFaultHandlerHookAddress;
>>>    IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;
>>>    EFI_STATUS                        Status;
>>> +  UINT64                            *Pml4Entry;
>>> +  UINT64                            *Pml5Entry;
>>>
>>>    //
>>>    // Initialize spin lock
>>> @@ -266,12 +335,14 @@ SmmInitPageTable (
>>>    InitializeSpinLock (mPFLock);
>>>
>>>    mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
>>> -  m1GPageTableSupport = Is1GPageSupport ();
>>> -  DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
>>> -  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));
>>> -
>>> -  mPhysicalAddressBits = CalculateMaximumSupportAddress ();
>>> -  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
>>> +  m1GPageTableSupport    = Is1GPageSupport ();
>>> +  m5LevelPagingSupport   = Is5LevelPagingSupport ();
>>> +  mPhysicalAddressBits   = CalculateMaximumSupportAddress ();
>>> +  PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
>>> +  DEBUG ((DEBUG_INFO, "5LevelPaging Support     - %d\n", m5LevelPagingSupport));
>>> +  DEBUG ((DEBUG_INFO, "1GPageTable Support      - %d\n", m1GPageTableSupport));
>>> +  DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable));
>>> +  DEBUG ((DEBUG_INFO, "PhysicalAddressBits      - %d\n", mPhysicalAddressBits));
>>>    //
>>>    // Generate PAE page table for the first 4GB memory
>>> space
>>>    //
>>> @@ -288,15 +359,30 @@ SmmInitPageTable (
>>>    //
>>>    // Fill Page-Table-Level4 (PML4) entry
>>>    //
>>> -  PTEntry = (UINT64*)AllocatePageTableMemory (1);
>>> -  ASSERT (PTEntry != NULL);
>>> -  *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> -  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
>>> +  Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
>>> +  ASSERT (Pml4Entry != NULL);
>>> +  *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> +  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
>>>
>>>    //
>>>    // Set sub-entries number
>>>    //
>>> -  SetSubEntriesNum (PTEntry, 3);
>>> +  SetSubEntriesNum (Pml4Entry, 3);
>>> +  PTEntry = Pml4Entry;
>>> +
>>> +  if (m5LevelPagingSupport) {
>>> +    //
>>> +    // Fill PML5 entry
>>> +    //
>>> +    Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
>>> +    *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
>>> +    ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
>>> +    //
>>> +    // Set sub-entries number
>>> +    //
>>> +    SetSubEntriesNum (Pml5Entry, 1);
>>> +    PTEntry = Pml5Entry;
>>> +  }
>>>
>>>    if (mCpuSmmStaticPageTable) {
>>>      SetStaticPageTable ((UINTN)PTEntry);
>>> @@ -344,7 +430,7 @@ SmmInitPageTable (
>>>    }
>>>
>>>    //
>>> -  // Return the address of PML4 (to set CR3)
>>> +  // Return the address of PML4/PML5 (to set CR3)
>>>    //
>>>    return (UINT32)(UINTN)PTEntry;
>>>  }
>>> @@ -436,12 +522,16 @@ ReclaimPages (
>>>    VOID
>>>    )
>>>  {
>>> +  UINT64                       Pml5Entry;
>>> +  UINT64                       *Pml5;
>>>    UINT64                       *Pml4;
>>>    UINT64                       *Pdpt;
>>>    UINT64                       *Pdt;
>>> +  UINTN                        Pml5Index;
>>>    UINTN                        Pml4Index;
>>>    UINTN                        PdptIndex;
>>>    UINTN                        PdtIndex;
>>> +  UINTN                        MinPml5;
>>>    UINTN                        MinPml4;
>>>    UINTN                        MinPdpt;
>>>    UINTN                        MinPdt;
>>> @@ -451,120 +541,147 @@ ReclaimPages (
>>>    BOOLEAN                      PML4EIgnore;
>>>    BOOLEAN                      PDPTEIgnore;
>>>    UINT64                       *ReleasePageAddress;
>>> +  IA32_CR4                     Cr4;
>>> +  BOOLEAN                      Enable5LevelPaging;
>>>
>>>    Pml4 = NULL;
>>>    Pdpt = NULL;
>>>    Pdt  = NULL;
>>>    MinAcc  = (UINT64)-1;
>>>    MinPml4 = (UINTN)-1;
>>> +  MinPml5 = (UINTN)-1;
>>>    MinPdpt = (UINTN)-1;
>>>    MinPdt  = (UINTN)-1;
>>>    Acc     = 0;
>>>    ReleasePageAddress = 0;
>>>
>>> +  Cr4.UintN = AsmReadCr4 ();
>>> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
>>> +  Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
>>> +
>>> +  if (!Enable5LevelPaging) {
>>> +    //
>>> +    // Create one fake PML5 entry for 4-Level Paging
>>> +    // so that the page table parsing logic only handles
>>> 5-Level page structure.
>>> +    //
>>> +    Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
>>> +    Pml5 = &Pml5Entry;
>>> +  }
>>> +
>>>    //
>>>    // First, find the leaf entry has the smallest access
>>> record value
>>>    //
>>> -  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
>>> -  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
>>> -    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
>>> +  for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) {
>>> +    if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
>>>        //
>>> -      // If the PML4 entry is not present or is masked,
>>> skip it
>>> +      // If the PML5 entry is not present or is masked,
>>> skip it
>>>        //
>>>        continue;
>>>      }
>>> -    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
>>> -    PML4EIgnore = FALSE;
>>> -    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
>>> -      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
>>> +    Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
>>> +    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
>>> +      if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
>>>          //
>>> -        // If the PDPT entry is not present or is
>>> masked, skip it
>>> +        // If the PML4 entry is not present or is
>>> masked, skip it
>>>          //
>>> -        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
>>> -          //
>>> -          // If the PDPT entry is masked, we will ignore
>>> checking the PML4 entry
>>> -          //
>>> -          PML4EIgnore = TRUE;
>>> -        }
>>>          continue;
>>>        }
>>> -      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
>>> -        //
>>> -        // It's not 1-GByte pages entry, it should be a
>>> PDPT entry,
>>> -        // we will not check PML4 entry more
>>> -        //
>>> -        PML4EIgnore = TRUE;
>>> -        Pdt =  (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
>>> -        PDPTEIgnore = FALSE;
>>> -        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
>>> -          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
>>> +      Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
>>> +      PML4EIgnore = FALSE;
>>> +      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
>>> +        if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
>>> +          //
>>> +          // If the PDPT entry is not present or is
>>> masked, skip it
>>> +          //
>>> +          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
>>>              //
>>> -            // If the PD entry is not present or is
>>> masked, skip it
>>> +            // If the PDPT entry is masked, we will
>>> ignore checking the PML4 entry
>>>              //
>>> -            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
>>> +            PML4EIgnore = TRUE;
>>> +          }
>>> +          continue;
>>> +        }
>>> +        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
>>> +          //
>>> +          // It's not 1-GByte pages entry, it should be
>>> a PDPT entry,
>>> +          // we will not check PML4 entry more
>>> +          //
>>> +          PML4EIgnore = TRUE;
>>> +          Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
>>> +          PDPTEIgnore = FALSE;
>>> +          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
>>> +            if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
>>> +              //
>>> +              // If the PD entry is not present or is
>>> masked, skip it
>>> +              //
>>> +              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
>>> +                //
>>> +                // If the PD entry is masked, we will
>>> not PDPT entry more
>>> +                //
>>> +                PDPTEIgnore = TRUE;
>>> +              }
>>> +              continue;
>>> +            }
>>> +            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
>>>                //
>>> -              // If the PD entry is masked, we will not
>>> PDPT entry more
>>> +              // It's not 2 MByte page table entry, it
>>> should be PD entry
>>> +              // we will find the entry has the smallest
>>> access record value
>>>                //
>>>                PDPTEIgnore = TRUE;
>>> +              Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
>>> +              if (Acc < MinAcc) {
>>> +                //
>>> +                // If the PD entry has the smallest
>>> access record value,
>>> +                // save the Page address to be released
>>> +                //
>>> +                MinAcc  = Acc;
>>> +                MinPml5 = Pml5Index;
>>> +                MinPml4 = Pml4Index;
>>> +                MinPdpt = PdptIndex;
>>> +                MinPdt  = PdtIndex;
>>> +                ReleasePageAddress = Pdt + PdtIndex;
>>> +              }
>>>              }
>>> -            continue;
>>>            }
>>> -          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
>>> +          if (!PDPTEIgnore) {
>>>              //
>>> -            // It's not 2 MByte page table entry, it
>>> should be PD entry
>>> -            // we will find the entry has the smallest
>>> access record value
>>> +            // If this PDPT entry has no PDT entries
>>> pointer to 4 KByte pages,
>>> +            // it should only has the entries point to 2
>>> MByte Pages
>>>              //
>>> -            PDPTEIgnore = TRUE;
>>> -            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
>>> +            Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
>>>              if (Acc < MinAcc) {
>>>                //
>>> -              // If the PD entry has the smallest access
>>> record value,
>>> +              // If the PDPT entry has the smallest
>>> access record value,
>>>                // save the Page address to be released
>>>                //
>>>                MinAcc  = Acc;
>>> +              MinPml5 = Pml5Index;
>>>                MinPml4 = Pml4Index;
>>>                MinPdpt = PdptIndex;
>>> -              MinPdt  = PdtIndex;
>>> -              ReleasePageAddress = Pdt + PdtIndex;
>>> +              MinPdt  = (UINTN)-1;
>>> +              ReleasePageAddress = Pdpt + PdptIndex;
>>>              }
>>>            }
>>>          }
>>> -        if (!PDPTEIgnore) {
>>> -          //
>>> -          // If this PDPT entry has no PDT entries
>>> pointer to 4 KByte pages,
>>> -          // it should only has the entries point to 2
>>> MByte Pages
>>> -          //
>>> -          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
>>> -          if (Acc < MinAcc) {
>>> -            //
>>> -            // If the PDPT entry has the smallest access
>>> record value,
>>> -            // save the Page address to be released
>>> -            //
>>> -            MinAcc  = Acc;
>>> -            MinPml4 = Pml4Index;
>>> -            MinPdpt = PdptIndex;
>>> -            MinPdt  = (UINTN)-1;
>>> -            ReleasePageAddress = Pdpt + PdptIndex;
>>> -          }
>>> -        }
>>>        }
>>> -    }
>>> -    if (!PML4EIgnore) {
>>> -      //
>>> -      // If PML4 entry has no the PDPT entry pointer to
>>> 2 MByte pages,
>>> -      // it should only has the entries point to 1 GByte
>>> Pages
>>> -      //
>>> -      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
>>> -      if (Acc < MinAcc) {
>>> +      if (!PML4EIgnore) {
>>>          //
>>> -        // If the PML4 entry has the smallest access
>>> record value,
>>> -        // save the Page address to be released
>>> +        // If PML4 entry has no the PDPT entry pointer
>>> to 2 MByte pages,
>>> +        // it should only has the entries point to 1
>>> GByte Pages
>>>          //
>>> -        MinAcc  = Acc;
>>> -        MinPml4 = Pml4Index;
>>> -        MinPdpt = (UINTN)-1;
>>> -        MinPdt  = (UINTN)-1;
>>> -        ReleasePageAddress = Pml4 + Pml4Index;
>>> +        Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
>>> +        if (Acc < MinAcc) {
>>> +          //
>>> +          // If the PML4 entry has the smallest access
>>> record value,
>>> +          // save the Page address to be released
>>> +          //
>>> +          MinAcc  = Acc;
>>> +          MinPml5 = Pml5Index;
>>> +          MinPml4 = Pml4Index;
>>> +          MinPdpt = (UINTN)-1;
>>> +          MinPdt  = (UINTN)-1;
>>> +          ReleasePageAddress = Pml4 + Pml4Index;
>>> +        }
>>>        }
>>>      }
>>>    }
>>> @@ -588,6 +705,7 @@ ReclaimPages (
>>>        //
>>>        // If 4 KByte Page Table is released, check the
>>> PDPT entry
>>>        //
>>> +      Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
>>>        Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
>>>        SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
>>>        if (SubEntriesNum == 0) {
>>> @@ -679,7 +797,7 @@ SmiDefaultPFHandler (
>>>    )
>>>  {
>>>    UINT64                            *PageTable;
>>> -  UINT64                            *Pml4;
>>> +  UINT64                            *PageTableTop;
>>>    UINT64                            PFAddress;
>>>    UINTN                             StartBit;
>>>    UINTN                             EndBit;
>>> @@ -690,6 +808,8 @@ SmiDefaultPFHandler (
>>>    UINTN                             PageAttribute;
>>>    EFI_STATUS                        Status;
>>>    UINT64                            *UpperEntry;
>>> +  BOOLEAN                           Enable5LevelPaging;
>>> +  IA32_CR4                          Cr4;
>>>
>>>    //
>>>    // Set default SMM page attribute
>>> @@ -699,9 +819,12 @@ SmiDefaultPFHandler (
>>>    PageAttribute = 0;
>>>
>>>    EndBit = 0;
>>> -  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
>>> +  PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
>>>    PFAddress = AsmReadCr2 ();
>>>
>>> +  Cr4.UintN = AsmReadCr4 ();
>>> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
>>> +
>>>    Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
>>>    //
>>>    // If platform not support page table attribute, set
>>> default SMM page attribute
>>> @@ -755,9 +878,9 @@ SmiDefaultPFHandler (
>>>    }
>>>
>>>    for (Index = 0; Index < NumOfPages; Index++) {
>>> -    PageTable  = Pml4;
>>> +    PageTable  = PageTableTop;
>>>      UpperEntry = NULL;
>>> -    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
>>> +    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
>>>        PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
>>>        if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
>>>          //
>>> @@ -941,13 +1064,20 @@ SetPageTableAttributes (
>>>    UINTN                 Index2;
>>>    UINTN                 Index3;
>>>    UINTN                 Index4;
>>> +  UINTN                 Index5;
>>>    UINT64                *L1PageTable;
>>>    UINT64                *L2PageTable;
>>>    UINT64                *L3PageTable;
>>>    UINT64                *L4PageTable;
>>> +  UINT64                *L5PageTable;
>>>    BOOLEAN               IsSplitted;
>>>    BOOLEAN               PageTableSplitted;
>>>    BOOLEAN               CetEnabled;
>>> +  IA32_CR4              Cr4;
>>> +  BOOLEAN               Enable5LevelPaging;
>>> +
>>> +  Cr4.UintN = AsmReadCr4 ();
>>> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
>>>
>>>    //
>>>    // Don't do this if
>>> @@ -991,44 +1121,59 @@ SetPageTableAttributes (
>>>    do {
>>>      DEBUG ((DEBUG_INFO, "Start...\n"));
>>>      PageTableSplitted = FALSE;
>>> -
>>> -    L4PageTable = (UINT64 *)GetPageTableBase ();
>>> -    SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>> -    PageTableSplitted = (PageTableSplitted || IsSplitted);
>>> -
>>> -    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
>>> -      L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
>>> -      if (L3PageTable == NULL) {
>>> -        continue;
>>> -      }
>>> -
>>> -      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>> +    L5PageTable = NULL;
>>> +    if (Enable5LevelPaging) {
>>> +      L5PageTable = (UINT64 *)GetPageTableBase ();
>>> +      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>>        PageTableSplitted = (PageTableSplitted || IsSplitted);
>>> +    }
>>>
>>> -      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
>>> -        if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
>>> -          // 1G
>>> +    for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
>>> +      if (Enable5LevelPaging) {
>>> +        L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
>>> +        if (L4PageTable == NULL) {
>>>            continue;
>>>          }
>>> -        L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
>>> -        if (L2PageTable == NULL) {
>>> +      } else {
>>> +        L4PageTable = (UINT64 *)GetPageTableBase ();
>>> +      }
>>> +      SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>> +      PageTableSplitted = (PageTableSplitted || IsSplitted);
>>> +
>>> +      for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
>>> +        L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
>>> +        if (L3PageTable == NULL) {
>>>            continue;
>>>          }
>>>
>>> -        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>> +        SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>>          PageTableSplitted = (PageTableSplitted || IsSplitted);
>>>
>>> -        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
>>> -          if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
>>> -            // 2M
>>> +        for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
>>> +          if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
>>> +            // 1G
>>>              continue;
>>>            }
>>> -          L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
>>> -          if (L1PageTable == NULL) {
>>> +          L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
>>> +          if (L2PageTable == NULL) {
>>>              continue;
>>>            }
>>> -          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>> +
>>> +          SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>>            PageTableSplitted = (PageTableSplitted || IsSplitted);
>>> +
>>> +          for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
>>> +            if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
>>> +              // 2M
>>> +              continue;
>>> +            }
>>> +            L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
>>> +            if (L1PageTable == NULL) {
>>> +              continue;
>>> +            }
>>> +            SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
>>> +            PageTableSplitted = (PageTableSplitted || IsSplitted);
>>> +          }
>>>          }
>>>        }
>>>      }
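A note on how PageTbl.c decides the paging depth, in case it helps review: SmmInitPageTable() consults CPUID.(EAX=07H,ECX=0):ECX[16] through Is5LevelPagingSupport() to choose what to build, while the runtime paths (ReclaimPages(), SmiDefaultPFHandler(), SetPageTableAttributes()) read CR4.LA57 to see what is actually enabled at that moment. A minimal sketch of the latter check follows; the function name is hypothetical, only the CR4 test itself is taken from the patch.

    BOOLEAN
    IsLa57Active (
      VOID
      )
    {
      IA32_CR4  Cr4;

      //
      // CR4.LA57 (bit 12) is set only while 5-level paging is enabled.
      //
      Cr4.UintN = AsmReadCr4 ();
      return (BOOLEAN) (Cr4.Bits.LA57 == 1);
    }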
>>> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
>>> index 741e4b7da2..271492a9d7 100644
>>> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
>>> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
>>> @@ -69,6 +69,7 @@ extern ASM_PFX(mXdSupported)
>>>  global ASM_PFX(gPatchXdSupported)
>>>  global ASM_PFX(gPatchSmiStack)
>>>  global ASM_PFX(gPatchSmiCr3)
>>> +global ASM_PFX(gPatch5LevelPagingSupport)
>>>  global ASM_PFX(gcSmiHandlerTemplate)
>>>  global ASM_PFX(gcSmiHandlerSize)
>>>
>>> @@ -124,6 +125,17 @@ ProtFlatMode:
>>>  ASM_PFX(gPatchSmiCr3):
>>>      mov     cr3, rax
>>>      mov     eax, 0x668                   ; as cr4.PGE is not set here, refresh cr3
>>> +
>>> +    mov     cl, strict byte 0            ; source operand will be patched
>>> +ASM_PFX(gPatch5LevelPagingSupport):
>>> +    cmp     cl, 0
>>> +    je      SkipEnable5LevelPaging
>>> +    ;
>>> +    ; Enable 5-Level Paging bit
>>> +    ;
>>> +    bts     eax, 12                     ; Set LA57 bit (bit #12)
>>> +SkipEnable5LevelPaging:
>>> +
>>>      mov     cr4, rax                    ; in PreModifyMtrrs() to flush TLB.
>>>  ; Load TSS
>>>      sub     esp, 8                      ; reserve room in stack
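For readers less familiar with the SMI entry template: gPatch5LevelPagingSupport labels the 1-byte immediate of "mov cl, strict byte 0", and SmmInitPageTable() rewrites that byte with PatchInstructionX86() so the entry code can test CL instead of dereferencing a global this early. When the byte is non-zero, bit 12 (CR4.LA57) is OR'ed into the CR4 image before "mov cr4, rax". A rough C equivalent of the value being computed, with a hypothetical function name and assuming only what the quoted comments state:

    UINT64
    ExampleSmiCr4Image (
      IN BOOLEAN  Enable5LevelPaging
      )
    {
      UINT64  Cr4Value;

      //
      // 0x668 is the CR4 image the SMI entry loads; per the code comment,
      // CR4.PGE is deliberately clear so the cr3 load flushes the TLB.
      //
      Cr4Value = 0x668;
      if (Enable5LevelPaging) {
        Cr4Value |= LShiftU64 (1, 12);    // CR4.LA57
      }
      return Cr4Value;
    }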
>>> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
>>> index e7c78d36fc..63bae5a913 100644
>>> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
>>> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
>>> @@ -1,7 +1,7 @@
>>>  /** @file
>>>  X64 processor specific functions to enable SMM profile.
>>>
>>> -Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
>>> +Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
>>>  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
>>>
>>>  SPDX-License-Identifier: BSD-2-Clause-Patent
>>> @@ -147,9 +147,14 @@ RestorePageTableAbove4G (
>>>    BOOLEAN       Existed;
>>>    UINTN         Index;
>>>    UINTN         PFIndex;
>>> +  IA32_CR4      Cr4;
>>> +  BOOLEAN       Enable5LevelPaging;
>>>
>>>    ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
>>>
>>> +  Cr4.UintN = AsmReadCr4 ();
>>> +  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
>>> +
>>>    //
>>>    // If page fault address is 4GB above.
>>>    //
>>> @@ -161,38 +166,48 @@ RestorePageTableAbove4G (
>>>    //
>>>    Existed = FALSE;
>>>    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
>>> -  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
>>> -  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
>>> -    // PML4E
>>> -    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> -    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
>>> +  PTIndex = 0;
>>> +  if (Enable5LevelPaging) {
>>> +    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
>>> +  }
>>> +  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
>>> +    // PML5E
>>> +    if (Enable5LevelPaging) {
>>> +      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> +    }
>>> +    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
>>>      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
>>> -      // PDPTE
>>> +      // PML4E
>>>        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> -      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
>>> -      // PD
>>> -      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
>>> -        //
>>> -        // 2MB page
>>> -        //
>>> -        Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> -        if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
>>> -          Existed = TRUE;
>>> -        }
>>> -      } else {
>>> -        //
>>> -        // 4KB page
>>> -        //
>>> -        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
>>> -        if (PageTable != 0) {
>>> +      PTIndex = BitFieldRead64 (PFAddress, 30, 38);
>>> +      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
>>> +        // PDPTE
>>> +        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> +        PTIndex = BitFieldRead64 (PFAddress, 21, 29);
>>> +        // PD
>>> +        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
>>>            //
>>> -          // When there is a valid entry to map to 4KB
>>> page, need not create a new entry to map 2MB.
>>> +          // 2MB page
>>>            //
>>> -          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
>>>            Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> -          if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
>>> +          if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
>>>              Existed = TRUE;
>>>            }
>>> +        } else {
>>> +          //
>>> +          // 4KB page
>>> +          //
>>> +          PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
>>> +          if (PageTable != 0) {
>>> +            //
>>> +            // When there is a valid entry to map to 4KB
>>> page, need not create a new entry to map 2MB.
>>> +            //
>>> +            PTIndex = BitFieldRead64 (PFAddress, 12,
>>> 20);
>>> +            Address = (UINT64)(PageTable[PTIndex] &
>>> ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> +            if ((Address & ~((1ull << 12) - 1)) ==
>>> (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) -
>>> 1))) {
>>> +              Existed = TRUE;
>>> +            }
>>> +          }
>>>          }
>>>        }
>>>      }
>>> @@ -221,6 +236,11 @@ RestorePageTableAbove4G (
>>>      //
>>>      PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
>>>      PFAddress = AsmReadCr2 ();
>>> +    // PML5E
>>> +    if (Enable5LevelPaging) {
>>> +      PTIndex = BitFieldRead64 (PFAddress, 48, 56);
>>> +      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> +    }
>>>      // PML4E
>>>      PTIndex = BitFieldRead64 (PFAddress, 39, 47);
>>>      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
>>> --
>>> 2.21.0.windows.1
>>>
>>>
>>> 
> 

