Implements SmmCpuSyncLib Library instance. The instance refers the
existing SMM CPU driver (PiSmmCpuDxeSmm) sync implementation
and behavior:
1.Abstract Counter and Run semaphores into SmmCpuSyncCtx.
2.Abstract CPU arrival count operation to
SmmCpuSyncGetArrivedCpuCount(), SmmCpuSyncCheckInCpu(),
SmmCpuSyncCheckOutCpu(), SmmCpuSyncLockDoor().
Implementation is aligned with existing SMM CPU driver.
3. Abstract SMM CPU Sync flow to:
BSP: SmmCpuSyncReleaseOneAp --> AP: SmmCpuSyncWaitForBsp
BSP: SmmCpuSyncWaitForAPs <-- AP: SmmCpuSyncReleaseBsp
Semaphores release & wait during sync flow is same as existing SMM
CPU driver.
4. Apply the same operations to the Counter and Run semaphores by leveraging
atomic compare exchange.
Cc: Laszlo Ersek <lersek@redhat.com>
Cc: Eric Dong <eric.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Zeng Star <star.zeng@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Signed-off-by: Jiaxin Wu <jiaxin.wu@intel.com>
---
UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.c | 690 +++++++++++++++++++++
UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.inf | 39 ++
UefiCpuPkg/UefiCpuPkg.dsc | 3 +
3 files changed, 732 insertions(+)
create mode 100644 UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.c
create mode 100644 UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.inf
diff --git a/UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.c b/UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.c
new file mode 100644
index 0000000000..307a6b3d78
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.c
@@ -0,0 +1,690 @@
+/** @file
+ SMM CPU Sync lib implementation.
+ The lib provides 3 sets of APIs:
+ 1. ContextInit/ContextDeinit/ContextReset:
+ ContextInit() is called in driver's entrypoint to allocate and initialize the SMM CPU Sync context.
+ ContextDeinit() is called in driver's unload function to deinitialize the SMM CPU Sync context.
+ ContextReset() is called before the CPU exits SMI, which allows the CPU to check into the next SMI from this point.
+
+ 2. GetArrivedCpuCount/CheckInCpu/CheckOutCpu/LockDoor:
+ When SMI happens, all processors including BSP enter to SMM mode by calling CheckInCpu().
+ The elected BSP calls LockDoor() so that CheckInCpu() will return the error code after that.
+ CheckOutCpu() can be called in error handling flow for the CPU who calls CheckInCpu() earlier.
+ GetArrivedCpuCount() returns the number of checked-in CPUs.
+
+ 3. WaitForAPs/ReleaseOneAp/WaitForBsp/ReleaseBsp
+ WaitForAPs() & ReleaseOneAp() are called from BSP to wait the number of APs and release one specific AP.
+ WaitForBsp() & ReleaseBsp() are called from APs to wait and release BSP.
+ The 4 APIs are used to synchronize the running flow among BSP and APs. BSP and AP Sync flow can be
+ easily understood as below:
+ BSP: ReleaseOneAp --> AP: WaitForBsp
+ BSP: WaitForAPs <-- AP: ReleaseBsp
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Base.h>
+#include <Uefi.h>
+#include <Library/UefiLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/SafeIntLib.h>
+#include <Library/SynchronizationLib.h>
+#include <Library/DebugLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/SmmServicesTableLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/SmmCpuSyncLib.h>
+
+typedef struct {
+ ///
+ /// Indicate how many CPU entered SMM.
+ ///
+ volatile UINT32 *Counter;
+} SMM_CPU_SYNC_SEMAPHORE_GLOBAL;
+
+typedef struct {
+ ///
+ /// Used for control each CPU continue run or wait for signal
+ ///
+ volatile UINT32 *Run;
+} SMM_CPU_SYNC_SEMAPHORE_CPU;
+
+struct SMM_CPU_SYNC_CTX {
+ ///
+ /// All global semaphores' pointer in SMM CPU Sync
+ ///
+ SMM_CPU_SYNC_SEMAPHORE_GLOBAL *GlobalSem;
+ ///
+ /// All semaphores for each processor in SMM CPU Sync
+ ///
+ SMM_CPU_SYNC_SEMAPHORE_CPU *CpuSem;
+ ///
+ /// The number of processors in the system.
+ /// This does not indicate the number of processors that entered SMM.
+ ///
+ UINTN NumberOfCpus;
+ ///
+ /// Address of global and each CPU semaphores
+ ///
+ UINTN *SemBlock;
+ ///
+ /// Size of global and each CPU semaphores
+ ///
+ UINTN SemBlockPages;
+};
+
+/**
+ Performs an atomic compare exchange operation to get semaphore.
+ The compare exchange operation must be performed using MP safe
+ mechanisms.
+
+ @param[in,out] Sem IN: 32-bit unsigned integer
+ OUT: original integer - 1
+
+ @retval Original integer - 1
+
+**/
+UINT32
+InternalWaitForSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ for ( ; ;) {
+ Value = *Sem;
+ if ((Value != 0) &&
+ (InterlockedCompareExchange32 (
+ (UINT32 *)Sem,
+ Value,
+ Value - 1
+ ) == Value))
+ {
+ break;
+ }
+
+ CpuPause ();
+ }
+
+ return Value - 1;
+}
+
+/**
+ Performs an atomic compare exchange operation to release semaphore.
+ The compare exchange operation must be performed using MP safe
+ mechanisms.
+
+ @param[in,out] Sem IN: 32-bit unsigned integer
+ OUT: original integer + 1
+
+ @retval Original integer + 1
+
+**/
+UINT32
+InternalReleaseSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ do {
+ Value = *Sem;
+ } while (Value + 1 != 0 &&
+ InterlockedCompareExchange32 (
+ (UINT32 *)Sem,
+ Value,
+ Value + 1
+ ) != Value);
+
+ return Value + 1;
+}
+
+/**
+ Performs an atomic compare exchange operation to lock semaphore.
+ The compare exchange operation must be performed using MP safe
+ mechanisms.
+
+ @param[in,out] Sem IN: 32-bit unsigned integer
+ OUT: -1
+
+ @retval Original integer
+
+**/
+UINT32
+InternalLockdownSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ do {
+ Value = *Sem;
+ } while (InterlockedCompareExchange32 (
+ (UINT32 *)Sem,
+ Value,
+ (UINT32)-1
+ ) != Value);
+
+ return Value;
+}
+
+/**
+ Create and initialize the SMM CPU Sync context.
+
+ SmmCpuSyncContextInit() function is to allocate and initialize the SMM CPU Sync context.
+
+ @param[in] NumberOfCpus The number of Logical Processors in the system.
+ @param[out] SmmCpuSyncCtx Pointer to the new created and initialized SMM CPU Sync context object.
+ NULL will be returned if any error happen during init.
+
+ @retval RETURN_SUCCESS The SMM CPU Sync context was successful created and initialized.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL.
+ @retval RETURN_BUFFER_TOO_SMALL Overflow happen
+ @retval RETURN_OUT_OF_RESOURCES There are not enough resources available to create and initialize SMM CPU Sync context.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncContextInit (
+ IN UINTN NumberOfCpus,
+ OUT SMM_CPU_SYNC_CTX **SmmCpuSyncCtx
+ )
+{
+ RETURN_STATUS Status;
+
+ UINTN CpuSemInCtxSize;
+ UINTN CtxSize;
+
+ UINTN OneSemSize;
+ UINTN GlobalSemSize;
+ UINTN OneCpuSemSize;
+ UINTN CpuSemSize;
+ UINTN TotalSemSize;
+
+ UINTN SemAddr;
+ UINTN CpuIndex;
+
+ if (SmmCpuSyncCtx == NULL) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ //
+ // Count the CtxSize
+ //
+ Status = SafeUintnMult (NumberOfCpus, sizeof (SMM_CPU_SYNC_SEMAPHORE_CPU), &CpuSemInCtxSize);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ Status = SafeUintnAdd (sizeof (SMM_CPU_SYNC_CTX), CpuSemInCtxSize, &CtxSize);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ Status = SafeUintnAdd (CtxSize, sizeof (SMM_CPU_SYNC_SEMAPHORE_GLOBAL), &CtxSize);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ //
+ // Allocate CtxSize buffer for the *SmmCpuSyncCtx
+ //
+ *SmmCpuSyncCtx = NULL;
+ *SmmCpuSyncCtx = (SMM_CPU_SYNC_CTX *)AllocatePages (EFI_SIZE_TO_PAGES (CtxSize));
+ ASSERT (*SmmCpuSyncCtx != NULL);
+ if (*SmmCpuSyncCtx == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+
+ (*SmmCpuSyncCtx)->GlobalSem = (SMM_CPU_SYNC_SEMAPHORE_GLOBAL *)((UINT8 *)(*SmmCpuSyncCtx) + sizeof (SMM_CPU_SYNC_CTX));
+ (*SmmCpuSyncCtx)->CpuSem = (SMM_CPU_SYNC_SEMAPHORE_CPU *)((UINT8 *)(*SmmCpuSyncCtx) + sizeof (SMM_CPU_SYNC_CTX) + sizeof (SMM_CPU_SYNC_SEMAPHORE_GLOBAL));
+ (*SmmCpuSyncCtx)->NumberOfCpus = NumberOfCpus;
+
+ //
+ // Count the TotalSemSize
+ //
+ OneSemSize = GetSpinLockProperties ();
+
+ Status = SafeUintnMult (OneSemSize, sizeof (SMM_CPU_SYNC_SEMAPHORE_GLOBAL) / sizeof (VOID *), &GlobalSemSize);
+ if (EFI_ERROR (Status)) {
+ goto ON_ERROR;
+ }
+
+ Status = SafeUintnMult (OneSemSize, sizeof (SMM_CPU_SYNC_SEMAPHORE_CPU) / sizeof (VOID *), &OneCpuSemSize);
+ if (EFI_ERROR (Status)) {
+ goto ON_ERROR;
+ }
+
+ Status = SafeUintnMult (NumberOfCpus, OneCpuSemSize, &CpuSemSize);
+ if (EFI_ERROR (Status)) {
+ goto ON_ERROR;
+ }
+
+ Status = SafeUintnAdd (GlobalSemSize, CpuSemSize, &TotalSemSize);
+ if (EFI_ERROR (Status)) {
+ goto ON_ERROR;
+ }
+
+ DEBUG ((DEBUG_INFO, "[%a] - One Semaphore Size = 0x%x\n", __func__, OneSemSize));
+ DEBUG ((DEBUG_INFO, "[%a] - Total Semaphores Size = 0x%x\n", __func__, TotalSemSize));
+
+ //
+ // Allocate for Semaphores in the *SmmCpuSyncCtx
+ //
+ (*SmmCpuSyncCtx)->SemBlockPages = EFI_SIZE_TO_PAGES (TotalSemSize);
+ (*SmmCpuSyncCtx)->SemBlock = AllocatePages ((*SmmCpuSyncCtx)->SemBlockPages);
+ ASSERT ((*SmmCpuSyncCtx)->SemBlock != NULL);
+ if ((*SmmCpuSyncCtx)->SemBlock == NULL) {
+ Status = RETURN_OUT_OF_RESOURCES;
+ goto ON_ERROR;
+ }
+
+ ZeroMem ((*SmmCpuSyncCtx)->SemBlock, TotalSemSize);
+
+ //
+ // Assign Global Semaphore pointer
+ //
+ SemAddr = (UINTN)(*SmmCpuSyncCtx)->SemBlock;
+ (*SmmCpuSyncCtx)->GlobalSem->Counter = (UINT32 *)SemAddr;
+ *(*SmmCpuSyncCtx)->GlobalSem->Counter = 0;
+ DEBUG ((DEBUG_INFO, "[%a] - (*SmmCpuSyncCtx)->GlobalSem->Counter Address: 0x%08x\n", __func__, (UINTN)(*SmmCpuSyncCtx)->GlobalSem->Counter));
+
+ SemAddr += GlobalSemSize;
+
+ //
+ // Assign CPU Semaphore pointer
+ //
+ for (CpuIndex = 0; CpuIndex < NumberOfCpus; CpuIndex++) {
+ (*SmmCpuSyncCtx)->CpuSem[CpuIndex].Run = (UINT32 *)(SemAddr + (CpuSemSize / NumberOfCpus) * CpuIndex);
+ *(*SmmCpuSyncCtx)->CpuSem[CpuIndex].Run = 0;
+ DEBUG ((DEBUG_INFO, "[%a] - (*SmmCpuSyncCtx)->CpuSem[%d].Run Address: 0x%08x\n", __func__, CpuIndex, (UINTN)(*SmmCpuSyncCtx)->CpuSem[CpuIndex].Run));
+ }
+
+ return RETURN_SUCCESS;
+
+ON_ERROR:
+ FreePages (*SmmCpuSyncCtx, EFI_SIZE_TO_PAGES (CtxSize));
+ return Status;
+}
+
+/**
+ Deinit an allocated SMM CPU Sync context.
+
+ SmmCpuSyncContextDeinit() function is to deinitialize SMM CPU Sync context, the resources allocated in
+ SmmCpuSyncContextInit() will be freed.
+
+ Note: This function only can be called after SmmCpuSyncContextInit() return success.
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object to be deinitialized.
+
+ @retval RETURN_SUCCESS The SMM CPU Sync context was successful deinitialized.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncContextDeinit (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+ UINTN CtxSize;
+
+ if (SmmCpuSyncCtx == NULL) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ //
+ // Don't need Safe multiplication & Addition here since it has already checked in
+ // SmmCpuSyncContextInit(), and this function only can be called after SmmCpuSyncContextInit()
+ // return success.
+ //
+ CtxSize = sizeof (SMM_CPU_SYNC_CTX) + sizeof (SMM_CPU_SYNC_SEMAPHORE_GLOBAL) + sizeof (SMM_CPU_SYNC_SEMAPHORE_CPU) * (Ctx->NumberOfCpus);
+
+ FreePages (Ctx->SemBlock, Ctx->SemBlockPages);
+
+ FreePages (Ctx, EFI_SIZE_TO_PAGES (CtxSize));
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Reset SMM CPU Sync context.
+
+ SmmCpuSyncContextReset() function is to reset SMM CPU Sync context to the initialized state.
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object to be reset.
+
+ @retval RETURN_SUCCESS The SMM CPU Sync context was successful reset.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncContextReset (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+
+ if (SmmCpuSyncCtx == NULL) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ *Ctx->GlobalSem->Counter = 0;
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Get current number of arrived CPU in SMI.
+
+ For the traditional CPU synchronization method, the BSP might need to know the current number of arrived CPUs in
+ SMI to make sure all APs are in SMI. This API can be used for that purpose.
+
+ @param[in] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object.
+ @param[in,out] CpuCount Current count of arrived CPU in SMI.
+
+ @retval RETURN_SUCCESS Get current number of arrived CPU in SMI successfully.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx or CpuCount is NULL.
+ @retval RETURN_ABORTED Function Aborted due to the door has been locked by SmmCpuSyncLockDoor() function.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncGetArrivedCpuCount (
+  IN SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
+  IN OUT UINTN *CpuCount
+  )
+{
+  SMM_CPU_SYNC_CTX *Ctx;
+
+  if ((SmmCpuSyncCtx == NULL) || (CpuCount == NULL)) {
+    return RETURN_INVALID_PARAMETER;
+  }
+
+  Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+  //
+  // SmmCpuSyncLockDoor() sets Counter to (UINT32)-1. Counter is unsigned,
+  // so it must be compared as INT32 to detect the locked state; comparing
+  // the raw UINT32 value with 0 can never be true. This matches the
+  // (INT32) casts used in SmmCpuSyncCheckInCpu()/SmmCpuSyncCheckOutCpu().
+  //
+  if ((INT32)*Ctx->GlobalSem->Counter < 0) {
+    return RETURN_ABORTED;
+  }
+
+  *CpuCount = *Ctx->GlobalSem->Counter;
+
+  return RETURN_SUCCESS;
+}
+
+/**
+ Performs an atomic operation to check in CPU.
+
+ When SMI happens, all processors including BSP enter to SMM mode by calling SmmCpuSyncCheckInCpu().
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object.
+ @param[in] CpuIndex Check in CPU index.
+
+ @retval RETURN_SUCCESS Check in CPU (CpuIndex) successfully.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL.
+ @retval RETURN_ABORTED Check in CPU failed due to SmmCpuSyncLockDoor() has been called by one elected CPU.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncCheckInCpu (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
+ IN UINTN CpuIndex
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+
+ if (SmmCpuSyncCtx == NULL) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ //
+ // Check to return if Counter has already been locked.
+ //
+ if ((INT32)InternalReleaseSemaphore (Ctx->GlobalSem->Counter) <= 0) {
+ return RETURN_ABORTED;
+ }
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Performs an atomic operation to check out CPU.
+
+ CheckOutCpu() can be called in error handling flow for the CPU who calls CheckInCpu() earlier.
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object.
+ @param[in] CpuIndex Check out CPU index.
+
+ @retval RETURN_SUCCESS Check out CPU (CpuIndex) successfully.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL.
+ @retval RETURN_NOT_READY The CPU is not checked-in.
+ @retval RETURN_ABORTED Check out CPU failed due to SmmCpuSyncLockDoor() has been called by one elected CPU.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncCheckOutCpu (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
+ IN UINTN CpuIndex
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+
+ if (SmmCpuSyncCtx == NULL) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ if (*Ctx->GlobalSem->Counter == 0) {
+ return RETURN_NOT_READY;
+ }
+
+ if ((INT32)InternalWaitForSemaphore (Ctx->GlobalSem->Counter) < 0) {
+ return RETURN_ABORTED;
+ }
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Performs an atomic operation lock door for CPU checkin or checkout.
+
+ After this function:
+ CPU can not check in via SmmCpuSyncCheckInCpu().
+ CPU can not check out via SmmCpuSyncCheckOutCpu().
+ CPU can not get number of arrived CPU in SMI via SmmCpuSyncGetArrivedCpuCount(). The number of
+ arrived CPU in SMI will be returned in CpuCount.
+
+ The CPU specified by CpuIndex is elected to lock door.
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object.
+ @param[in] CpuIndex Indicate which CPU to lock door.
+ @param[in,out] CpuCount Number of arrived CPUs in SMI after the door is locked.
+
+ @retval RETURN_SUCCESS Lock door for CPU successfully.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx or CpuCount is NULL.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncLockDoor (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
+ IN UINTN CpuIndex,
+ IN OUT UINTN *CpuCount
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+
+ if ((SmmCpuSyncCtx == NULL) || (CpuCount == NULL)) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ *CpuCount = InternalLockdownSemaphore (Ctx->GlobalSem->Counter);
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Used by the BSP to wait for APs.
+
+ The number of APs need to be waited is specified by NumberOfAPs. The BSP is specified by BspIndex.
+
+ Note: This function is blocking mode, and it will return only after the number of APs released by
+ calling SmmCpuSyncReleaseBsp():
+ BSP: WaitForAPs <-- AP: ReleaseBsp
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object.
+ @param[in] NumberOfAPs Number of APs need to be waited by BSP.
+ @param[in] BspIndex The BSP Index to wait for APs.
+
+ @retval RETURN_SUCCESS BSP to wait for APs successfully.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL or NumberOfAPs > total number of processors in system.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncWaitForAPs (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
+ IN UINTN NumberOfAPs,
+ IN UINTN BspIndex
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+
+ if (SmmCpuSyncCtx == NULL) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ if (NumberOfAPs > Ctx->NumberOfCpus) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ while (NumberOfAPs-- > 0) {
+ InternalWaitForSemaphore (Ctx->CpuSem[BspIndex].Run);
+ }
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Used by the BSP to release one AP.
+
+ The AP is specified by CpuIndex. The BSP is specified by BspIndex.
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object.
+ @param[in] CpuIndex Indicate which AP need to be released.
+ @param[in] BspIndex The BSP Index to release AP.
+
+ @retval RETURN_SUCCESS BSP to release one AP successfully.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL or CpuIndex is same as BspIndex.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncReleaseOneAp (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
+ IN UINTN CpuIndex,
+ IN UINTN BspIndex
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+
+ if ((SmmCpuSyncCtx == NULL) || (BspIndex == CpuIndex)) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ InternalReleaseSemaphore (Ctx->CpuSem[CpuIndex].Run);
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Used by the AP to wait BSP.
+
+ The AP is specified by CpuIndex. The BSP is specified by BspIndex.
+
+ Note: This function is blocking mode, and it will return only after the AP released by
+ calling SmmCpuSyncReleaseOneAp():
+ BSP: ReleaseOneAp --> AP: WaitForBsp
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object.
+ @param[in] CpuIndex Indicate which AP wait BSP.
+ @param[in] BspIndex The BSP Index to be waited.
+
+ @retval RETURN_SUCCESS AP to wait BSP successfully.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL or CpuIndex is same as BspIndex.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncWaitForBsp (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
+ IN UINTN CpuIndex,
+ IN UINTN BspIndex
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+
+ if ((SmmCpuSyncCtx == NULL) || (BspIndex == CpuIndex)) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ InternalWaitForSemaphore (Ctx->CpuSem[CpuIndex].Run);
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Used by the AP to release BSP.
+
+ The AP is specified by CpuIndex. The BSP is specified by BspIndex.
+
+ @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context object.
+ @param[in] CpuIndex Indicate which AP release BSP.
+ @param[in] BspIndex The BSP Index to be released.
+
+ @retval RETURN_SUCCESS AP to release BSP successfully.
+ @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL or CpuIndex is same as BspIndex.
+
+**/
+RETURN_STATUS
+EFIAPI
+SmmCpuSyncReleaseBsp (
+ IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
+ IN UINTN CpuIndex,
+ IN UINTN BspIndex
+ )
+{
+ SMM_CPU_SYNC_CTX *Ctx;
+
+ if ((SmmCpuSyncCtx == NULL) || (BspIndex == CpuIndex)) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
+
+ InternalReleaseSemaphore (Ctx->CpuSem[BspIndex].Run);
+
+ return RETURN_SUCCESS;
+}
diff --git a/UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.inf b/UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.inf
new file mode 100644
index 0000000000..6bb1895577
--- /dev/null
+++ b/UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.inf
@@ -0,0 +1,39 @@
+## @file
+# SMM CPU Synchronization lib.
+#
+# This is SMM CPU Synchronization lib used for SMM CPU sync operations.
+#
+# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = SmmCpuSyncLib
+ FILE_GUID = 1ca1bc1a-16a4-46ef-956a-ca500fd3381f
+ MODULE_TYPE = DXE_SMM_DRIVER
+ LIBRARY_CLASS = SmmCpuSyncLib|DXE_SMM_DRIVER
+
+[Sources]
+ SmmCpuSyncLib.c
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ UefiCpuPkg/UefiCpuPkg.dec
+
+[LibraryClasses]
+ UefiLib
+ BaseLib
+ DebugLib
+ PrintLib
+ SafeIntLib
+ SynchronizationLib
+ BaseMemoryLib
+ SmmServicesTableLib
+ MemoryAllocationLib
+
+[Pcd]
+
+[Protocols]
diff --git a/UefiCpuPkg/UefiCpuPkg.dsc b/UefiCpuPkg/UefiCpuPkg.dsc
index 074fd77461..f264031c77 100644
--- a/UefiCpuPkg/UefiCpuPkg.dsc
+++ b/UefiCpuPkg/UefiCpuPkg.dsc
@@ -23,10 +23,11 @@
#
!include MdePkg/MdeLibs.dsc.inc
[LibraryClasses]
+ SafeIntLib|MdePkg/Library/BaseSafeIntLib/BaseSafeIntLib.inf
BaseLib|MdePkg/Library/BaseLib/BaseLib.inf
BaseMemoryLib|MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf
CpuLib|MdePkg/Library/BaseCpuLib/BaseCpuLib.inf
DebugLib|MdePkg/Library/BaseDebugLibNull/BaseDebugLibNull.inf
SerialPortLib|MdePkg/Library/BaseSerialPortLibNull/BaseSerialPortLibNull.inf
@@ -54,10 +55,11 @@
CacheMaintenanceLib|MdePkg/Library/BaseCacheMaintenanceLib/BaseCacheMaintenanceLib.inf
PciLib|MdePkg/Library/BasePciLibPciExpress/BasePciLibPciExpress.inf
PciExpressLib|MdePkg/Library/BasePciExpressLib/BasePciExpressLib.inf
SmmCpuPlatformHookLib|UefiCpuPkg/Library/SmmCpuPlatformHookLibNull/SmmCpuPlatformHookLibNull.inf
SmmCpuFeaturesLib|UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLib.inf
+ SmmCpuSyncLib|UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.inf
PeCoffGetEntryPointLib|MdePkg/Library/BasePeCoffGetEntryPointLib/BasePeCoffGetEntryPointLib.inf
PeCoffExtraActionLib|MdePkg/Library/BasePeCoffExtraActionLibNull/BasePeCoffExtraActionLibNull.inf
TpmMeasurementLib|MdeModulePkg/Library/TpmMeasurementLibNull/TpmMeasurementLibNull.inf
CcExitLib|UefiCpuPkg/Library/CcExitLibNull/CcExitLibNull.inf
MicrocodeLib|UefiCpuPkg/Library/MicrocodeLib/MicrocodeLib.inf
@@ -154,10 +156,11 @@
UefiCpuPkg/Library/RegisterCpuFeaturesLib/DxeRegisterCpuFeaturesLib.inf
UefiCpuPkg/Library/SmmCpuPlatformHookLibNull/SmmCpuPlatformHookLibNull.inf
UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLib.inf
UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf
UefiCpuPkg/Library/SmmCpuFeaturesLib/StandaloneMmCpuFeaturesLib.inf
+ UefiCpuPkg/Library/SmmCpuSyncLib/SmmCpuSyncLib.inf
UefiCpuPkg/Library/CcExitLibNull/CcExitLibNull.inf
UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationPei.inf
UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationSmm.inf
UefiCpuPkg/SecCore/SecCore.inf
UefiCpuPkg/SecCore/SecCoreNative.inf
--
2.16.2.windows.1
-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#111895): https://edk2.groups.io/g/devel/message/111895
Mute This Topic: https://groups.io/mt/102889293/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
> +typedef struct {
> + ///
> + /// Indicate how many CPU entered SMM.
> + ///
> + volatile UINT32 *Counter;
> +} SMM_CPU_SYNC_SEMAPHORE_GLOBAL;
> +
> +typedef struct {
> + ///
> + /// Used for control each CPU continue run or wait for signal
> + ///
> + volatile UINT32 *Run;
> +} SMM_CPU_SYNC_SEMAPHORE_CPU;
> +
> +struct SMM_CPU_SYNC_CTX {
1. How about "SMM_CPU_SYNC_CONTEXT"?
> + ///
> + /// All global semaphores' pointer in SMM CPU Sync
> + ///
> + SMM_CPU_SYNC_SEMAPHORE_GLOBAL *GlobalSem;
2. There is only one GlobalSem. Can you directly use "volatile UINT32 *Counter" instead of "GlobalSem"?
> + ///
> + /// All semaphores for each processor in SMM CPU Sync
> + ///
> + SMM_CPU_SYNC_SEMAPHORE_CPU *CpuSem;
3. Can we use "volatile UINT32 **Run" instead of pointing to another structure?
Run points to an array where each element is a UINT32 *. Count of array equals to NumberOfCpus.
> + ///
> + /// The number of processors in the system.
> + /// This does not indicate the number of processors that entered SMM.
> + ///
> + UINTN NumberOfCpus;
> + ///
> + /// Address of global and each CPU semaphores
> + ///
> + UINTN *SemBlock;
4. How about "SemBuffer"?
> + ///
> + /// Size of global and each CPU semaphores
> + ///
> + UINTN SemBlockPages;
5. How about "SemBufferSize"?
> +};
> +
> +EFIAPI
> +SmmCpuSyncContextInit (
> + IN UINTN NumberOfCpus,
> + OUT SMM_CPU_SYNC_CTX **SmmCpuSyncCtx
> + )
> +{
> + RETURN_STATUS Status;
> +
> + UINTN CpuSemInCtxSize;
> + UINTN CtxSize;
> +
> + UINTN OneSemSize;
> + UINTN GlobalSemSize;
> + UINTN OneCpuSemSize;
> + UINTN CpuSemSize;
> + UINTN TotalSemSize;
> +
> + UINTN SemAddr;
> + UINTN CpuIndex;
6. Can you remove the empty lines among the local variable declarations?
> +
> + if (SmmCpuSyncCtx == NULL) {
> + return RETURN_INVALID_PARAMETER;
> + }
> +
> + //
> + // Count the CtxSize
> + //
> + Status = SafeUintnMult (NumberOfCpus, sizeof
> (SMM_CPU_SYNC_SEMAPHORE_CPU), &CpuSemInCtxSize);
7. Is there a reason to use SafeUintxxx()? I don't believe the multiplication could exceed MAX_UINTN.
But using SafeUintxxx() makes code hard to read.
> + //
> + // Allocate CtxSize buffer for the *SmmCpuSyncCtx
> + //
> + *SmmCpuSyncCtx = NULL;
8. This is not needed.
> + Status = SafeUintnMult (OneSemSize, sizeof
> (SMM_CPU_SYNC_SEMAPHORE_GLOBAL) / sizeof (VOID *), &GlobalSemSize);
> + if (EFI_ERROR (Status)) {
> + goto ON_ERROR;
9. If you don't use SafeUintxxx(), you don't need "ON_ERROR" label and the "goto".
> +/**
> + Performs an atomic operation to check in CPU.
> +
> + When SMI happens, all processors including BSP enter to SMM mode by
> calling SmmCpuSyncCheckInCpu().
> +
> + @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync context
> object.
> + @param[in] CpuIndex Check in CPU index.
> +
> + @retval RETURN_SUCCESS Check in CPU (CpuIndex) successfully.
> + @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL.
> + @retval RETURN_ABORTED Check in CPU failed due to
> SmmCpuSyncLockDoor() has been called by one elected CPU.
> +
> +**/
> +RETURN_STATUS
> +EFIAPI
> +SmmCpuSyncCheckInCpu (
> + IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
> + IN UINTN CpuIndex
> + )
> +{
> + SMM_CPU_SYNC_CTX *Ctx;
> +
> + if (SmmCpuSyncCtx == NULL) {
> + return RETURN_INVALID_PARAMETER;
> + }
10. Can we use "ASSERT" instead of if-check? We can explicitly mention the behavior in API comments.
> +
> + Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
11. Why do we need the type cast?
> +
> +/**
> + Performs an atomic operation lock door for CPU checkin or checkout.
> +
> + After this function:
> + CPU can not check in via SmmCpuSyncCheckInCpu().
> + CPU can not check out via SmmCpuSyncCheckOutCpu().
> + CPU can not get number of arrived CPU in SMI via
> SmmCpuSyncGetArrivedCpuCount(). The number of
> + arrived CPU in SMI will be returned in CpuCount.
12. I agree that after locking, cpu cannot "checkin".
But can cpu "checkout" or "getArrivedcount"? I need to double check.
-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#112087): https://edk2.groups.io/g/devel/message/112087
Mute This Topic: https://groups.io/mt/102889293/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/leave/3901457/1787277/102458076/xyzzy [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
> > +struct SMM_CPU_SYNC_CTX {
>
> 1. How about "SMM_CPU_SYNC_CONTEXT"?
Agree.
>
> > + ///
> > + /// All global semaphores' pointer in SMM CPU Sync
> > + ///
> > + SMM_CPU_SYNC_SEMAPHORE_GLOBAL *GlobalSem;
>
> 2. There is only one GlobalSem. Can you directly use "volatile UINT32
> *Counter" instead of "GlobalSem"?
I think it's not a big deal whether we combine them into one structure or separate them into different structures.
Separating different attribute fields into different structures benefits code readability, maintainability & extensibility. It's easy to understand which field is for GlobalSem, and which field is for CpuSem.
With separated structures, it is very easy to code the later logic for semaphore memory allocation/free. Because we need 64-byte alignment for semaphores, Run[CpuIndex] cannot meet that requirement in the later coding since it is of UINT32 type. That would bring a lot of inconvenience and make mistakes easy. Besides, we only need to allocate/free one continuous SemBuffer for all semaphores, and it is easy for the consumer to point to the next one with CpuSem[CpuIndex].Run.
>
> > + ///
> > + /// All semaphores for each processor in SMM CPU Sync
> > + ///
> > + SMM_CPU_SYNC_SEMAPHORE_CPU *CpuSem;
>
> 3. Can we use "volatile UINT32 **Run" instead of pointing to another
> structure?
> Run points to an array where each element is a UINT32 *. Count of array
> equals to NumberOfCpus.
>
>
Same as above explained.
>
> > + ///
> > + /// The number of processors in the system.
> > + /// This does not indicate the number of processors that entered SMM.
> > + ///
> > + UINTN NumberOfCpus;
> > + ///
> > + /// Address of global and each CPU semaphores
> > + ///
> > + UINTN *SemBlock;
> 4. How about "SemBuffer"?
>
Agree.
>
>
> > + ///
> > + /// Size of global and each CPU semaphores
> > + ///
> > + UINTN SemBlockPages;
>
> 5. How about "SemBufferSize"?
Agree, you want it to be bytes instead of pages?
>
> > +};
> > +
> > +EFIAPI
> > +SmmCpuSyncContextInit (
> > + IN UINTN NumberOfCpus,
> > + OUT SMM_CPU_SYNC_CTX **SmmCpuSyncCtx
> > + )
> > +{
> > + RETURN_STATUS Status;
> > +
> > + UINTN CpuSemInCtxSize;
> > + UINTN CtxSize;
> > +
> > + UINTN OneSemSize;
> > + UINTN GlobalSemSize;
> > + UINTN OneCpuSemSize;
> > + UINTN CpuSemSize;
> > + UINTN TotalSemSize;
> > +
> > + UINTN SemAddr;
> > + UINTN CpuIndex;
>
> 6. Can you remove the empty lines among the local variable declarations?
Agree. I just wanted it to be more readable: the empty lines group the variables that are used for the same purpose. That was my original coding style, but I can change it since you don't like it. :)
>
> > +
> > + if (SmmCpuSyncCtx == NULL) {
> > + return RETURN_INVALID_PARAMETER;
> > + }
> > +
> > + //
> > + // Count the CtxSize
> > + //
> > + Status = SafeUintnMult (NumberOfCpus, sizeof
> > (SMM_CPU_SYNC_SEMAPHORE_CPU), &CpuSemInCtxSize);
>
> 7. Is there a reason to use SafeUintxxx()? I don't believe the multiplication
> could exceed MAX_UINTN.
> But using SafeUintxxx() makes code hard to read.
>
These are the comments from Laszlo:
Please use SafeIntLib for all calculations, to prevent overflows. This
means primarily (but not exclusively) memory size calculations, for
allocations
From the API perspective, it's possible, since NumberOfCpus is defined as UINTN, right?
> > + //
> > + // Allocate CtxSize buffer for the *SmmCpuSyncCtx
> > + //
> > + *SmmCpuSyncCtx = NULL;
> 8. This is not needed.
>
> > + Status = SafeUintnMult (OneSemSize, sizeof
> > (SMM_CPU_SYNC_SEMAPHORE_GLOBAL) / sizeof (VOID *),
> &GlobalSemSize);
> > + if (EFI_ERROR (Status)) {
> > + goto ON_ERROR;
>
> 9. If you don't use SafeUintxxx(), you don't need "ON_ERROR" label and the
> "goto".
>
Yes, we can discuss whether we need SafeUintxxx(); for me, both are OK. SafeUintxxx() doesn't do any harm.
>
> > +/**
> > + Performs an atomic operation to check in CPU.
> > +
> > + When SMI happens, all processors including BSP enter to SMM mode by
> > calling SmmCpuSyncCheckInCpu().
> > +
> > + @param[in,out] SmmCpuSyncCtx Pointer to the SMM CPU Sync
> context
> > object.
> > + @param[in] CpuIndex Check in CPU index.
> > +
> > + @retval RETURN_SUCCESS Check in CPU (CpuIndex) successfully.
> > + @retval RETURN_INVALID_PARAMETER SmmCpuSyncCtx is NULL.
> > + @retval RETURN_ABORTED Check in CPU failed due to
> > SmmCpuSyncLockDoor() has been called by one elected CPU.
> > +
> > +**/
> > +RETURN_STATUS
> > +EFIAPI
> > +SmmCpuSyncCheckInCpu (
> > + IN OUT SMM_CPU_SYNC_CTX *SmmCpuSyncCtx,
> > + IN UINTN CpuIndex
> > + )
> > +{
> > + SMM_CPU_SYNC_CTX *Ctx;
> > +
> > + if (SmmCpuSyncCtx == NULL) {
> > + return RETURN_INVALID_PARAMETER;
> > + }
>
> 10. Can we use "ASSERT" instead of if-check? We can explicitly mention the
> behavior in API comments.
An ASSERT can be added by the caller for the status if wanted. The API only follows its definition, right? Or should we add the ASSERT before the if-check?
>
> > +
> > + Ctx = (SMM_CPU_SYNC_CTX *)SmmCpuSyncCtx;
>
> 11. Why do we need the type cast?
Oh, yes, we don't need that; the original code defined the ctx as VOID, and I forgot to update this.
>
> > +
> > +/**
> > + Performs an atomic operation lock door for CPU checkin or checkout.
> > +
> > + After this function:
> > + CPU can not check in via SmmCpuSyncCheckInCpu().
> > + CPU can not check out via SmmCpuSyncCheckOutCpu().
> > + CPU can not get number of arrived CPU in SMI via
> > SmmCpuSyncGetArrivedCpuCount(). The number of
> > + arrived CPU in SMI will be returned in CpuCount.
> 12. I agree that after locking, cpu cannot "checkin".
> But can cpu "checkout" or "getArrivedcount"? I need to double check.
>
The existing implementation logic and the SMM_CPU_SYNC_CONTEXT definition don't support checkout or GetArrivedCpuCount after locking. I don't want the implementation to support such a case, so I added that wording. Or can we directly return an unsupported status if locked? Then we can remove those words.
-=-=-=-=-=-=-=-=-=-=-=-
Groups.io Links: You receive all messages sent to this group.
View/Reply Online (#112089): https://edk2.groups.io/g/devel/message/112089
Mute This Topic: https://groups.io/mt/102889293/1787277
Group Owner: devel+owner@edk2.groups.io
Unsubscribe: https://edk2.groups.io/g/devel/unsub [importer@patchew.org]
-=-=-=-=-=-=-=-=-=-=-=-
© 2016 - 2026 Red Hat, Inc.