Processors with SEV-ES enabled keep following the original logic; in all
other cases the APs will be put into 64-bit mode before the boot process
is handed off to the OS. To achieve this, this patch duplicates
RelocateApLoop and the related code into a dedicated path for the
processors with SEV-ES.
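In outline, RelocateApLoop () then picks the loop image by wakeup method
(a sketch of the dispatch; the complete code is in the diff below):

  if (CpuMpData->UseSevEsAPMethod) {
    //
    // SEV-ES: use the duplicated loop, which keeps the original behavior
    //
    AsmRelocateApLoopFuncAmdSev = (ASM_RELOCATE_AP_LOOP_AMDSEV)(UINTN)mReservedApLoopFunc;
  } else {
    //
    // All other processors: use the generic loop
    //
    AsmRelocateApLoopFunc = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;
  }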
Cc: Guo Dong <guo.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Sean Rhodes <sean@starlabs.systems>
Cc: James Lu <james.lu@intel.com>
Cc: Gua Guo <gua.guo@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Signed-off-by: Yuanhao Xie <yuanhao.xie@intel.com>
---
UefiCpuPkg/Library/MpInitLib/DxeMpLib.c | 68 ++++++++++++++++++++++++++++++++++++++++++++------------------------
UefiCpuPkg/Library/MpInitLib/MpEqu.inc | 22 ++++++++++++----------
UefiCpuPkg/Library/MpInitLib/MpLib.h | 30 +++++++++++++++++++++++++++++-
UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm | 169 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm | 5 ++++-
5 files changed, 258 insertions(+), 36 deletions(-)
diff --git a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
index a84e9e33ba..dd935a79d3 100644
--- a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
+++ b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
@@ -1,7 +1,7 @@
/** @file
MP initialize support functions for DXE phase.
- Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.<BR>
+ Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -378,32 +378,44 @@ RelocateApLoop (
IN OUT VOID *Buffer
)
{
- CPU_MP_DATA *CpuMpData;
- BOOLEAN MwaitSupport;
- ASM_RELOCATE_AP_LOOP AsmRelocateApLoopFunc;
- UINTN ProcessorNumber;
- UINTN StackStart;
+ CPU_MP_DATA *CpuMpData;
+ BOOLEAN MwaitSupport;
+ ASM_RELOCATE_AP_LOOP AsmRelocateApLoopFunc;
+ ASM_RELOCATE_AP_LOOP_AMDSEV AsmRelocateApLoopFuncAmdSev;
+ UINTN ProcessorNumber;
+ UINTN StackStart;
MpInitLibWhoAmI (&ProcessorNumber);
CpuMpData = GetCpuMpData ();
MwaitSupport = IsMwaitSupport ();
if (CpuMpData->UseSevEsAPMethod) {
- StackStart = CpuMpData->SevEsAPResetStackStart;
+ StackStart = CpuMpData->SevEsAPResetStackStart;
+ AsmRelocateApLoopFuncAmdSev = (ASM_RELOCATE_AP_LOOP_AMDSEV)(UINTN)mReservedApLoopFunc;
+ AsmRelocateApLoopFuncAmdSev (
+ MwaitSupport,
+ CpuMpData->ApTargetCState,
+ CpuMpData->PmCodeSegment,
+ StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+ (UINTN)&mNumberToFinish,
+ CpuMpData->Pm16CodeSegment,
+ CpuMpData->SevEsAPBuffer,
+ CpuMpData->WakeupBuffer
+ );
} else {
- StackStart = mReservedTopOfApStack;
+ StackStart = mReservedTopOfApStack;
+ AsmRelocateApLoopFunc = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;
+ AsmRelocateApLoopFunc (
+ MwaitSupport,
+ CpuMpData->ApTargetCState,
+ CpuMpData->PmCodeSegment,
+ StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+ (UINTN)&mNumberToFinish,
+ CpuMpData->Pm16CodeSegment,
+ CpuMpData->SevEsAPBuffer,
+ CpuMpData->WakeupBuffer
+ );
}
- AsmRelocateApLoopFunc = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;
- AsmRelocateApLoopFunc (
- MwaitSupport,
- CpuMpData->ApTargetCState,
- CpuMpData->PmCodeSegment,
- StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
- (UINTN)&mNumberToFinish,
- CpuMpData->Pm16CodeSegment,
- CpuMpData->SevEsAPBuffer,
- CpuMpData->WakeupBuffer
- );
//
// It should never reach here
//
@@ -582,11 +594,19 @@ InitMpGlobalData (
mReservedTopOfApStack = (UINTN)Address + ApSafeBufferSize;
ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
- CopyMem (
- mReservedApLoopFunc,
- CpuMpData->AddressMap.RelocateApLoopFuncAddress,
- CpuMpData->AddressMap.RelocateApLoopFuncSize
- );
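+ //
+ // Only one loop image fits in the reserved buffer: copy the one that
+ // matches the AP wakeup method.
+ //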
+ if (CpuMpData->UseSevEsAPMethod) {
+ CopyMem (
+ mReservedApLoopFunc,
+ CpuMpData->AddressMap.RelocateApLoopFuncAddressAmdSev,
+ CpuMpData->AddressMap.RelocateApLoopFuncSizeAmdSev
+ );
+ } else {
+ CopyMem (
+ mReservedApLoopFunc,
+ CpuMpData->AddressMap.RelocateApLoopFuncAddress,
+ CpuMpData->AddressMap.RelocateApLoopFuncSize
+ );
+ }
Status = gBS->CreateEvent (
EVT_TIMER | EVT_NOTIFY_SIGNAL,
diff --git a/UefiCpuPkg/Library/MpInitLib/MpEqu.inc b/UefiCpuPkg/Library/MpInitLib/MpEqu.inc
index ebadcc6fb3..1472ef2024 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpEqu.inc
+++ b/UefiCpuPkg/Library/MpInitLib/MpEqu.inc
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2015 - 2023, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
@@ -21,15 +21,17 @@ CPU_SWITCH_STATE_LOADED equ 2
; Equivalent NASM structure of MP_ASSEMBLY_ADDRESS_MAP
;
struc MP_ASSEMBLY_ADDRESS_MAP
- .RendezvousFunnelAddress CTYPE_UINTN 1
- .ModeEntryOffset CTYPE_UINTN 1
- .RendezvousFunnelSize CTYPE_UINTN 1
- .RelocateApLoopFuncAddress CTYPE_UINTN 1
- .RelocateApLoopFuncSize CTYPE_UINTN 1
- .ModeTransitionOffset CTYPE_UINTN 1
- .SwitchToRealNoNxOffset CTYPE_UINTN 1
- .SwitchToRealPM16ModeOffset CTYPE_UINTN 1
- .SwitchToRealPM16ModeSize CTYPE_UINTN 1
+ .RendezvousFunnelAddress CTYPE_UINTN 1
+ .ModeEntryOffset CTYPE_UINTN 1
+ .RendezvousFunnelSize CTYPE_UINTN 1
+ .RelocateApLoopFuncAddress CTYPE_UINTN 1
+ .RelocateApLoopFuncSize CTYPE_UINTN 1
+ .RelocateApLoopFuncAddressAmdSev CTYPE_UINTN 1
+ .RelocateApLoopFuncSizeAmdSev CTYPE_UINTN 1
+ .ModeTransitionOffset CTYPE_UINTN 1
+ .SwitchToRealNoNxOffset CTYPE_UINTN 1
+ .SwitchToRealPM16ModeOffset CTYPE_UINTN 1
+ .SwitchToRealPM16ModeSize CTYPE_UINTN 1
endstruc
;
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.h b/UefiCpuPkg/Library/MpInitLib/MpLib.h
index f5086e497e..fe60084333 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.h
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.h
@@ -1,7 +1,7 @@
/** @file
Common header file for MP Initialize Library.
- Copyright (c) 2016 - 2022, Intel Corporation. All rights reserved.<BR>
+ Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2020, AMD Inc. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -179,6 +179,8 @@ typedef struct {
UINTN RendezvousFunnelSize;
UINT8 *RelocateApLoopFuncAddress;
UINTN RelocateApLoopFuncSize;
+ UINT8 *RelocateApLoopFuncAddressAmdSev;
+ UINTN RelocateApLoopFuncSizeAmdSev;
UINTN ModeTransitionOffset;
UINTN SwitchToRealNoNxOffset;
UINTN SwitchToRealPM16ModeOffset;
@@ -311,6 +313,7 @@ typedef struct {
#define AP_SAFE_STACK_SIZE 128
#define AP_RESET_STACK_SIZE AP_SAFE_STACK_SIZE
+STATIC_ASSERT ((AP_SAFE_STACK_SIZE & (CPU_STACK_ALIGNMENT - 1)) == 0, "AP_SAFE_STACK_SIZE is not aligned with CPU_STACK_ALIGNMENT");
#pragma pack(1)
@@ -373,6 +376,31 @@ typedef
IN UINTN WakeupBuffer
);
+/**
+ Assembly code to place the AP into a safe loop on AMD processors with
+ SEV enabled. Place the AP into the targeted C-State if MONITOR is
+ supported; otherwise place the AP into the hlt state.
+ Place the AP in protected mode if the current mode is long mode, since
+ the AP may be woken up by a hardware event and must avoid accessing
+ page tables that may be unavailable while booting to the OS.
+ @param[in] MwaitSupport TRUE indicates MONITOR is supported.
+ FALSE indicates MONITOR is not supported.
+ @param[in] ApTargetCState Target C-State value.
+ @param[in] PmCodeSegment Protected mode code segment value.
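+ @param[in] TopOfApStack Top of the AP's stack.
+ @param[in] NumberToFinish Address of the count of APs still to finish;
+ each AP decrements it on arrival.
+ @param[in] Pm16CodeSegment 16-bit protected mode code segment value.
+ @param[in] SevEsAPJumpTable SEV-ES AP jump table buffer address, or 0
+ when SEV-ES is not active.
+ @param[in] WakeupBuffer Wakeup buffer address.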
+**/
+typedef
+ VOID
+(EFIAPI *ASM_RELOCATE_AP_LOOP_AMDSEV)(
+ IN BOOLEAN MwaitSupport,
+ IN UINTN ApTargetCState,
+ IN UINTN PmCodeSegment,
+ IN UINTN TopOfApStack,
+ IN UINTN NumberToFinish,
+ IN UINTN Pm16CodeSegment,
+ IN UINTN SevEsAPJumpTable,
+ IN UINTN WakeupBuffer
+ );
+
/**
Assembly code to get starting address and size of the rendezvous entry for APs.
Information for fixing a jump instruction in the code is also returned.
diff --git a/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm b/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
index 7c2469f9c5..6b48913306 100644
--- a/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
+++ b/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
@@ -346,3 +346,172 @@ PM16Mode:
iret
SwitchToRealProcEnd:
+;-------------------------------------------------------------------------------------
+; AsmRelocateApLoopAmdSev (MwaitSupport, ApTargetCState, PmCodeSegment, TopOfApStack, CountTofinish, Pm16CodeSegment, SevEsAPJumpTable, WakeupBuffer);
+;-------------------------------------------------------------------------------------
+
+AsmRelocateApLoopStartAmdSev:
+BITS 64
+ cmp qword [rsp + 56], 0 ; SevEsAPJumpTable
+ je NoSevEsAmdSev
+
+ ;
+ ; Perform some SEV-ES related setup before leaving 64-bit mode
+ ;
+ push rcx
+ push rdx
+
+ ;
+ ; Get the RDX reset value using CPUID
+ ;
+ mov rax, 1
+ cpuid
+ mov rsi, rax ; Save off the reset value for RDX
+
+ ;
+ ; Prepare the GHCB for the AP_HLT_LOOP VMGEXIT call
+ ; - Must be done while in 64-bit long mode so that writes to
+ ; the GHCB memory will be unencrypted.
+ ; - No NAE events can be generated once this is set otherwise
+ ; the AP_RESET_HOLD SW_EXITCODE will be overwritten.
+ ;
+ mov rcx, 0xc0010130
+ rdmsr ; Retrieve current GHCB address
+ shl rdx, 32
+ or rdx, rax
+
+ mov rdi, rdx
+ xor rax, rax
+ mov rcx, 0x800
+ shr rcx, 3
+ rep stosq ; Clear the GHCB
+
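+ ;
+ ; GHCB layout note: SwExitCode is the qword at offset 0x390 and the
+ ; valid bitmap is at offset 0x3f0, so the SwExitCode valid bit index
+ ; is 0x390 / 8 = 114 (SwExitInfo1 and SwExitInfo2 follow at 115, 116)
+ ;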
+ mov rax, 0x80000004 ; VMGEXIT AP_RESET_HOLD
+ mov [rdx + 0x390], rax
+ mov rax, 114 ; Set SwExitCode valid bit
+ bts [rdx + 0x3f0], rax
+ inc rax ; Set SwExitInfo1 valid bit
+ bts [rdx + 0x3f0], rax
+ inc rax ; Set SwExitInfo2 valid bit
+ bts [rdx + 0x3f0], rax
+
+ pop rdx
+ pop rcx
+
+NoSevEsAmdSev:
+ cli ; Disable interrupt before switching to 32-bit mode
+ mov rax, [rsp + 40] ; CountTofinish
+ lock dec dword [rax] ; (*CountTofinish)--
+
+ mov r10, [rsp + 48] ; Pm16CodeSegment
+ mov rax, [rsp + 56] ; SevEsAPJumpTable
+ mov rbx, [rsp + 64] ; WakeupBuffer
+ mov rsp, r9 ; TopOfApStack
+
+ push rax ; Save SevEsAPJumpTable
+ push rbx ; Save WakeupBuffer
+ push r10 ; Save Pm16CodeSegment
+ push rcx ; Save MwaitSupport
+ push rdx ; Save ApTargetCState
+
+ lea rax, [PmEntryAmdSev] ; rax <- The start address of transition code
+
+ push r8
+ push rax
+
+ ;
+ ; Clear R8 - R15, for reset, before going into 32-bit mode
+ ;
+ xor r8, r8
+ xor r9, r9
+ xor r10, r10
+ xor r11, r11
+ xor r12, r12
+ xor r13, r13
+ xor r14, r14
+ xor r15, r15
+
+ ;
+ ; Far return into 32-bit mode
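+ ; (CS = PmCodeSegment in r8, RIP = PmEntryAmdSev, both pushed above)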
+ ;
+o64 retf
+
+BITS 32
+PmEntryAmdSev:
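+ ; Leave long mode: paging (CR0.PG) must be disabled first, then
+ ; EFER.LME and CR4.PAE can be cleared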
+ mov eax, cr0
+ btr eax, 31 ; Clear CR0.PG
+ mov cr0, eax ; Disable paging and caches
+
+ mov ecx, 0xc0000080
+ rdmsr
+ and ah, ~ 1 ; Clear LME
+ wrmsr
+ mov eax, cr4
+ and al, ~ (1 << 5) ; Clear PAE
+ mov cr4, eax
+
+ pop edx ; ApTargetCState (low dword)
+ add esp, 4 ; Discard the upper dword
+ pop ecx ; MwaitSupport (low dword)
+ add esp, 4 ; Discard the upper dword
+
+MwaitCheckAmdSev:
+ cmp cl, 1 ; Check mwait-monitor support
+ jnz HltLoopAmdSev
+ mov ebx, edx ; Save C-State to ebx
+MwaitLoopAmdSev:
+ cli
+ mov eax, esp ; Set Monitor Address
+ xor ecx, ecx ; ecx = 0
+ xor edx, edx ; edx = 0
+ monitor
+ mov eax, ebx ; Mwait Cx, Target C-State per eax[7:4]
+ shl eax, 4
+ mwait
+ jmp MwaitLoopAmdSev
+
+HltLoopAmdSev:
+ pop edx ; PM16CodeSegment
+ add esp, 4
+ pop ebx ; WakeupBuffer
+ add esp, 4
+ pop eax ; SevEsAPJumpTable
+ add esp, 4
+ cmp eax, 0 ; Check for SEV-ES
+ je DoHltAmdSev
+
+ cli
+ ;
+ ; SEV-ES is enabled, use VMGEXIT (GHCB information already
+ ; set by caller)
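+ ; VMGEXIT is the REP-prefixed encoding of VMMCALL (F3 0F 01 D9)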
+ ;
+BITS 64
+ rep vmmcall
+BITS 32
+
+ ;
+ ; Back from VMGEXIT AP_HLT_LOOP
+ ; Push the FLAGS/CS/IP values to use
+ ;
+ push word 0x0002 ; EFLAGS
+ xor ecx, ecx
+ mov cx, [eax + 2] ; CS
+ push cx
+ mov cx, [eax] ; IP
+ push cx
+ push word 0x0000 ; For alignment, will be discarded
+
+ push edx
+ push ebx
+
+ mov edx, esi ; Restore RDX reset value
+
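+ ; Far return to Pm16CodeSegment:WakeupBuffer pushed just above; the
+ ; 16-bit code there ends with an IRET that consumes the FLAGS/CS/IP
+ ; values pushed earlier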
+ retf
+
+DoHltAmdSev:
+ cli
+ hlt
+ jmp DoHltAmdSev
+
+BITS 64
+AsmRelocateApLoopEndAmdSev:
diff --git a/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm b/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
index 5d71995bf8..d36f8ba06d 100644
--- a/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
+++ b/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2015 - 2023, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
@@ -459,6 +459,9 @@ ASM_PFX(AsmGetAddressMap):
lea rax, [AsmRelocateApLoopStart]
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddress], rax
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSize], AsmRelocateApLoopEnd - AsmRelocateApLoopStart
+ lea rax, [AsmRelocateApLoopStartAmdSev]
+ mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddressAmdSev], rax
+ mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSizeAmdSev], AsmRelocateApLoopEndAmdSev - AsmRelocateApLoopStartAmdSev
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeTransitionOffset], Flat32Start - RendezvousFunnelProcStart
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealNoNxOffset], SwitchToRealProcStart - Flat32Start
mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeOffset], PM16Mode - RendezvousFunnelProcStart
--
2.36.1.windows.1
Hi,
> if (CpuMpData->UseSevEsAPMethod) {
> - StackStart = CpuMpData->SevEsAPResetStackStart;
> + StackStart = CpuMpData->SevEsAPResetStackStart;
> + AsmRelocateApLoopFuncAmdSev = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;
> + AsmRelocateApLoopFuncAmdSev (
> + MwaitSupport,
> + CpuMpData->ApTargetCState,
> + CpuMpData->PmCodeSegment,
> + StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
> + (UINTN)&mNumberToFinish,
> + CpuMpData->Pm16CodeSegment,
> + CpuMpData->SevEsAPBuffer,
> + CpuMpData->WakeupBuffer
> + );
Good. Thanks for updating it.
> diff --git a/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm b/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
> index 7c2469f9c5..6b48913306 100644
> --- a/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
> +++ b/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
> @@ -346,3 +346,172 @@ PM16Mode:
> iret
>
> SwitchToRealProcEnd:
> +;-------------------------------------------------------------------------------------
> +; AsmRelocateApLoopAmdSev (MwaitSupport, ApTargetCState, PmCodeSegment, TopOfApStack, CountTofinish, Pm16CodeSegment, SevEsAPJumpTable, WakeupBuffer);
> +;-------------------------------------------------------------------------------------
> +
> +AsmRelocateApLoopStartAmdSev:
> +BITS 64
Hmm, so here you are adding a renamed copy of the AP loop.
Then, in patch #5, you are rewriting the code at the old location.
I'd suggest moving the code instead of copying it, i.e. have one patch
move (and rename) the AmdSev AP loop, and another patch add the new code
for the generic AP loop. That should result in patches which are easier
to read, especially since the new generic AP loop code is not mixed with
the AmdSev code removal.
take care,
Gerd
> I'd suggest moving the code instead of copying it, i.e. have one patch move (and rename) the AmdSev AP loop, and another patch add the new code for the generic AP loop. That should result in patches which are easier to read, especially since the new generic AP loop code is not mixed with the AmdSev code removal.
Agree. I will do so.
Thanks
Yuanhao
-----Original Message-----
From: Gerd Hoffmann <kraxel@redhat.com>
Sent: Tuesday, February 21, 2023 5:23 PM
To: Xie, Yuanhao <yuanhao.xie@intel.com>
Cc: devel@edk2.groups.io; Dong, Guo <guo.dong@intel.com>; Ni, Ray <ray.ni@intel.com>; Rhodes, Sean <sean@starlabs.systems>; Lu, James <james.lu@intel.com>; Guo, Gua <gua.guo@intel.com>; Tom Lendacky <thomas.lendacky@amd.com>; Laszlo Ersek <lersek@redhat.com>
Subject: Re: [Patch V2 1/5] UefiCpuPkg: Duplicate RelocateApLoop for the processors with SEV-ES.