From: Tom Lendacky <thomas.lendacky@amd.com>
VMGEXIT is a new instruction used for Hypervisor/Guest communication when
running as an SEV-ES guest. A VMGEXIT will cause an automatic exit (AE)
to occur, resulting in a #VMEXIT with an exit code value of 0x403.
To support VMGEXIT, define the VMGEXIT assembler routine to issue the
instruction (rep; vmmcall), the GHCB structure and some helper functions
for communicating register information between the guest and the
hypervisor.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
MdePkg/Library/BaseLib/BaseLib.inf | 1 +
MdePkg/Include/Library/BaseLib.h | 14 ++
UefiCpuPkg/Include/Register/Amd/Ghcb.h | 197 ++++++++++++++++++++++++
MdePkg/Library/BaseLib/X64/GccInline.c | 17 ++
MdePkg/Library/BaseLib/X64/VmgExit.nasm | 38 +++++
5 files changed, 267 insertions(+)
create mode 100644 UefiCpuPkg/Include/Register/Amd/Ghcb.h
create mode 100644 MdePkg/Library/BaseLib/X64/VmgExit.nasm
diff --git a/MdePkg/Library/BaseLib/BaseLib.inf b/MdePkg/Library/BaseLib/BaseLib.inf
index 3586beb0ab5c..a41401340f95 100644
--- a/MdePkg/Library/BaseLib/BaseLib.inf
+++ b/MdePkg/Library/BaseLib/BaseLib.inf
@@ -286,6 +286,7 @@ [Sources.X64]
X64/ReadCr2.nasm| MSFT
X64/ReadCr0.nasm| MSFT
X64/ReadEflags.nasm| MSFT
+ X64/VmgExit.nasm | MSFT
X64/Non-existing.c
diff --git a/MdePkg/Include/Library/BaseLib.h b/MdePkg/Include/Library/BaseLib.h
index 2a75bc023f56..80bd5cf57a72 100644
--- a/MdePkg/Include/Library/BaseLib.h
+++ b/MdePkg/Include/Library/BaseLib.h
@@ -7880,6 +7880,20 @@ AsmLfence (
VOID
);
+/**
+ Executes a VMGEXIT instruction (VMMCALL with a REP prefix)
+
+ Executes a VMGEXIT instruction. This function is only available on IA-32 and
+ x64.
+
+**/
+VOID
+EFIAPI
+AsmVmgExit (
+ VOID
+ );
+
+
/**
Patch the immediate operand of an IA32 or X64 instruction such that the byte,
word, dword or qword operand is encoded at the end of the instruction's
diff --git a/UefiCpuPkg/Include/Register/Amd/Ghcb.h b/UefiCpuPkg/Include/Register/Amd/Ghcb.h
new file mode 100644
index 000000000000..e9fd116fac25
--- /dev/null
+++ b/UefiCpuPkg/Include/Register/Amd/Ghcb.h
@@ -0,0 +1,197 @@
+
+#ifndef __GHCB_H__
+#define __GHCB_H__
+
+#include <Protocol/DebugSupport.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+
+#define UD_EXCEPTION 6
+#define GP_EXCEPTION 13
+
+#define GHCB_VERSION_MIN 1
+#define GHCB_VERSION_MAX 1
+
+#define GHCB_STANDARD_USAGE 0
+
+typedef enum {
+ SvmExitDr7Read = 0x27,
+ SvmExitDr7Write = 0x37,
+ SvmExitRdtsc = 0x6E,
+ SvmExitRdpmc,
+ SvmExitCpuid = 0x72,
+ SvmExitInvd = 0x76,
+ SvmExitIoioProt = 0x7B,
+ SvmExitMsr,
+ SvmExitVmmCall = 0x81,
+ SvmExitRdtscp = 0x87,
+ SvmExitWbinvd = 0x89,
+ SvmExitMonitor,
+ SvmExitMwait,
+ SvmExitNpf = 0x400,
+
+ // VMG special exits
+ SvmExitMmioRead = 0x80000001,
+ SvmExitMmioWrite,
+ SvmExitNmiComplete,
+ SvmExitApResetHold,
+
+ SvmExitUnsupported = 0x8000FFFF,
+} SVM_EXITCODE;
+
+typedef enum {
+ GhcbCpl = 25,
+ GhcbRflags = 46,
+ GhcbRip,
+ GhcbRsp = 59,
+ GhcbRax = 63,
+ GhcbRcx = 97,
+ GhcbRdx,
+ GhcbRbx,
+ GhcbRbp = 101,
+ GhcbRsi,
+ GhcbRdi,
+ GhcbR8,
+ GhcbR9,
+ GhcbR10,
+ GhcbR11,
+ GhcbR12,
+ GhcbR13,
+ GhcbR14,
+ GhcbR15,
+ GhcbXCr0 = 125,
+} GHCB_REGISTER;
+
+typedef struct {
+ UINT8 Reserved1[203];
+ UINT8 Cpl;
+ UINT8 Reserved2[148];
+ UINT64 Dr7;
+ UINT8 Reserved3[144];
+ UINT64 Rax;
+ UINT8 Reserved4[264];
+ UINT64 Rcx;
+ UINT64 Rdx;
+ UINT64 Rbx;
+ UINT8 Reserved5[112];
+ UINT64 SwExitCode;
+ UINT64 SwExitInfo1;
+ UINT64 SwExitInfo2;
+ UINT64 SwScratch;
+ UINT8 Reserved6[56];
+ UINT64 XCr0;
+ UINT8 ValidBitmap[16];
+ UINT64 X87StateGpa;
+ UINT8 Reserved7[1016];
+} __attribute__ ((__packed__)) GHCB_SAVE_AREA;
+
+typedef struct {
+ GHCB_SAVE_AREA SaveArea;
+ UINT8 SharedBuffer[2032];
+ UINT8 Reserved1[10];
+ UINT16 ProtocolVersion;
+ UINT32 GhcbUsage;
+} __attribute__ ((__packed__)) __attribute__ ((aligned(SIZE_4KB))) GHCB;
+
+typedef union {
+ struct {
+ UINT32 Lower32Bits;
+ UINT32 Upper32Bits;
+ } Elements;
+
+ UINT64 Uint64;
+} GHCB_EXIT_INFO;
+
+static inline
+BOOLEAN
+GhcbIsRegValid(
+ GHCB *Ghcb,
+ GHCB_REGISTER Reg
+ )
+{
+ UINT32 RegIndex = Reg / 8;
+ UINT32 RegBit = Reg & 0x07;
+
+ return (Ghcb->SaveArea.ValidBitmap[RegIndex] & (1 << RegBit));
+}
+
+static inline
+VOID
+GhcbSetRegValid(
+ GHCB *Ghcb,
+ GHCB_REGISTER Reg
+ )
+{
+ UINT32 RegIndex = Reg / 8;
+ UINT32 RegBit = Reg & 0x07;
+
+ Ghcb->SaveArea.ValidBitmap[RegIndex] |= (1 << RegBit);
+}
+
+static inline
+VOID
+VmgException(
+ UINTN Exception
+ )
+{
+ switch (Exception) {
+ case UD_EXCEPTION:
+ case GP_EXCEPTION:
+ break;
+ default:
+ ASSERT (0);
+ }
+}
+
+static inline
+UINTN
+VmgExit(
+ GHCB *Ghcb,
+ UINT64 ExitCode,
+ UINT64 ExitInfo1,
+ UINT64 ExitInfo2
+ )
+{
+ GHCB_EXIT_INFO ExitInfo;
+ UINTN Reason, Action;
+
+ Ghcb->SaveArea.SwExitCode = ExitCode;
+ Ghcb->SaveArea.SwExitInfo1 = ExitInfo1;
+ Ghcb->SaveArea.SwExitInfo2 = ExitInfo2;
+ AsmVmgExit ();
+
+ if (!Ghcb->SaveArea.SwExitInfo1) {
+ return 0;
+ }
+
+ ExitInfo.Uint64 = Ghcb->SaveArea.SwExitInfo1;
+ Reason = ExitInfo.Elements.Upper32Bits;
+ Action = ExitInfo.Elements.Lower32Bits;
+ switch (Action) {
+ case 1:
+ VmgException (Reason);
+ break;
+ default:
+ ASSERT (0);
+ }
+
+ return Reason;
+}
+
+static inline
+VOID
+VmgInit(
+ GHCB *Ghcb
+ )
+{
+ SetMem (&Ghcb->SaveArea, sizeof (Ghcb->SaveArea), 0);
+}
+
+static inline
+VOID
+VmgDone(
+ GHCB *Ghcb
+ )
+{
+}
+#endif
diff --git a/MdePkg/Library/BaseLib/X64/GccInline.c b/MdePkg/Library/BaseLib/X64/GccInline.c
index 154ce1f57e92..17539caa0798 100644
--- a/MdePkg/Library/BaseLib/X64/GccInline.c
+++ b/MdePkg/Library/BaseLib/X64/GccInline.c
@@ -1798,3 +1798,20 @@ AsmFlushCacheLine (
}
+/**
+ Executes a VMGEXIT instruction.
+
+ Executes a VMGEXIT instruction. This function is only available on IA-32 and
+ X64.
+
+**/
+VOID
+EFIAPI
+AsmVmgExit (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("rep; vmmcall":::"memory");
+}
+
+
diff --git a/MdePkg/Library/BaseLib/X64/VmgExit.nasm b/MdePkg/Library/BaseLib/X64/VmgExit.nasm
new file mode 100644
index 000000000000..b673bb94b60d
--- /dev/null
+++ b/MdePkg/Library/BaseLib/X64/VmgExit.nasm
@@ -0,0 +1,38 @@
+;------------------------------------------------------------------------------
+;
+; Copyright (c) 2019, Advanced Micro Devices, Inc. All rights reserved.<BR>
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; VmgExit.Asm
+;
+; Abstract:
+;
+; AsmVmgExit function
+;
+; Notes:
+;
+;------------------------------------------------------------------------------
+
+ DEFAULT REL
+ SECTION .text
+
+;------------------------------------------------------------------------------
+; VOID
+; EFIAPI
+; AsmVmgExit (
+; VOID
+; );
+;------------------------------------------------------------------------------
+global ASM_PFX(AsmVmgExit)
+ASM_PFX(AsmVmgExit):
+ rep; vmmcall
+ ret
+
--
2.17.1
Tom,
1. It's not common practice to have static inline functions defined in a header file. Who is going to call them?
2. I recently made a change to move the AMD register definitions from UefiCpuPkg to MdePkg/Include/Register/Amd. Do you think that's a good idea, and can you please put your new register definitions in MdePkg as well?
3. What happens if the "rep; vmmcall" is executed on an Intel processor?
Thanks,
Ray
> -----Original Message-----
> From: Lendacky, Thomas <Thomas.Lendacky@amd.com>
> Sent: Monday, August 19, 2019 2:36 PM
> To: devel@edk2.groups.io
> Cc: Justen, Jordan L <jordan.l.justen@intel.com>; Laszlo Ersek <lersek@redhat.com>; Ard Biesheuvel
> <ard.biesheuvel@linaro.org>; Kinney, Michael D <michael.d.kinney@intel.com>; Gao, Liming <liming.gao@intel.com>; Dong,
> Eric <eric.dong@intel.com>; Ni, Ray <ray.ni@intel.com>; Singh, Brijesh <brijesh.singh@amd.com>
> Subject: [RFC PATCH 08/28] MdePkg/BaseLib: Implement the VMGEXIT support
Hi Ray,
On 8/19/19 4:47 PM, Ni, Ray wrote:
> Tom,
> 1. It's not common practice to have static inline functions defined in a header file. Who is going to call them?
The functions are called from two locations, which is why I made them
static inline. I'm new to EDK2 programming, so I'm sure there will be a
number of things I do that will need to be changed.
Should I make them non-inline and move them to the BaseLib in MdePkg or
somewhere else?
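To give a concrete picture of a caller, here is a rough sketch of how a
#VC exception handler might forward a CPUID intercept through these
helpers (illustrative only, not part of this series; the function name and
the exact register set are made up for the example):

  #include <Register/Amd/Ghcb.h>   // also pulls in Protocol/DebugSupport.h

  STATIC
  UINTN
  VcHandleCpuid (
    IN OUT GHCB                    *Ghcb,
    IN OUT EFI_SYSTEM_CONTEXT_X64  *Regs
    )
  {
    UINTN  Status;

    VmgInit (Ghcb);

    //
    // Marshal the registers the hypervisor needs and flag them in the
    // valid bitmap so it knows which GHCB fields to consume.
    //
    Ghcb->SaveArea.Rax = Regs->Rax;
    GhcbSetRegValid (Ghcb, GhcbRax);
    Ghcb->SaveArea.Rcx = Regs->Rcx;
    GhcbSetRegValid (Ghcb, GhcbRcx);

    Status = VmgExit (Ghcb, SvmExitCpuid, 0, 0);
    if (Status != 0) {
      return Status;
    }

    //
    // Copy the results back only if the hypervisor marked them valid.
    //
    if (!GhcbIsRegValid (Ghcb, GhcbRax) || !GhcbIsRegValid (Ghcb, GhcbRbx) ||
        !GhcbIsRegValid (Ghcb, GhcbRcx) || !GhcbIsRegValid (Ghcb, GhcbRdx)) {
      return SvmExitUnsupported;
    }

    Regs->Rax = Ghcb->SaveArea.Rax;
    Regs->Rbx = Ghcb->SaveArea.Rbx;
    Regs->Rcx = Ghcb->SaveArea.Rcx;
    Regs->Rdx = Ghcb->SaveArea.Rdx;

    VmgDone (Ghcb);
    return 0;
  }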
> 2. I recently made a change to move the AMD register definitions from UefiCpuPkg to MdePkg/Include/Register/Amd. Do you think that's a good idea, and can you please put your new register definitions in MdePkg as well?
Ok, let me pull the latest tree and rebase. This patchset is currently
based on a July 17th patch:
cce01f538fb4 ("MdePkg/BaseLib: Base64Decode(): don't declare variables in nested blocks")
so I'm probably behind the change that you made if it was recent.
> 3. What happens if the "rep; vmmcall" is executed on an Intel processor?
Good question, I'm not sure. Does EDK2 have a way to prevent executing
unsupported instructions? Currently, this instruction will
only be invoked when it is known that SEV-ES is active.
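For illustration, the kind of guard the callers rely on looks roughly like
this (a minimal sketch, not part of this series; the function name is made
up, and the CPUID leaf, MSR number and bit positions are taken from the APM
and should be double-checked against the spec):

  #include <Library/BaseLib.h>

  #define CPUID_EXTENDED_MAX_LEAF  0x80000000
  #define CPUID_MEM_ENCRYPT_INFO   0x8000001F
  #define MSR_SEV_STATUS           0xC0010131

  STATIC
  BOOLEAN
  SevEsIsEnabled (
    VOID
    )
  {
    UINT32  MaxLeaf;
    UINT32  Eax;

    //
    // Only read the SEV_STATUS MSR when the memory-encryption CPUID leaf
    // exists and reports SEV support; that keeps the check safe on
    // processors (including Intel) that implement neither.
    //
    AsmCpuid (CPUID_EXTENDED_MAX_LEAF, &MaxLeaf, NULL, NULL, NULL);
    if (MaxLeaf < CPUID_MEM_ENCRYPT_INFO) {
      return FALSE;
    }

    AsmCpuid (CPUID_MEM_ENCRYPT_INFO, &Eax, NULL, NULL, NULL);
    if ((Eax & BIT1) == 0) {
      return FALSE;
    }

    //
    // Bit 1 of SEV_STATUS reports that SEV-ES is active for this guest.
    //
    return (AsmReadMsr64 (MSR_SEV_STATUS) & BIT1) != 0;
  }

On an Intel processor that check fails at the CPUID step, so the VMMCALL
(which would otherwise raise an invalid-opcode exception there) is never
issued.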
Thanks,
Tom
>
> Thanks,
> Ray