[PATCH 22/22] KVM: nSVM: enable GMET for guests

Paolo Bonzini posted 22 patches 2 weeks ago
There is a newer version of this series
[PATCH 22/22] KVM: nSVM: enable GMET for guests
Posted by Paolo Bonzini 2 weeks ago
All that needs to be done is moving the GMET bit from vmcb12 to
vmcb02.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 3 +++
 arch/x86/kvm/svm/svm.c    | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index d69bcf52f948..397e9afecb78 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -774,6 +774,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 		vmcb02->control.bus_lock_counter = 0;
 
 	vmcb02->control.nested_ctl &= ~SVM_NESTED_CTL_GMET_ENABLE;
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_GMET))
+		vmcb02->control.nested_ctl |=
+			(svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_GMET_ENABLE);
 
 	/* Done at vmrun: asid.  */
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d3b69eb3242b..4a0d97e70dc2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5294,6 +5294,9 @@ static __init void svm_set_cpu_caps(void)
 		if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
 			kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
 
+		if (boot_cpu_has(X86_FEATURE_GMET))
+			kvm_cpu_cap_set(X86_FEATURE_GMET);
+
 		if (vgif)
 			kvm_cpu_cap_set(X86_FEATURE_VGIF);
 
-- 
2.52.0
Re: [PATCH 22/22] KVM: nSVM: enable GMET for guests
Posted by Jon Kohler 1 week, 3 days ago

> On Mar 20, 2026, at 8:09 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> 
> All that needs to be done is moving the GMET bit from vmcb12 to
> vmcb02.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> arch/x86/kvm/svm/nested.c | 3 +++
> arch/x86/kvm/svm/svm.c    | 3 +++
> 2 files changed, 6 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index d69bcf52f948..397e9afecb78 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -774,6 +774,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
> vmcb02->control.bus_lock_counter = 0;
> 
> vmcb02->control.nested_ctl &= ~SVM_NESTED_CTL_GMET_ENABLE;
> + if (guest_cpu_cap_has(vcpu, X86_FEATURE_GMET))
> + vmcb02->control.nested_ctl |=
> + (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_GMET_ENABLE);
> 
> /* Done at vmrun: asid.  */
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index d3b69eb3242b..4a0d97e70dc2 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -5294,6 +5294,9 @@ static __init void svm_set_cpu_caps(void)
> if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
> kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
> 
> + if (boot_cpu_has(X86_FEATURE_GMET))
> + kvm_cpu_cap_set(X86_FEATURE_GMET);
> +
> if (vgif)
> kvm_cpu_cap_set(X86_FEATURE_VGIF);
> 
> -- 
> 2.52.0
> 

When I enable gmet on the guest, and try to boot with memory integrity
enabled in Windows 11 25H2 guest, the machine does not boot, but rather
gets stuck in an NPF loop and does not make any progress.

I added a snippet of the tracing statements I see when enabling tracing
as well as a snippet of the VM config I’m using, nothing too fancy.

Scratching my head a bit, dropping this here in case you’ve already seen
this issue.

18:43:27.211372

Analyze events for pid(s) 32072, all VCPUs:

                                 VM-EXIT    Samples  Samples%     Time%    Min Time    Max Time         Avg time

                                     npf      35947    99.98%   100.00%      0.31us    463.93us      6.34us ( +-   4.17% )
                               interrupt          5     0.01%     0.00%      0.92us      3.77us      1.91us ( +-  31.48% )
                                     npf          1     0.00%     0.00%      0.63us      0.63us      0.63us ( +-   0.00% )

Total Samples:35953, Total events handled time:227798.77us.

       CPU 0/KVM-32296   [112] .....  3170.471586: kvm_nested_vmexit: vcpu 0 reason npf rip 0xfffff814ff394160 info1 0x0000000200000007 info2 0x000001000225af80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000
       CPU 0/KVM-32296   [112] .....  3170.471586: kvm_page_fault: vcpu 0 rip 0xfffff814ff394160 address 0x000001000225af80 error_code 0x200000007
       CPU 0/KVM-32296   [112] d....  3170.471586: kvm_entry: vcpu 0, rip 0xfffff814ff394160 intr_info 0x00000000 error_code 0x00000000
       CPU 0/KVM-32296   [112] d....  3170.471586: kvm_exit: vcpu 0 reason npf rip 0xfffff814ff394160 info1 0x0000000200000007 info2 0x000001000225af80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000
       CPU 0/KVM-32296   [112] .....  3170.471586: kvm_nested_vmexit: vcpu 0 reason npf rip 0xfffff814ff394160 info1 0x0000000200000007 info2 0x000001000225af80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000
       CPU 0/KVM-32296   [112] .....  3170.471586: kvm_page_fault: vcpu 0 rip 0xfffff814ff394160 address 0x000001000225af80 error_code 0x200000007
       CPU 0/KVM-32296   [112] d....  3170.471587: kvm_entry: vcpu 0, rip 0xfffff814ff394160 intr_info 0x00000000 error_code 0x00000000
       CPU 0/KVM-32296   [112] d....  3170.471587: kvm_exit: vcpu 0 reason npf rip 0xfffff814ff394160 info1 0x0000000200000007 info2 0x000001000225af80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000
       CPU 0/KVM-32296   [112] .....  3170.471587: kvm_nested_vmexit: vcpu 0 reason npf rip 0xfffff814ff394160 info1 0x0000000200000007 info2 0x000001000225af80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000
       CPU 0/KVM-32296   [112] .....  3170.471587: kvm_page_fault: vcpu 0 rip 0xfffff814ff394160 address 0x000001000225af80 error_code 0x200000007
       CPU 0/KVM-32296   [112] d....  3170.471587: kvm_entry: vcpu 0, rip 0xfffff814ff394160 intr_info 0x00000000 error_code 0x00000000
       CPU 0/KVM-32296   [112] d....  3170.471588: kvm_exit: vcpu 0 reason npf rip 0xfffff814ff394160 info1 0x0000000200000007 info2 0x000001000225af80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000
       CPU 0/KVM-32296   [112] .....  3170.471588: kvm_nested_vmexit: vcpu 0 reason npf rip 0xfffff814ff394160 info1 0x0000000200000007 info2 0x000001000225af80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000
       CPU 0/KVM-32296   [112] .....  3170.471588: kvm_page_fault: vcpu 0 rip 0xfffff814ff394160 address 0x000001000225af80 error_code 0x200000007
       CPU 0/KVM-32296   [112] d....  3170.471588: kvm_entry: vcpu 0, rip 0xfffff814ff394160 intr_info 0x00000000 error_code 0x00000000
       CPU 0/KVM-32296   [112] d....  3170.471589: kvm_exit: vcpu 0 reason npf rip 0xfffff814ff394160 info1 0x0000000200000007 info2 0x000001000225af80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000

-machine pc-q35-rhel9.6.0,usb=off,smm=on,kernel_irqchip=split,dump-guest-core=off,mem-merge=off,pflash0=libvirt-pflash0-format,pflash1=libvirt-pflash1-storage,acpi=on -accel kvm -cpu EPYC-Genoa-v2,enforce,invtsc=on,svm=on,svme-addr-chk=on,gmet=on,pku=off,hv-time=on,tsc-frequency=2400000000,kvm-pv-eoi=on,hv-relaxed=on,hv-vapic=on,hv-spinlocks=0x2000,hv-vpindex=on,hv-runtime=on,hv-syn
ic=on,hv-stimer=on,hv-tlbflush=on,hv-ipi=on,hv-avic=on,l3-cache=on -smp 4,maxcpus=240,sockets=60,dies=1,clusters=1,cores=4,threads=1 
Re: [PATCH 22/22] KVM: nSVM: enable GMET for guests
Posted by Paolo Bonzini 1 week, 2 days ago
On Tue, Mar 24, 2026 at 8:57 PM Jon Kohler <jon@nutanix.com> wrote:
> On Mar 20, 2026, at 8:09 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> > vmcb02->control.nested_ctl &= ~SVM_NESTED_CTL_GMET_ENABLE;
> > if (guest_cpu_cap_has(vcpu, X86_FEATURE_GMET))
> >      vmcb02->control.nested_ctl |=
> >            (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_GMET_ENABLE);

The issue is with nNPT disabled; these four lines of code have to be
moved inside the "if (nested_npt_enabled(svm))".

(The giveaway is the kvmmmu:fast_page_fault event in the trace, which
never appears with shadow paging).

I have fixed Kai's reported issues and EPT page tests, and will post
the next version after doing some more testing.

Paolo
Re: [PATCH 22/22] KVM: nSVM: enable GMET for guests
Posted by Nikunj A. Dadhania 1 week, 2 days ago

On 3/25/2026 1:27 AM, Jon Kohler wrote:
> 
> 
>> On Mar 20, 2026, at 8:09 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>>
>> All that needs to be done is moving the GMET bit from vmcb12 to
>> vmcb02.
>>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> ---
>> arch/x86/kvm/svm/nested.c | 3 +++
>> arch/x86/kvm/svm/svm.c    | 3 +++
>> 2 files changed, 6 insertions(+)
>>
>> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
>> index d69bcf52f948..397e9afecb78 100644
>> --- a/arch/x86/kvm/svm/nested.c
>> +++ b/arch/x86/kvm/svm/nested.c
>> @@ -774,6 +774,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
>> vmcb02->control.bus_lock_counter = 0;
>>
>> vmcb02->control.nested_ctl &= ~SVM_NESTED_CTL_GMET_ENABLE;
>> + if (guest_cpu_cap_has(vcpu, X86_FEATURE_GMET))
>> + vmcb02->control.nested_ctl |=
>> + (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_GMET_ENABLE);
>>
>> /* Done at vmrun: asid.  */
>>
>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>> index d3b69eb3242b..4a0d97e70dc2 100644
>> --- a/arch/x86/kvm/svm/svm.c
>> +++ b/arch/x86/kvm/svm/svm.c
>> @@ -5294,6 +5294,9 @@ static __init void svm_set_cpu_caps(void)
>> if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
>> kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
>>
>> + if (boot_cpu_has(X86_FEATURE_GMET))
>> + kvm_cpu_cap_set(X86_FEATURE_GMET);
>> +
>> if (vgif)
>> kvm_cpu_cap_set(X86_FEATURE_VGIF);
>>
>> -- 
>> 2.52.0
>>
> 
> When I enable gmet on the guest, and try to boot with memory integrity
> enabled in Windows 11 25H2 guest, the machine does not boot, but rather
> gets stuck in an NPF loop and does not make any progress.

Same here as well, trying to debug

  kvm_nested_vmenter: rip: 0xfffff8148793c2ea vmcb: 0x00000001087fd000 nested_rip: 0xfffff81487993d90 int_ctl: 0x00000000 event_inj: 0x00000000 nested_npt=n guest_cr3: 0x00000001087ce000
  kvm_nested_vmexit: vcpu 0 reason npf rip 0xfffff81487993d90 info1 0x0000000200000006 info2 0x00000001087cef80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000

This seems to be the first entry into the nested guest and nested page fault is on guest_cr3 page

  kvm_mmu_set_spte: gfn 108600 spte 39ac00ee3 (rwx-) level 2 at 3012b1218

A 2M page is provisioned in the NPT

  kvm_nested_vmexit: vcpu 0 reason npf rip 0xfffff81487993d90 info1 0x0000000200000007 info2 0x00000001087cef80 intr_info 0x00000000 error_code 0x00000000 requests 0x0000000000000000

But the fault keeps hitting.