From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: pgonda@google.com, seanjc@google.com, theflow@google.com,
    vkuznets@redhat.com, thomas.lendacky@amd.com, stable@vger.kernel.org
Subject: [PATCH 1/3] KVM: SEV: snapshot the GHCB before accessing it
Date: Fri, 4 Aug 2023 13:33:53 -0400
Message-Id: <20230804173355.51753-2-pbonzini@redhat.com>
In-Reply-To: <20230804173355.51753-1-pbonzini@redhat.com>
References: <20230804173355.51753-1-pbonzini@redhat.com>

Validation of the GHCB is susceptible to time-of-check/time-of-use
vulnerabilities. To avoid them, we would like to always snapshot the
fields that are read in sev_es_validate_vmgexit(), and not use the
GHCB anymore after it returns.

This means:

- invoking sev_es_sync_from_ghcb() before any GHCB access, including
  before sev_es_validate_vmgexit()

- snapshotting all fields including the valid bitmap and the
  sw_scratch field, which are currently not cached anywhere

The valid bitmap is the first thing to be copied out of the GHCB;
then, further accesses will use the copy in svm->sev_es.
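To make the time-of-check/time-of-use hazard concrete, here is a
minimal user-space sketch of the double-fetch pattern and of the
snapshot fix (hypothetical names and layout, not KVM code):

  #include <stdint.h>

  /* Stand-in for the guest-shared GHCB page; another vCPU may
   * rewrite it at any time, hence volatile. */
  struct shared_page {
  	volatile uint64_t sw_exit_code;
  	volatile uint64_t sw_exit_info_1;
  };

  /* Host-private snapshot, the moral equivalent of svm->sev_es. */
  struct snapshot {
  	uint64_t sw_exit_code;
  	uint64_t sw_exit_info_1;
  };

  static int dispatch(uint64_t code)
  {
  	return (int)code;	/* placeholder for the real exit handler */
  }

  /* Racy: validates one fetch, dispatches on a second, independent
   * fetch that the guest may have changed in between. */
  static int handle_racy(struct shared_page *ghcb)
  {
  	if (ghcb->sw_exit_code == 0)		/* check */
  		return -1;
  	return dispatch(ghcb->sw_exit_code);	/* use: a second fetch */
  }

  /* Fixed: copy the fields out once, then check and use only the copy. */
  static int handle_snapshotted(struct shared_page *ghcb, struct snapshot *snap)
  {
  	snap->sw_exit_code = ghcb->sw_exit_code;
  	snap->sw_exit_info_1 = ghcb->sw_exit_info_1;
  	if (snap->sw_exit_code == 0)
  		return -1;
  	return dispatch(snap->sw_exit_code);
  }

  int main(void)
  {
  	struct shared_page page = { .sw_exit_code = 0x72 };	/* arbitrary nonzero code */
  	struct snapshot snap;

  	handle_racy(&page);		/* the pattern removed by this series */
  	return handle_snapshotted(&page, &snap) < 0;
  }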
Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Peter Gonda <pgonda@google.com>
---
 arch/x86/kvm/svm/sev.c | 69 +++++++++++++++++++++---------------------
 arch/x86/kvm/svm/svm.h | 26 ++++++++++++++++
 2 files changed, 61 insertions(+), 34 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 07756b7348ae..e898f0b2b0ba 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2417,15 +2417,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 	 */
 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
 
-	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
+	BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
+	memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
 
-	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
 
-	if (ghcb_xcr0_is_valid(ghcb)) {
+	svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
+
+	if (kvm_ghcb_xcr0_is_valid(svm)) {
 		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
 		kvm_update_cpuid_runtime(vcpu);
 	}
@@ -2436,6 +2439,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 	control->exit_code_hi = upper_32_bits(exit_code);
 	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
 	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
+	svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
 
 	/* Clear the valid entries fields */
 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
@@ -2464,56 +2468,56 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 
 	reason = GHCB_ERR_MISSING_INPUT;
 
-	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
-	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
-	    !ghcb_sw_exit_info_2_is_valid(ghcb))
+	if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
+	    !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
+	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))
 		goto vmgexit_err;
 
 	switch (ghcb_get_sw_exit_code(ghcb)) {
 	case SVM_EXIT_READ_DR7:
 		break;
 	case SVM_EXIT_WRITE_DR7:
-		if (!ghcb_rax_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_EXIT_RDTSC:
 		break;
 	case SVM_EXIT_RDPMC:
-		if (!ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_EXIT_CPUID:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
 		if (ghcb_get_rax(ghcb) == 0xd)
-			if (!ghcb_xcr0_is_valid(ghcb))
+			if (!kvm_ghcb_xcr0_is_valid(svm))
 				goto vmgexit_err;
 		break;
 	case SVM_EXIT_INVD:
 		break;
 	case SVM_EXIT_IOIO:
 		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
-			if (!ghcb_sw_scratch_is_valid(ghcb))
+			if (!kvm_ghcb_sw_scratch_is_valid(svm))
 				goto vmgexit_err;
 		} else {
 			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
-				if (!ghcb_rax_is_valid(ghcb))
+				if (!kvm_ghcb_rax_is_valid(svm))
 					goto vmgexit_err;
 		}
 		break;
 	case SVM_EXIT_MSR:
-		if (!ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
 		if (ghcb_get_sw_exit_info_1(ghcb)) {
-			if (!ghcb_rax_is_valid(ghcb) ||
-			    !ghcb_rdx_is_valid(ghcb))
+			if (!kvm_ghcb_rax_is_valid(svm) ||
+			    !kvm_ghcb_rdx_is_valid(svm))
 				goto vmgexit_err;
 		}
 		break;
 	case SVM_EXIT_VMMCALL:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_cpl_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_cpl_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_EXIT_RDTSCP:
@@ -2521,19 +2525,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	case SVM_EXIT_WBINVD:
 		break;
 	case SVM_EXIT_MONITOR:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb) ||
-		    !ghcb_rdx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm) ||
+		    !kvm_ghcb_rdx_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_EXIT_MWAIT:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_VMGEXIT_MMIO_READ:
 	case SVM_VMGEXIT_MMIO_WRITE:
-		if (!ghcb_sw_scratch_is_valid(ghcb))
+		if (!kvm_ghcb_sw_scratch_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_VMGEXIT_NMI_COMPLETE:
@@ -2563,9 +2567,6 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 		dump_ghcb(svm);
 	}
 
-	/* Clear the valid entries fields */
-	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
-
 	ghcb_set_sw_exit_info_1(ghcb, 2);
 	ghcb_set_sw_exit_info_2(ghcb, reason);
 
@@ -2586,7 +2587,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 	 */
 	if (svm->sev_es.ghcb_sa_sync) {
 		kvm_write_guest(svm->vcpu.kvm,
-				ghcb_get_sw_scratch(svm->sev_es.ghcb),
+				svm->sev_es.sw_scratch,
 				svm->sev_es.ghcb_sa,
 				svm->sev_es.ghcb_sa_len);
 		svm->sev_es.ghcb_sa_sync = false;
@@ -2637,7 +2638,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	u64 scratch_gpa_beg, scratch_gpa_end;
 	void *scratch_va;
 
-	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
+	scratch_gpa_beg = svm->sev_es.sw_scratch;
 	if (!scratch_gpa_beg) {
 		pr_err("vmgexit: scratch gpa not provided\n");
 		goto e_scratch;
@@ -2853,11 +2854,11 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
 	exit_code = ghcb_get_sw_exit_code(ghcb);
 
+	sev_es_sync_from_ghcb(svm);
 	ret = sev_es_validate_vmgexit(svm);
 	if (ret)
 		return ret;
 
-	sev_es_sync_from_ghcb(svm);
 	ghcb_set_sw_exit_info_1(ghcb, 0);
 	ghcb_set_sw_exit_info_2(ghcb, 0);
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 18af7e712a5a..8239c8de45ac 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -190,10 +190,12 @@ struct vcpu_sev_es_state {
 	/* SEV-ES support */
 	struct sev_es_save_area *vmsa;
 	struct ghcb *ghcb;
+	u8 valid_bitmap[16];
 	struct kvm_host_map ghcb_map;
 	bool received_first_sipi;
 
 	/* SEV-ES scratch area support */
+	u64 sw_scratch;
 	void *ghcb_sa;
 	u32 ghcb_sa_len;
 	bool ghcb_sa_sync;
@@ -744,4 +746,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 
+#define DEFINE_KVM_GHCB_ACCESSORS(field) \
+	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
+	{ \
+		return test_bit(GHCB_BITMAP_IDX(field), \
+				(unsigned long *)&svm->sev_es.valid_bitmap); \
+	} \
+	\
+	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
+	{ \
+		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
+	} \
+
+DEFINE_KVM_GHCB_ACCESSORS(cpl)
+DEFINE_KVM_GHCB_ACCESSORS(rax)
+DEFINE_KVM_GHCB_ACCESSORS(rcx)
+DEFINE_KVM_GHCB_ACCESSORS(rdx)
+DEFINE_KVM_GHCB_ACCESSORS(rbx)
+DEFINE_KVM_GHCB_ACCESSORS(rsi)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
+DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
+DEFINE_KVM_GHCB_ACCESSORS(xcr0)
+
 #endif
-- 
2.39.0
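For reference, each DEFINE_KVM_GHCB_ACCESSORS() instantiation above
expands to a pair of helpers; DEFINE_KVM_GHCB_ACCESSORS(rax), for
example, becomes roughly (whitespace aside):

  static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
  {
  	/* Reads KVM's private copy of the valid bitmap, not the GHCB. */
  	return test_bit(GHCB_BITMAP_IDX(rax),
  			(unsigned long *)&svm->sev_es.valid_bitmap);
  }

  static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb)
  {
  	/* The guest can no longer flip a valid bit between check and use. */
  	return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
  }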
From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: pgonda@google.com, seanjc@google.com, theflow@google.com,
    vkuznets@redhat.com, thomas.lendacky@amd.com, stable@vger.kernel.org
Subject: [PATCH 2/3] KVM: SEV: only access GHCB fields once
Date: Fri, 4 Aug 2023 13:33:54 -0400
Message-Id: <20230804173355.51753-3-pbonzini@redhat.com>
In-Reply-To: <20230804173355.51753-1-pbonzini@redhat.com>
References: <20230804173355.51753-1-pbonzini@redhat.com>

A KVM guest using SEV-ES or SEV-SNP with multiple vCPUs can trigger
a double fetch race condition vulnerability and invoke the VMGEXIT
handler recursively.

sev_handle_vmgexit() maps the GHCB page using kvm_vcpu_map() and then
fetches the exit code using ghcb_get_sw_exit_code(). Soon after,
sev_es_validate_vmgexit() fetches the exit code again. Since the GHCB
page is shared with the guest, the guest is able to quickly swap the
values with another vCPU and hence bypass the validation. One vmexit
code that can be rejected by sev_es_validate_vmgexit() is
SVM_EXIT_VMGEXIT; if sev_handle_vmgexit() observes it in the second
fetch, the call to svm_invoke_exit_handler() will invoke
sev_handle_vmgexit() again recursively.

To avoid the race, always fetch the GHCB data from the places where
sev_es_sync_from_ghcb() stores it.

Exploiting recursion in the Linux kernel has been proven feasible in
the past, but the impact is mitigated by stack guard pages
(CONFIG_VMAP_STACK). Still, if an attacker manages to call the
handler multiple times, they can theoretically trigger a stack
overflow and cause a denial of service, or potentially a
guest-to-host escape in kernel configurations without stack guard
pages.

Note that winning the race reliably in every iteration is very tricky
due to the very tight window of the fetches; depending on the
compiler settings, they are often consecutive because of optimization
and inlining.

Tested by booting an SEV-ES RHEL9 guest.
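The replacement helper rebuilds the 64-bit exit code from the two
32-bit VMCB control fields that sev_es_sync_from_ghcb() fills, so the
value is fetched once and then reused for both validation and
dispatch. A standalone sketch of that recombination (hypothetical
struct, not the kernel's vmcb_control_area):

  #include <stdint.h>
  #include <stdio.h>

  struct control_area {
  	uint32_t exit_code;	/* lower 32 bits of the snapshotted exit code */
  	uint32_t exit_code_hi;	/* upper 32 bits */
  };

  /* Same recombination as the kvm_ghcb_get_sw_exit_code() added below. */
  static uint64_t get_sw_exit_code(const struct control_area *control)
  {
  	return (((uint64_t)control->exit_code_hi) << 32) | control->exit_code;
  }

  int main(void)
  {
  	struct control_area c = { .exit_code = 0x403 };	/* SVM_EXIT_VMGEXIT */
  	uint64_t exit_code = get_sw_exit_code(&c);	/* fetched exactly once */

  	/* The check and every later use see the same value, so a guest
  	 * can no longer smuggle SVM_EXIT_VMGEXIT past validation. */
  	printf("exit code %#llx\n", (unsigned long long)exit_code);
  	return exit_code != 0x403;
  }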
Fixes: CVE-2023-4155
Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT")
Cc: stable@vger.kernel.org
Reported-by: Andy Nguyen <theflow@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Peter Gonda <pgonda@google.com>
---
 arch/x86/kvm/svm/sev.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index e898f0b2b0ba..ca4ba5fe9a01 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2445,9 +2445,15 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
 
+static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+{
+	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
+}
+
 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
-	struct kvm_vcpu *vcpu;
+	struct vmcb_control_area *control = &svm->vmcb->control;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 	struct ghcb *ghcb;
 	u64 exit_code;
 	u64 reason;
@@ -2458,7 +2464,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	 * Retrieve the exit code now even though it may not be marked valid
 	 * as it could help with debugging.
 	 */
-	exit_code = ghcb_get_sw_exit_code(ghcb);
+	exit_code = kvm_ghcb_get_sw_exit_code(control);
 
 	/* Only GHCB Usage code 0 is supported */
 	if (ghcb->ghcb_usage) {
@@ -2473,7 +2479,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))
 		goto vmgexit_err;
 
-	switch (ghcb_get_sw_exit_code(ghcb)) {
+	switch (exit_code) {
 	case SVM_EXIT_READ_DR7:
 		break;
 	case SVM_EXIT_WRITE_DR7:
@@ -2490,18 +2496,18 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 		if (!kvm_ghcb_rax_is_valid(svm) ||
 		    !kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
-		if (ghcb_get_rax(ghcb) == 0xd)
+		if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
 			if (!kvm_ghcb_xcr0_is_valid(svm))
 				goto vmgexit_err;
 		break;
 	case SVM_EXIT_INVD:
 		break;
 	case SVM_EXIT_IOIO:
-		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
+		if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
 			if (!kvm_ghcb_sw_scratch_is_valid(svm))
 				goto vmgexit_err;
 		} else {
-			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
+			if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
 				if (!kvm_ghcb_rax_is_valid(svm))
 					goto vmgexit_err;
 		}
@@ -2509,7 +2515,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	case SVM_EXIT_MSR:
 		if (!kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
-		if (ghcb_get_sw_exit_info_1(ghcb)) {
+		if (control->exit_info_1) {
 			if (!kvm_ghcb_rax_is_valid(svm) ||
 			    !kvm_ghcb_rdx_is_valid(svm))
 				goto vmgexit_err;
@@ -2553,8 +2559,6 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	return 0;
 
 vmgexit_err:
-	vcpu = &svm->vcpu;
-
 	if (reason == GHCB_ERR_INVALID_USAGE) {
 		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
 			    ghcb->ghcb_usage);
@@ -2852,8 +2856,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
 	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
 
-	exit_code = ghcb_get_sw_exit_code(ghcb);
-
 	sev_es_sync_from_ghcb(svm);
 	ret = sev_es_validate_vmgexit(svm);
 	if (ret)
@@ -2862,6 +2864,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 	ghcb_set_sw_exit_info_1(ghcb, 0);
 	ghcb_set_sw_exit_info_2(ghcb, 0);
 
+	exit_code = kvm_ghcb_get_sw_exit_code(control);
 	switch (exit_code) {
 	case SVM_VMGEXIT_MMIO_READ:
 		ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
-- 
2.39.0
From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: pgonda@google.com, seanjc@google.com, theflow@google.com,
    vkuznets@redhat.com, thomas.lendacky@amd.com
Subject: [PATCH 3/3] KVM: SEV: remove ghcb variable declarations
Date: Fri, 4 Aug 2023 13:33:55 -0400
Message-Id: <20230804173355.51753-4-pbonzini@redhat.com>
In-Reply-To: <20230804173355.51753-1-pbonzini@redhat.com>
References: <20230804173355.51753-1-pbonzini@redhat.com>

To avoid possible time-of-check/time-of-use issues, the GHCB should
almost never be accessed outside dump_ghcb(), sev_es_sync_to_ghcb()
and sev_es_sync_from_ghcb(). The only legitimate uses are to set the
exitinfo fields and to find the address of the scratch area embedded
in the GHCB. Accessing ghcb_usage also goes through svm->sev_es.ghcb
in sev_es_validate_vmgexit(), but that is acceptable because the
value is not used anyway.

Removing a shortcut variable that contains the value of
svm->sev_es.ghcb makes these cases a bit more verbose, but it limits
the chance of someone reading the GHCB by mistake.
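The exitinfo writes that remain follow the error-return convention
visible throughout this code: sw_exit_info_1 is set to 2 to flag an
error and sw_exit_info_2 carries the reason code that the guest
observes after resuming. A minimal sketch of that convention
(hypothetical stand-in types, not the kernel's struct ghcb):

  #include <stdint.h>

  struct ghcb_save {
  	uint64_t sw_exit_info_1;
  	uint64_t sw_exit_info_2;
  };

  /* Mirrors the ghcb_set_sw_exit_info_1(..., 2) /
   * ghcb_set_sw_exit_info_2(..., reason) pairs in the patch below. */
  static void ghcb_report_error(struct ghcb_save *save, uint64_t reason)
  {
  	save->sw_exit_info_1 = 2;	/* tell the guest the request failed */
  	save->sw_exit_info_2 = reason;	/* e.g. GHCB_ERR_INVALID_SCRATCH_AREA */
  }

  int main(void)
  {
  	struct ghcb_save save;

  	ghcb_report_error(&save, 4);	/* stand-in reason code */
  	return save.sw_exit_info_1 != 2;
  }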
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Peter Gonda <pgonda@google.com>
---
 arch/x86/kvm/svm/sev.c | 30 ++++++++++++------------------
 1 file changed, 12 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index ca4ba5fe9a01..d3aec1f2cad2 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2454,12 +2454,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	struct ghcb *ghcb;
 	u64 exit_code;
 	u64 reason;
 
-	ghcb = svm->sev_es.ghcb;
-
 	/*
 	 * Retrieve the exit code now even though it may not be marked valid
 	 * as it could help with debugging.
@@ -2467,7 +2464,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	exit_code = kvm_ghcb_get_sw_exit_code(control);
 
 	/* Only GHCB Usage code 0 is supported */
-	if (ghcb->ghcb_usage) {
+	if (svm->sev_es.ghcb->ghcb_usage) {
 		reason = GHCB_ERR_INVALID_USAGE;
 		goto vmgexit_err;
 	}
@@ -2561,7 +2558,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 vmgexit_err:
 	if (reason == GHCB_ERR_INVALID_USAGE) {
 		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
-			    ghcb->ghcb_usage);
+			    svm->sev_es.ghcb->ghcb_usage);
 	} else if (reason == GHCB_ERR_INVALID_EVENT) {
 		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
 			    exit_code);
@@ -2571,8 +2568,8 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 		dump_ghcb(svm);
 	}
 
-	ghcb_set_sw_exit_info_1(ghcb, 2);
-	ghcb_set_sw_exit_info_2(ghcb, reason);
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
 
 	/* Resume the guest to "return" the error code. */
 	return 1;
@@ -2637,7 +2634,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
-	struct ghcb *ghcb = svm->sev_es.ghcb;
 	u64 ghcb_scratch_beg, ghcb_scratch_end;
 	u64 scratch_gpa_beg, scratch_gpa_end;
 	void *scratch_va;
@@ -2713,8 +2709,8 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	return 0;
 
 e_scratch:
-	ghcb_set_sw_exit_info_1(ghcb, 2);
-	ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
 
 	return 1;
 }
@@ -2827,7 +2823,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	u64 ghcb_gpa, exit_code;
-	struct ghcb *ghcb;
 	int ret;
 
 	/* Validate the GHCB */
@@ -2852,17 +2847,16 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 	}
 
 	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
-	ghcb = svm->sev_es.ghcb_map.hva;
 
-	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
+	trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
 
 	sev_es_sync_from_ghcb(svm);
 	ret = sev_es_validate_vmgexit(svm);
 	if (ret)
 		return ret;
 
-	ghcb_set_sw_exit_info_1(ghcb, 0);
-	ghcb_set_sw_exit_info_2(ghcb, 0);
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
 
 	exit_code = kvm_ghcb_get_sw_exit_code(control);
 	switch (exit_code) {
@@ -2902,13 +2896,13 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 			break;
 		case 1:
 			/* Get AP jump table address */
-			ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
+			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
 			break;
 		default:
 			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
 			       control->exit_info_1);
-			ghcb_set_sw_exit_info_1(ghcb, 2);
-			ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
+			ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
+			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
 		}
 
 		ret = 1;
-- 
2.39.0