When restricted injection is active, only #HV exceptions can be injected into
the SEV-SNP guest.
Detect whether the restricted injection feature is active for the guest and,
if so, follow the #HV doorbell communication protocol from the GHCB
specification to inject the interrupt or exception.
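For reference, the #HV doorbell page layout implied by the GHCB
specification and relied on below looks roughly like this sketch (the
field widths follow the spec's PendingEvent definition and assume a
little-endian bitfield layout; the real struct hvdb definition is
introduced elsewhere in the series, so treat this as illustrative only):

	struct hvdb {
		struct {
			union {
				u16 pending_events;	/* PendingEvent word */
				struct {
					u8 vector;	/* bits 7:0 */
					u8 nmi : 1;	/* bit 8 */
					u8 mce : 1;	/* bit 9 */
					u8 rsvd : 5;	/* bits 14:10 */
					u8 no_further_signal : 1; /* bit 15 */
				};
			};
		} events;
		u8 reserved[4094];	/* remainder of the 4KB page */
	};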
Co-developed-by: Thomas Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Melody Wang <huibo.wang@amd.com>
---
arch/x86/include/uapi/asm/kvm.h | 1 +
arch/x86/kvm/svm/sev.c | 153 ++++++++++++++++++++++++++++++++
arch/x86/kvm/svm/svm.c | 19 +++-
arch/x86/kvm/svm/svm.h | 21 ++++-
4 files changed, 190 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index bf57a824f722..f5d85174e658 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -35,6 +35,7 @@
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20
+#define HV_VECTOR 28
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index e65867ea768d..f7623fa64307 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -5023,3 +5023,156 @@ int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
return level;
}
+
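+/*
+ * Queue a #HV exception for the guest unless NoFurtherSignal is set,
+ * in which case an earlier #HV is still unacknowledged and the guest
+ * will pick up the new event from the doorbell page on its own.
+ */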
+static void prepare_hv_injection(struct vcpu_svm *svm, struct hvdb *hvdb)
+{
+ if (hvdb->events.no_further_signal)
+ return;
+
+ svm->vmcb->control.event_inj = HV_VECTOR |
+ SVM_EVTINJ_TYPE_EXEPT |
+ SVM_EVTINJ_VALID;
+ svm->vmcb->control.event_inj_err = 0;
+
+ hvdb->events.no_further_signal = 1;
+}
+
+static void unmap_hvdb(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
+{
+ kvm_vcpu_unmap(vcpu, map, true);
+}
+
+static struct hvdb *map_hvdb(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (!VALID_PAGE(svm->sev_es.hvdb_gpa))
+ return NULL;
+
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->sev_es.hvdb_gpa), map)) {
+ /* Unable to map #HV doorbell page from guest */
+ vcpu_unimpl(vcpu, "snp: error mapping #HV doorbell page [%#llx] from guest\n",
+ svm->sev_es.hvdb_gpa);
+
+ return NULL;
+ }
+
+ return map->hva;
+}
+
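+/*
+ * Record the event vector in the #HV doorbell page, then signal the
+ * guest with a #HV exception if it has not been signaled already.
+ */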
+static bool __sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_host_map hvdb_map;
+ struct hvdb *hvdb;
+
+ hvdb = map_hvdb(vcpu, &hvdb_map);
+ if (!hvdb)
+ return false;
+
+ hvdb->events.vector = vcpu->arch.interrupt.nr;
+
+ prepare_hv_injection(svm, hvdb);
+
+ unmap_hvdb(vcpu, &hvdb_map);
+
+ return true;
+}
+
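+/*
+ * With restricted injection active, #HV is the only exception that can
+ * be injected directly; returning true tells the caller to bypass the
+ * normal injection path.
+ */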
+bool sev_snp_queue_exception(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (!sev_snp_is_rinj_active(vcpu))
+ return false;
+
+ /*
+ * Restricted injection is enabled, so only #HV can be injected.
+ * If the vector is not HV_VECTOR, do not inject the exception, but
+ * still return true so the caller skips the normal injection path.
+ */
+ if (WARN_ONCE(vcpu->arch.exception.vector != HV_VECTOR,
+ "restricted injection enabled, exception %u injection not supported\n",
+ vcpu->arch.exception.vector))
+ return true;
+
+ /*
+ * An intercept likely occurred during #HV delivery, so re-inject it
+ * using the current HVDB pending event values.
+ */
+ svm->vmcb->control.event_inj = HV_VECTOR |
+ SVM_EVTINJ_TYPE_EXEPT |
+ SVM_EVTINJ_VALID;
+ svm->vmcb->control.event_inj_err = 0;
+
+ return true;
+}
+
+bool sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu)
+{
+ if (!sev_snp_is_rinj_active(vcpu))
+ return false;
+
+ return __sev_snp_inject(type, vcpu);
+}
+
+void sev_snp_cancel_injection(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_host_map hvdb_map;
+ struct hvdb *hvdb;
+
+ if (!sev_snp_is_rinj_active(vcpu))
+ return;
+
+ if (!svm->vmcb->control.event_inj)
+ return;
+
+ if ((svm->vmcb->control.event_inj & SVM_EVTINJ_VEC_MASK) != HV_VECTOR)
+ return;
+
+ /*
+ * Copy the information in the doorbell page into the event injection
+ * fields to complete the cancellation flow.
+ */
+ hvdb = map_hvdb(vcpu, &hvdb_map);
+ if (!hvdb)
+ return;
+
+ if (!hvdb->events.pending_events) {
+ /* No pending events: a #HV should not have been queued, so warn */
+ WARN_ON_ONCE(svm->vmcb->control.event_inj);
+ goto out;
+ }
+
+ /* Copy info back into event_inj field (replaces #HV) */
+ svm->vmcb->control.event_inj = SVM_EVTINJ_VALID;
+
+ if (hvdb->events.vector)
+ svm->vmcb->control.event_inj |= hvdb->events.vector |
+ SVM_EVTINJ_TYPE_INTR;
+
+ hvdb->events.pending_events = 0;
+
+out:
+ unmap_hvdb(vcpu, &hvdb_map);
+}
+
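+/*
+ * With restricted injection, a vector still pending in the doorbell
+ * page means the guest has not acknowledged the previous event, so
+ * report interrupts as blocked until it is consumed.
+ */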
+bool sev_snp_blocked(enum inject_type type, struct kvm_vcpu *vcpu)
+{
+ struct kvm_host_map hvdb_map;
+ struct hvdb *hvdb;
+ bool blocked;
+
+ /* Indicate interrupts are blocked if the doorbell page can't be mapped */
+ hvdb = map_hvdb(vcpu, &hvdb_map);
+ if (!hvdb)
+ return true;
+
+ /* Indicate interrupts blocked based on guest acknowledgment */
+ blocked = !!hvdb->events.vector;
+
+ unmap_hvdb(vcpu, &hvdb_map);
+
+ return blocked;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d6f252555ab3..a48388d99c97 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -447,6 +447,9 @@ static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
svm->soft_int_old_rip = old_rip;
svm->soft_int_next_rip = rip;
+ if (sev_snp_queue_exception(vcpu))
+ return 0;
+
if (nrips)
kvm_rip_write(vcpu, old_rip);
@@ -467,6 +470,9 @@ static void svm_inject_exception(struct kvm_vcpu *vcpu)
svm_update_soft_interrupt_rip(vcpu))
return;
+ if (sev_snp_queue_exception(vcpu))
+ return;
+
svm->vmcb->control.event_inj = ex->vector
| SVM_EVTINJ_VALID
| (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
@@ -3662,10 +3668,12 @@ static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
vcpu->arch.interrupt.soft, reinjected);
- ++vcpu->stat.irq_injections;
- svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
- SVM_EVTINJ_VALID | type;
+ if (!sev_snp_inject(INJECT_IRQ, vcpu))
+ svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
+ SVM_EVTINJ_VALID | type;
+
+ ++vcpu->stat.irq_injections;
}
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
@@ -3810,6 +3818,9 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (!gif_set(svm))
return true;
+ if (sev_snp_is_rinj_active(vcpu))
+ return sev_snp_blocked(INJECT_IRQ, vcpu);
+
if (is_guest_mode(vcpu)) {
/* As long as interrupts are being delivered... */
if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
@@ -4128,6 +4139,8 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
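+ /* Swap a queued #HV for the doorbell's pending event before saving */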
+ sev_snp_cancel_injection(vcpu);
+
control->exit_int_info = control->event_inj;
control->exit_int_info_err = control->event_inj_err;
control->event_inj = 0;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f0f14801e122..95c0a7070bd1 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -41,6 +41,10 @@ extern bool x2avic_enabled;
extern bool vnmi;
extern int lbrv;
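+/* Event types that can be delivered through the #HV doorbell page */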
+enum inject_type {
+ INJECT_IRQ,
+};
+
/*
* Clean bits in VMCB.
* VMCB_ALL_CLEAN_MASK might also need to
@@ -751,6 +755,17 @@ void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
+bool sev_snp_queue_exception(struct kvm_vcpu *vcpu);
+bool sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu);
+void sev_snp_cancel_injection(struct kvm_vcpu *vcpu);
+bool sev_snp_blocked(enum inject_type type, struct kvm_vcpu *vcpu);
+static inline bool sev_snp_is_rinj_active(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+
+ return sev_snp_guest(vcpu->kvm) &&
+ (sev->vmsa_features & SVM_SEV_FEAT_RESTRICTED_INJECTION);
+}
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
@@ -781,7 +796,11 @@ static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
{
return 0;
}
-
+static inline bool sev_snp_queue_exception(struct kvm_vcpu *vcpu) { return false; }
+static inline bool sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu) { return false; }
+static inline void sev_snp_cancel_injection(struct kvm_vcpu *vcpu) {}
+static inline bool sev_snp_blocked(enum inject_type type, struct kvm_vcpu *vcpu) { return false; }
+static inline bool sev_snp_is_rinj_active(struct kvm_vcpu *vcpu) { return false; }
#endif
/* vmenter.S */
--
2.34.1
Hello

On Tue, Sep 10, 2024 at 2:05 PM Melody Wang <huibo.wang@amd.com> wrote:
> +static bool __sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +	struct kvm_host_map hvdb_map;
> +	struct hvdb *hvdb;
> +
> +	hvdb = map_hvdb(vcpu, &hvdb_map);
> +	if (!hvdb)
> +		return false;

	old_vector = hvdb->events.vector;

> +
> +	hvdb->events.vector = vcpu->arch.interrupt.nr;
> +
> +	prepare_hv_injection(svm, hvdb);

The specification recommends saving an #HV injection on the following
condition:

	if (!(type == INJECT_IRQ && old_vector))
		prepare_hv_injection(svm, hvdb);

If PendingEvent.Vector was previously non-zero, because the guest has not
yet chosen to acknowledge the interrupt, the previous vector number can be
overwritten by the new vector number without sending another #HV
(regardless of the value of PendingEvent.NoFurtherSignal), because the
guest should already have been informed that a vector was pending.

> +
> +	unmap_hvdb(vcpu, &hvdb_map);
> +
> +	return true;
> +}
> +
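Taken together, the suggestion would reshape __sev_snp_inject roughly as
follows (a sketch only: the old_vector local is the reviewer's addition,
the other helpers come from the patch above, and this exact code was not
posted in the thread):

	static bool __sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);
		struct kvm_host_map hvdb_map;
		struct hvdb *hvdb;
		u8 old_vector;

		hvdb = map_hvdb(vcpu, &hvdb_map);
		if (!hvdb)
			return false;

		/* A non-zero vector means the guest was already signaled */
		old_vector = hvdb->events.vector;

		hvdb->events.vector = vcpu->arch.interrupt.nr;

		/* Don't raise another #HV when replacing an unacknowledged vector */
		if (!(type == INJECT_IRQ && old_vector))
			prepare_hv_injection(svm, hvdb);

		unmap_hvdb(vcpu, &hvdb_map);

		return true;
	}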