kvm_handle_nmi() directly calls spapr_mce_req_event() which is machine
level code. Apart from being ugly, this forces spapr_mce_req_event()
to rely on qdev_get_machine() to get a pointer to the machine state.
This is a bit unfortunate since POWER CPUs have a backlink to the
virtual hypervisor, which happens to be the machine itself with
sPAPR.

Turn spapr_mce_req_event() into a PPC virtual hypervisor operation,
and adapt kvm_handle_nmi() to call it as such.

Signed-off-by: Greg Kurz <groug@kaod.org>
---
 include/hw/ppc/spapr.h | 3 ++-
 target/ppc/cpu.h       | 2 ++
 hw/ppc/spapr.c         | 1 +
 hw/ppc/spapr_events.c  | 5 +++--
 target/ppc/kvm.c       | 9 +++++++--
 5 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index e0f10f252c08..476c5b809794 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -852,7 +852,8 @@ void spapr_clear_pending_hotplug_events(SpaprMachineState *spapr);
 int spapr_max_server_number(SpaprMachineState *spapr);
 void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
                       uint64_t pte0, uint64_t pte1);
-void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered);
+void spapr_mce_req_event(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
+                         bool recovered);
 bool spapr_machine_using_legacy_numa(SpaprMachineState *spapr);
 
 /* DRC callbacks. */
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 2609e4082ed8..5bac68aec826 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1219,6 +1219,8 @@ struct PPCVirtualHypervisorClass {
     target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
     void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
    void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
+    void (*mce_req_event)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
+                          bool recovered);
 };
 
 #define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor"
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index aca7d7af283a..09fc605f11ba 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -4441,6 +4441,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
     vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
     vhc->cpu_exec_enter = spapr_cpu_exec_enter;
     vhc->cpu_exec_exit = spapr_cpu_exec_exit;
+    vhc->mce_req_event = spapr_mce_req_event;
     xic->ics_get = spapr_ics_get;
     xic->ics_resend = spapr_ics_resend;
     xic->icp_get = spapr_icp_get;
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index 3f37b49fd8ad..8e988eb939da 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -868,9 +868,10 @@ static void spapr_mce_dispatch_elog(SpaprMachineState *spapr, PowerPCCPU *cpu,
     ppc_cpu_do_fwnmi_machine_check(cs, spapr->fwnmi_machine_check_addr);
 }
 
-void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
+void spapr_mce_req_event(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
+                         bool recovered)
 {
-    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
     CPUState *cs = CPU(cpu);
     int ret;
     Error *local_err = NULL;
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index daf690a67820..ba6edf178a39 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -2816,10 +2816,15 @@ int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
 {
     uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;
 
-    cpu_synchronize_state(CPU(cpu));
+    if (cpu->vhyp) {
+        PPCVirtualHypervisorClass *vhc =
+            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
 
-    spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
+        cpu_synchronize_state(CPU(cpu));
+        vhc->mce_req_event(cpu->vhyp, cpu,
+                           flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
+    }
 
     return 0;
 }
 #endif
--
2.26.2
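
[The sketch below condenses the dispatch pattern this patch introduces
into a standalone program: the CPU carries a backlink to its virtual
hypervisor, and target-level code reaches machine-level code through a
class method instead of qdev_get_machine(). The names mirror the hunks
above, but every type here is a simplified stand-in for the real QOM
machinery; in particular, the vhyp_class field stands in for what
PPC_VIRTUAL_HYPERVISOR_GET_CLASS() resolves at runtime.]

/* Toy model of the vhyp-op dispatch added by the patch.
 * Build with: cc -o vhyp-demo vhyp-demo.c */
#include <stdbool.h>
#include <stdio.h>

typedef struct PowerPCCPU PowerPCCPU;

typedef struct PPCVirtualHypervisorClass {
    /* the operation added in target/ppc/cpu.h */
    void (*mce_req_event)(void *vhyp, PowerPCCPU *cpu, bool recovered);
} PPCVirtualHypervisorClass;

struct PowerPCCPU {
    void *vhyp;                    /* backlink: the machine itself on sPAPR */
    PPCVirtualHypervisorClass *vhyp_class; /* stand-in for GET_CLASS() */
};

/* machine-level implementation (hw/ppc/spapr_events.c in the patch) */
static void spapr_mce_req_event(void *vhyp, PowerPCCPU *cpu, bool recovered)
{
    /* the real code recovers the machine state with SPAPR_MACHINE(vhyp) */
    (void)vhyp;
    (void)cpu;
    printf("mce_req_event: recovered=%d\n", recovered);
}

/* target-level handler (kvm_handle_nmi() in target/ppc/kvm.c) */
static int handle_nmi(PowerPCCPU *cpu, bool fully_recovered)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc = cpu->vhyp_class;
        vhc->mce_req_event(cpu->vhyp, cpu, fully_recovered);
    }
    return 0;
}

int main(void)
{
    PPCVirtualHypervisorClass vhc = { .mce_req_event = spapr_mce_req_event };
    int machine;                         /* placeholder machine object */
    PowerPCCPU cpu = { .vhyp = &machine, .vhyp_class = &vhc };
    return handle_nmi(&cpu, true);
}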
On Wed, Dec 09, 2020 at 06:00:52PM +0100, Greg Kurz wrote:
> kvm_handle_nmi() directly calls spapr_mce_req_event() which is machine
> level code. Apart from being ugly, this forces spapr_mce_req_event()
> to rely on qdev_get_machine() to get a pointer to the machine state.
> This is a bit unfortunate since POWER CPUs have a backlink to the
> virtual hypervisor, which happens to be the machine itself with
> sPAPR.
> 
> Turn spapr_mce_req_event() into a PPC virtual hypervisor operation,
> and adapt kvm_handle_nmi() to call it as such.
> 
> Signed-off-by: Greg Kurz <groug@kaod.org>

I have somewhat mixed thoughts on this. Putting it in vhyp makes a
certain sense. But on the other hand, the MCE event from KVM is an
explicitly PAPR specific interface, so it can't really go to any other
implementation.

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson
On Thu, 10 Dec 2020 14:54:08 +1100
David Gibson <david@gibson.dropbear.id.au> wrote:

> On Wed, Dec 09, 2020 at 06:00:52PM +0100, Greg Kurz wrote:
> > kvm_handle_nmi() directly calls spapr_mce_req_event() which is machine
> > level code. Apart from being ugly, this forces spapr_mce_req_event()
> > to rely on qdev_get_machine() to get a pointer to the machine state.
> > This is a bit unfortunate since POWER CPUs have a backlink to the
> > virtual hypervisor, which happens to be the machine itself with
> > sPAPR.
> > 
> > Turn spapr_mce_req_event() into a PPC virtual hypervisor operation,
> > and adapt kvm_handle_nmi() to call it as such.
> > 
> > Signed-off-by: Greg Kurz <groug@kaod.org>
> 
> I have somewhat mixed thoughts on this. Putting it in vhyp makes a
> certain sense. But on the other hand, the MCE event from KVM is an
> explicitly PAPR specific interface, so it can't really go to any other
> implementation.
> 

True. Same thing goes for the hypercalls actually. So I guess it's
better to keep this dependency explicit, as long as we don't have to
support non-PAPR KVM guests.

Please ignore this patch.
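
[The hypercall parallel Greg draws refers to the hypercall op that
PPCVirtualHypervisorClass already carries: the TCG exception path
dispatches guest hypercalls through the same vhyp backlink, while the
KVM exit handler calls the sPAPR code directly. The fragment below is a
rough reconstruction of that TCG-side dispatch in target/ppc/excp_helper.c
of this era, quoted from memory rather than the tree, so treat the exact
surrounding context as an assumption.]

    /* Sketched from memory of powerpc_excp(); details may differ. */
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        lev = env->error_code;
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        break;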