In order to dispatch over AccelOpsClass::handle_interrupt(), it must
always be defined, rather than silently falling back to a hidden
handler under the hood. Make AccelOpsClass::handle_interrupt()
mandatory, expose the generic_handle_interrupt() prototype, and
register it for each accelerator.
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Zhao Liu <zhao1.liu@intel.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Message-Id: <20250703173248.44995-29-philmd@linaro.org>
---
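Note for reviewers (below the ---, so not part of the commit message):
a minimal sketch of what any new accelerator's ops class has to do
after this change, using a hypothetical "foo" accelerator. The pattern
mirrors the qtest and Xen hunks below; dummy_start_vcpu_thread and the
"system/cpus.h" include are assumed from those hunks, and
create_vcpu_thread remains the other mandatory hook.

    #include "qemu/osdep.h"
    #include "qom/object.h"
    #include "system/accel-ops.h"
    #include "system/cpus.h"

    static void foo_accel_ops_class_init(ObjectClass *oc, const void *data)
    {
        AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

        /* Mandatory: how vCPU threads are created. */
        ops->create_vcpu_thread = dummy_start_vcpu_thread;
        /*
         * Mandatory after this patch: either a custom handler or the
         * generic one now exposed by system/cpus.c; otherwise the
         * assertion in cpus_register_accel() fires at startup.
         */
        ops->handle_interrupt = generic_handle_interrupt;
    }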
include/system/accel-ops.h | 3 +++
accel/hvf/hvf-accel-ops.c | 1 +
accel/kvm/kvm-accel-ops.c | 1 +
accel/qtest/qtest.c | 1 +
accel/xen/xen-all.c | 1 +
system/cpus.c | 10 ++++------
target/i386/nvmm/nvmm-accel-ops.c | 1 +
target/i386/whpx/whpx-accel-ops.c | 1 +
8 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/include/system/accel-ops.h b/include/system/accel-ops.h
index a4e706b49c9..e775ecc348c 100644
--- a/include/system/accel-ops.h
+++ b/include/system/accel-ops.h
@@ -62,6 +62,7 @@ struct AccelOpsClass {
void (*synchronize_pre_loadvm)(CPUState *cpu);
void (*synchronize_pre_resume)(bool step_pending);
+ /* handle_interrupt is mandatory. */
void (*handle_interrupt)(CPUState *cpu, int mask);
/**
@@ -86,4 +87,6 @@ struct AccelOpsClass {
void (*remove_all_breakpoints)(CPUState *cpu);
};
+void generic_handle_interrupt(CPUState *cpu, int mask);
+
#endif /* ACCEL_OPS_H */
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 395b5a8e1c0..be8724ac896 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -353,6 +353,7 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
ops->create_vcpu_thread = hvf_start_vcpu_thread;
ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
+ ops->handle_interrupt = generic_handle_interrupt;
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index e5c15449aa6..0eafc902c3f 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -101,6 +101,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data)
ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
ops->synchronize_state = kvm_cpu_synchronize_state;
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
+ ops->handle_interrupt = generic_handle_interrupt;
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
ops->update_guest_debug = kvm_update_guest_debug_ops;
diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c
index 612cede160b..5474ce73135 100644
--- a/accel/qtest/qtest.c
+++ b/accel/qtest/qtest.c
@@ -67,6 +67,7 @@ static void qtest_accel_ops_class_init(ObjectClass *oc, const void *data)
ops->create_vcpu_thread = dummy_start_vcpu_thread;
ops->get_virtual_clock = qtest_get_virtual_clock;
ops->set_virtual_clock = qtest_set_virtual_clock;
+ ops->handle_interrupt = generic_handle_interrupt;
};
static const TypeInfo qtest_accel_ops_type = {
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
index c150dd43cab..c12c22de785 100644
--- a/accel/xen/xen-all.c
+++ b/accel/xen/xen-all.c
@@ -153,6 +153,7 @@ static void xen_accel_ops_class_init(ObjectClass *oc, const void *data)
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = dummy_start_vcpu_thread;
+ ops->handle_interrupt = generic_handle_interrupt;
}
static const TypeInfo xen_accel_ops_type = {
diff --git a/system/cpus.c b/system/cpus.c
index a43e0e4e796..0d0eec82a2f 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -254,7 +254,7 @@ int64_t cpus_get_elapsed_ticks(void)
return cpu_get_ticks();
}
-static void generic_handle_interrupt(CPUState *cpu, int mask)
+void generic_handle_interrupt(CPUState *cpu, int mask)
{
cpu->interrupt_request |= mask;
@@ -267,11 +267,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
{
g_assert(bql_locked());
- if (cpus_accel->handle_interrupt) {
- cpus_accel->handle_interrupt(cpu, mask);
- } else {
- generic_handle_interrupt(cpu, mask);
- }
+ cpus_accel->handle_interrupt(cpu, mask);
}
/*
@@ -680,6 +676,8 @@ void cpus_register_accel(const AccelOpsClass *ops)
{
assert(ops != NULL);
assert(ops->create_vcpu_thread != NULL); /* mandatory */
+ assert(ops->handle_interrupt);
+
cpus_accel = ops;
}
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index 21443078b72..a5517b0abf3 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -87,6 +87,7 @@ static void nvmm_accel_ops_class_init(ObjectClass *oc, const void *data)
ops->create_vcpu_thread = nvmm_start_vcpu_thread;
ops->kick_vcpu_thread = nvmm_kick_vcpu_thread;
+ ops->handle_interrupt = generic_handle_interrupt;
ops->synchronize_post_reset = nvmm_cpu_synchronize_post_reset;
ops->synchronize_post_init = nvmm_cpu_synchronize_post_init;
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index b8bebe403c9..31cf15f0045 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -90,6 +90,7 @@ static void whpx_accel_ops_class_init(ObjectClass *oc, const void *data)
ops->create_vcpu_thread = whpx_start_vcpu_thread;
ops->kick_vcpu_thread = whpx_kick_vcpu_thread;
ops->cpu_thread_is_idle = whpx_vcpu_thread_is_idle;
+ ops->handle_interrupt = generic_handle_interrupt;
ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset;
ops->synchronize_post_init = whpx_cpu_synchronize_post_init;
--
2.49.0