From: Xuemei Liu <liu.xuemei1@zte.com.cn>
Add save and restore functions that run when kvm_irqchip_in_kernel()
returns true, in order to get and set the IMSIC irqchip state from the
KVM kernel.
Signed-off-by: Xuemei Liu <liu.xuemei1@zte.com.cn>
---
hw/intc/riscv_imsic.c | 171 +++++++++++++++++++++++++++++++---
include/hw/intc/riscv_imsic.h | 3 +
include/qemu/bitops.h | 1 +
migration/vmstate-types.c | 1 -
4 files changed, 161 insertions(+), 15 deletions(-)
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
index 7c9a012033..1c9c706b03 100644
--- a/hw/intc/riscv_imsic.c
+++ b/hw/intc/riscv_imsic.c
@@ -34,6 +34,7 @@
#include "system/system.h"
#include "system/kvm.h"
#include "migration/vmstate.h"
+#include "kvm/kvm_riscv.h"
#define IMSIC_MMIO_PAGE_LE 0x00
#define IMSIC_MMIO_PAGE_BE 0x04
@@ -363,11 +364,16 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
- imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
- imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
+ } else {
+ imsic->nr_eix = 2 * BITS_TO_U64S(imsic->num_irqs);
+ imsic->eie = g_new0(uint32_t, imsic->nr_eix);
+ imsic->eip = g_new0(uint32_t, imsic->nr_eix);
}
+ imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
+ imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
+
memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
imsic, TYPE_RISCV_IMSIC,
IMSIC_MMIO_SIZE(imsic->num_pages));
@@ -398,23 +404,17 @@ static const Property riscv_imsic_properties[] = {
DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
};
-static bool riscv_imsic_state_needed(void *opaque)
+static bool riscv_imsic_emul_state_needed(void *opaque)
{
return !kvm_irqchip_in_kernel();
}
-static const VMStateDescription vmstate_riscv_imsic = {
- .name = "riscv_imsic",
- .version_id = 2,
- .minimum_version_id = 2,
- .needed = riscv_imsic_state_needed,
+static const VMStateDescription vmstate_riscv_imsic_emul = {
+ .name = "riscv_imsic_emul",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = riscv_imsic_emul_state_needed,
.fields = (const VMStateField[]) {
- VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
- num_pages, 0,
- vmstate_info_uint32, uint32_t),
- VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
- num_pages, 0,
- vmstate_info_uint32, uint32_t),
VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState,
num_eistate, 0,
vmstate_info_uint32, uint32_t),
@@ -422,6 +422,149 @@ static const VMStateDescription vmstate_riscv_imsic = {
}
};
+static bool riscv_imsic_in_kernel_state_needed(void *opaque)
+{
+ return kvm_irqchip_in_kernel();
+}
+
+static int riscv_imsic_in_kernel_pre_save(void *opaque)
+{
+ RISCVIMSICState *imsic = opaque;
+ RISCVCPU *rcpu = RISCV_CPU(cpu_by_arch_id(imsic->hartid));
+ bool is_32bit = riscv_cpu_is_32bit(rcpu);
+ uint32_t inc = 2;
+ uint64_t attr;
+
+ if (is_32bit) {
+ inc = 1;
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ for (uint32_t i = 0; i < imsic->nr_eix; i += inc) {
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIE0 + i);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eie + i, false);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIP0 + i);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eip + i, false);
+ }
+ }
+
+ return 0;
+}
+
+static int riscv_imsic_in_kernel_post_load(void *opaque, int version_id)
+{
+ RISCVIMSICState *imsic = opaque;
+ RISCVCPU *rcpu = RISCV_CPU(cpu_by_arch_id(imsic->hartid));
+ bool is_32bit = riscv_cpu_is_32bit(rcpu);
+ uint32_t inc = 2;
+ uint64_t attr;
+
+ if (is_32bit) {
+ inc = 1;
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ for (uint32_t i = 0; i < imsic->nr_eix; i += inc) {
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIE0 + i);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eie + i, true);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIP0 + i);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eip + i, true);
+ }
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_riscv_imsic_in_kernel = {
+ .name = "riscv_imsic_in_kernel",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = riscv_imsic_in_kernel_state_needed,
+ .pre_save = riscv_imsic_in_kernel_pre_save,
+ .post_load = riscv_imsic_in_kernel_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_VARRAY_UINT32(eie, RISCVIMSICState,
+ nr_eix, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(eip, RISCVIMSICState,
+ nr_eix, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int riscv_imsic_pre_save(void *opaque)
+{
+ RISCVIMSICState *imsic = opaque;
+ uint64_t attr;
+
+ if (kvm_irqchip_in_kernel()) {
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIDELIVERY);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eidelivery, false);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EITHRESHOLD);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eithreshold, false);
+ }
+
+ return 0;
+}
+
+static int riscv_imsic_post_load(void *opaque, int version_id)
+{
+ RISCVIMSICState *imsic = opaque;
+ uint64_t attr;
+
+ if (kvm_irqchip_in_kernel()) {
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIDELIVERY);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eidelivery, true);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EITHRESHOLD);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eithreshold, true);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_riscv_imsic = {
+ .name = "riscv_imsic",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .pre_save = riscv_imsic_pre_save,
+ .post_load = riscv_imsic_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
+ num_pages, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
+ num_pages, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_riscv_imsic_emul,
+ &vmstate_riscv_imsic_in_kernel,
+ NULL
+ }
+};
+
static void riscv_imsic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/include/hw/intc/riscv_imsic.h b/include/hw/intc/riscv_imsic.h
index fae999731d..2206d82e0c 100644
--- a/include/hw/intc/riscv_imsic.h
+++ b/include/hw/intc/riscv_imsic.h
@@ -54,12 +54,15 @@ struct RISCVIMSICState {
uint32_t *eidelivery;
uint32_t *eithreshold;
uint32_t *eistate;
+ uint32_t *eip;
+ uint32_t *eie;
/* config */
bool mmode;
uint32_t hartid;
uint32_t num_pages;
uint32_t num_irqs;
+ uint32_t nr_eix;
};
DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index c7b838a628..a7f86f2ee0 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -20,6 +20,7 @@
#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#define BITS_TO_U32S(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint32_t))
+#define BITS_TO_U64S(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint64_t))
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c
index 89cb211472..368a2922aa 100644
--- a/migration/vmstate-types.c
+++ b/migration/vmstate-types.c
@@ -599,7 +599,6 @@ const VMStateInfo vmstate_info_tmp = {
* is an array of 'unsigned long', which may be either 32 or 64 bits.
*/
/* This is the number of 64 bit words sent over the wire */
-#define BITS_TO_U64S(nr) DIV_ROUND_UP(nr, 64)
static int get_bitmap(QEMUFile *f, void *pv, size_t size,
const VMStateField *field)
{
--
2.27.0
© 2016 - 2026 Red Hat, Inc.