From: Xuemei Liu <liu.xuemei1@zte.com.cn>
Add save and restore functions that run when kvm_irqchip_in_kernel()
returns true, to get and set the IMSIC irqchip state from the KVM kernel.
Signed-off-by: Xuemei Liu <liu.xuemei1@zte.com.cn>
---
hw/intc/riscv_imsic.c | 171 +++++++++++++++++++++++++++++++---
include/hw/intc/riscv_imsic.h | 2 +
2 files changed, 159 insertions(+), 14 deletions(-)
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
index 7c9a012033..c0b07de0fc 100644
--- a/hw/intc/riscv_imsic.c
+++ b/hw/intc/riscv_imsic.c
@@ -34,6 +34,7 @@
#include "system/system.h"
#include "system/kvm.h"
#include "migration/vmstate.h"
+#include "kvm/kvm_riscv.h"
#define IMSIC_MMIO_PAGE_LE 0x00
#define IMSIC_MMIO_PAGE_BE 0x04
@@ -363,11 +364,12 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
- imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
- imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
}
+ imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
+ imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
+
memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
imsic, TYPE_RISCV_IMSIC,
IMSIC_MMIO_SIZE(imsic->num_pages));
@@ -398,23 +400,17 @@ static const Property riscv_imsic_properties[] = {
DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
};
-static bool riscv_imsic_state_needed(void *opaque)
+static bool riscv_imsic_emul_state_needed(void *opaque)
{
return !kvm_irqchip_in_kernel();
}
-static const VMStateDescription vmstate_riscv_imsic = {
- .name = "riscv_imsic",
- .version_id = 2,
- .minimum_version_id = 2,
- .needed = riscv_imsic_state_needed,
+static const VMStateDescription vmstate_riscv_imsic_emul = {
+ .name = "riscv_imsic_emul",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = riscv_imsic_emul_state_needed,
.fields = (const VMStateField[]) {
- VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
- num_pages, 0,
- vmstate_info_uint32, uint32_t),
- VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
- num_pages, 0,
- vmstate_info_uint32, uint32_t),
VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState,
num_eistate, 0,
vmstate_info_uint32, uint32_t),
@@ -422,6 +418,153 @@ static const VMStateDescription vmstate_riscv_imsic = {
}
};
+static bool riscv_imsic_in_kernel_state_needed(void *opaque)
+{
+ return kvm_irqchip_in_kernel();
+}
+
+static int riscv_imsic_in_kernel_pre_save(void *opaque)
+{
+ RISCVIMSICState *imsic = opaque;
+ uint64_t attr;
+
+ if (kvm_irqchip_in_kernel()) {
+ for (uint32_t i = 0; i < BITS_TO_LONGS(imsic->num_irqs); i++) {
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIE0 + i * 2);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eie + i * 2 , false);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIP0 + i * 2);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eip + i * 2, false);
+#ifdef CONFIG_32BIT
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIE0 + i * 2 + 1);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eie + i * 2 + 1, false);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIP0 + i * 2 + 1);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eip + i * 2 + 1, false);
+#endif
+ }
+ }
+
+ return 0;
+}
+
+static int riscv_imsic_in_kernel_post_load(void *opaque, int version_id)
+{
+ RISCVIMSICState *imsic = opaque;
+ uint64_t attr;
+
+ if (kvm_irqchip_in_kernel()) {
+ for (uint32_t i = 0; i < BITS_TO_LONGS(imsic->num_irqs); i++) {
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIE0 + i * 2);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eie + i * 2, true);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIP0 + i * 2);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eip + i * 2, true);
+#ifdef CONFIG_32BIT
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIE0 + i * 2 + 1);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eie + i * 2 + 1, true);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIP0 + i * 2 + 1);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eip + i * 2 + 1, true);
+#endif
+ }
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_riscv_imsic_in_kernel = {
+ .name = "riscv_imsic_in_kernel",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = riscv_imsic_in_kernel_state_needed,
+ .pre_save = riscv_imsic_in_kernel_pre_save,
+ .post_load = riscv_imsic_in_kernel_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(eie, RISCVIMSICState, 64),
+ VMSTATE_UINT64_ARRAY(eip, RISCVIMSICState, 64),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int riscv_imsic_pre_save(void *opaque)
+{
+ RISCVIMSICState *imsic = opaque;
+ uint64_t attr;
+
+ if (kvm_irqchip_in_kernel()) {
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIDELIVERY);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eidelivery, false);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EITHRESHOLD);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eithreshold, false);
+ }
+
+ return 0;
+}
+
+static int riscv_imsic_post_load(void *opaque, int version_id)
+{
+ RISCVIMSICState *imsic = opaque;
+ uint64_t attr;
+
+ if (kvm_irqchip_in_kernel()) {
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EIDELIVERY);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eidelivery, true);
+
+ attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
+ ISELECT_IMSIC_EITHRESHOLD);
+ kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
+ imsic->eithreshold, true);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_riscv_imsic = {
+ .name = "riscv_imsic",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .pre_save = riscv_imsic_pre_save,
+ .post_load = riscv_imsic_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
+ num_pages, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
+ num_pages, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_riscv_imsic_emul,
+ &vmstate_riscv_imsic_in_kernel,
+ NULL
+ }
+};
+
static void riscv_imsic_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/include/hw/intc/riscv_imsic.h b/include/hw/intc/riscv_imsic.h
index fae999731d..af7a7ba4f1 100644
--- a/include/hw/intc/riscv_imsic.h
+++ b/include/hw/intc/riscv_imsic.h
@@ -54,6 +54,8 @@ struct RISCVIMSICState {
uint32_t *eidelivery;
uint32_t *eithreshold;
uint32_t *eistate;
+ uint64_t eip[64];
+ uint64_t eie[64];
/* config */
bool mmode;
--
2.27.0
Hi Xuemei,
On Fri, Feb 27, 2026 at 06:09:37PM +0800, liu.xuemei1@zte.com.cn wrote:
> From: Xuemei Liu <liu.xuemei1@zte.com.cn>
>
> Add save and store funtction if kvm_irqchip_in_kernel() return
Typo in commit message: "save and store funtction" should be "save and
restore function".
> true, it is to get and set IMSIC irqchip state from KVM kernel.
>
> Signed-off-by: Xuemei Liu <liu.xuemei1@zte.com.cn>
> ---
> hw/intc/riscv_imsic.c | 171 +++++++++++++++++++++++++++++++---
> include/hw/intc/riscv_imsic.h | 2 +
> 2 files changed, 159 insertions(+), 14 deletions(-)
>
> diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
> index 7c9a012033..c0b07de0fc 100644
> --- a/hw/intc/riscv_imsic.c
> +++ b/hw/intc/riscv_imsic.c
> @@ -34,6 +34,7 @@
> #include "system/system.h"
> #include "system/kvm.h"
> #include "migration/vmstate.h"
> +#include "kvm/kvm_riscv.h"
>
> #define IMSIC_MMIO_PAGE_LE 0x00
> #define IMSIC_MMIO_PAGE_BE 0x04
> @@ -363,11 +364,12 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
> qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
>
> imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
> - imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
> - imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
> imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
> }
>
> + imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
> + imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
> +
> memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
> imsic, TYPE_RISCV_IMSIC,
> IMSIC_MMIO_SIZE(imsic->num_pages));
> @@ -398,23 +400,17 @@ static const Property riscv_imsic_properties[] = {
> DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
> };
>
> -static bool riscv_imsic_state_needed(void *opaque)
> +static bool riscv_imsic_emul_state_needed(void *opaque)
> {
> return !kvm_irqchip_in_kernel();
> }
>
> -static const VMStateDescription vmstate_riscv_imsic = {
> - .name = "riscv_imsic",
> - .version_id = 2,
> - .minimum_version_id = 2,
> - .needed = riscv_imsic_state_needed,
> +static const VMStateDescription vmstate_riscv_imsic_emul = {
> + .name = "riscv_imsic_emul",
> + .version_id = 1,
> + .minimum_version_id = 1,
> + .needed = riscv_imsic_emul_state_needed,
> .fields = (const VMStateField[]) {
> - VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
> - num_pages, 0,
> - vmstate_info_uint32, uint32_t),
> - VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
> - num_pages, 0,
> - vmstate_info_uint32, uint32_t),
> VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState,
> num_eistate, 0,
> vmstate_info_uint32, uint32_t),
> @@ -422,6 +418,153 @@ static const VMStateDescription vmstate_riscv_imsic = {
> }
> };
>
> +static bool riscv_imsic_in_kernel_state_needed(void *opaque)
> +{
> + return kvm_irqchip_in_kernel();
> +}
> +
> +static int riscv_imsic_in_kernel_pre_save(void *opaque)
> +{
> + RISCVIMSICState *imsic = opaque;
> + uint64_t attr;
> +
> + if (kvm_irqchip_in_kernel()) {
> + for (uint32_t i = 0; i < BITS_TO_LONGS(imsic->num_irqs); i++) {
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIE0 + i * 2);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eie + i * 2 , false);
> +
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIP0 + i * 2);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eip + i * 2, false);
> +#ifdef CONFIG_32BIT
why use CONFIG_32BIT?
CONFIG_32BIT is a Linux kernel macro -- it is never defined in QEMU.
This means on RV32 builds the odd-indexed EIE/EIP iselect registers
(the upper 32 bits) are silently never saved or restored, causing data
loss during migration.
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIE0 + i * 2 + 1);
Line exceeds 80 columns. Please fix.
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eie + i * 2 + 1, false);
> +
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIP0 + i * 2 + 1);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eip + i * 2 + 1, false);
> +#endif
> + }
> + }
> +
> + return 0;
> +}
> +
> +static int riscv_imsic_in_kernel_post_load(void *opaque, int version_id)
> +{
> + RISCVIMSICState *imsic = opaque;
> + uint64_t attr;
> +
> + if (kvm_irqchip_in_kernel()) {
> + for (uint32_t i = 0; i < BITS_TO_LONGS(imsic->num_irqs); i++) {
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIE0 + i * 2);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eie + i * 2, true);
> +
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIP0 + i * 2);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eip + i * 2, true);
> +#ifdef CONFIG_32BIT
Ditto above.
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIE0 + i * 2 + 1);
Line exceeds 80 columns too. Please fix.
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eie + i * 2 + 1, true);
> +
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIP0 + i * 2 + 1);
Ditto above.
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eip + i * 2 + 1, true);
> +#endif
> + }
> + }
> +
> + return 0;
> +}
> +
> +static const VMStateDescription vmstate_riscv_imsic_in_kernel = {
> + .name = "riscv_imsic_in_kernel",
> + .version_id = 1,
> + .minimum_version_id = 1,
> + .needed = riscv_imsic_in_kernel_state_needed,
> + .pre_save = riscv_imsic_in_kernel_pre_save,
> + .post_load = riscv_imsic_in_kernel_post_load,
> + .fields = (const VMStateField[]) {
> + VMSTATE_UINT64_ARRAY(eie, RISCVIMSICState, 64),
> + VMSTATE_UINT64_ARRAY(eip, RISCVIMSICState, 64),
> + VMSTATE_END_OF_LIST()
> + }
> +};
> +
> +static int riscv_imsic_pre_save(void *opaque)
> +{
> + RISCVIMSICState *imsic = opaque;
> + uint64_t attr;
> +
> + if (kvm_irqchip_in_kernel()) {
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIDELIVERY);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eidelivery, false);
> +
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EITHRESHOLD);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eithreshold, false);
> + }
> +
> + return 0;
> +}
> +
> +static int riscv_imsic_post_load(void *opaque, int version_id)
> +{
> + RISCVIMSICState *imsic = opaque;
> + uint64_t attr;
> +
> + if (kvm_irqchip_in_kernel()) {
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EIDELIVERY);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eidelivery, true);
> +
> + attr = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(imsic->hartid,
> + ISELECT_IMSIC_EITHRESHOLD);
> + kvm_riscv_aia_access_reg(KVM_DEV_RISCV_AIA_GRP_IMSIC, attr,
> + imsic->eithreshold, true);
> + }
> +
> + return 0;
> +}
> +
> +static const VMStateDescription vmstate_riscv_imsic = {
> + .name = "riscv_imsic",
> + .version_id = 3,
> + .minimum_version_id = 3,
> + .pre_save = riscv_imsic_pre_save,
> + .post_load = riscv_imsic_post_load,
> + .fields = (const VMStateField[]) {
> + VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
> + num_pages, 0,
> + vmstate_info_uint32, uint32_t),
> + VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
> + num_pages, 0,
> + vmstate_info_uint32, uint32_t),
> + VMSTATE_END_OF_LIST()
> + },
> + .subsections = (const VMStateDescription * const []) {
> + &vmstate_riscv_imsic_emul,
> + &vmstate_riscv_imsic_in_kernel,
> + NULL
> + }
> +};
> +
> static void riscv_imsic_class_init(ObjectClass *klass, const void *data)
> {
> DeviceClass *dc = DEVICE_CLASS(klass);
> diff --git a/include/hw/intc/riscv_imsic.h b/include/hw/intc/riscv_imsic.h
> index fae999731d..af7a7ba4f1 100644
> --- a/include/hw/intc/riscv_imsic.h
> +++ b/include/hw/intc/riscv_imsic.h
> @@ -54,6 +54,8 @@ struct RISCVIMSICState {
> uint32_t *eidelivery;
> uint32_t *eithreshold;
> uint32_t *eistate;
> + uint64_t eip[64];
> + uint64_t eie[64];
why hardcoded 64-element arrays?
Consider using dynamically allocated arrays with VMSTATE_VARRAY, or at
minimum add a comment explaining the sizing rationale.
Others look good to me.
Reviewed-by: Chao Liu <chao.liu.zevorn@gmail.com>
Thanks,
Chao
> /* config */
> bool mmode;
> --
> 2.27.0
>
© 2016 - 2026 Red Hat, Inc.