An SPI, PPI or SGI interrupt can have a non-maskable property, so maintain
the non-maskable state in PendingIrq and in the GICR/GICD registers. Since
this adds new device state, it also needs to be migrated, so save the NMI
information in vmstate_gicv3_cpu and vmstate_gicv3.
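
For context, the migration change relies on QEMU's optional-subsection
mechanism: a subsection's fields are only put on the wire when its .needed
callback returns true, so machines without nmi_support keep an unchanged
migration stream. Below is only an illustrative sketch of that pattern,
using a hypothetical ExampleState device rather than the GIC code in this
patch:

    #include "qemu/osdep.h"
    #include "migration/vmstate.h"

    typedef struct ExampleState {
        bool feature_enabled;   /* hypothetical feature flag */
        uint32_t base_reg;      /* always-migrated register */
        uint32_t feature_reg;   /* register only meaningful with the feature */
    } ExampleState;

    static bool example_feature_needed(void *opaque)
    {
        ExampleState *s = opaque;

        /* The subsection is transmitted only when this returns true */
        return s->feature_enabled;
    }

    static const VMStateDescription vmstate_example_feature = {
        .name = "example/feature",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = example_feature_needed,
        .fields = (const VMStateField[]) {
            VMSTATE_UINT32(feature_reg, ExampleState),
            VMSTATE_END_OF_LIST()
        }
    };

    static const VMStateDescription vmstate_example = {
        .name = "example",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (const VMStateField[]) {
            VMSTATE_UINT32(base_reg, ExampleState),
            VMSTATE_END_OF_LIST()
        },
        .subsections = (const VMStateDescription * const []) {
            &vmstate_example_feature,
            NULL
        }
    };

Gating the new state behind a subsection instead of bumping version_id keeps
migration to older destination QEMUs working whenever the feature is disabled,
since the stream is then identical to what was sent before this patch.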
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Acked-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
---
v12:
- nmi_needed -> gicv3_cpu_nmi_needed.
- needed_nmi -> gicv3_nmi_needed.
- Add Reviewed-by.
v11:
- Put vmstate_gicv3_cpu_nmi and vmstate_gicv3_gicd_nmi into the existing subsection lists.
- Remove the excess != 0.
v10:
- superprio -> nmi, gicr_isuperprio -> gicr_inmir0.
- Save NMI state in vmstate_gicv3_cpu and vmstate_gicv3.
- Update the commit message.
v3:
- Place this patch before the GICR_INMIR implementation.
- Add Acked-by.
---
hw/intc/arm_gicv3_common.c | 38 ++++++++++++++++++++++++++++++
include/hw/intc/arm_gicv3_common.h | 4 ++++
2 files changed, 42 insertions(+)
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 2d2cea6858..9810558b07 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -164,6 +164,24 @@ const VMStateDescription vmstate_gicv3_gicv4 = {
}
};
+static bool gicv3_cpu_nmi_needed(void *opaque)
+{
+ GICv3CPUState *cs = opaque;
+
+ return cs->gic->nmi_support;
+}
+
+static const VMStateDescription vmstate_gicv3_cpu_nmi = {
+ .name = "arm_gicv3_cpu/nmi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = gicv3_cpu_nmi_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(gicr_inmir0, GICv3CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_gicv3_cpu = {
.name = "arm_gicv3_cpu",
.version_id = 1,
@@ -196,6 +214,7 @@ static const VMStateDescription vmstate_gicv3_cpu = {
&vmstate_gicv3_cpu_virt,
&vmstate_gicv3_cpu_sre_el1,
&vmstate_gicv3_gicv4,
+ &vmstate_gicv3_cpu_nmi,
NULL
}
};
@@ -238,6 +257,24 @@ const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
}
};
+static bool gicv3_nmi_needed(void *opaque)
+{
+ GICv3State *cs = opaque;
+
+ return cs->nmi_support;
+}
+
+const VMStateDescription vmstate_gicv3_gicd_nmi = {
+ .name = "arm_gicv3/gicd_nmi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = gicv3_nmi_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(nmi, GICv3State, GICV3_BMP_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_gicv3 = {
.name = "arm_gicv3",
.version_id = 1,
@@ -266,6 +303,7 @@ static const VMStateDescription vmstate_gicv3 = {
},
.subsections = (const VMStateDescription * const []) {
&vmstate_gicv3_gicd_no_migration_shift_bug,
+ &vmstate_gicv3_gicd_nmi,
NULL
}
};
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index 4358c5319c..88533749eb 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -146,6 +146,7 @@ typedef struct {
int irq;
uint8_t prio;
int grp;
+ bool nmi;
} PendingIrq;
struct GICv3CPUState {
@@ -172,6 +173,7 @@ struct GICv3CPUState {
uint32_t gicr_ienabler0;
uint32_t gicr_ipendr0;
uint32_t gicr_iactiver0;
+ uint32_t gicr_inmir0;
uint32_t edge_trigger; /* ICFGR0 and ICFGR1 even bits */
uint32_t gicr_igrpmodr0;
uint32_t gicr_nsacr;
@@ -275,6 +277,7 @@ struct GICv3State {
GIC_DECLARE_BITMAP(active); /* GICD_ISACTIVER */
GIC_DECLARE_BITMAP(level); /* Current level */
GIC_DECLARE_BITMAP(edge_trigger); /* GICD_ICFGR even bits */
+ GIC_DECLARE_BITMAP(nmi); /* GICD_INMIR */
uint8_t gicd_ipriority[GICV3_MAXIRQ];
uint64_t gicd_irouter[GICV3_MAXIRQ];
/* Cached information: pointer to the cpu i/f for the CPUs specified
@@ -314,6 +317,7 @@ GICV3_BITMAP_ACCESSORS(pending)
GICV3_BITMAP_ACCESSORS(active)
GICV3_BITMAP_ACCESSORS(level)
GICV3_BITMAP_ACCESSORS(edge_trigger)
+GICV3_BITMAP_ACCESSORS(nmi)
#define TYPE_ARM_GICV3_COMMON "arm-gicv3-common"
typedef struct ARMGICv3CommonClass ARMGICv3CommonClass;
--
2.34.1