This only implements the external delivery method via the GICv3.
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
v3:
- Do not include CPU_INTERRUPT_NMI when FEAT_NMI is not enabled.
- Add ARM_CPU_VNMI.
- Refactor the NMI mask in arm_excp_unmasked().
- Test SCTLR_ELx.NMI when applying the ALLINT mask for NMI.
---
target/arm/cpu-qom.h | 4 +++-
target/arm/cpu.c | 54 ++++++++++++++++++++++++++++++++++++--------
target/arm/cpu.h | 4 ++++
target/arm/helper.c | 2 ++
4 files changed, 54 insertions(+), 10 deletions(-)
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
index 8e032691db..e0c9e18036 100644
--- a/target/arm/cpu-qom.h
+++ b/target/arm/cpu-qom.h
@@ -36,11 +36,13 @@ DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU,
#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
-/* Meanings of the ARMCPU object's four inbound GPIO lines */
+/* Meanings of the ARMCPU object's six inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3
+#define ARM_CPU_NMI 4
+#define ARM_CPU_VNMI 5
/* For M profile, some registers are banked secure vs non-secure;
* these are represented as a 2-element array where the first element
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 5fa86bc8d5..d40ada9c75 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -126,11 +126,20 @@ static bool arm_cpu_has_work(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
- return (cpu->power_state != PSCI_OFF)
- && cs->interrupt_request &
- (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
- | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
- | CPU_INTERRUPT_EXITTB);
+ if (cpu_isar_feature(aa64_nmi, cpu)) {
+ return (cpu->power_state != PSCI_OFF)
+ && cs->interrupt_request &
+ (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VNMI
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
+ | CPU_INTERRUPT_EXITTB);
+ } else {
+ return (cpu->power_state != PSCI_OFF)
+ && cs->interrupt_request &
+ (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
+ | CPU_INTERRUPT_EXITTB);
+ }
}
static int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
@@ -668,6 +677,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
CPUARMState *env = cpu_env(cs);
bool pstate_unmasked;
bool unmasked = false;
+ bool nmi_unmasked = true;
/*
* Don't take exceptions if they target a lower EL.
@@ -678,13 +688,26 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return false;
}
+ if (cpu_isar_feature(aa64_nmi, env_archcpu(env))) {
+ nmi_unmasked = (cur_el == target_el) &&
+ (((env->cp15.sctlr_el[target_el] & SCTLR_NMI) &&
+ (env->allint & PSTATE_ALLINT)) ||
+ ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
+ (env->pstate & PSTATE_SP)));
+ nmi_unmasked = !nmi_unmasked;
+ }
+
switch (excp_idx) {
+ case EXCP_NMI:
+ pstate_unmasked = nmi_unmasked;
+ break;
+
case EXCP_FIQ:
- pstate_unmasked = !(env->daif & PSTATE_F);
+ pstate_unmasked = (!(env->daif & PSTATE_F)) & nmi_unmasked;
break;
case EXCP_IRQ:
- pstate_unmasked = !(env->daif & PSTATE_I);
+ pstate_unmasked = (!(env->daif & PSTATE_I)) & nmi_unmasked;
break;
case EXCP_VFIQ:
@@ -804,6 +827,16 @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
/* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
+ if (cpu_isar_feature(aa64_nmi, env_archcpu(env))) {
+ if (interrupt_request & CPU_INTERRUPT_NMI) {
+ excp_idx = EXCP_NMI;
+ target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
+ cur_el, secure, hcr_el2)) {
+ goto found;
+ }
+ }
+ }
if (interrupt_request & CPU_INTERRUPT_FIQ) {
excp_idx = EXCP_FIQ;
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
@@ -929,7 +962,9 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
[ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
[ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
[ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
- [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+ [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
+ [ARM_CPU_NMI] = CPU_INTERRUPT_NMI,
+ [ARM_CPU_VNMI] = CPU_INTERRUPT_VNMI
};
if (!arm_feature(env, ARM_FEATURE_EL2) &&
@@ -957,6 +992,7 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
break;
case ARM_CPU_IRQ:
case ARM_CPU_FIQ:
+ case ARM_CPU_NMI:
if (level) {
cpu_interrupt(cs, mask[irq]);
} else {
@@ -1355,7 +1391,7 @@ static void arm_cpu_initfn(Object *obj)
*/
qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
} else {
- qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 6);
}
qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index f9646dbbfb..b23be7fc24 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -60,6 +60,8 @@
#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
#define EXCP_VSERR 24
#define EXCP_GPC 25 /* v9 Granule Protection Check Fault */
+#define EXCP_NMI 26
+#define EXCP_VNMI 27
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
@@ -79,6 +81,8 @@
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_4
+#define CPU_INTERRUPT_VNMI CPU_INTERRUPT_TGT_EXT_0
/* The usual mapping for an AArch64 system register to its AArch32
* counterpart is for the 32 bit world to have access to the lower
diff --git a/target/arm/helper.c b/target/arm/helper.c
index b01f23b939..376e0d91ff 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -10629,6 +10629,8 @@ void arm_log_exception(CPUState *cs)
[EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
[EXCP_VSERR] = "Virtual SERR",
[EXCP_GPC] = "Granule Protection Check",
+ [EXCP_NMI] = "NMI",
+ [EXCP_VNMI] = "virtual NMI"
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
--
2.34.1
On 2/23/24 00:32, Jinjie Ruan wrote:
> This only implements the external delivery method via the GICv3.
>
> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
> ---
> v3:
> - Not include CPU_INTERRUPT_NMI when FEAT_NMI not enabled
> - Add ARM_CPU_VNMI.
> - Refator nmi mask in arm_excp_unmasked().
> - Test SCTLR_ELx.NMI for ALLINT mask for NMI.
> ---
> target/arm/cpu-qom.h | 4 +++-
> target/arm/cpu.c | 54 ++++++++++++++++++++++++++++++++++++--------
> target/arm/cpu.h | 4 ++++
> target/arm/helper.c | 2 ++
> 4 files changed, 54 insertions(+), 10 deletions(-)
>
> diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
> index 8e032691db..e0c9e18036 100644
> --- a/target/arm/cpu-qom.h
> +++ b/target/arm/cpu-qom.h
> @@ -36,11 +36,13 @@ DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU,
> #define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
> #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
>
> -/* Meanings of the ARMCPU object's four inbound GPIO lines */
> +/* Meanings of the ARMCPU object's six inbound GPIO lines */
> #define ARM_CPU_IRQ 0
> #define ARM_CPU_FIQ 1
> #define ARM_CPU_VIRQ 2
> #define ARM_CPU_VFIQ 3
> +#define ARM_CPU_NMI 4
> +#define ARM_CPU_VNMI 5
>
> /* For M profile, some registers are banked secure vs non-secure;
> * these are represented as a 2-element array where the first element
> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
> index 5fa86bc8d5..d40ada9c75 100644
> --- a/target/arm/cpu.c
> +++ b/target/arm/cpu.c
> @@ -126,11 +126,20 @@ static bool arm_cpu_has_work(CPUState *cs)
> {
> ARMCPU *cpu = ARM_CPU(cs);
>
> - return (cpu->power_state != PSCI_OFF)
> - && cs->interrupt_request &
> - (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
> - | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
> - | CPU_INTERRUPT_EXITTB);
> + if (cpu_isar_feature(aa64_nmi, cpu)) {
> + return (cpu->power_state != PSCI_OFF)
> + && cs->interrupt_request &
> + (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
> + | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VNMI
> + | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
> + | CPU_INTERRUPT_EXITTB);
> + } else {
> + return (cpu->power_state != PSCI_OFF)
> + && cs->interrupt_request &
> + (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
> + | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
> + | CPU_INTERRUPT_EXITTB);
> + }
This can be factored better, to avoid repeating everything.
However, I am reconsidering my previous advice to ignore NMI if FEAT_NMI is not present.
Consider R_MHWBP: with SCTLR_ELx.NMI == 0, an IRQ with Superpriority is masked
identically to an IRQ without Superpriority. Moreover, if the GIC is configured so that
FEAT_GICv3_NMI is only set when FEAT_NMI is set, then we won't ever see CPU_INTERRUPT_*NMI
anyway.
So we might as well accept NMI here unconditionally. But document this choice here with a
comment.
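Concretely, a minimal sketch of the unconditional form (untested, assuming the
CPU_INTERRUPT_NMI/VNMI definitions from this patch):

    static bool arm_cpu_has_work(CPUState *cs)
    {
        ARMCPU *cpu = ARM_CPU(cs);

        /*
         * Accept NMI/VNMI unconditionally: per R_MHWBP, an interrupt with
         * Superpriority is masked identically to an ordinary interrupt when
         * SCTLR_ELx.NMI == 0, and a GIC that only sets FEAT_GICv3_NMI when
         * FEAT_NMI is set will never raise these lines otherwise.
         */
        return (cpu->power_state != PSCI_OFF)
            && cs->interrupt_request &
            (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
             | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VNMI
             | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
             | CPU_INTERRUPT_EXITTB);
    }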
> @@ -678,13 +688,26 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
> return false;
> }
>
> + if (cpu_isar_feature(aa64_nmi, env_archcpu(env))) {
> + nmi_unmasked = (cur_el == target_el) &&
> + (((env->cp15.sctlr_el[target_el] & SCTLR_NMI) &&
> + (env->allint & PSTATE_ALLINT)) ||
> + ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
> + (env->pstate & PSTATE_SP)));
In the manual, this is "allintmask". It is easier to follow the logic if you use this...
> + nmi_unmasked = !nmi_unmasked;
... and not the inverse.
> case EXCP_FIQ:
> - pstate_unmasked = !(env->daif & PSTATE_F);
> + pstate_unmasked = (!(env->daif & PSTATE_F)) & nmi_unmasked;
Clearer with "&&".
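Taken together, a sketch of the positive-sense form, naming the flag allintmask
as in the manual and using && (this keeps the v3 hunk's structure otherwise and
is untested):

    bool allintmask = false;

    if (cpu_isar_feature(aa64_nmi, env_archcpu(env))) {
        /* Masked when targeting the current EL and ALLINT or SPINTMASK applies. */
        allintmask = (cur_el == target_el) &&
            (((env->cp15.sctlr_el[target_el] & SCTLR_NMI) &&
              (env->allint & PSTATE_ALLINT)) ||
             ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
              (env->pstate & PSTATE_SP)));
    }

    switch (excp_idx) {
    case EXCP_NMI:
        pstate_unmasked = !allintmask;
        break;
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F) && !allintmask;
        break;
    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I) && !allintmask;
        break;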
> + if (cpu_isar_feature(aa64_nmi, env_archcpu(env))) {
> + if (interrupt_request & CPU_INTERRUPT_NMI) {
> + excp_idx = EXCP_NMI;
> + target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
> + if (arm_excp_unmasked(cs, excp_idx, target_el,
> + cur_el, secure, hcr_el2)) {
> + goto found;
> + }
> + }
> + }
Handling for vNMI?
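An analogous branch modeled on the existing CPU_INTERRUPT_VIRQ case might look
like the sketch below; whether vNMI needs its own masking rules in
arm_excp_unmasked() is a separate question:

    if (interrupt_request & CPU_INTERRUPT_VNMI) {
        excp_idx = EXCP_VNMI;
        target_el = 1;  /* virtual interrupts target EL1, as for VIRQ/VFIQ */
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }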
> @@ -957,6 +992,7 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
> break;
> case ARM_CPU_IRQ:
> case ARM_CPU_FIQ:
> + case ARM_CPU_NMI:
> if (level) {
> cpu_interrupt(cs, mask[irq]);
> } else {
Likewise.
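And for the GPIO handler, vNMI presumably wants to follow the VIRQ/VFIQ pattern
rather than the plain cpu_interrupt() path — something like the sketch below,
where arm_cpu_update_vnmi() is a hypothetical helper mirroring
arm_cpu_update_virq():

    case ARM_CPU_VNMI:
        assert(arm_feature(env, ARM_FEATURE_EL2));
        /* arm_cpu_update_vnmi() is hypothetical, mirroring arm_cpu_update_virq() */
        arm_cpu_update_vnmi(cpu);
        break;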
r~
On 2024/2/24 3:55, Richard Henderson wrote:
> On 2/23/24 00:32, Jinjie Ruan wrote:
[...]
>> @@ -678,13 +688,26 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
>> return false;
>> }
>> + if (cpu_isar_feature(aa64_nmi, env_archcpu(env))) {
>> + nmi_unmasked = (cur_el == target_el) &&
>> + (((env->cp15.sctlr_el[target_el] & SCTLR_NMI) &&
>> + (env->allint & PSTATE_ALLINT)) ||
>> + ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
>> + (env->pstate & PSTATE_SP)));
>
> In the manual, this is "allintmask". It is easier to follow the logic
> if you use this...
The manual also requires that it be at the same EL:
An interrupt is controlled by PSTATE.ALLINT when all of the following apply:
• SCTLR_ELx.NMI is 1.
• The interrupt is targeted at ELx.
• Execution is at ELx.
An interrupt is controlled by PSTATE.SP when all of the following apply:
• SCTLR_ELx.NMI is 1.
• SCTLR_ELx.SPINTMASK is 1.
• The interrupt is targeted at ELx.
• Execution is at ELx.
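Given those bullets, one option is to hoist the conditions common to both cases
(SCTLR_ELx.NMI == 1, interrupt targeted at ELx, execution at ELx) into a single
guard, which would also add the SCTLR_ELx.NMI check that the PSTATE.SP leg of
the v3 hunk is missing — a sketch:

    bool allintmask = false;

    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
        (env->cp15.sctlr_el[target_el] & SCTLR_NMI) &&
        cur_el == target_el) {
        allintmask = (env->allint & PSTATE_ALLINT) ||
                     ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
                      (env->pstate & PSTATE_SP));
    }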