The nesting level of the inner loop of define_one_arm_cp_reg was
overly deep.  Split that code out into two functions, one for the
AArch32 path and one for the AArch64 path, and simplify the innermost
loop to a switch statement over r->state.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
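For review: a quick sketch of the resulting call pattern for a register
declared with .state = ARM_CP_STATE_BOTH and .secure = ARM_CP_SECSTATE_BOTH
(the entry names in the comments are illustrative placeholders, not literal
code from this patch):

    switch (r->state) {
    case ARM_CP_STATE_BOTH:
        /* AArch32 view: banked, so both a "<name>_S" and a "<name>"
         * (non-secure) hashtable entry are inserted. */
        add_cpreg_to_hashtable_aa32(cpu, r, crm, opc1, opc2);
        /* AArch64 view: always registered as the non-secure instance,
         * plus a "<name>NXS" TLBI variant (crn + 1) when
         * ARM_CP_ADD_TLBI_NXS is set and FEAT_XS is implemented. */
        add_cpreg_to_hashtable_aa64(cpu, r, crm, opc1, opc2);
        break;
    ...
    }
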
target/arm/helper.c | 147 +++++++++++++++++++++++---------------------
1 file changed, 76 insertions(+), 71 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 8b63eacb91..ec78c8f08f 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -7612,6 +7612,66 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
}
+static void add_cpreg_to_hashtable_aa32(ARMCPU *cpu, const ARMCPRegInfo *r,
+ int crm, int opc1, int opc2)
+{
+ /*
+ * Under AArch32 CP registers can be common
+ * (same for secure and non-secure world) or banked.
+ */
+ char *name;
+
+ assert(!(r->type & ARM_CP_ADD_TLBI_NXS)); /* aa64 only */
+
+ switch (r->secure) {
+ case ARM_CP_SECSTATE_S:
+ case ARM_CP_SECSTATE_NS:
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32,
+ r->secure, crm, opc1, opc2, r->name);
+ break;
+ case ARM_CP_SECSTATE_BOTH:
+ name = g_strdup_printf("%s_S", r->name);
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32,
+ ARM_CP_SECSTATE_S, crm, opc1, opc2, name);
+ g_free(name);
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32,
+ ARM_CP_SECSTATE_NS, crm, opc1, opc2, r->name);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void add_cpreg_to_hashtable_aa64(ARMCPU *cpu, const ARMCPRegInfo *r,
+ int crm, int opc1, int opc2)
+{
+ if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
+ cpu_isar_feature(aa64_xs, cpu)) {
+ /*
+ * This is a TLBI insn which has an NXS variant. The
+ * NXS variant is at the same encoding except that
+ * crn is +1, and has the same behaviour except for
+ * fine-grained trapping. Add the NXS insn here and
+ * then fall through to add the normal register.
+ * add_cpreg_to_hashtable() copies the cpreg struct
+ * and name that it is passed, so it's OK to use
+ * a local struct here.
+ */
+ ARMCPRegInfo nxs_ri = *r;
+ g_autofree char *name = g_strdup_printf("%sNXS", r->name);
+
+ assert(nxs_ri.crn < 0xf);
+ nxs_ri.crn++;
+ if (nxs_ri.fgt) {
+ nxs_ri.fgt |= R_FGT_NXS_MASK;
+ }
+ add_cpreg_to_hashtable(cpu, &nxs_ri, ARM_CP_STATE_AA64,
+ ARM_CP_SECSTATE_NS, crm, opc1, opc2, name);
+ }
+
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA64, ARM_CP_SECSTATE_NS,
+ crm, opc1, opc2, r->name);
+}
void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *r)
{
@@ -7639,14 +7699,12 @@ void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *r)
* bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
* the register, if any.
*/
- int crm, opc1, opc2;
int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
- CPState state;
/* 64 bit registers have only CRm and Opc1 fields */
assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
@@ -7743,75 +7801,22 @@ void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *r)
}
}
- for (crm = crmmin; crm <= crmmax; crm++) {
- for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
- for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
- for (state = ARM_CP_STATE_AA32;
- state <= ARM_CP_STATE_AA64; state++) {
- if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
- continue;
- }
- if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
- cpu_isar_feature(aa64_xs, cpu)) {
- /*
- * This is a TLBI insn which has an NXS variant. The
- * NXS variant is at the same encoding except that
- * crn is +1, and has the same behaviour except for
- * fine-grained trapping. Add the NXS insn here and
- * then fall through to add the normal register.
- * add_cpreg_to_hashtable() copies the cpreg struct
- * and name that it is passed, so it's OK to use
- * a local struct here.
- */
- ARMCPRegInfo nxs_ri = *r;
- g_autofree char *name = g_strdup_printf("%sNXS", r->name);
-
- assert(state == ARM_CP_STATE_AA64);
- assert(nxs_ri.crn < 0xf);
- nxs_ri.crn++;
- if (nxs_ri.fgt) {
- nxs_ri.fgt |= R_FGT_NXS_MASK;
- }
- add_cpreg_to_hashtable(cpu, &nxs_ri, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, name);
- }
- if (state == ARM_CP_STATE_AA32) {
- /*
- * Under AArch32 CP registers can be common
- * (same for secure and non-secure world) or banked.
- */
- char *name;
-
- switch (r->secure) {
- case ARM_CP_SECSTATE_S:
- case ARM_CP_SECSTATE_NS:
- add_cpreg_to_hashtable(cpu, r, state,
- r->secure, crm, opc1, opc2,
- r->name);
- break;
- case ARM_CP_SECSTATE_BOTH:
- name = g_strdup_printf("%s_S", r->name);
- add_cpreg_to_hashtable(cpu, r, state,
- ARM_CP_SECSTATE_S,
- crm, opc1, opc2, name);
- g_free(name);
- add_cpreg_to_hashtable(cpu, r, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, r->name);
- break;
- default:
- g_assert_not_reached();
- }
- } else {
- /*
- * AArch64 registers get mapped to non-secure instance
- * of AArch32
- */
- add_cpreg_to_hashtable(cpu, r, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, r->name);
- }
+ for (int crm = crmmin; crm <= crmmax; crm++) {
+ for (int opc1 = opc1min; opc1 <= opc1max; opc1++) {
+ for (int opc2 = opc2min; opc2 <= opc2max; opc2++) {
+ switch (r->state) {
+ case ARM_CP_STATE_AA32:
+ add_cpreg_to_hashtable_aa32(cpu, r, crm, opc1, opc2);
+ break;
+ case ARM_CP_STATE_AA64:
+ add_cpreg_to_hashtable_aa64(cpu, r, crm, opc1, opc2);
+ break;
+ case ARM_CP_STATE_BOTH:
+ add_cpreg_to_hashtable_aa32(cpu, r, crm, opc1, opc2);
+ add_cpreg_to_hashtable_aa64(cpu, r, crm, opc1, opc2);
+ break;
+ default:
+ g_assert_not_reached();
}
}
}
--
2.43.0