Arm patches for rc3: just a handful of bug fixes.

thanks
-- PMM

The following changes since commit 4ecc984210ca1bf508a96a550ec8a93a5f833f6c:

  Merge remote-tracking branch 'remotes/palmer/tags/riscv-for-master-4.2-rc3' into staging (2019-11-26 12:36:40 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20191126

for you to fetch changes up to 6a4ef4e5d1084ce41fafa7d470a644b0fd3d9317:

  target/arm: Honor HCR_EL2.TID3 trapping requirements (2019-11-26 13:55:37 +0000)

----------------------------------------------------------------
target-arm queue:
 * handle FTYPE flag correctly in v7M exception return
   for v7M CPUs with an FPU (v8M CPUs were already correct)
 * versal: Add the CRP as unimplemented
 * Fix ISR_EL1 tracking when executing at EL2
 * Honor HCR_EL2.TID3 trapping requirements

----------------------------------------------------------------
Edgar E. Iglesias (1):
      hw/arm: versal: Add the CRP as unimplemented

Jean-Hugues Deschênes (1):
      target/arm: Fix handling of cortex-m FTYPE flag in EXCRET

Marc Zyngier (2):
      target/arm: Fix ISR_EL1 tracking when executing at EL2
      target/arm: Honor HCR_EL2.TID3 trapping requirements

 include/hw/arm/xlnx-versal.h |  3 ++
 hw/arm/xlnx-versal.c         |  2 ++
 target/arm/helper.c          | 83 ++++++++++++++++++++++++++++++++++++++++++--
 target/arm/m_helper.c        |  7 ++--
 4 files changed, 89 insertions(+), 6 deletions(-)

Just one patch for rc2, a revert.

-- PMM

The following changes since commit 49aaac3548bc5a4632a14de939d5312b28dc1ba2:

  Merge tag 'linux-user-for-6.2-pull-request' of git://github.com/vivier/qemu into staging (2021-11-22 10:33:13 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20211122

for you to fetch changes up to 4825eaae4fdd56fba0febdfbdd7bf9684ae3ee0d:

  Revert "arm: tcg: Adhere to SMCCC 1.3 section 5.2" (2021-11-22 13:41:48 +0000)

----------------------------------------------------------------
target-arm queue:
 * revert SMCCC/PSCI change, as it regresses some usecases for some boards

----------------------------------------------------------------
Peter Maydell (1):
      Revert "arm: tcg: Adhere to SMCCC 1.3 section 5.2"

 target/arm/psci.c | 35 +++++++++++++++++++++++++++++------
 1 file changed, 29 insertions(+), 6 deletions(-)

From: Jean-Hugues Deschênes <Jean-Hugues.Deschenes@ossiaco.com>

According to the PushStack() pseudocode in the armv7m RM,
bit 4 of the LR should be set to NOT(CONTROL.FPCA) when
an FPU is present. The current implementation does this for
armv8, but not for armv7. This patch makes the existing
logic applicable to both code paths.

Signed-off-by: Jean-Hugues Deschenes <jean-hugues.deschenes@ossiaco.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/m_helper.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
         if (env->v7m.secure) {
             lr |= R_V7M_EXCRET_S_MASK;
         }
-        if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
-            lr |= R_V7M_EXCRET_FTYPE_MASK;
-        }
     } else {
         lr = R_V7M_EXCRET_RES1_MASK |
             R_V7M_EXCRET_S_MASK |
             R_V7M_EXCRET_DCRS_MASK |
-            R_V7M_EXCRET_FTYPE_MASK |
             R_V7M_EXCRET_ES_MASK;
         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
             lr |= R_V7M_EXCRET_SPSEL_MASK;
         }
     }
+    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
+        lr |= R_V7M_EXCRET_FTYPE_MASK;
+    }
     if (!arm_v7m_is_handler_mode(env)) {
         lr |= R_V7M_EXCRET_MODE_MASK;
     }
--
2.20.1
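
For reference, a minimal standalone sketch of the rule this patch implements
(not QEMU code; the mask names below are illustrative stand-ins for
R_V7M_EXCRET_FTYPE_MASK and R_V7M_CONTROL_FPCA_MASK): on exception entry,
EXC_RETURN bit 4 (FTYPE) is set to NOT(CONTROL.FPCA), for both v7M and v8M.

#include <stdint.h>

/* Illustrative only: bit positions follow the M-profile architecture,
 * but this helper is not part of QEMU. */
#define EXCRET_FTYPE (1u << 4)  /* 1: stacked frame carries no FP state */
#define CONTROL_FPCA (1u << 2)  /* 1: floating-point context is active  */

uint32_t excret_set_ftype(uint32_t lr, uint32_t control)
{
    if (!(control & CONTROL_FPCA)) {
        lr |= EXCRET_FTYPE;     /* no active FP context to preserve */
    }
    return lr;
}
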
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
1
3
Add the CRP as unimplemented thus avoiding bus errors when
4
guests access these registers.
5
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20191115154734.26449-2-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
include/hw/arm/xlnx-versal.h | 3 +++
13
hw/arm/xlnx-versal.c | 2 ++
14
2 files changed, 5 insertions(+)
15
16
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/hw/arm/xlnx-versal.h
19
+++ b/include/hw/arm/xlnx-versal.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
21
#define MM_IOU_SCNTRS_SIZE 0x10000
22
#define MM_FPD_CRF 0xfd1a0000U
23
#define MM_FPD_CRF_SIZE 0x140000
24
+
25
+#define MM_PMC_CRP 0xf1260000U
26
+#define MM_PMC_CRP_SIZE 0x10000
27
#endif
28
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/hw/arm/xlnx-versal.c
31
+++ b/hw/arm/xlnx-versal.c
32
@@ -XXX,XX +XXX,XX @@ static void versal_unimp(Versal *s)
33
MM_CRL, MM_CRL_SIZE);
34
versal_unimp_area(s, "crf", &s->mr_ps,
35
MM_FPD_CRF, MM_FPD_CRF_SIZE);
36
+ versal_unimp_area(s, "crp", &s->mr_ps,
37
+ MM_PMC_CRP, MM_PMC_CRP_SIZE);
38
versal_unimp_area(s, "iou-scntr", &s->mr_ps,
39
MM_IOU_SCNTR, MM_IOU_SCNTR_SIZE);
40
versal_unimp_area(s, "iou-scntr-seucre", &s->mr_ps,
41
--
42
2.20.1
43
44
diff view generated by jsdifflib
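
The general pattern this patch relies on is QEMU's "unimplemented device":
backing an MMIO window with a region that logs guest accesses (visible with
-d unimp) and reads as zero, instead of generating a bus error.
versal_unimp_area() in hw/arm/xlnx-versal.c is a thin board-local wrapper
around that idea; a minimal sketch using the generic helper directly, with
the address constants from the patch, would look like:

#include "qemu/osdep.h"
#include "hw/misc/unimp.h"

#define MM_PMC_CRP      0xf1260000U
#define MM_PMC_CRP_SIZE 0x10000

/* Sketch only: registers a background "unimplemented" region so stray
 * guest accesses to the CRP window are logged rather than faulting. */
void map_crp_as_unimplemented(void)
{
    create_unimplemented_device("crp", MM_PMC_CRP, MM_PMC_CRP_SIZE);
}
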
From: Marc Zyngier <maz@kernel.org>

The ARMv8 ARM states when executing at EL2, EL3 or Secure EL1,
ISR_EL1 shows the pending status of the physical IRQ, FIQ, or
SError interrupts.

Unfortunately, QEMU's implementation only considers the HCR_EL2
bits, and ignores the current exception level. This means a hypervisor
trying to look at its own interrupt state actually sees the guest
state, which is unexpected and breaks KVM as of Linux 5.3.

Instead, check for the running EL and return the physical bits
if not running in a virtualized context.

Fixes: 636540e9c40b
Cc: qemu-stable@nongnu.org
Reported-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Message-id: 20191122135833.28953-1-maz@kernel.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
     CPUState *cs = env_cpu(env);
     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
     uint64_t ret = 0;
+    bool allow_virt = (arm_current_el(env) == 1 &&
+                       (!arm_is_secure_below_el3(env) ||
+                        (env->cp15.scr_el3 & SCR_EEL2)));
 
-    if (hcr_el2 & HCR_IMO) {
+    if (allow_virt && (hcr_el2 & HCR_IMO)) {
         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
             ret |= CPSR_I;
         }
@@ -XXX,XX +XXX,XX @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
         }
     }
 
-    if (hcr_el2 & HCR_FMO) {
+    if (allow_virt && (hcr_el2 & HCR_FMO)) {
         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
             ret |= CPSR_F;
         }
--
2.20.1

This reverts commit 9fcd15b9193e819b6cc2fd0a45e3506148812bb4.

This change turns out to cause regressions, for instance on the
imx6ul boards as described here:
https://lore.kernel.org/qemu-devel/c8b89685-7490-328b-51a3-48711c140a84@tribudubois.net/

The primary cause of that regression is that the guest code running
at EL3 expects SMCs (not related to PSCI) to do what they would if
our PSCI emulation was not present at all, but after this change
they instead set a value in R0/X0 and continue.

We could fix that by a refactoring that allowed us to only turn on
the PSCI emulation if we weren't booting the guest at EL3, but there
is a more tangled problem with the highbank board, which:
 (1) wants to enable PSCI emulation
 (2) has a bit of guest code that it wants to run at EL3 and
     to perform SMC calls that trap to the monitor vector table:
     this is the boot stub code that is written to memory by
     arm_write_secure_board_setup_dummy_smc() and which the
     highbank board enables by setting bootinfo->secure_board_setup

We can't satisfy both of those and also have the PSCI emulation
handle all SMC instruction executions regardless of function
identifier value.

This is too tricky to try to sort out before 6.2 is released;
revert this commit so we can take the time to get it right in
the 7.0 release.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20211119163419.557623-1-peter.maydell@linaro.org
---
 target/arm/psci.c | 35 +++++++++++++++++++++++++++++------
 1 file changed, 29 insertions(+), 6 deletions(-)

diff --git a/target/arm/psci.c b/target/arm/psci.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/psci.c
+++ b/target/arm/psci.c
@@ -XXX,XX +XXX,XX @@
 
 bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
 {
-    /*
-     * Return true if the exception type matches the configured PSCI conduit.
-     * This is called before the SMC/HVC instruction is executed, to decide
-     * whether we should treat it as a PSCI call or with the architecturally
+    /* Return true if the r0/x0 value indicates a PSCI call and
+     * the exception type matches the configured PSCI conduit. This is
+     * called before the SMC/HVC instruction is executed, to decide whether
+     * we should treat it as a PSCI call or with the architecturally
      * defined behaviour for an SMC or HVC (which might be UNDEF or trap
      * to EL2 or to EL3).
      */
+    CPUARMState *env = &cpu->env;
+    uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0];
 
     switch (excp_type) {
     case EXCP_HVC:
@@ -XXX,XX +XXX,XX @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
         return false;
     }
 
-    return true;
+    switch (param) {
+    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+    case QEMU_PSCI_0_1_FN_CPU_ON:
+    case QEMU_PSCI_0_2_FN_CPU_ON:
+    case QEMU_PSCI_0_2_FN64_CPU_ON:
+    case QEMU_PSCI_0_1_FN_CPU_OFF:
+    case QEMU_PSCI_0_2_FN_CPU_OFF:
+    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+    case QEMU_PSCI_0_1_FN_MIGRATE:
+    case QEMU_PSCI_0_2_FN_MIGRATE:
+        return true;
+    default:
+        return false;
+    }
 }
 
 void arm_handle_psci_call(ARMCPU *cpu)
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu)
         break;
     case QEMU_PSCI_0_1_FN_MIGRATE:
     case QEMU_PSCI_0_2_FN_MIGRATE:
-    default:
         ret = QEMU_PSCI_RET_NOT_SUPPORTED;
         break;
+    default:
+        g_assert_not_reached();
     }
 
 err:
--
2.25.1
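
For context, this is how a PSCI call reaches arm_is_psci_call() in the first
place: the guest puts an SMCCC/PSCI function identifier in r0/x0 and executes
SMC or HVC, depending on the conduit the board advertises. A guest-side
sketch (AArch64, GCC/Clang inline assembly; not QEMU code, with the function
ID value taken from the PSCI specification):

#include <stdint.h>

#define PSCI_0_2_FN_PSCI_VERSION 0x84000000u  /* SMC32 PSCI_VERSION */

/* Issue a PSCI call over the SMC conduit; the result comes back in x0. */
static inline uint64_t psci_smc_call(uint64_t fn, uint64_t a1,
                                     uint64_t a2, uint64_t a3)
{
    register uint64_t x0 __asm__("x0") = fn;
    register uint64_t x1 __asm__("x1") = a1;
    register uint64_t x2 __asm__("x2") = a2;
    register uint64_t x3 __asm__("x3") = a3;

    __asm__ volatile("smc #0"
                     : "+r"(x0)
                     : "r"(x1), "r"(x2), "r"(x3)
                     : "memory");
    return x0;
}

With the revert applied, only the function identifiers listed in the switch
above are treated as PSCI calls; any other SMC falls through to the
architectural behaviour, which is what the highbank boot stub depends on.
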
From: Marc Zyngier <maz@kernel.org>

HCR_EL2.TID3 mandates that access from EL1 to a long list of id
registers traps to EL2, and QEMU has so far ignored this requirement.

This breaks (among other things) KVM guests that have PtrAuth enabled,
while the hypervisor doesn't want to expose the feature to its guest.
To achieve this, KVM traps the ID registers (ID_AA64ISAR1_EL1 in this
case), and masks out the unsupported feature.

QEMU not honoring the trap request means that the guest observes
that the feature is present in the HW, starts using it, and dies
a horrible death when KVM injects an UNDEF, because the feature
*really* isn't supported.

Do the right thing by trapping to EL2 if HCR_EL2.TID3 is set.

Note that this change does not include trapping of the MVFR
registers from AArch32 (they are accessed via the VMRS
instruction and need to be handled in a different way).

Reported-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Will Deacon <will@kernel.org>
Message-id: 20191123115618.29230-1-maz@kernel.org
[PMM: added missing accessfn line for ID_AA64PFR2_EL1_RESERVED;
 changed names of access functions to include _tid3]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 76 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo predinv_reginfo[] = {
     REGINFO_SENTINEL
 };
 
+static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
+    return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        return access_aa64_tid3(env, ri, isread);
+    }
+
+    return CP_ACCESS_OK;
+}
+
 void register_cp_regs_for_features(ARMCPU *cpu)
 {
     /* Register all the coprocessor registers based on feature bits */
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
+             .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_dfr0 },
             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr0 },
             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr1 },
             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr2 },
             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr3 },
             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr4 },
             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
             REGINFO_SENTINEL
         };
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
+             .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1},
             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              /* At present, only SVEver == 0 is defined anyway. */
              .resetvalue = 0 },
             { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64dfr0 },
             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64dfr1 },
             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
             { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
             { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
+             .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
--
2.20.1
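
To illustrate why honouring TID3 matters, here is a conceptual sketch of the
hypervisor side (not QEMU or KVM code; the TID3 bit position follows the
Armv8 architecture, and the ID_AA64ISAR1 field masks are illustrative): once
HCR_EL2.TID3 is set, an EL1 read of ID_AA64ISAR1_EL1 traps to EL2, and the
hypervisor can hand back a sanitised value with the pointer-authentication
fields masked out, so the guest never starts using a feature that will later
be refused.

#include <stdint.h>

#define HCR_TID3               (1ULL << 18)            /* trap EL1 ID-group-3 reads */
#define ID_AA64ISAR1_APA_MASK  0x00000000000000f0ULL   /* PtrAuth, architected algo */
#define ID_AA64ISAR1_API_MASK  0x0000000000000f00ULL   /* PtrAuth, impl-defined algo */

/* Value the guest ends up seeing for ID_AA64ISAR1_EL1: raw hardware value
 * if no trap is requested, a sanitised copy if the read traps to EL2
 * (which only happens once QEMU honours HCR_EL2.TID3, as this patch does). */
uint64_t guest_view_of_id_aa64isar1(uint64_t hcr_el2, uint64_t hw_value)
{
    if (hcr_el2 & HCR_TID3) {
        /* Trapped at EL2: hide pointer authentication from the guest. */
        return hw_value & ~(ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_API_MASK);
    }
    return hw_value;
}
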