Only thing for Arm for rc1 is RTH's fix for the KVM SVE probe code.

-- PMM

The following changes since commit 4e06b3fc1b5e1ec03f22190eabe56891dc9c2236:

  Merge tag 'pull-hex-20220731' of https://github.com/quic/qemu into staging (2022-07-31 21:38:54 -0700)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220801

for you to fetch changes up to 5265d24c981dfdda8d29b44f7e84a514da75eedc:

  target/arm: Move sve probe inside kvm >= 4.15 branch (2022-08-01 16:21:18 +0100)

----------------------------------------------------------------
target-arm queue:
 * Fix KVM SVE ID register probe code

----------------------------------------------------------------
Richard Henderson (3):
      target/arm: Use kvm_arm_sve_supported in kvm_arm_get_host_cpu_features
      target/arm: Set KVM_ARM_VCPU_SVE while probing the host
      target/arm: Move sve probe inside kvm >= 4.15 branch

 target/arm/kvm64.c | 45 ++++++++++++++++++++++-----------------------
 1 file changed, 22 insertions(+), 23 deletions(-)
From: Richard Henderson <richard.henderson@linaro.org>

Indication for support for SVE will not depend on whether we
perform the query on the main kvm_state or the temp vcpu.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220726045828.53697-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
         }
     }

-    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
+    sve_supported = kvm_arm_sve_supported();

     /* Add feature bits that can't appear until after VCPU init. */
     if (sve_supported) {
-- 
2.25.1
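
For readers not familiar with the probe code: kvm_arm_sve_supported() amounts to a KVM_CHECK_EXTENSION query, which is exactly what the deleted ioctl() on fdarray[0] did, so the answer does not depend on whether the query goes through the main kvm_state or the scratch-vcpu file descriptors. A minimal standalone sketch of that check (illustrative only, not the QEMU helper; the function name here is made up, and it assumes arm64 Linux headers that define KVM_CAP_ARM_SVE):

    #include <stdbool.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Ask a KVM fd whether the host can create SVE-enabled vcpus. */
    static bool host_kvm_supports_sve(int kvm_fd)
    {
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
    }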
From: Richard Henderson <richard.henderson@linaro.org>

Because we weren't setting this flag, our probe of ID_AA64ZFR0
was always returning zero. This also obviates the adjustment
of ID_AA64PFR0, which had sanitized the SVE field.

The effects of the bug are not visible, because the only thing that
ID_AA64ZFR0 is used for within qemu at present is tcg translation.
The other tests for SVE within KVM are via ID_AA64PFR0.SVE.

Reported-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220726045828.53697-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm64.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     bool sve_supported;
     bool pmu_supported = false;
     uint64_t features = 0;
-    uint64_t t;
     int err;

     /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     struct kvm_vcpu_init init = { .target = -1, };

     /*
-     * Ask for Pointer Authentication if supported. We can't play the
-     * SVE trick of synthesising the ID reg as KVM won't tell us
-     * whether we have the architected or IMPDEF version of PAuth, so
-     * we have to use the actual ID regs.
+     * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
+     * which is otherwise RAZ.
+     */
+    sve_supported = kvm_arm_sve_supported();
+    if (sve_supported) {
+        init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
+    }
+
+    /*
+     * Ask for Pointer Authentication if supported, so that we get
+     * the unsanitized field values for AA64ISAR1_EL1.
      */
     if (kvm_arm_pauth_supported()) {
         init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
         }
     }

-    sve_supported = kvm_arm_sve_supported();
-
-    /* Add feature bits that can't appear until after VCPU init. */
     if (sve_supported) {
-        t = ahcf->isar.id_aa64pfr0;
-        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
-        ahcf->isar.id_aa64pfr0 = t;
-
         /*
          * There is a range of kernels between kernel commit 73433762fcae
          * and f81cb2c3ad41 which have a bug where the kernel doesn't expose
          * SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has enabled
-         * SVE support, so we only read it here, rather than together with all
-         * the other ID registers earlier.
+         * SVE support, which resulted in an error rather than RAZ.
+         * So only read the register if we set KVM_ARM_VCPU_SVE above.
          */
         err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                               ARM64_SYS_REG(3, 0, 0, 4, 4));
-- 
2.25.1
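
The ordering constraint being fixed is worth spelling out: KVM only reports a meaningful ID_AA64ZFR0_EL1 for a vcpu that was created with the SVE feature bit, so the bit must go into kvm_vcpu_init.features[] before KVM_ARM_VCPU_INIT, and only then is the register read back via KVM_GET_ONE_REG. A condensed sketch of that sequence (illustrative only, not the QEMU code: read_one_reg64() and probe_id_aa64zfr0() are made-up stand-ins for read_sys_reg64() and the scratch-vcpu machinery, the vcpu fd is assumed to be already open, QEMU really asks KVM for its preferred target rather than hardcoding one, and ARM64_SYS_REG() is the same register-encoding helper that appears in the diff, from the arm64 KVM headers or an equivalent local definition):

    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int read_one_reg64(int vcpu_fd, uint64_t id, uint64_t *val)
    {
        struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

    static int probe_id_aa64zfr0(int vcpu_fd, bool sve_supported, uint64_t *zfr0)
    {
        struct kvm_vcpu_init init = { .target = KVM_ARM_TARGET_GENERIC_V8 };

        if (sve_supported) {
            /* Must be set before KVM_ARM_VCPU_INIT, or ID_AA64ZFR0_EL1 reads as 0. */
            init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
        }
        if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0) {
            return -1;
        }
        /* ID_AA64ZFR0_EL1 is op0=3, op1=0, crn=0, crm=4, op2=4, as in the diff. */
        return sve_supported
               ? read_one_reg64(vcpu_fd, ARM64_SYS_REG(3, 0, 0, 4, 4), zfr0)
               : 0;
    }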
From: Richard Henderson <richard.henderson@linaro.org>

The test for the IF block indicates no ID registers are exposed, much
less host support for SVE. Move the SVE probe into the ELSE block.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220726045828.53697-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm64.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
             err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                                   ARM64_SYS_REG(3, 3, 9, 12, 0));
         }
-    }

-    if (sve_supported) {
-        /*
-         * There is a range of kernels between kernel commit 73433762fcae
-         * and f81cb2c3ad41 which have a bug where the kernel doesn't expose
-         * SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has enabled
-         * SVE support, which resulted in an error rather than RAZ.
-         * So only read the register if we set KVM_ARM_VCPU_SVE above.
-         */
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
-                              ARM64_SYS_REG(3, 0, 0, 4, 4));
+        if (sve_supported) {
+            /*
+             * There is a range of kernels between kernel commit 73433762fcae
+             * and f81cb2c3ad41 which have a bug where the kernel doesn't
+             * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has
+             * enabled SVE support, which resulted in an error rather than RAZ.
+             * So only read the register if we set KVM_ARM_VCPU_SVE above.
+             */
+            err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
+                                  ARM64_SYS_REG(3, 0, 0, 4, 4));
+        }
     }

     kvm_arm_destroy_scratch_host_vcpu(fdarray);
-- 
2.25.1
46
diff view generated by jsdifflib
Deleted patch
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
2
1
3
Add the CRP as unimplemented thus avoiding bus errors when
4
guests access these registers.
5
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
9
Message-id: 20191115154734.26449-2-edgar.iglesias@gmail.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
include/hw/arm/xlnx-versal.h | 3 +++
13
hw/arm/xlnx-versal.c | 2 ++
14
2 files changed, 5 insertions(+)
15
16
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/hw/arm/xlnx-versal.h
19
+++ b/include/hw/arm/xlnx-versal.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct Versal {
21
#define MM_IOU_SCNTRS_SIZE 0x10000
22
#define MM_FPD_CRF 0xfd1a0000U
23
#define MM_FPD_CRF_SIZE 0x140000
24
+
25
+#define MM_PMC_CRP 0xf1260000U
26
+#define MM_PMC_CRP_SIZE 0x10000
27
#endif
28
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/hw/arm/xlnx-versal.c
31
+++ b/hw/arm/xlnx-versal.c
32
@@ -XXX,XX +XXX,XX @@ static void versal_unimp(Versal *s)
33
MM_CRL, MM_CRL_SIZE);
34
versal_unimp_area(s, "crf", &s->mr_ps,
35
MM_FPD_CRF, MM_FPD_CRF_SIZE);
36
+ versal_unimp_area(s, "crp", &s->mr_ps,
37
+ MM_PMC_CRP, MM_PMC_CRP_SIZE);
38
versal_unimp_area(s, "iou-scntr", &s->mr_ps,
39
MM_IOU_SCNTR, MM_IOU_SCNTR_SIZE);
40
versal_unimp_area(s, "iou-scntr-seucre", &s->mr_ps,
41
--
42
2.20.1
43
44
diff view generated by jsdifflib