A last collection of patches to squeeze in before rc0.
The patches from me are all bugfixes. Philippe's are just
code-movement, but I wanted to get these into 4.1 because
that kind of patch is so painful to have to rebase.
(The diffstat is huge but it's just code moving from file to file.)

thanks
-- PMM

The following changes since commit 234e256511e588680300600ce087c5185d68cf2a:

  Merge remote-tracking branch 'remotes/armbru/tags/pull-build-2019-07-02-v2' into staging (2019-07-04 15:58:46 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20190704

for you to fetch changes up to b75f3735802b5b33f10e4bfe374d4b17bb86d29a:

  target/arm: Correct VMOV_imm_dp handling of short vectors (2019-07-04 16:52:05 +0100)

----------------------------------------------------------------
target-arm queue:
 * more code-movement to separate TCG-only functions into their own files
 * Correct VMOV_imm_dp handling of short vectors
 * Execute Thumb instructions when their condbits are 0xf
 * armv7m_systick: Forbid non-privileged accesses
 * Use _ra versions of cpu_stl_data() in v7M helpers
 * v8M: Check state of exception being returned from
 * v8M: Forcibly clear negative-priority exceptions on deactivate

----------------------------------------------------------------
Peter Maydell (6):
      arm v8M: Forcibly clear negative-priority exceptions on deactivate
      target/arm: v8M: Check state of exception being returned from
      target/arm: Use _ra versions of cpu_stl_data() in v7M helpers
      hw/timer/armv7m_systick: Forbid non-privileged accesses
      target/arm: Execute Thumb instructions when their condbits are 0xf
      target/arm: Correct VMOV_imm_dp handling of short vectors

Philippe Mathieu-Daudé (3):
      target/arm: Move debug routines to debug_helper.c
      target/arm: Restrict semi-hosting to TCG
      target/arm/helper: Move M profile routines to m_helper.c

 target/arm/Makefile.objs       |    5 +-
 target/arm/cpu.h               |    7 +
 hw/intc/armv7m_nvic.c          |   54 +-
 hw/timer/armv7m_systick.c      |   26 +-
 target/arm/cpu.c               |    9 +-
 target/arm/debug_helper.c      |  311 +++++
 target/arm/helper.c            | 2646 +--------------------------------------
 target/arm/m_helper.c          | 2679 ++++++++++++++++++++++++++++++++++++++++
 target/arm/op_helper.c         |  295 -----
 target/arm/translate-vfp.inc.c |    2 +-
 target/arm/translate.c         |   15 +-
 11 files changed, 3096 insertions(+), 2953 deletions(-)
 create mode 100644 target/arm/debug_helper.c
 create mode 100644 target/arm/m_helper.c

diff view generated by jsdifflib
Deleted patch

From: Victor Kamensky <kamensky@cisco.com>

In the OE project, a 4.15 Linux kernel boot hang was observed under
single-CPU aarch64 QEMU. Kernel code was in a loop waiting for a
vtimer interrupt to arrive, spinning in TCG-generated blocks, while
the interrupt was pending unprocessed. This happened because when
QEMU tried to handle the vtimer interrupt the target had interrupts
disabled; as a result the flag indicating a TCG exit,
cpu->icount_decr.u16.high, was cleared, but arm_cpu_exec_interrupt
did not call arm_cpu_do_interrupt to process the interrupt. Later,
when the target re-enabled interrupts, it did so without exiting to
the main loop, so the code waiting for the result of the interrupt
ran in an infinite loop.

To solve the problem, instructions that operate on CPU system state
(i.e. enable/disable interrupts) and are marked as DISAS_UPDATE
should be treated as a variant of DISAS_EXIT and forced to exit back
to the main loop, so QEMU has a chance to process pending CPU state
updates, including pending interrupts.

This change brings consistency with how DISAS_UPDATE is treated in
the aarch32 case.

CC: Peter Maydell <peter.maydell@linaro.org>
CC: Alex Bennée <alex.bennee@linaro.org>
CC: qemu-stable@nongnu.org
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Victor Kamensky <kamensky@cisco.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1521526368-1996-1-git-send-email-kamensky@cisco.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
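
As an aside for readers unfamiliar with the disas jump types, the
following stand-alone sketch (stub names only, not QEMU's actual
DisasJmpType or translator API) models the decision this patch
changes: which end-of-translation-block kinds hand control back to
QEMU's main loop. After the patch, DISAS_UPDATE behaves as a variant
of DISAS_EXIT, so a DAIF write that unmasks interrupts lets the main
loop deliver them.

    /* Stand-alone sketch; compiles with any C compiler. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum {
        JMP_JUMP,    /* PC changed, no other CPU state side effects */
        JMP_UPDATE,  /* CPU state (e.g. the DAIF mask) was changed  */
        JMP_EXIT,    /* must hand control back to the main loop     */
    } JmpKind;

    /* After the fix, JMP_UPDATE takes the same path as JMP_EXIT:
     * leave generated code so pending interrupts are noticed. */
    static bool exits_to_main_loop(JmpKind j)
    {
        return j == JMP_UPDATE || j == JMP_EXIT;
    }

    int main(void)
    {
        printf("UPDATE exits to main loop: %s\n",
               exits_to_main_loop(JMP_UPDATE) ? "yes" : "no");
        return 0;
    }
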
 target/arm/translate-a64.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     case DISAS_UPDATE:
         gen_a64_set_pc_im(dc->pc);
         /* fall through */
-    case DISAS_JUMP:
-        tcg_gen_lookup_and_goto_ptr();
-        break;
     case DISAS_EXIT:
         tcg_gen_exit_tb(0);
         break;
+    case DISAS_JUMP:
+        tcg_gen_lookup_and_goto_ptr();
+        break;
     case DISAS_NORETURN:
     case DISAS_SWI:
         break;
--
2.16.2

diff view generated by jsdifflib
From: Philippe Mathieu-Daudé <philmd@redhat.com>

These routines are TCG specific.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190701194942.10092-2-philmd@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
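
The moved code below leans heavily on QEMU's extract32()/extract64()
helpers from include/qemu/bitops.h. As a reminder of their semantics,
here is a stand-alone sketch of the 32-bit variant (the register
value is an arbitrary example; the field positions match those used
in bp_wp_matches() below):

    /* Stand-alone sketch mirroring extract32(value, start, length). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract32(uint32_t value, int start, int length)
    {
        /* Take 'length' bits of 'value', starting at bit 'start'. */
        return (value >> start) & (~0U >> (32 - length));
    }

    int main(void)
    {
        uint32_t cr = 0x0000e007; /* arbitrary DBGWCR-style value */

        printf("PAC = %u\n", extract32(cr, 1, 2));  /* bits [2:1]   */
        printf("HMC = %u\n", extract32(cr, 13, 1)); /* bit  [13]    */
        printf("SSC = %u\n", extract32(cr, 14, 2)); /* bits [15:14] */
        return 0;
    }
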
 target/arm/Makefile.objs  |   2 +-
 target/arm/cpu.c          |   9 +-
 target/arm/debug_helper.c | 311 ++++++++++++++++++++++++++++++++++++++
 target/arm/op_helper.c    | 295 ------------------------------------
 4 files changed, 315 insertions(+), 302 deletions(-)
 create mode 100644 target/arm/debug_helper.c

diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -XXX,XX +XXX,XX @@ target/arm/translate-sve.o: target/arm/decode-sve.inc.c
 target/arm/translate.o: target/arm/decode-vfp.inc.c
 target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
 
-obj-y += tlb_helper.o
+obj-y += tlb_helper.o debug_helper.o
 obj-y += translate.o op_helper.o
 obj-y += crypto_helper.o
 obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_arch_name = arm_gdb_arch_name;
     cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
     cc->gdb_stop_before_watchpoint = true;
-    cc->debug_excp_handler = arm_debug_excp_handler;
-    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
-#if !defined(CONFIG_USER_ONLY)
-    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
-#endif
-
     cc->disas_set_info = arm_disas_set_info;
 #ifdef CONFIG_TCG
     cc->tcg_initialize = arm_translate_init;
     cc->tlb_fill = arm_cpu_tlb_fill;
+    cc->debug_excp_handler = arm_debug_excp_handler;
+    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
 #if !defined(CONFIG_USER_ONLY)
     cc->do_unaligned_access = arm_cpu_do_unaligned_access;
     cc->do_transaction_failed = arm_cpu_do_transaction_failed;
+    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
 #endif
 }
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/debug_helper.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * ARM debug helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/* Return true if the linked breakpoint entry lbn passes its checks */
+static bool linked_bp_matches(ARMCPU *cpu, int lbn)
+{
+    CPUARMState *env = &cpu->env;
+    uint64_t bcr = env->cp15.dbgbcr[lbn];
+    int brps = extract32(cpu->dbgdidr, 24, 4);
+    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
+    int bt;
+    uint32_t contextidr;
+
+    /*
+     * Links to unimplemented or non-context aware breakpoints are
+     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
+     * as if linked to an UNKNOWN context-aware breakpoint (in which
+     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
+     * We choose the former.
+     */
+    if (lbn > brps || lbn < (brps - ctx_cmps)) {
+        return false;
+    }
+
+    bcr = env->cp15.dbgbcr[lbn];
+
+    if (extract64(bcr, 0, 1) == 0) {
+        /* Linked breakpoint disabled : generate no events */
+        return false;
+    }
+
+    bt = extract64(bcr, 20, 4);
+
+    /*
+     * We match the whole register even if this is AArch32 using the
+     * short descriptor format (in which case it holds both PROCID and ASID),
+     * since we don't implement the optional v7 context ID masking.
+     */
+    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
+
+    switch (bt) {
+    case 3: /* linked context ID match */
+        if (arm_current_el(env) > 1) {
+            /* Context matches never fire in EL2 or (AArch64) EL3 */
+            return false;
+        }
+        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
+    case 5: /* linked address mismatch (reserved in AArch64) */
+    case 9: /* linked VMID match (reserved if no EL2) */
+    case 11: /* linked context ID and VMID match (reserved if no EL2) */
+    default:
+        /*
+         * Links to Unlinked context breakpoints must generate no
+         * events; we choose to do the same for reserved values too.
+         */
+        return false;
+    }
+
+    return false;
+}
+
+static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
+{
+    CPUARMState *env = &cpu->env;
+    uint64_t cr;
+    int pac, hmc, ssc, wt, lbn;
+    /*
+     * Note that for watchpoints the check is against the CPU security
+     * state, not the S/NS attribute on the offending data access.
+     */
+    bool is_secure = arm_is_secure(env);
+    int access_el = arm_current_el(env);
+
+    if (is_wp) {
+        CPUWatchpoint *wp = env->cpu_watchpoint[n];
+
+        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
+            return false;
+        }
+        cr = env->cp15.dbgwcr[n];
+        if (wp->hitattrs.user) {
+            /*
+             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
+             * match watchpoints as if they were accesses done at EL0, even if
+             * the CPU is at EL1 or higher.
+             */
+            access_el = 0;
+        }
+    } else {
+        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+
+        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
+            return false;
+        }
+        cr = env->cp15.dbgbcr[n];
+    }
+    /*
+     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+     * enabled and that the address and access type match; for breakpoints
+     * we know the address matched; check the remaining fields, including
+     * linked breakpoints. We rely on WCR and BCR having the same layout
+     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
+     * Note that some combinations of {PAC, HMC, SSC} are reserved and
+     * must act either like some valid combination or as if the watchpoint
+     * were disabled. We choose the former, and use this together with
+     * the fact that EL3 must always be Secure and EL2 must always be
+     * Non-Secure to simplify the code slightly compared to the full
+     * table in the ARM ARM.
+     */
+    pac = extract64(cr, 1, 2);
+    hmc = extract64(cr, 13, 1);
+    ssc = extract64(cr, 14, 2);
+
+    switch (ssc) {
+    case 0:
+        break;
+    case 1:
+    case 3:
+        if (is_secure) {
+            return false;
+        }
+        break;
+    case 2:
+        if (!is_secure) {
+            return false;
+        }
+        break;
+    }
+
+    switch (access_el) {
+    case 3:
+    case 2:
+        if (!hmc) {
+            return false;
+        }
+        break;
+    case 1:
+        if (extract32(pac, 0, 1) == 0) {
+            return false;
+        }
+        break;
+    case 0:
+        if (extract32(pac, 1, 1) == 0) {
+            return false;
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    wt = extract64(cr, 20, 1);
+    lbn = extract64(cr, 16, 4);
+
+    if (wt && !linked_bp_matches(cpu, lbn)) {
+        return false;
+    }
+
+    return true;
+}
+
+static bool check_watchpoints(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    int n;
+
+    /*
+     * If watchpoints are disabled globally or we can't take debug
+     * exceptions here then watchpoint firings are ignored.
+     */
+    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+        || !arm_generate_debug_exceptions(env)) {
+        return false;
+    }
+
+    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
+        if (bp_wp_matches(cpu, n, true)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static bool check_breakpoints(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    int n;
+
+    /*
+     * If breakpoints are disabled globally or we can't take debug
+     * exceptions here then breakpoint firings are ignored.
+     */
+    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+        || !arm_generate_debug_exceptions(env)) {
+        return false;
+    }
+
+    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
+        if (bp_wp_matches(cpu, n, false)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void HELPER(check_breakpoints)(CPUARMState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+
+    if (check_breakpoints(cpu)) {
+        HELPER(exception_internal(env, EXCP_DEBUG));
+    }
+}
+
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+    /*
+     * Called by core code when a CPU watchpoint fires; need to check if this
+     * is also an architectural watchpoint match.
+     */
+    ARMCPU *cpu = ARM_CPU(cs);
+
+    return check_watchpoints(cpu);
+}
+
+void arm_debug_excp_handler(CPUState *cs)
+{
+    /*
+     * Called by core code when a watchpoint or breakpoint fires;
+     * need to check which one and raise the appropriate exception.
+     */
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+    if (wp_hit) {
+        if (wp_hit->flags & BP_CPU) {
+            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+            bool same_el = arm_debug_target_el(env) == arm_current_el(env);
+
+            cs->watchpoint_hit = NULL;
+
+            env->exception.fsr = arm_debug_exception_fsr(env);
+            env->exception.vaddress = wp_hit->hitaddr;
+            raise_exception(env, EXCP_DATA_ABORT,
+                            syn_watchpoint(same_el, 0, wnr),
+                            arm_debug_target_el(env));
+        }
+    } else {
+        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
+
+        /*
+         * (1) GDB breakpoints should be handled first.
+         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
+         * since singlestep is also done by generating a debug internal
+         * exception.
+         */
+        if (cpu_breakpoint_test(cs, pc, BP_GDB)
+            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
+            return;
+        }
+
+        env->exception.fsr = arm_debug_exception_fsr(env);
+        /*
+         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+         * values to the guest that it shouldn't be able to see at its
+         * exception/security level.
+         */
+        env->exception.vaddress = 0;
+        raise_exception(env, EXCP_PREFETCH_ABORT,
+                        syn_breakpoint(same_el),
+                        arm_debug_target_el(env));
+    }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    /*
+     * In BE32 system mode, target memory is stored byteswapped (on a
+     * little-endian host system), and by the time we reach here (via an
+     * opcode helper) the addresses of subword accesses have been adjusted
+     * to account for that, which means that watchpoints will not match.
+     * Undo the adjustment here.
+     */
+    if (arm_sctlr_b(env)) {
+        if (len == 1) {
+            addr ^= 3;
+        } else if (len == 2) {
+            addr ^= 2;
+        }
+    }
+
+    return addr;
+}
+
+#endif
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
 }
 }
 
-/* Return true if the linked breakpoint entry lbn passes its checks */
-static bool linked_bp_matches(ARMCPU *cpu, int lbn)
-{
-    CPUARMState *env = &cpu->env;
-    uint64_t bcr = env->cp15.dbgbcr[lbn];
-    int brps = extract32(cpu->dbgdidr, 24, 4);
-    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
-    int bt;
-    uint32_t contextidr;
-
-    /*
-     * Links to unimplemented or non-context aware breakpoints are
-     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
-     * as if linked to an UNKNOWN context-aware breakpoint (in which
-     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
-     * We choose the former.
-     */
-    if (lbn > brps || lbn < (brps - ctx_cmps)) {
-        return false;
-    }
-
-    bcr = env->cp15.dbgbcr[lbn];
-
-    if (extract64(bcr, 0, 1) == 0) {
-        /* Linked breakpoint disabled : generate no events */
-        return false;
-    }
-
-    bt = extract64(bcr, 20, 4);
-
-    /*
-     * We match the whole register even if this is AArch32 using the
-     * short descriptor format (in which case it holds both PROCID and ASID),
-     * since we don't implement the optional v7 context ID masking.
-     */
-    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
-
-    switch (bt) {
-    case 3: /* linked context ID match */
-        if (arm_current_el(env) > 1) {
-            /* Context matches never fire in EL2 or (AArch64) EL3 */
-            return false;
-        }
-        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
-    case 5: /* linked address mismatch (reserved in AArch64) */
-    case 9: /* linked VMID match (reserved if no EL2) */
-    case 11: /* linked context ID and VMID match (reserved if no EL2) */
-    default:
-        /*
-         * Links to Unlinked context breakpoints must generate no
-         * events; we choose to do the same for reserved values too.
-         */
-        return false;
-    }
-
-    return false;
-}
-
-static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
-{
-    CPUARMState *env = &cpu->env;
-    uint64_t cr;
-    int pac, hmc, ssc, wt, lbn;
-    /*
-     * Note that for watchpoints the check is against the CPU security
-     * state, not the S/NS attribute on the offending data access.
-     */
-    bool is_secure = arm_is_secure(env);
-    int access_el = arm_current_el(env);
-
-    if (is_wp) {
-        CPUWatchpoint *wp = env->cpu_watchpoint[n];
-
-        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
-            return false;
-        }
-        cr = env->cp15.dbgwcr[n];
-        if (wp->hitattrs.user) {
-            /*
-             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
-             * match watchpoints as if they were accesses done at EL0, even if
-             * the CPU is at EL1 or higher.
-             */
-            access_el = 0;
-        }
-    } else {
-        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
-
-        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
-            return false;
-        }
-        cr = env->cp15.dbgbcr[n];
-    }
-    /*
-     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
-     * enabled and that the address and access type match; for breakpoints
-     * we know the address matched; check the remaining fields, including
-     * linked breakpoints. We rely on WCR and BCR having the same layout
-     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
-     * Note that some combinations of {PAC, HMC, SSC} are reserved and
-     * must act either like some valid combination or as if the watchpoint
-     * were disabled. We choose the former, and use this together with
-     * the fact that EL3 must always be Secure and EL2 must always be
-     * Non-Secure to simplify the code slightly compared to the full
-     * table in the ARM ARM.
-     */
-    pac = extract64(cr, 1, 2);
-    hmc = extract64(cr, 13, 1);
-    ssc = extract64(cr, 14, 2);
-
-    switch (ssc) {
-    case 0:
-        break;
-    case 1:
-    case 3:
-        if (is_secure) {
-            return false;
-        }
-        break;
-    case 2:
-        if (!is_secure) {
-            return false;
-        }
-        break;
-    }
-
-    switch (access_el) {
-    case 3:
-    case 2:
-        if (!hmc) {
-            return false;
-        }
-        break;
-    case 1:
-        if (extract32(pac, 0, 1) == 0) {
-            return false;
-        }
-        break;
-    case 0:
-        if (extract32(pac, 1, 1) == 0) {
-            return false;
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    wt = extract64(cr, 20, 1);
-    lbn = extract64(cr, 16, 4);
-
-    if (wt && !linked_bp_matches(cpu, lbn)) {
-        return false;
-    }
-
-    return true;
-}
-
-static bool check_watchpoints(ARMCPU *cpu)
-{
-    CPUARMState *env = &cpu->env;
-    int n;
-
-    /*
-     * If watchpoints are disabled globally or we can't take debug
-     * exceptions here then watchpoint firings are ignored.
-     */
-    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
-        || !arm_generate_debug_exceptions(env)) {
-        return false;
-    }
-
-    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
-        if (bp_wp_matches(cpu, n, true)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-static bool check_breakpoints(ARMCPU *cpu)
-{
-    CPUARMState *env = &cpu->env;
-    int n;
-
-    /*
-     * If breakpoints are disabled globally or we can't take debug
-     * exceptions here then breakpoint firings are ignored.
-     */
-    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
-        || !arm_generate_debug_exceptions(env)) {
-        return false;
-    }
-
-    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
-        if (bp_wp_matches(cpu, n, false)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-void HELPER(check_breakpoints)(CPUARMState *env)
-{
-    ARMCPU *cpu = env_archcpu(env);
-
-    if (check_breakpoints(cpu)) {
-        HELPER(exception_internal(env, EXCP_DEBUG));
-    }
-}
-
-bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
-{
-    /*
-     * Called by core code when a CPU watchpoint fires; need to check if this
-     * is also an architectural watchpoint match.
-     */
-    ARMCPU *cpu = ARM_CPU(cs);
-
-    return check_watchpoints(cpu);
-}
-
-vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-
-    /*
-     * In BE32 system mode, target memory is stored byteswapped (on a
-     * little-endian host system), and by the time we reach here (via an
-     * opcode helper) the addresses of subword accesses have been adjusted
-     * to account for that, which means that watchpoints will not match.
-     * Undo the adjustment here.
-     */
-    if (arm_sctlr_b(env)) {
-        if (len == 1) {
-            addr ^= 3;
-        } else if (len == 2) {
-            addr ^= 2;
-        }
-    }
-
-    return addr;
-}
-
-void arm_debug_excp_handler(CPUState *cs)
-{
-    /*
-     * Called by core code when a watchpoint or breakpoint fires;
-     * need to check which one and raise the appropriate exception.
-     */
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
-
-    if (wp_hit) {
-        if (wp_hit->flags & BP_CPU) {
-            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
-            bool same_el = arm_debug_target_el(env) == arm_current_el(env);
-
-            cs->watchpoint_hit = NULL;
-
-            env->exception.fsr = arm_debug_exception_fsr(env);
-            env->exception.vaddress = wp_hit->hitaddr;
-            raise_exception(env, EXCP_DATA_ABORT,
-                            syn_watchpoint(same_el, 0, wnr),
-                            arm_debug_target_el(env));
-        }
-    } else {
-        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
-        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
-
-        /*
-         * (1) GDB breakpoints should be handled first.
-         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
-         * since singlestep is also done by generating a debug internal
-         * exception.
-         */
-        if (cpu_breakpoint_test(cs, pc, BP_GDB)
-            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
-            return;
-        }
-
-        env->exception.fsr = arm_debug_exception_fsr(env);
-        /*
-         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
-         * values to the guest that it shouldn't be able to see at its
-         * exception/security level.
-         */
-        env->exception.vaddress = 0;
-        raise_exception(env, EXCP_PREFETCH_ABORT,
-                        syn_breakpoint(same_el),
-                        arm_debug_target_el(env));
-    }
-}
-
 /* ??? Flag setting arithmetic is awkward because we need to do comparisons.
    The only way to do that in TCG is a conditional branch, which clobbers
    all our temporaries.  For now implement these as helper functions. */
--
2.20.1

diff view generated by jsdifflib
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Per Peter Maydell:

  Semihosting hooks either SVC or HLT instructions, and inside KVM
  both of those go to EL1, ie to the guest, and can't be trapped to
  KVM.

Let check_for_semihosting() return False when not running on TCG.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190701194942.10092-3-philmd@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
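
The cpu.h hunk below uses a standard QEMU pattern worth spelling out:
when a feature is configured out, provide a static inline stub that
can never legitimately be reached, so call sites compile unchanged
without scattering #ifdefs. A reduced stand-alone sketch of the
pattern (stub names, not the real cpu.h declarations):

    /* Stand-alone sketch of the compile-time stub pattern. */
    #include <assert.h>
    #include <stdio.h>

    /* CONFIG_TCG is normally defined (or not) by the build system. */
    #if !defined(CONFIG_TCG)
    static inline long do_semihosting_call(void *env)
    {
        (void)env;
        assert(!"unreachable: semihosting requires TCG");
        return 0;
    }
    #else
    long do_semihosting_call(void *env); /* real definition elsewhere */
    #endif

    int main(void)
    {
    #if defined(CONFIG_TCG)
        printf("TCG build: real semihosting implementation linked\n");
    #else
        printf("non-TCG build: stub would assert if ever reached\n");
    #endif
        return 0;
    }
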
 target/arm/Makefile.objs | 2 +-
 target/arm/cpu.h         | 7 +++++++
 target/arm/helper.c      | 8 +++++++-
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -XXX,XX +XXX,XX @@
-obj-y += arm-semi.o
+obj-$(CONFIG_TCG) += arm-semi.o
 obj-y += helper.o vfp_helper.o
 obj-y += cpu.o gdbstub.o
 obj-$(TARGET_AARCH64) += cpu64.o gdbstub64.o
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void aarch64_sve_change_el(CPUARMState *env, int o,
 { }
 #endif
 
+#if !defined(CONFIG_TCG)
+static inline target_ulong do_arm_semihosting(CPUARMState *env)
+{
+    g_assert_not_reached();
+}
+#else
 target_ulong do_arm_semihosting(CPUARMState *env);
+#endif
 void aarch64_sync_32_to_64(CPUARMState *env);
 void aarch64_sync_64_to_32(CPUARMState *env);
 
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/qemu-print.h"
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
-#include "arm_ldst.h"
 #include <zlib.h> /* For crc32 */
 #include "hw/semihosting/semihost.h"
 #include "sysemu/cpus.h"
@@ -XXX,XX +XXX,XX @@
 #include "qapi/qapi-commands-machine-target.h"
 #include "qapi/error.h"
 #include "qemu/guest-random.h"
+#ifdef CONFIG_TCG
+#include "arm_ldst.h"
+#endif
 
 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
 
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
 
 static inline bool check_for_semihosting(CPUState *cs)
 {
+#ifdef CONFIG_TCG
     /* Check whether this exception is a semihosting call; if so
      * then handle it and return true; otherwise return false.
      */
@@ -XXX,XX +XXX,XX @@ static inline bool check_for_semihosting(CPUState *cs)
         env->regs[0] = do_arm_semihosting(env);
         return true;
     }
+#else
+    return false;
+#endif
 }
 
 /* Handle a CPU exception for A and R profile CPUs.
--
2.20.1

diff view generated by jsdifflib
From: Philippe Mathieu-Daudé <philmd@redhat.com>

In preparation for supporting TCG disablement on ARM, we move most
of TCG related v7m/v8m helpers and APIs into their own file.

Note: It is easier to review this commit using the 'histogram'
diff algorithm:

  $ git diff --diff-algorithm=histogram ...
or
  $ git diff --histogram ...

Suggested-by: Samuel Ortiz <sameo@linux.intel.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190702144335.10717-2-philmd@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
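
One rule that recurs throughout the M-profile code being moved below
(see get_v7m_sp_ptr()) is stack-pointer selection: the Process stack
pointer is only the active SP in Thread mode with CONTROL.SPSEL set;
Handler mode always runs on the Main stack pointer. A stand-alone
reduction of just that rule:

    /* Stand-alone sketch of the v7M/v8M active-SP rule. */
    #include <stdbool.h>
    #include <stdio.h>

    static const char *active_sp(bool threadmode, bool spsel)
    {
        /* Mirrors 'bool want_psp = threadmode && spsel;' in
         * get_v7m_sp_ptr() below. */
        return (threadmode && spsel) ? "PSP" : "MSP";
    }

    int main(void)
    {
        printf("Handler, SPSEL=1 -> %s\n", active_sp(false, true));
        printf("Thread,  SPSEL=0 -> %s\n", active_sp(true, false));
        printf("Thread,  SPSEL=1 -> %s\n", active_sp(true, true));
        return 0;
    }
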
 target/arm/Makefile.objs |    1 +
 target/arm/helper.c      | 2638 +------------------------------------
 target/arm/m_helper.c    | 2676 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 2681 insertions(+), 2634 deletions(-)
 create mode 100644 target/arm/m_helper.c

diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -XXX,XX +XXX,XX @@ obj-y += tlb_helper.o debug_helper.o
 obj-y += translate.o op_helper.o
 obj-y += crypto_helper.o
 obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
+obj-y += m_helper.o
 
 obj-$(CONFIG_SOFTMMU) += psci.o
 
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/crc32c.h"
 #include "qemu/qemu-print.h"
 #include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
 #include <zlib.h> /* For crc32 */
 #include "hw/semihosting/semihost.h"
 #include "sysemu/cpus.h"
@@ -XXX,XX +XXX,XX @@
 #include "qemu/guest-random.h"
 #ifdef CONFIG_TCG
 #include "arm_ldst.h"
+#include "exec/cpu_ldst.h"
 #endif
 
 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rbit)(uint32_t x)
 
 #ifdef CONFIG_USER_ONLY
 
-/* These should probably raise undefined insn exceptions.  */
-void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
-{
-    ARMCPU *cpu = env_archcpu(env);
-
-    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
-}
-
-uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
-{
-    ARMCPU *cpu = env_archcpu(env);
-
-    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
-    return 0;
-}
-
-void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
-{
-    /* translate.c should never generate calls here in user-only mode */
-    g_assert_not_reached();
-}
-
-void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
-{
-    /* translate.c should never generate calls here in user-only mode */
-    g_assert_not_reached();
-}
-
-void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
-{
-    /* translate.c should never generate calls here in user-only mode */
-    g_assert_not_reached();
-}
-
-void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
-{
-    /* translate.c should never generate calls here in user-only mode */
-    g_assert_not_reached();
-}
-
-void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
-{
-    /* translate.c should never generate calls here in user-only mode */
-    g_assert_not_reached();
-}
-
-uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
-{
-    /*
-     * The TT instructions can be used by unprivileged code, but in
-     * user-only emulation we don't have the MPU.
-     * Luckily since we know we are NonSecure unprivileged (and that in
-     * turn means that the A flag wasn't specified), all the bits in the
-     * register must be zero:
-     *  IREGION: 0 because IRVALID is 0
-     *  IRVALID: 0 because NS
-     *  S: 0 because NS
-     *  NSRW: 0 because NS
-     *  NSR: 0 because NS
-     *  RW: 0 because unpriv and A flag not set
-     *  R: 0 because unpriv and A flag not set
-     *  SRVALID: 0 because NS
-     *  MRVALID: 0 because unpriv and A flag not set
-     *  SREGION: 0 becaus SRVALID is 0
-     *  MREGION: 0 because MRVALID is 0
-     */
-    return 0;
-}
-
 static void switch_mode(CPUARMState *env, int mode)
 {
     ARMCPU *cpu = env_archcpu(env);
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(int idx)
 }
 }
 
-/*
- * What kind of stack write are we doing? This affects how exceptions
- * generated during the stacking are treated.
- */
-typedef enum StackingMode {
-    STACK_NORMAL,
-    STACK_IGNFAULTS,
-    STACK_LAZYFP,
-} StackingMode;
-
-static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
-                            ARMMMUIdx mmu_idx, StackingMode mode)
-{
-    CPUState *cs = CPU(cpu);
-    CPUARMState *env = &cpu->env;
-    MemTxAttrs attrs = {};
-    MemTxResult txres;
-    target_ulong page_size;
-    hwaddr physaddr;
-    int prot;
-    ARMMMUFaultInfo fi = {};
-    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
-    int exc;
-    bool exc_secure;
-
-    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
-                      &attrs, &prot, &page_size, &fi, NULL)) {
-        /* MPU/SAU lookup failed */
-        if (fi.type == ARMFault_QEMU_SFault) {
-            if (mode == STACK_LAZYFP) {
-                qemu_log_mask(CPU_LOG_INT,
-                              "...SecureFault with SFSR.LSPERR "
-                              "during lazy stacking\n");
-                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
-            } else {
-                qemu_log_mask(CPU_LOG_INT,
-                              "...SecureFault with SFSR.AUVIOL "
-                              "during stacking\n");
-                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
-            }
-            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
-            env->v7m.sfar = addr;
-            exc = ARMV7M_EXCP_SECURE;
-            exc_secure = false;
-        } else {
-            if (mode == STACK_LAZYFP) {
-                qemu_log_mask(CPU_LOG_INT,
-                              "...MemManageFault with CFSR.MLSPERR\n");
-                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
-            } else {
-                qemu_log_mask(CPU_LOG_INT,
-                              "...MemManageFault with CFSR.MSTKERR\n");
-                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
-            }
-            exc = ARMV7M_EXCP_MEM;
-            exc_secure = secure;
-        }
-        goto pend_fault;
-    }
-    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
-                         attrs, &txres);
-    if (txres != MEMTX_OK) {
-        /* BusFault trying to write the data */
-        if (mode == STACK_LAZYFP) {
-            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
-            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
-        } else {
-            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
-            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
-        }
-        exc = ARMV7M_EXCP_BUS;
-        exc_secure = false;
-        goto pend_fault;
-    }
-    return true;
-
-pend_fault:
-    /*
-     * By pending the exception at this point we are making
-     * the IMPDEF choice "overridden exceptions pended" (see the
-     * MergeExcInfo() pseudocode). The other choice would be to not
-     * pend them now and then make a choice about which to throw away
-     * later if we have two derived exceptions.
-     * The only case when we must not pend the exception but instead
-     * throw it away is if we are doing the push of the callee registers
-     * and we've already generated a derived exception (this is indicated
-     * by the caller passing STACK_IGNFAULTS). Even in this case we will
-     * still update the fault status registers.
-     */
-    switch (mode) {
-    case STACK_NORMAL:
-        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
-        break;
-    case STACK_LAZYFP:
-        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
-        break;
-    case STACK_IGNFAULTS:
-        break;
-    }
-    return false;
-}
-
-static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
-                           ARMMMUIdx mmu_idx)
-{
-    CPUState *cs = CPU(cpu);
-    CPUARMState *env = &cpu->env;
-    MemTxAttrs attrs = {};
-    MemTxResult txres;
-    target_ulong page_size;
-    hwaddr physaddr;
-    int prot;
-    ARMMMUFaultInfo fi = {};
-    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
-    int exc;
-    bool exc_secure;
-    uint32_t value;
-
-    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
-                      &attrs, &prot, &page_size, &fi, NULL)) {
-        /* MPU/SAU lookup failed */
-        if (fi.type == ARMFault_QEMU_SFault) {
-            qemu_log_mask(CPU_LOG_INT,
-                          "...SecureFault with SFSR.AUVIOL during unstack\n");
-            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
-            env->v7m.sfar = addr;
-            exc = ARMV7M_EXCP_SECURE;
-            exc_secure = false;
-        } else {
-            qemu_log_mask(CPU_LOG_INT,
-                          "...MemManageFault with CFSR.MUNSTKERR\n");
-            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
-            exc = ARMV7M_EXCP_MEM;
-            exc_secure = secure;
-        }
-        goto pend_fault;
-    }
-
-    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
-                              attrs, &txres);
-    if (txres != MEMTX_OK) {
-        /* BusFault trying to read the data */
-        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
-        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
-        exc = ARMV7M_EXCP_BUS;
-        exc_secure = false;
-        goto pend_fault;
-    }
-
-    *dest = value;
-    return true;
-
-pend_fault:
-    /*
-     * By pending the exception at this point we are making
-     * the IMPDEF choice "overridden exceptions pended" (see the
-     * MergeExcInfo() pseudocode). The other choice would be to not
-     * pend them now and then make a choice about which to throw away
-     * later if we have two derived exceptions.
-     */
-    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
-    return false;
-}
-
-void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
-{
-    /*
-     * Preserve FP state (because LSPACT was set and we are about
-     * to execute an FP instruction). This corresponds to the
-     * PreserveFPState() pseudocode.
-     * We may throw an exception if the stacking fails.
-     */
-    ARMCPU *cpu = env_archcpu(env);
-    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
-    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
-    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
-    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
-    uint32_t fpcar = env->v7m.fpcar[is_secure];
-    bool stacked_ok = true;
-    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
-    bool take_exception;
-
-    /* Take the iothread lock as we are going to touch the NVIC */
-    qemu_mutex_lock_iothread();
-
-    /* Check the background context had access to the FPU */
-    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
-        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
-        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
-        stacked_ok = false;
-    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
-        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
-        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
-        stacked_ok = false;
-    }
-
-    if (!splimviol && stacked_ok) {
-        /* We only stack if the stack limit wasn't violated */
-        int i;
-        ARMMMUIdx mmu_idx;
-
-        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
-        for (i = 0; i < (ts ? 32 : 16); i += 2) {
-            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
-            uint32_t faddr = fpcar + 4 * i;
-            uint32_t slo = extract64(dn, 0, 32);
-            uint32_t shi = extract64(dn, 32, 32);
-
-            if (i >= 16) {
-                faddr += 8; /* skip the slot for the FPSCR */
-            }
-            stacked_ok = stacked_ok &&
-                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
-                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
-        }
-
-        stacked_ok = stacked_ok &&
-            v7m_stack_write(cpu, fpcar + 0x40,
-                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
-    }
-
-    /*
-     * We definitely pended an exception, but it's possible that it
-     * might not be able to be taken now. If its priority permits us
-     * to take it now, then we must not update the LSPACT or FP regs,
-     * but instead jump out to take the exception immediately.
-     * If it's just pending and won't be taken until the current
-     * handler exits, then we do update LSPACT and the FP regs.
-     */
-    take_exception = !stacked_ok &&
-        armv7m_nvic_can_take_pending_exception(env->nvic);
-
-    qemu_mutex_unlock_iothread();
-
-    if (take_exception) {
-        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
-    }
-
-    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
-
-    if (ts) {
-        /* Clear s0 to s31 and the FPSCR */
-        int i;
-
-        for (i = 0; i < 32; i += 2) {
-            *aa32_vfp_dreg(env, i / 2) = 0;
-        }
-        vfp_set_fpscr(env, 0);
-    }
-    /*
-     * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
-     * unchanged.
-     */
-}
-
-/*
- * Write to v7M CONTROL.SPSEL bit for the specified security bank.
- * This may change the current stack pointer between Main and Process
- * stack pointers if it is done for the CONTROL register for the current
- * security state.
- */
-static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
-                                                 bool new_spsel,
-                                                 bool secstate)
-{
-    bool old_is_psp = v7m_using_psp(env);
-
-    env->v7m.control[secstate] =
-        deposit32(env->v7m.control[secstate],
-                  R_V7M_CONTROL_SPSEL_SHIFT,
-                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
-
-    if (secstate == env->v7m.secure) {
-        bool new_is_psp = v7m_using_psp(env);
-        uint32_t tmp;
-
-        if (old_is_psp != new_is_psp) {
-            tmp = env->v7m.other_sp;
-            env->v7m.other_sp = env->regs[13];
-            env->regs[13] = tmp;
-        }
-    }
-}
-
-/*
- * Write to v7M CONTROL.SPSEL bit. This may change the current
- * stack pointer between Main and Process stack pointers.
- */
-static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
-{
-    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
-}
-
-void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
-{
-    /*
-     * Write a new value to v7m.exception, thus transitioning into or out
-     * of Handler mode; this may result in a change of active stack pointer.
-     */
-    bool new_is_psp, old_is_psp = v7m_using_psp(env);
-    uint32_t tmp;
-
-    env->v7m.exception = new_exc;
-
-    new_is_psp = v7m_using_psp(env);
-
-    if (old_is_psp != new_is_psp) {
-        tmp = env->v7m.other_sp;
-        env->v7m.other_sp = env->regs[13];
-        env->regs[13] = tmp;
-    }
-}
-
-/* Switch M profile security state between NS and S */
-static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
-{
-    uint32_t new_ss_msp, new_ss_psp;
-
-    if (env->v7m.secure == new_secstate) {
-        return;
-    }
-
-    /*
-     * All the banked state is accessed by looking at env->v7m.secure
-     * except for the stack pointer; rearrange the SP appropriately.
-     */
-    new_ss_msp = env->v7m.other_ss_msp;
-    new_ss_psp = env->v7m.other_ss_psp;
-
-    if (v7m_using_psp(env)) {
-        env->v7m.other_ss_psp = env->regs[13];
-        env->v7m.other_ss_msp = env->v7m.other_sp;
-    } else {
-        env->v7m.other_ss_msp = env->regs[13];
-        env->v7m.other_ss_psp = env->v7m.other_sp;
-    }
-
-    env->v7m.secure = new_secstate;
-
-    if (v7m_using_psp(env)) {
-        env->regs[13] = new_ss_psp;
-        env->v7m.other_sp = new_ss_msp;
-    } else {
-        env->regs[13] = new_ss_msp;
-        env->v7m.other_sp = new_ss_psp;
-    }
-}
-
-void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
-{
-    /*
-     * Handle v7M BXNS:
-     *  - if the return value is a magic value, do exception return (like BX)
-     *  - otherwise bit 0 of the return value is the target security state
-     */
-    uint32_t min_magic;
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        /* Covers FNC_RETURN and EXC_RETURN magic */
-        min_magic = FNC_RETURN_MIN_MAGIC;
-    } else {
-        /* EXC_RETURN magic only */
-        min_magic = EXC_RETURN_MIN_MAGIC;
-    }
-
-    if (dest >= min_magic) {
-        /*
-         * This is an exception return magic value; put it where
-         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
-         * Note that if we ever add gen_ss_advance() singlestep support to
-         * M profile this should count as an "instruction execution complete"
-         * event (compare gen_bx_excret_final_code()).
-         */
-        env->regs[15] = dest & ~1;
-        env->thumb = dest & 1;
-        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
-        /* notreached */
-    }
-
-    /* translate.c should have made BXNS UNDEF unless we're secure */
-    assert(env->v7m.secure);
-
-    if (!(dest & 1)) {
-        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-    }
-    switch_v7m_security_state(env, dest & 1);
-    env->thumb = 1;
-    env->regs[15] = dest & ~1;
-}
-
-void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
-{
-    /*
-     * Handle v7M BLXNS:
-     *  - bit 0 of the destination address is the target security state
-     */
-
-    /* At this point regs[15] is the address just after the BLXNS */
-    uint32_t nextinst = env->regs[15] | 1;
-    uint32_t sp = env->regs[13] - 8;
-    uint32_t saved_psr;
-
-    /* translate.c will have made BLXNS UNDEF unless we're secure */
-    assert(env->v7m.secure);
-
-    if (dest & 1) {
-        /*
-         * Target is Secure, so this is just a normal BLX,
-         * except that the low bit doesn't indicate Thumb/not.
-         */
-        env->regs[14] = nextinst;
-        env->thumb = 1;
-        env->regs[15] = dest & ~1;
-        return;
-    }
-
-    /* Target is non-secure: first push a stack frame */
-    if (!QEMU_IS_ALIGNED(sp, 8)) {
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
-    }
-
-    if (sp < v7m_sp_limit(env)) {
-        raise_exception(env, EXCP_STKOF, 0, 1);
-    }
-
-    saved_psr = env->v7m.exception;
-    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
-        saved_psr |= XPSR_SFPA;
-    }
-
-    /* Note that these stores can throw exceptions on MPU faults */
-    cpu_stl_data(env, sp, nextinst);
-    cpu_stl_data(env, sp + 4, saved_psr);
-
-    env->regs[13] = sp;
-    env->regs[14] = 0xfeffffff;
-    if (arm_v7m_is_handler_mode(env)) {
-        /*
-         * Write a dummy value to IPSR, to avoid leaking the current secure
-         * exception number to non-secure code. This is guaranteed not
-         * to cause write_v7m_exception() to actually change stacks.
-         */
-        write_v7m_exception(env, 1);
-    }
-    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-    switch_v7m_security_state(env, 0);
-    env->thumb = 1;
-    env->regs[15] = dest;
-}
-
-static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
-                                bool spsel)
-{
-    /*
-     * Return a pointer to the location where we currently store the
-     * stack pointer for the requested security state and thread mode.
-     * This pointer will become invalid if the CPU state is updated
-     * such that the stack pointers are switched around (eg changing
-     * the SPSEL control bit).
-     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
-     * Unlike that pseudocode, we require the caller to pass us in the
-     * SPSEL control bit value; this is because we also use this
-     * function in handling of pushing of the callee-saves registers
-     * part of the v8M stack frame (pseudocode PushCalleeStack()),
-     * and in the tailchain codepath the SPSEL bit comes from the exception
-     * return magic LR value from the previous exception. The pseudocode
-     * opencodes the stack-selection in PushCalleeStack(), but we prefer
-     * to make this utility function generic enough to do the job.
-     */
-    bool want_psp = threadmode && spsel;
-
-    if (secure == env->v7m.secure) {
-        if (want_psp == v7m_using_psp(env)) {
-            return &env->regs[13];
-        } else {
-            return &env->v7m.other_sp;
-        }
-    } else {
-        if (want_psp) {
-            return &env->v7m.other_ss_psp;
-        } else {
-            return &env->v7m.other_ss_msp;
-        }
-    }
-}
-
-static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
-                                uint32_t *pvec)
-{
-    CPUState *cs = CPU(cpu);
-    CPUARMState *env = &cpu->env;
-    MemTxResult result;
-    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
-    uint32_t vector_entry;
-    MemTxAttrs attrs = {};
-    ARMMMUIdx mmu_idx;
-    bool exc_secure;
-
-    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
-
-    /*
-     * We don't do a get_phys_addr() here because the rules for vector
-     * loads are special: they always use the default memory map, and
-     * the default memory map permits reads from all addresses.
-     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
-     * that we want this special case which would always say "yes",
-     * we just do the SAU lookup here followed by a direct physical load.
-     */
-    attrs.secure = targets_secure;
-    attrs.user = false;
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        V8M_SAttributes sattrs = {};
-
-        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
-        if (sattrs.ns) {
-            attrs.secure = false;
-        } else if (!targets_secure) {
-            /* NS access to S memory */
-            goto load_fail;
-        }
-    }
-
-    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
-                                     attrs, &result);
-    if (result != MEMTX_OK) {
-        goto load_fail;
-    }
-    *pvec = vector_entry;
-    return true;
-
-load_fail:
-    /*
-     * All vector table fetch fails are reported as HardFault, with
-     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
-     * technically the underlying exception is a MemManage or BusFault
-     * that is escalated to HardFault.) This is a terminal exception,
-     * so we will either take the HardFault immediately or else enter
-     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
-     */
-    exc_secure = targets_secure ||
679
- !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
680
- env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
681
- armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
682
- return false;
683
-}
684
-
685
-static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
686
-{
687
- /*
688
- * Return the integrity signature value for the callee-saves
689
- * stack frame section. @lr is the exception return payload/LR value
690
- * whose FType bit forms bit 0 of the signature if FP is present.
691
- */
692
- uint32_t sig = 0xfefa125a;
693
-
694
- if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
695
- sig |= 1;
696
- }
697
- return sig;
698
-}
699
-
700
-static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
701
- bool ignore_faults)
702
-{
703
- /*
704
- * For v8M, push the callee-saves register part of the stack frame.
705
- * Compare the v8M pseudocode PushCalleeStack().
706
- * In the tailchaining case this may not be the current stack.
707
- */
708
- CPUARMState *env = &cpu->env;
709
- uint32_t *frame_sp_p;
710
- uint32_t frameptr;
711
- ARMMMUIdx mmu_idx;
712
- bool stacked_ok;
713
- uint32_t limit;
714
- bool want_psp;
715
- uint32_t sig;
716
- StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
717
-
718
- if (dotailchain) {
719
- bool mode = lr & R_V7M_EXCRET_MODE_MASK;
720
- bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
721
- !mode;
722
-
723
- mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
724
- frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
725
- lr & R_V7M_EXCRET_SPSEL_MASK);
726
- want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
727
- if (want_psp) {
728
- limit = env->v7m.psplim[M_REG_S];
729
- } else {
730
- limit = env->v7m.msplim[M_REG_S];
731
- }
732
- } else {
733
- mmu_idx = arm_mmu_idx(env);
734
- frame_sp_p = &env->regs[13];
735
- limit = v7m_sp_limit(env);
736
- }
737
-
738
- frameptr = *frame_sp_p - 0x28;
739
- if (frameptr < limit) {
740
- /*
741
- * Stack limit failure: set SP to the limit value, and generate
742
- * STKOF UsageFault. Stack pushes below the limit must not be
743
- * performed. It is IMPDEF whether pushes above the limit are
744
- * performed; we choose not to.
745
- */
746
- qemu_log_mask(CPU_LOG_INT,
747
- "...STKOF during callee-saves register stacking\n");
748
- env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
749
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
750
- env->v7m.secure);
751
- *frame_sp_p = limit;
752
- return true;
753
- }
754
-
755
- /*
756
- * Write as much of the stack frame as we can. A write failure may
757
- * cause us to pend a derived exception.
758
- */
759
- sig = v7m_integrity_sig(env, lr);
760
- stacked_ok =
761
- v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
762
- v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
763
- v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
764
- v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
765
- v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
766
- v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
767
- v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
768
- v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
769
- v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
770
-
771
- /* Update SP regardless of whether any of the stack accesses failed. */
772
- *frame_sp_p = frameptr;
773
-
774
- return !stacked_ok;
775
-}
776
-
777
-static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
778
- bool ignore_stackfaults)
779
-{
780
- /*
781
- * Do the "take the exception" parts of exception entry,
782
- * but not the pushing of state to the stack. This is
783
- * similar to the pseudocode ExceptionTaken() function.
784
- */
785
- CPUARMState *env = &cpu->env;
786
- uint32_t addr;
787
- bool targets_secure;
788
- int exc;
789
- bool push_failed = false;
790
-
791
- armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
792
- qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
793
- targets_secure ? "secure" : "nonsecure", exc);
794
-
795
- if (dotailchain) {
796
- /* Sanitize LR FType and PREFIX bits */
797
- if (!arm_feature(env, ARM_FEATURE_VFP)) {
798
- lr |= R_V7M_EXCRET_FTYPE_MASK;
799
- }
800
- lr = deposit32(lr, 24, 8, 0xff);
801
- }
802
-
803
- if (arm_feature(env, ARM_FEATURE_V8)) {
804
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
805
- (lr & R_V7M_EXCRET_S_MASK)) {
806
- /*
807
- * The background code (the owner of the registers in the
808
- * exception frame) is Secure. This means it may either already
809
- * have or now needs to push callee-saves registers.
810
- */
811
- if (targets_secure) {
812
- if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
813
- /*
814
- * We took an exception from Secure to NonSecure
815
- * (which means the callee-saved registers got stacked)
816
- * and are now tailchaining to a Secure exception.
817
- * Clear DCRS so eventual return from this Secure
818
- * exception unstacks the callee-saved registers.
819
- */
820
- lr &= ~R_V7M_EXCRET_DCRS_MASK;
821
- }
822
- } else {
823
- /*
824
- * We're going to a non-secure exception; push the
825
- * callee-saves registers to the stack now, if they're
826
- * not already saved.
827
- */
828
- if (lr & R_V7M_EXCRET_DCRS_MASK &&
829
- !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
830
- push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
831
- ignore_stackfaults);
832
- }
833
- lr |= R_V7M_EXCRET_DCRS_MASK;
834
- }
835
- }
836
-
837
- lr &= ~R_V7M_EXCRET_ES_MASK;
838
- if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
839
- lr |= R_V7M_EXCRET_ES_MASK;
840
- }
841
- lr &= ~R_V7M_EXCRET_SPSEL_MASK;
842
- if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
843
- lr |= R_V7M_EXCRET_SPSEL_MASK;
844
- }
845
-
846
- /*
847
- * Clear registers if necessary to prevent non-secure exception
848
- * code being able to see register values from secure code.
849
- * Where register values become architecturally UNKNOWN we leave
850
- * them with their previous values.
851
- */
852
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
853
- if (!targets_secure) {
854
- /*
855
- * Always clear the caller-saved registers (they have been
856
- * pushed to the stack earlier in v7m_push_stack()).
857
- * Clear callee-saved registers if the background code is
858
- * Secure (in which case these regs were saved in
859
- * v7m_push_callee_stack()).
860
- */
861
- int i;
862
-
863
- for (i = 0; i < 13; i++) {
864
- /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
865
- if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
866
- env->regs[i] = 0;
867
- }
868
- }
869
- /* Clear EAPSR */
870
- xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
871
- }
872
- }
873
- }
874
-
875
- if (push_failed && !ignore_stackfaults) {
876
- /*
877
- * Derived exception on callee-saves register stacking:
878
- * we might now want to take a different exception which
879
- * targets a different security state, so try again from the top.
880
- */
881
- qemu_log_mask(CPU_LOG_INT,
882
- "...derived exception on callee-saves register stacking");
883
- v7m_exception_taken(cpu, lr, true, true);
884
- return;
885
- }
886
-
887
- if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
888
- /* Vector load failed: derived exception */
889
- qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
890
- v7m_exception_taken(cpu, lr, true, true);
891
- return;
892
- }
893
-
894
- /*
895
- * Now we've done everything that might cause a derived exception
896
- * we can go ahead and activate whichever exception we're going to
897
- * take (which might now be the derived exception).
898
- */
899
- armv7m_nvic_acknowledge_irq(env->nvic);
900
-
901
- /* Switch to target security state -- must do this before writing SPSEL */
902
- switch_v7m_security_state(env, targets_secure);
903
- write_v7m_control_spsel(env, 0);
904
- arm_clear_exclusive(env);
905
- /* Clear SFPA and FPCA (has no effect if no FPU) */
906
- env->v7m.control[M_REG_S] &=
907
- ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
908
- /* Clear IT bits */
909
- env->condexec_bits = 0;
910
- env->regs[14] = lr;
911
- env->regs[15] = addr & 0xfffffffe;
912
- env->thumb = addr & 1;
913
-}
914
-
915
-static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
916
- bool apply_splim)
917
-{
918
- /*
919
- * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
920
- * that we will need later in order to do lazy FP reg stacking.
921
- */
922
- bool is_secure = env->v7m.secure;
923
- void *nvic = env->nvic;
924
- /*
925
- * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
926
- * are banked and we want to update the bit in the bank for the
927
- * current security state; and in one case we want to specifically
928
- * update the NS banked version of a bit even if we are secure.
929
- */
930
- uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
931
- uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
932
- uint32_t *fpccr = &env->v7m.fpccr[is_secure];
933
- bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
934
-
935
- env->v7m.fpcar[is_secure] = frameptr & ~0x7;
936
-
937
- if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
938
- bool splimviol;
939
- uint32_t splim = v7m_sp_limit(env);
940
- bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
941
- (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
942
-
943
- splimviol = !ign && frameptr < splim;
944
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
945
- }
946
-
947
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
948
-
949
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
950
-
951
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
952
-
953
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
954
- !arm_v7m_is_handler_mode(env));
955
-
956
- hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
957
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
958
-
959
- bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
960
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
961
-
962
- mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
963
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
964
-
965
- ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
966
- *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
967
-
968
- monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
969
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
970
-
971
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
972
- s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
973
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
974
-
975
- sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
976
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
977
- }
978
-}
979
-
980
-void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
981
-{
982
- /* fptr is the value of Rn, the frame pointer we store the FP regs to */
983
- bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
984
- bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
985
-
986
- assert(env->v7m.secure);
987
-
988
- if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
989
- return;
990
- }
991
-
992
- /* Check access to the coprocessor is permitted */
993
- if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
994
- raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
995
- }
996
-
997
- if (lspact) {
998
- /* LSPACT should not be active when there is active FP state */
999
- raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
1000
- }
1001
-
1002
- if (fptr & 7) {
1003
- raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
1004
- }
1005
-
1006
- /*
1007
- * Note that we do not use v7m_stack_write() here, because the
1008
- * accesses should not set the FSR bits for stacking errors if they
1009
- * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
1010
- * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
1011
- * and longjmp out.
1012
- */
1013
- if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1014
- bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1015
- int i;
1016
-
1017
- for (i = 0; i < (ts ? 32 : 16); i += 2) {
1018
- uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1019
- uint32_t faddr = fptr + 4 * i;
1020
- uint32_t slo = extract64(dn, 0, 32);
1021
- uint32_t shi = extract64(dn, 32, 32);
1022
-
1023
- if (i >= 16) {
1024
- faddr += 8; /* skip the slot for the FPSCR */
1025
- }
1026
- cpu_stl_data(env, faddr, slo);
1027
- cpu_stl_data(env, faddr + 4, shi);
1028
- }
1029
- cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
1030
-
1031
- /*
1032
- * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
1033
- * leave them unchanged, matching our choice in v7m_preserve_fp_state.
1034
- */
1035
- if (ts) {
1036
- for (i = 0; i < 32; i += 2) {
1037
- *aa32_vfp_dreg(env, i / 2) = 0;
1038
- }
1039
- vfp_set_fpscr(env, 0);
1040
- }
1041
- } else {
1042
- v7m_update_fpccr(env, fptr, false);
1043
- }
1044
-
1045
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
1046
-}
1047
-
1048
-void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
1049
-{
1050
- /* fptr is the value of Rn, the frame pointer we load the FP regs from */
1051
- assert(env->v7m.secure);
1052
-
1053
- if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1054
- return;
1055
- }
1056
-
1057
- /* Check access to the coprocessor is permitted */
1058
- if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
1059
- raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
1060
- }
1061
-
1062
- if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1063
- /* State in FP is still valid */
1064
- env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
1065
- } else {
1066
- bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1067
- int i;
1068
- uint32_t fpscr;
1069
-
1070
- if (fptr & 7) {
1071
- raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
1072
- }
1073
-
1074
- for (i = 0; i < (ts ? 32 : 16); i += 2) {
1075
- uint32_t slo, shi;
1076
- uint64_t dn;
1077
- uint32_t faddr = fptr + 4 * i;
1078
-
1079
- if (i >= 16) {
1080
- faddr += 8; /* skip the slot for the FPSCR */
1081
- }
1082
-
1083
- slo = cpu_ldl_data(env, faddr);
1084
- shi = cpu_ldl_data(env, faddr + 4);
1085
-
1086
- dn = (uint64_t) shi << 32 | slo;
1087
- *aa32_vfp_dreg(env, i / 2) = dn;
1088
- }
1089
- fpscr = cpu_ldl_data(env, fptr + 0x40);
1090
- vfp_set_fpscr(env, fpscr);
1091
- }
1092
-
1093
- env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
1094
-}
1095
-
1096
-static bool v7m_push_stack(ARMCPU *cpu)
1097
-{
1098
- /*
1099
- * Do the "set up stack frame" part of exception entry,
1100
- * similar to pseudocode PushStack().
1101
- * Return true if we generate a derived exception (and so
1102
- * should ignore further stack faults trying to process
1103
- * that derived exception.)
1104
- */
1105
- bool stacked_ok = true, limitviol = false;
1106
- CPUARMState *env = &cpu->env;
1107
- uint32_t xpsr = xpsr_read(env);
1108
- uint32_t frameptr = env->regs[13];
1109
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
1110
- uint32_t framesize;
1111
- bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
1112
-
1113
- if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
1114
- (env->v7m.secure || nsacr_cp10)) {
1115
- if (env->v7m.secure &&
1116
- env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
1117
- framesize = 0xa8;
1118
- } else {
1119
- framesize = 0x68;
1120
- }
1121
- } else {
1122
- framesize = 0x20;
1123
- }
1124
-
1125
- /* Align stack pointer if the guest wants that */
1126
- if ((frameptr & 4) &&
1127
- (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
1128
- frameptr -= 4;
1129
- xpsr |= XPSR_SPREALIGN;
1130
- }
1131
-
1132
- xpsr &= ~XPSR_SFPA;
1133
- if (env->v7m.secure &&
1134
- (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1135
- xpsr |= XPSR_SFPA;
1136
- }
1137
-
1138
- frameptr -= framesize;
1139
-
1140
- if (arm_feature(env, ARM_FEATURE_V8)) {
1141
- uint32_t limit = v7m_sp_limit(env);
1142
-
1143
- if (frameptr < limit) {
1144
- /*
1145
- * Stack limit failure: set SP to the limit value, and generate
1146
- * STKOF UsageFault. Stack pushes below the limit must not be
1147
- * performed. It is IMPDEF whether pushes above the limit are
1148
- * performed; we choose not to.
1149
- */
1150
- qemu_log_mask(CPU_LOG_INT,
1151
- "...STKOF during stacking\n");
1152
- env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
1153
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1154
- env->v7m.secure);
1155
- env->regs[13] = limit;
1156
- /*
1157
- * We won't try to perform any further memory accesses but
1158
- * we must continue through the following code to check for
1159
- * permission faults during FPU state preservation, and we
1160
- * must update FPCCR if lazy stacking is enabled.
1161
- */
1162
- limitviol = true;
1163
- stacked_ok = false;
1164
- }
1165
- }
1166
-
1167
- /*
1168
- * Write as much of the stack frame as we can. If we fail a stack
1169
- * write this will result in a derived exception being pended
1170
- * (which may be taken in preference to the one we started with
1171
- * if it has higher priority).
1172
- */
1173
- stacked_ok = stacked_ok &&
1174
- v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
1175
- v7m_stack_write(cpu, frameptr + 4, env->regs[1],
1176
- mmu_idx, STACK_NORMAL) &&
1177
- v7m_stack_write(cpu, frameptr + 8, env->regs[2],
1178
- mmu_idx, STACK_NORMAL) &&
1179
- v7m_stack_write(cpu, frameptr + 12, env->regs[3],
1180
- mmu_idx, STACK_NORMAL) &&
1181
- v7m_stack_write(cpu, frameptr + 16, env->regs[12],
1182
- mmu_idx, STACK_NORMAL) &&
1183
- v7m_stack_write(cpu, frameptr + 20, env->regs[14],
1184
- mmu_idx, STACK_NORMAL) &&
1185
- v7m_stack_write(cpu, frameptr + 24, env->regs[15],
1186
- mmu_idx, STACK_NORMAL) &&
1187
- v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
1188
-
1189
- if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
1190
- /* FPU is active, try to save its registers */
1191
- bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
1192
- bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
1193
-
1194
- if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1195
- qemu_log_mask(CPU_LOG_INT,
1196
- "...SecureFault because LSPACT and FPCA both set\n");
1197
- env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1198
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1199
- } else if (!env->v7m.secure && !nsacr_cp10) {
1200
- qemu_log_mask(CPU_LOG_INT,
1201
- "...Secure UsageFault with CFSR.NOCP because "
1202
- "NSACR.CP10 prevents stacking FP regs\n");
1203
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
1204
- env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1205
- } else {
1206
- if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1207
- /* Lazy stacking disabled, save registers now */
1208
- int i;
1209
- bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
1210
- arm_current_el(env) != 0);
1211
-
1212
- if (stacked_ok && !cpacr_pass) {
1213
- /*
1214
- * Take UsageFault if CPACR forbids access. The pseudocode
1215
- * here does a full CheckCPEnabled() but we know the NSACR
1216
- * check can never fail as we have already handled that.
1217
- */
1218
- qemu_log_mask(CPU_LOG_INT,
1219
- "...UsageFault with CFSR.NOCP because "
1220
- "CPACR.CP10 prevents stacking FP regs\n");
1221
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1222
- env->v7m.secure);
1223
- env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
1224
- stacked_ok = false;
1225
- }
1226
-
1227
- for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1228
- uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1229
- uint32_t faddr = frameptr + 0x20 + 4 * i;
1230
- uint32_t slo = extract64(dn, 0, 32);
1231
- uint32_t shi = extract64(dn, 32, 32);
1232
-
1233
- if (i >= 16) {
1234
- faddr += 8; /* skip the slot for the FPSCR */
1235
- }
1236
- stacked_ok = stacked_ok &&
1237
- v7m_stack_write(cpu, faddr, slo,
1238
- mmu_idx, STACK_NORMAL) &&
1239
- v7m_stack_write(cpu, faddr + 4, shi,
1240
- mmu_idx, STACK_NORMAL);
1241
- }
1242
- stacked_ok = stacked_ok &&
1243
- v7m_stack_write(cpu, frameptr + 0x60,
1244
- vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
1245
- if (cpacr_pass) {
1246
- for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1247
- *aa32_vfp_dreg(env, i / 2) = 0;
1248
- }
1249
- vfp_set_fpscr(env, 0);
1250
- }
1251
- } else {
1252
- /* Lazy stacking enabled, save necessary info to stack later */
1253
- v7m_update_fpccr(env, frameptr + 0x20, true);
1254
- }
1255
- }
1256
- }
1257
-
1258
- /*
1259
- * If we broke a stack limit then SP was already updated earlier;
1260
- * otherwise we update SP regardless of whether any of the stack
1261
- * accesses failed or we took some other kind of fault.
1262
- */
1263
- if (!limitviol) {
1264
- env->regs[13] = frameptr;
1265
- }
1266
-
1267
- return !stacked_ok;
1268
-}
1269
-
1270
-static void do_v7m_exception_exit(ARMCPU *cpu)
1271
-{
1272
- CPUARMState *env = &cpu->env;
1273
- uint32_t excret;
1274
- uint32_t xpsr, xpsr_mask;
1275
- bool ufault = false;
1276
- bool sfault = false;
1277
- bool return_to_sp_process;
1278
- bool return_to_handler;
1279
- bool rettobase = false;
1280
- bool exc_secure = false;
1281
- bool return_to_secure;
1282
- bool ftype;
1283
- bool restore_s16_s31;
1284
-
1285
-    /*
-     * If we're not in Handler mode then jumps to magic exception-exit
-     * addresses don't have magic behaviour. However for the v8M
-     * security extensions the magic secure-function-return has to
-     * work in thread mode too, so to avoid doing an extra check in
-     * the generated code we allow exception-exit magic to also cause the
-     * internal exception and bring us here in thread mode. Correct code
-     * will never try to do this (the following insn fetch will always
-     * fault) so the overhead of having taken an unnecessary exception
-     * doesn't matter.
-     */
-    if (!arm_v7m_is_handler_mode(env)) {
-        return;
-    }
-
-    /*
-     * In the spec pseudocode ExceptionReturn() is called directly
-     * from BXWritePC() and gets the full target PC value including
-     * bit zero. In QEMU's implementation we treat it as a normal
-     * jump-to-register (which is then caught later on), and so split
-     * the target value up between env->regs[15] and env->thumb in
-     * gen_bx(). Reconstitute it.
-     */
-    excret = env->regs[15];
-    if (env->thumb) {
-        excret |= 1;
-    }
-
-    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
-                  " previous exception %d\n",
-                  excret, env->v7m.exception);
-
-    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
-        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
-                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
-                      excret);
-    }
-
-    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
-
-    if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
-        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
-                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
-                      "if FPU not present\n",
-                      excret);
-        ftype = true;
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        /*
-         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
-         * we pick which FAULTMASK to clear.
-         */
-        if (!env->v7m.secure &&
-            ((excret & R_V7M_EXCRET_ES_MASK) ||
-             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
-            sfault = 1;
-            /* For all other purposes, treat ES as 0 (R_HXSR) */
-            excret &= ~R_V7M_EXCRET_ES_MASK;
-        }
-        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
-    }
-
-    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
-        /*
-         * Auto-clear FAULTMASK on return from other than NMI.
-         * If the security extension is implemented then this only
-         * happens if the raw execution priority is >= 0; the
-         * value of the ES bit in the exception return value indicates
-         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
-         */
-        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
-                env->v7m.faultmask[exc_secure] = 0;
-            }
-        } else {
-            env->v7m.faultmask[M_REG_NS] = 0;
-        }
-    }
-
-    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
-                                     exc_secure)) {
-    case -1:
-        /* attempt to exit an exception that isn't active */
-        ufault = true;
-        break;
-    case 0:
-        /* still an irq active now */
-        break;
-    case 1:
-        /*
-         * We returned to base exception level, no nesting.
-         * (In the pseudocode this is written using "NestedActivation != 1"
-         * where we have 'rettobase == false'.)
-         */
-        rettobase = true;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
-    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
-    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
-        (excret & R_V7M_EXCRET_S_MASK);
-
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            /*
-             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
-             * we choose to take the UsageFault.
-             */
-            if ((excret & R_V7M_EXCRET_S_MASK) ||
-                (excret & R_V7M_EXCRET_ES_MASK) ||
-                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
-                ufault = true;
-            }
-        }
-        if (excret & R_V7M_EXCRET_RES0_MASK) {
-            ufault = true;
-        }
-    } else {
-        /* For v7M we only recognize certain combinations of the low bits */
-        switch (excret & 0xf) {
-        case 1: /* Return to Handler */
-            break;
-        case 13: /* Return to Thread using Process stack */
-        case 9: /* Return to Thread using Main stack */
-            /*
-             * We only need to check NONBASETHRDENA for v7M, because in
-             * v8M this bit does not exist (it is RES1).
-             */
-            if (!rettobase &&
-                !(env->v7m.ccr[env->v7m.secure] &
-                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
-                ufault = true;
-            }
-            break;
-        default:
-            ufault = true;
-        }
-    }
-
-    /*
-     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
-     * Handler mode (and will be until we write the new XPSR.Interrupt
-     * field) this does not switch around the current stack pointer.
-     * We must do this before we do any kind of tailchaining, including
-     * for the derived exceptions on integrity check failures, or we will
-     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
-     */
-    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
-
-    /*
-     * Clear scratch FP values left in caller saved registers; this
-     * must happen before any kind of tail chaining.
-     */
-    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
-        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
-        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
-            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                          "stackframe: error during lazy state deactivation\n");
-            v7m_exception_taken(cpu, excret, true, false);
-            return;
-        } else {
-            /* Clear s0..s15 and FPSCR */
-            int i;
-
-            for (i = 0; i < 16; i += 2) {
-                *aa32_vfp_dreg(env, i / 2) = 0;
-            }
-            vfp_set_fpscr(env, 0);
-        }
-    }
-
-    if (sfault) {
-        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                      "stackframe: failed EXC_RETURN.ES validity check\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    if (ufault) {
-        /*
-         * Bad exception return: instead of popping the exception
-         * stack, directly take a usage fault on the current stack.
-         */
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
-                      "stackframe: failed exception return integrity check\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    /*
-     * Tailchaining: if there is currently a pending exception that
-     * is high enough priority to preempt execution at the level we're
-     * about to return to, then just directly take that exception now,
-     * avoiding an unstack-and-then-stack. Note that now we have
-     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
-     * our current execution priority is already the execution priority we are
-     * returning to -- none of the state we would unstack or set based on
-     * the EXCRET value affects it.
-     */
-    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
-        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    switch_v7m_security_state(env, return_to_secure);
-
-    {
-        /*
-         * The stack pointer we should be reading the exception frame from
-         * depends on bits in the magic exception return type value (and
-         * for v8M isn't necessarily the stack pointer we will eventually
-         * end up resuming execution with). Get a pointer to the location
-         * in the CPU state struct where the SP we need is currently being
-         * stored; we will use and modify it in place.
-         * We use this limited C variable scope so we don't accidentally
-         * use 'frame_sp_p' after we do something that makes it invalid.
-         */
-        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
-                                              return_to_secure,
-                                              !return_to_handler,
-                                              return_to_sp_process);
-        uint32_t frameptr = *frame_sp_p;
-        bool pop_ok = true;
-        ARMMMUIdx mmu_idx;
-        bool return_to_priv = return_to_handler ||
-            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
-
-        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
-                                                        return_to_priv);
-
-        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
-            arm_feature(env, ARM_FEATURE_V8)) {
-            qemu_log_mask(LOG_GUEST_ERROR,
-                          "M profile exception return with non-8-aligned SP "
-                          "for destination state is UNPREDICTABLE\n");
-        }
-
-        /* Do we need to pop callee-saved registers? */
-        if (return_to_secure &&
-            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
-             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
-            uint32_t actual_sig;
-
-            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
-
-            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
-                /* Take a SecureFault on the current stack */
-                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                              "stackframe: failed exception return integrity "
-                              "signature check\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-
-            pop_ok = pop_ok &&
-                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
-
-            frameptr += 0x28;
-        }
-
-        /* Pop registers */
-        pop_ok = pop_ok &&
-            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
-            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
-
-        if (!pop_ok) {
-            /*
-             * v7m_stack_read() pended a fault, so take it (as a tail
-             * chained exception on the same stack frame)
-             */
-            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
-            v7m_exception_taken(cpu, excret, true, false);
-            return;
-        }
-
-        /*
-         * Returning from an exception with a PC with bit 0 set is defined
-         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
-         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
-         * the lsbit, and there are several RTOSes out there which incorrectly
-         * assume the r15 in the stack frame should be a Thumb-style "lsbit
-         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
-         * complain about the badly behaved guest.
-         */
-        if (env->regs[15] & 1) {
-            env->regs[15] &= ~1U;
-            if (!arm_feature(env, ARM_FEATURE_V8)) {
-                qemu_log_mask(LOG_GUEST_ERROR,
-                              "M profile return from interrupt with misaligned "
-                              "PC is UNPREDICTABLE on v7M\n");
-            }
-        }
-
-        if (arm_feature(env, ARM_FEATURE_V8)) {
-            /*
-             * For v8M we have to check whether the xPSR exception field
-             * matches the EXCRET value for return to handler/thread
-             * before we commit to changing the SP and xPSR.
-             */
-            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
-            if (return_to_handler != will_be_handler) {
-                /*
-                 * Take an INVPC UsageFault on the current stack.
-                 * By this point we will have switched to the security state
-                 * for the background state, so this UsageFault will target
-                 * that state.
-                 */
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                        env->v7m.secure);
-                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
-                              "stackframe: failed exception return integrity "
-                              "check\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-        }
-
-        if (!ftype) {
-            /* FP present and we need to handle it */
-            if (!return_to_secure &&
-                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...taking SecureFault on existing stackframe: "
-                              "Secure LSPACT set but exception return is "
-                              "not to secure state\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-
-            restore_s16_s31 = return_to_secure &&
-                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
-
-            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
-                /* State in FPU is still valid, just clear LSPACT */
-                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
-            } else {
-                int i;
-                uint32_t fpscr;
-                bool cpacr_pass, nsacr_pass;
-
-                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
-                                            return_to_priv);
-                nsacr_pass = return_to_secure ||
-                    extract32(env->v7m.nsacr, 10, 1);
-
-                if (!cpacr_pass) {
-                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                            return_to_secure);
-                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...taking UsageFault on existing "
-                                  "stackframe: CPACR.CP10 prevents unstacking "
-                                  "FP regs\n");
-                    v7m_exception_taken(cpu, excret, true, false);
-                    return;
-                } else if (!nsacr_pass) {
-                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
-                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...taking Secure UsageFault on existing "
-                                  "stackframe: NSACR.CP10 prevents unstacking "
-                                  "FP regs\n");
-                    v7m_exception_taken(cpu, excret, true, false);
-                    return;
-                }
-
-                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
-                    uint32_t slo, shi;
-                    uint64_t dn;
-                    uint32_t faddr = frameptr + 0x20 + 4 * i;
-
-                    if (i >= 16) {
-                        faddr += 8; /* Skip the slot for the FPSCR */
-                    }
-
-                    pop_ok = pop_ok &&
-                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
-                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
-
-                    if (!pop_ok) {
-                        break;
-                    }
-
-                    dn = (uint64_t)shi << 32 | slo;
-                    *aa32_vfp_dreg(env, i / 2) = dn;
-                }
-                pop_ok = pop_ok &&
-                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
-                if (pop_ok) {
-                    vfp_set_fpscr(env, fpscr);
-                }
-                if (!pop_ok) {
-                    /*
-                     * These regs are 0 if security extension present;
-                     * otherwise merely UNKNOWN. We zero always.
-                     */
-                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
-                        *aa32_vfp_dreg(env, i / 2) = 0;
-                    }
-                    vfp_set_fpscr(env, 0);
-                }
-            }
-        }
-        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
-                                               V7M_CONTROL, FPCA, !ftype);
-
-        /* Commit to consuming the stack frame */
-        frameptr += 0x20;
-        if (!ftype) {
-            frameptr += 0x48;
-            if (restore_s16_s31) {
-                frameptr += 0x40;
-            }
-        }
-        /*
-         * Undo stack alignment (the SPREALIGN bit indicates that the original
-         * pre-exception SP was not 8-aligned and we added a padding word to
-         * align it, so we undo this by ORing in the bit that increases it
-         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
-         * would work too but a logical OR is how the pseudocode specifies it.)
-         */
-        if (xpsr & XPSR_SPREALIGN) {
-            frameptr |= 4;
-        }
-        *frame_sp_p = frameptr;
-    }
-
-    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
-    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
-        xpsr_mask &= ~XPSR_GE;
-    }
-    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
-    xpsr_write(env, xpsr, xpsr_mask);
-
-    if (env->v7m.secure) {
-        bool sfpa = xpsr & XPSR_SFPA;
-
-        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
-                                               V7M_CONTROL, SFPA, sfpa);
-    }
-
-    /*
-     * The restored xPSR exception field will be zero if we're
-     * resuming in Thread mode. If that doesn't match what the
-     * exception return excret specified then this is a UsageFault.
-     * v7M requires we make this check here; v8M did it earlier.
-     */
-    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
-        /*
-         * Take an INVPC UsageFault by pushing the stack again;
-         * we know we're v7M so this is never a Secure UsageFault.
-         */
-        bool ignore_stackfaults;
-
-        assert(!arm_feature(env, ARM_FEATURE_V8));
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-        ignore_stackfaults = v7m_push_stack(cpu);
-        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
-                      "failed exception return integrity check\n");
-        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
-        return;
-    }
-
-    /* Otherwise, we have a successful exception exit. */
-    arm_clear_exclusive(env);
-    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
-}
-
-static bool do_v7m_function_return(ARMCPU *cpu)
-{
-    /*
-     * v8M security extensions magic function return.
-     * We may either:
-     *  (1) throw an exception (longjump)
-     *  (2) return true if we successfully handled the function return
-     *  (3) return false if we failed a consistency check and have
-     *      pended a UsageFault that needs to be taken now
-     *
-     * At this point the magic return value is split between env->regs[15]
-     * and env->thumb. We don't bother to reconstitute it because we don't
-     * need it (all values are handled the same way).
-     */
-    CPUARMState *env = &cpu->env;
-    uint32_t newpc, newpsr, newpsr_exc;
-
-    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
-
-    {
-        bool threadmode, spsel;
-        TCGMemOpIdx oi;
-        ARMMMUIdx mmu_idx;
-        uint32_t *frame_sp_p;
-        uint32_t frameptr;
-
-        /* Pull the return address and IPSR from the Secure stack */
-        threadmode = !arm_v7m_is_handler_mode(env);
-        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
-
-        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
-        frameptr = *frame_sp_p;
-
-        /*
-         * These loads may throw an exception (for MPU faults). We want to
-         * do them as secure, so work out what MMU index that is.
-         */
-        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
-        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
-        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
-
-        /* Consistency checks on new IPSR */
-        newpsr_exc = newpsr & XPSR_EXCP;
-        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
-              (env->v7m.exception == 1 && newpsr_exc != 0))) {
-            /* Pend the fault and tell our caller to take it */
-            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                    env->v7m.secure);
-            qemu_log_mask(CPU_LOG_INT,
-                          "...taking INVPC UsageFault: "
-                          "IPSR consistency check failed\n");
-            return false;
-        }
-
-        *frame_sp_p = frameptr + 8;
-    }
-
-    /* This invalidates frame_sp_p */
-    switch_v7m_security_state(env, true);
-    env->v7m.exception = newpsr_exc;
-    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-    if (newpsr & XPSR_SFPA) {
-        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
-    }
-    xpsr_write(env, 0, XPSR_IT);
-    env->thumb = newpc & 1;
-    env->regs[15] = newpc & ~1;
-
-    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
-    return true;
-}
-
-static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
-                               uint32_t addr, uint16_t *insn)
-{
-    /*
-     * Load a 16-bit portion of a v7M instruction, returning true on success,
-     * or false on failure (in which case we will have pended the appropriate
-     * exception).
-     * We need to do the instruction fetch's MPU and SAU checks
-     * like this because there is no MMU index that would allow
-     * doing the load with a single function call. Instead we must
-     * first check that the security attributes permit the load
-     * and that they don't mismatch on the two halves of the instruction,
-     * and then we do the load as a secure load (ie using the security
-     * attributes of the address, not the CPU, as architecturally required).
-     */
-    CPUState *cs = CPU(cpu);
-    CPUARMState *env = &cpu->env;
-    V8M_SAttributes sattrs = {};
-    MemTxAttrs attrs = {};
-    ARMMMUFaultInfo fi = {};
-    MemTxResult txres;
-    target_ulong page_size;
-    hwaddr physaddr;
-    int prot;
-
-    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
-    if (!sattrs.nsc || sattrs.ns) {
-        /*
-         * This must be the second half of the insn, and it straddles a
-         * region boundary with the second half not being S&NSC.
-         */
-        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        qemu_log_mask(CPU_LOG_INT,
-                      "...really SecureFault with SFSR.INVEP\n");
-        return false;
-    }
-    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
-                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
-        /* the MPU lookup failed */
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
-        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
-        return false;
-    }
-    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
-                                  attrs, &txres);
-    if (txres != MEMTX_OK) {
-        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
-        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
-        return false;
-    }
-    return true;
-}
-
-static bool v7m_handle_execute_nsc(ARMCPU *cpu)
-{
-    /*
-     * Check whether this attempt to execute code in a Secure & NS-Callable
-     * memory region is for an SG instruction; if so, then emulate the
-     * effect of the SG instruction and return true. Otherwise pend
-     * the correct kind of exception and return false.
-     */
-    CPUARMState *env = &cpu->env;
-    ARMMMUIdx mmu_idx;
-    uint16_t insn;
-
-    /*
-     * We should never get here unless get_phys_addr_pmsav8() caused
-     * an exception for NS executing in S&NSC memory.
-     */
-    assert(!env->v7m.secure);
-    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
-
-    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
-    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-
-    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
-        return false;
-    }
-
-    if (!env->thumb) {
-        goto gen_invep;
-    }
-
-    if (insn != 0xe97f) {
-        /*
-         * Not an SG instruction first half (we choose the IMPDEF
-         * early-SG-check option).
-         */
-        goto gen_invep;
-    }
-
-    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
-        return false;
-    }
-
-    if (insn != 0xe97f) {
-        /*
-         * Not an SG instruction second half (yes, both halves of the SG
-         * insn have the same hex value)
-         */
-        goto gen_invep;
-    }
-
-    /*
-     * OK, we have confirmed that we really have an SG instruction.
-     * We know we're NS in S memory so don't need to repeat those checks.
-     */
-    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
-                  ", executing it\n", env->regs[15]);
-    env->regs[14] &= ~1;
-    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-    switch_v7m_security_state(env, true);
-    xpsr_write(env, 0, XPSR_IT);
-    env->regs[15] += 4;
-    return true;
-
-gen_invep:
-    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-    qemu_log_mask(CPU_LOG_INT,
-                  "...really SecureFault with SFSR.INVEP\n");
-    return false;
-}
-
-void arm_v7m_cpu_do_interrupt(CPUState *cs)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    uint32_t lr;
-    bool ignore_stackfaults;
-
-    arm_log_exception(cs->exception_index);
-
-    /*
-     * For exceptions we just mark as pending on the NVIC, and let that
-     * handle it.
-     */
-    switch (cs->exception_index) {
-    case EXCP_UDEF:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
-        break;
-    case EXCP_NOCP:
-    {
-        /*
-         * NOCP might be directed to something other than the current
-         * security state if this fault is because of NSACR; we indicate
-         * the target security state using exception.target_el.
-         */
-        int target_secstate;
-
-        if (env->exception.target_el == 3) {
-            target_secstate = M_REG_S;
-        } else {
-            target_secstate = env->v7m.secure;
-        }
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
-        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
-        break;
-    }
-    case EXCP_INVSTATE:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
-        break;
-    case EXCP_STKOF:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
-        break;
-    case EXCP_LSERR:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-        break;
-    case EXCP_UNALIGNED:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
-        break;
-    case EXCP_SWI:
-        /* The PC already points to the next instruction. */
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
-        break;
-    case EXCP_PREFETCH_ABORT:
-    case EXCP_DATA_ABORT:
-        /*
-         * Note that for M profile we don't have a guest facing FSR, but
-         * the env->exception.fsr will be populated by the code that
-         * raises the fault, in the A profile short-descriptor format.
-         */
-        switch (env->exception.fsr & 0xf) {
-        case M_FAKE_FSR_NSC_EXEC:
-            /*
-             * Exception generated when we try to execute code at an address
-             * which is marked as Secure & Non-Secure Callable and the CPU
-             * is in the Non-Secure state. The only instruction which can
-             * be executed like this is SG (and that only if both halves of
-             * the SG instruction have the same security attributes.)
-             * Everything else must generate an INVEP SecureFault, so we
-             * emulate the SG instruction here.
-             */
-            if (v7m_handle_execute_nsc(cpu)) {
-                return;
-            }
-            break;
-        case M_FAKE_FSR_SFAULT:
-            /*
-             * Various flavours of SecureFault for attempts to execute or
-             * access data in the wrong security state.
-             */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                if (env->v7m.secure) {
-                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...really SecureFault with SFSR.INVTRAN\n");
-                } else {
-                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...really SecureFault with SFSR.INVEP\n");
-                }
-                break;
-            case EXCP_DATA_ABORT:
-                /* This must be an NS access to S memory */
-                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...really SecureFault with SFSR.AUVIOL\n");
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            break;
-        case 0x8: /* External Abort */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
-                break;
-            case EXCP_DATA_ABORT:
-                env->v7m.cfsr[M_REG_NS] |=
-                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
-                env->v7m.bfar = env->exception.vaddress;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
-                              env->v7m.bfar);
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
-            break;
-        default:
-            /*
-             * All other FSR values are either MPU faults or "can't happen
-             * for M profile" cases.
-             */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
-                break;
-            case EXCP_DATA_ABORT:
-                env->v7m.cfsr[env->v7m.secure] |=
-                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
-                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
-                              env->v7m.mmfar[env->v7m.secure]);
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
-                                    env->v7m.secure);
-            break;
-        }
-        break;
-    case EXCP_BKPT:
-        if (semihosting_enabled()) {
-            int nr;
-            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
-            if (nr == 0xab) {
-                env->regs[15] += 2;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...handling as semihosting call 0x%x\n",
-                              env->regs[0]);
-                env->regs[0] = do_arm_semihosting(env);
-                return;
-            }
-        }
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
-        break;
-    case EXCP_IRQ:
-        break;
-    case EXCP_EXCEPTION_EXIT:
-        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
-            /* Must be v8M security extension function return */
-            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
-            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
-            if (do_v7m_function_return(cpu)) {
-                return;
-            }
-        } else {
-            do_v7m_exception_exit(cpu);
-            return;
-        }
-        break;
-    case EXCP_LAZYFP:
-        /*
-         * We already pended the specific exception in the NVIC in the
-         * v7m_preserve_fp_state() helper function.
-         */
-        break;
-    default:
-        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
-        return; /* Never happens. Keep compiler happy. */
-    }
-
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        lr = R_V7M_EXCRET_RES1_MASK |
-            R_V7M_EXCRET_DCRS_MASK;
-        /*
-         * The S bit indicates whether we should return to Secure
-         * or NonSecure (ie our current state).
-         * The ES bit indicates whether we're taking this exception
-         * to Secure or NonSecure (ie our target state). We set it
-         * later, in v7m_exception_taken().
-         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
-         * This corresponds to the ARM ARM pseudocode for v8M setting
-         * some LR bits in PushStack() and some in ExceptionTaken();
-         * the distinction matters for the tailchain cases where we
-         * can take an exception without pushing the stack.
-         */
-        if (env->v7m.secure) {
-            lr |= R_V7M_EXCRET_S_MASK;
-        }
-        if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
-            lr |= R_V7M_EXCRET_FTYPE_MASK;
-        }
-    } else {
-        lr = R_V7M_EXCRET_RES1_MASK |
-            R_V7M_EXCRET_S_MASK |
-            R_V7M_EXCRET_DCRS_MASK |
-            R_V7M_EXCRET_FTYPE_MASK |
-            R_V7M_EXCRET_ES_MASK;
-        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
-            lr |= R_V7M_EXCRET_SPSEL_MASK;
-        }
-    }
-    if (!arm_v7m_is_handler_mode(env)) {
-        lr |= R_V7M_EXCRET_MODE_MASK;
-    }
-
-    ignore_stackfaults = v7m_push_stack(cpu);
-    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
-}
-
 /*
  * Function used to synchronize QEMU's AArch64 register set with AArch32
  * register set. This is necessary when switching between AArch32 and AArch64
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     return phys_addr;
 }
 
-uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
-{
-    uint32_t mask;
-    unsigned el = arm_current_el(env);
-
-    /* First handle registers which unprivileged can read */
-
-    switch (reg) {
-    case 0 ... 7: /* xPSR sub-fields */
- mask = 0;
2226
- if ((reg & 1) && el) {
2227
- mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
2228
- }
2229
- if (!(reg & 4)) {
2230
- mask |= XPSR_NZCV | XPSR_Q; /* APSR */
2231
- if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
2232
- mask |= XPSR_GE;
2233
- }
2234
- }
2235
- /* EPSR reads as zero */
2236
- return xpsr_read(env) & mask;
2237
- break;
2238
- case 20: /* CONTROL */
2239
- {
2240
- uint32_t value = env->v7m.control[env->v7m.secure];
2241
- if (!env->v7m.secure) {
2242
- /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
2243
- value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
2244
- }
2245
- return value;
2246
- }
2247
- case 0x94: /* CONTROL_NS */
2248
- /*
2249
- * We have to handle this here because unprivileged Secure code
2250
- * can read the NS CONTROL register.
2251
- */
2252
- if (!env->v7m.secure) {
2253
- return 0;
2254
- }
2255
- return env->v7m.control[M_REG_NS] |
2256
- (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2257
- }
2258
-
2259
- if (el == 0) {
2260
- return 0; /* unprivileged reads others as zero */
2261
- }
2262
-
2263
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2264
- switch (reg) {
2265
- case 0x88: /* MSP_NS */
2266
- if (!env->v7m.secure) {
2267
- return 0;
2268
- }
2269
- return env->v7m.other_ss_msp;
2270
- case 0x89: /* PSP_NS */
2271
- if (!env->v7m.secure) {
2272
- return 0;
2273
- }
2274
- return env->v7m.other_ss_psp;
2275
- case 0x8a: /* MSPLIM_NS */
2276
- if (!env->v7m.secure) {
2277
- return 0;
2278
- }
2279
- return env->v7m.msplim[M_REG_NS];
2280
- case 0x8b: /* PSPLIM_NS */
2281
- if (!env->v7m.secure) {
2282
- return 0;
2283
- }
2284
- return env->v7m.psplim[M_REG_NS];
2285
- case 0x90: /* PRIMASK_NS */
2286
- if (!env->v7m.secure) {
2287
- return 0;
2288
- }
2289
- return env->v7m.primask[M_REG_NS];
2290
- case 0x91: /* BASEPRI_NS */
2291
- if (!env->v7m.secure) {
2292
- return 0;
2293
- }
2294
- return env->v7m.basepri[M_REG_NS];
2295
- case 0x93: /* FAULTMASK_NS */
2296
- if (!env->v7m.secure) {
2297
- return 0;
2298
- }
2299
- return env->v7m.faultmask[M_REG_NS];
2300
- case 0x98: /* SP_NS */
2301
- {
2302
- /*
2303
- * This gives the non-secure SP selected based on whether we're
2304
- * currently in handler mode or not, using the NS CONTROL.SPSEL.
2305
- */
2306
- bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2307
-
2308
- if (!env->v7m.secure) {
2309
- return 0;
2310
- }
2311
- if (!arm_v7m_is_handler_mode(env) && spsel) {
2312
- return env->v7m.other_ss_psp;
2313
- } else {
2314
- return env->v7m.other_ss_msp;
2315
- }
2316
- }
2317
- default:
2318
- break;
2319
- }
2320
- }
2321
-
2322
- switch (reg) {
2323
- case 8: /* MSP */
2324
- return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2325
- case 9: /* PSP */
2326
- return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2327
- case 10: /* MSPLIM */
2328
- if (!arm_feature(env, ARM_FEATURE_V8)) {
2329
- goto bad_reg;
2330
- }
2331
- return env->v7m.msplim[env->v7m.secure];
2332
- case 11: /* PSPLIM */
2333
- if (!arm_feature(env, ARM_FEATURE_V8)) {
2334
- goto bad_reg;
2335
- }
2336
- return env->v7m.psplim[env->v7m.secure];
2337
- case 16: /* PRIMASK */
2338
- return env->v7m.primask[env->v7m.secure];
2339
- case 17: /* BASEPRI */
2340
- case 18: /* BASEPRI_MAX */
2341
- return env->v7m.basepri[env->v7m.secure];
2342
- case 19: /* FAULTMASK */
2343
- return env->v7m.faultmask[env->v7m.secure];
2344
- default:
2345
- bad_reg:
2346
- qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2347
- " register %d\n", reg);
2348
- return 0;
2349
- }
2350
-}
2351
-
2352
-void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2353
-{
2354
- /*
2355
- * We're passed bits [11..0] of the instruction; extract
2356
- * SYSm and the mask bits.
2357
- * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2358
- * we choose to treat them as if the mask bits were valid.
2359
- * NB that the pseudocode 'mask' variable is bits [11..10],
2360
- * whereas ours is [11..8].
2361
- */
2362
- uint32_t mask = extract32(maskreg, 8, 4);
2363
- uint32_t reg = extract32(maskreg, 0, 8);
2364
- int cur_el = arm_current_el(env);
2365
-
2366
- if (cur_el == 0 && reg > 7 && reg != 20) {
2367
- /*
2368
- * only xPSR sub-fields and CONTROL.SFPA may be written by
2369
- * unprivileged code
2370
- */
2371
- return;
2372
- }
2373
-
2374
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2375
- switch (reg) {
2376
- case 0x88: /* MSP_NS */
2377
- if (!env->v7m.secure) {
2378
- return;
2379
- }
2380
- env->v7m.other_ss_msp = val;
2381
- return;
2382
- case 0x89: /* PSP_NS */
2383
- if (!env->v7m.secure) {
2384
- return;
2385
- }
2386
- env->v7m.other_ss_psp = val;
2387
- return;
2388
- case 0x8a: /* MSPLIM_NS */
2389
- if (!env->v7m.secure) {
2390
- return;
2391
- }
2392
- env->v7m.msplim[M_REG_NS] = val & ~7;
2393
- return;
2394
- case 0x8b: /* PSPLIM_NS */
2395
- if (!env->v7m.secure) {
2396
- return;
2397
- }
2398
- env->v7m.psplim[M_REG_NS] = val & ~7;
2399
- return;
2400
- case 0x90: /* PRIMASK_NS */
2401
- if (!env->v7m.secure) {
2402
- return;
2403
- }
2404
- env->v7m.primask[M_REG_NS] = val & 1;
2405
- return;
2406
- case 0x91: /* BASEPRI_NS */
2407
- if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2408
- return;
2409
- }
2410
- env->v7m.basepri[M_REG_NS] = val & 0xff;
2411
- return;
2412
- case 0x93: /* FAULTMASK_NS */
2413
- if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2414
- return;
2415
- }
2416
- env->v7m.faultmask[M_REG_NS] = val & 1;
2417
- return;
2418
- case 0x94: /* CONTROL_NS */
2419
- if (!env->v7m.secure) {
2420
- return;
2421
- }
2422
- write_v7m_control_spsel_for_secstate(env,
2423
- val & R_V7M_CONTROL_SPSEL_MASK,
2424
- M_REG_NS);
2425
- if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2426
- env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2427
- env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2428
- }
2429
- /*
2430
- * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2431
- * RES0 if the FPU is not present, and is stored in the S bank
2432
- */
2433
- if (arm_feature(env, ARM_FEATURE_VFP) &&
2434
- extract32(env->v7m.nsacr, 10, 1)) {
2435
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2436
- env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2437
- }
2438
- return;
2439
- case 0x98: /* SP_NS */
2440
- {
2441
- /*
2442
- * This gives the non-secure SP selected based on whether we're
2443
- * currently in handler mode or not, using the NS CONTROL.SPSEL.
2444
- */
2445
- bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2446
- bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2447
- uint32_t limit;
2448
-
2449
- if (!env->v7m.secure) {
2450
- return;
2451
- }
2452
-
2453
- limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
2454
-
2455
- if (val < limit) {
2456
- CPUState *cs = env_cpu(env);
2457
-
2458
- cpu_restore_state(cs, GETPC(), true);
2459
- raise_exception(env, EXCP_STKOF, 0, 1);
2460
- }
2461
-
2462
- if (is_psp) {
2463
- env->v7m.other_ss_psp = val;
2464
- } else {
2465
- env->v7m.other_ss_msp = val;
2466
- }
2467
- return;
2468
- }
2469
- default:
2470
- break;
2471
- }
2472
- }
2473
-
2474
- switch (reg) {
2475
- case 0 ... 7: /* xPSR sub-fields */
2476
- /* only APSR is actually writable */
2477
- if (!(reg & 4)) {
2478
- uint32_t apsrmask = 0;
2479
-
2480
- if (mask & 8) {
2481
- apsrmask |= XPSR_NZCV | XPSR_Q;
2482
- }
2483
- if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
2484
- apsrmask |= XPSR_GE;
2485
- }
2486
- xpsr_write(env, val, apsrmask);
2487
- }
2488
- break;
2489
- case 8: /* MSP */
2490
- if (v7m_using_psp(env)) {
2491
- env->v7m.other_sp = val;
2492
- } else {
2493
- env->regs[13] = val;
2494
- }
2495
- break;
2496
- case 9: /* PSP */
2497
- if (v7m_using_psp(env)) {
2498
- env->regs[13] = val;
2499
- } else {
2500
- env->v7m.other_sp = val;
2501
- }
2502
- break;
2503
- case 10: /* MSPLIM */
2504
- if (!arm_feature(env, ARM_FEATURE_V8)) {
2505
- goto bad_reg;
2506
- }
2507
- env->v7m.msplim[env->v7m.secure] = val & ~7;
2508
- break;
2509
- case 11: /* PSPLIM */
2510
- if (!arm_feature(env, ARM_FEATURE_V8)) {
2511
- goto bad_reg;
2512
- }
2513
- env->v7m.psplim[env->v7m.secure] = val & ~7;
2514
- break;
2515
- case 16: /* PRIMASK */
2516
- env->v7m.primask[env->v7m.secure] = val & 1;
2517
- break;
2518
- case 17: /* BASEPRI */
2519
- if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2520
- goto bad_reg;
2521
- }
2522
- env->v7m.basepri[env->v7m.secure] = val & 0xff;
2523
- break;
2524
- case 18: /* BASEPRI_MAX */
2525
- if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2526
- goto bad_reg;
2527
- }
2528
- val &= 0xff;
2529
- if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2530
- || env->v7m.basepri[env->v7m.secure] == 0)) {
2531
- env->v7m.basepri[env->v7m.secure] = val;
2532
- }
2533
- break;
2534
- case 19: /* FAULTMASK */
2535
- if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2536
- goto bad_reg;
2537
- }
2538
- env->v7m.faultmask[env->v7m.secure] = val & 1;
2539
- break;
2540
- case 20: /* CONTROL */
2541
- /*
2542
- * Writing to the SPSEL bit only has an effect if we are in
2543
- * thread mode; other bits can be updated by any privileged code.
2544
- * write_v7m_control_spsel() deals with updating the SPSEL bit in
2545
- * env->v7m.control, so we only need update the others.
2546
- * For v7M, we must just ignore explicit writes to SPSEL in handler
2547
- * mode; for v8M the write is permitted but will have no effect.
2548
- * All these bits are writes-ignored from non-privileged code,
2549
- * except for SFPA.
2550
- */
2551
- if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2552
- !arm_v7m_is_handler_mode(env))) {
2553
- write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2554
- }
2555
- if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2556
- env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2557
- env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2558
- }
2559
- if (arm_feature(env, ARM_FEATURE_VFP)) {
2560
- /*
2561
- * SFPA is RAZ/WI from NS or if no FPU.
2562
- * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2563
- * Both are stored in the S bank.
2564
- */
2565
- if (env->v7m.secure) {
2566
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2567
- env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2568
- }
2569
- if (cur_el > 0 &&
2570
- (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2571
- extract32(env->v7m.nsacr, 10, 1))) {
2572
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2573
- env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2574
- }
2575
- }
2576
- break;
2577
- default:
2578
- bad_reg:
2579
- qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2580
- " register %d\n", reg);
2581
- return;
2582
- }
2583
-}
2584
-
2585
-uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2586
-{
2587
- /* Implement the TT instruction. op is bits [7:6] of the insn. */
2588
- bool forceunpriv = op & 1;
2589
- bool alt = op & 2;
2590
- V8M_SAttributes sattrs = {};
2591
- uint32_t tt_resp;
2592
- bool r, rw, nsr, nsrw, mrvalid;
2593
- int prot;
2594
- ARMMMUFaultInfo fi = {};
2595
- MemTxAttrs attrs = {};
2596
- hwaddr phys_addr;
2597
- ARMMMUIdx mmu_idx;
2598
- uint32_t mregion;
2599
- bool targetpriv;
2600
- bool targetsec = env->v7m.secure;
2601
- bool is_subpage;
2602
-
2603
- /*
2604
- * Work out what the security state and privilege level we're
2605
- * interested in is...
2606
- */
2607
- if (alt) {
2608
- targetsec = !targetsec;
2609
- }
2610
-
2611
- if (forceunpriv) {
2612
- targetpriv = false;
2613
- } else {
2614
- targetpriv = arm_v7m_is_handler_mode(env) ||
2615
- !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
2616
- }
2617
-
2618
- /* ...and then figure out which MMU index this is */
2619
- mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
2620
-
2621
- /*
2622
- * We know that the MPU and SAU don't care about the access type
2623
- * for our purposes beyond that we don't want to claim to be
2624
- * an insn fetch, so we arbitrarily call this a read.
2625
- */
2626
-
2627
- /*
2628
- * MPU region info only available for privileged or if
2629
- * inspecting the other MPU state.
2630
- */
2631
- if (arm_current_el(env) != 0 || alt) {
2632
- /* We can ignore the return value as prot is always set */
2633
- pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
2634
- &phys_addr, &attrs, &prot, &is_subpage,
2635
- &fi, &mregion);
2636
- if (mregion == -1) {
2637
- mrvalid = false;
2638
- mregion = 0;
2639
- } else {
2640
- mrvalid = true;
2641
- }
2642
- r = prot & PAGE_READ;
2643
- rw = prot & PAGE_WRITE;
2644
- } else {
2645
- r = false;
2646
- rw = false;
2647
- mrvalid = false;
2648
- mregion = 0;
2649
- }
2650
-
2651
- if (env->v7m.secure) {
2652
- v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
2653
- nsr = sattrs.ns && r;
2654
- nsrw = sattrs.ns && rw;
2655
- } else {
2656
- sattrs.ns = true;
2657
- nsr = false;
2658
- nsrw = false;
2659
- }
2660
-
2661
- tt_resp = (sattrs.iregion << 24) |
2662
- (sattrs.irvalid << 23) |
2663
- ((!sattrs.ns) << 22) |
2664
- (nsrw << 21) |
2665
- (nsr << 20) |
2666
- (rw << 19) |
2667
- (r << 18) |
2668
- (sattrs.srvalid << 17) |
2669
- (mrvalid << 16) |
2670
- (sattrs.sregion << 8) |
2671
- mregion;
2672
-
2673
- return tt_resp;
2674
-}
2675
-
2676
#endif
2677
2678
/* Note that signed overflow is undefined in C. The following routines are
2679
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
2680
return 0;
2681
}
2682
2683
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
2684
- bool secstate, bool priv, bool negpri)
2685
-{
2686
- ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
2687
-
2688
- if (priv) {
2689
- mmu_idx |= ARM_MMU_IDX_M_PRIV;
2690
- }
2691
-
2692
- if (negpri) {
2693
- mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
2694
- }
2695
-
2696
- if (secstate) {
2697
- mmu_idx |= ARM_MMU_IDX_M_S;
2698
- }
2699
-
2700
- return mmu_idx;
2701
-}
2702
-
2703
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
2704
- bool secstate, bool priv)
2705
-{
2706
- bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
2707
-
2708
- return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
2709
-}
2710
-
2711
-/* Return the MMU index for a v7M CPU in the specified security state */
2712
+#ifndef CONFIG_TCG
2713
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
2714
{
2715
- bool priv = arm_current_el(env) != 0;
2716
-
2717
- return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
2718
+ g_assert_not_reached();
2719
}
2720
+#endif
2721
2722
ARMMMUIdx arm_mmu_idx(CPUARMState *env)
2723
{
2724
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
2725
new file mode 100644
2726
index XXXXXXX..XXXXXXX
2727
--- /dev/null
2728
+++ b/target/arm/m_helper.c
2729
@@ -XXX,XX +XXX,XX @@
2730
+/*
2731
+ * ARM generic helpers.
2732
+ *
2733
+ * This code is licensed under the GNU GPL v2 or later.
2734
+ *
2735
+ * SPDX-License-Identifier: GPL-2.0-or-later
2736
+ */
2737
+#include "qemu/osdep.h"
2738
+#include "qemu/units.h"
2739
+#include "target/arm/idau.h"
2740
+#include "trace.h"
2741
+#include "cpu.h"
2742
+#include "internals.h"
2743
+#include "exec/gdbstub.h"
2744
+#include "exec/helper-proto.h"
2745
+#include "qemu/host-utils.h"
2746
+#include "sysemu/sysemu.h"
2747
+#include "qemu/bitops.h"
2748
+#include "qemu/crc32c.h"
2749
+#include "qemu/qemu-print.h"
2750
+#include "exec/exec-all.h"
2751
+#include <zlib.h> /* For crc32 */
2752
+#include "hw/semihosting/semihost.h"
2753
+#include "sysemu/cpus.h"
2754
+#include "sysemu/kvm.h"
2755
+#include "qemu/range.h"
2756
+#include "qapi/qapi-commands-target.h"
2757
+#include "qapi/error.h"
2758
+#include "qemu/guest-random.h"
2759
+#ifdef CONFIG_TCG
2760
+#include "arm_ldst.h"
2761
+#include "exec/cpu_ldst.h"
2762
+#endif
2763
+
2764
+#ifdef CONFIG_USER_ONLY
2765
+
2766
+/* These should probably raise undefined insn exceptions. */
2767
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
2768
+{
2769
+ ARMCPU *cpu = env_archcpu(env);
2770
+
2771
+ cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
2772
+}
2773
+
2774
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2775
+{
2776
+ ARMCPU *cpu = env_archcpu(env);
2777
+
2778
+ cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
2779
+ return 0;
2780
+}
2781
+
2782
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
2783
+{
2784
+ /* translate.c should never generate calls here in user-only mode */
2785
+ g_assert_not_reached();
2786
+}
2787
+
2788
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
2789
+{
2790
+ /* translate.c should never generate calls here in user-only mode */
2791
+ g_assert_not_reached();
2792
+}
2793
+
2794
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
2795
+{
2796
+ /* translate.c should never generate calls here in user-only mode */
2797
+ g_assert_not_reached();
2798
+}
2799
+
2800
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
2801
+{
2802
+ /* translate.c should never generate calls here in user-only mode */
2803
+ g_assert_not_reached();
2804
+}
2805
+
2806
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
2807
+{
2808
+ /* translate.c should never generate calls here in user-only mode */
2809
+ g_assert_not_reached();
2810
+}
2811
+
2812
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2813
+{
2814
+ /*
2815
+ * The TT instructions can be used by unprivileged code, but in
2816
+ * user-only emulation we don't have the MPU.
2817
+ * Luckily since we know we are NonSecure unprivileged (and that in
2818
+ * turn means that the A flag wasn't specified), all the bits in the
2819
+ * register must be zero:
2820
+ * IREGION: 0 because IRVALID is 0
2821
+ * IRVALID: 0 because NS
2822
+ * S: 0 because NS
2823
+ * NSRW: 0 because NS
2824
+ * NSR: 0 because NS
2825
+ * RW: 0 because unpriv and A flag not set
2826
+ * R: 0 because unpriv and A flag not set
2827
+ * SRVALID: 0 because NS
2828
+ * MRVALID: 0 because unpriv and A flag not set
2829
+     *  SREGION: 0 because SRVALID is 0
+     *  MREGION: 0 because MRVALID is 0
+     */
+    return 0;
+}
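
[For reference, the TT response word whose bit layout the comment above
documents can be unpacked as below. This is only an illustrative sketch:
the struct and function names are invented here and are not part of this
patch or of any QEMU header.]

    #include <stdint.h>

    /* Sketch: decode a TT/TTT/TTA/TTAT result into its fields,
     * following the bit layout documented in the comment above.
     */
    struct tt_result {
        uint8_t mregion;  /* [7:0]   MPU region number */
        uint8_t sregion;  /* [15:8]  SAU region number */
        int mrvalid;      /* [16] */
        int srvalid;      /* [17] */
        int r, rw;        /* [18], [19] */
        int nsr, nsrw;    /* [20], [21] */
        int s;            /* [22] */
        int irvalid;      /* [23] */
        uint8_t iregion;  /* [31:24] IDAU region number */
    };

    static struct tt_result decode_tt(uint32_t resp)
    {
        struct tt_result t = {
            .mregion = resp & 0xff,
            .sregion = (resp >> 8) & 0xff,
            .mrvalid = (resp >> 16) & 1,
            .srvalid = (resp >> 17) & 1,
            .r       = (resp >> 18) & 1,
            .rw      = (resp >> 19) & 1,
            .nsr     = (resp >> 20) & 1,
            .nsrw    = (resp >> 21) & 1,
            .s       = (resp >> 22) & 1,
            .irvalid = (resp >> 23) & 1,
            .iregion = (resp >> 24) & 0xff,
        };
        return t;
    }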
+
+#else
+
+/*
+ * What kind of stack write are we doing? This affects how exceptions
+ * generated during the stacking are treated.
+ */
+typedef enum StackingMode {
+    STACK_NORMAL,
+    STACK_IGNFAULTS,
+    STACK_LAZYFP,
+} StackingMode;
+
+static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
+                            ARMMMUIdx mmu_idx, StackingMode mode)
+{
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+    ARMMMUFaultInfo fi = {};
+    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
+    int exc;
+    bool exc_secure;
+
+    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
+                      &attrs, &prot, &page_size, &fi, NULL)) {
+        /* MPU/SAU lookup failed */
+        if (fi.type == ARMFault_QEMU_SFault) {
+            if (mode == STACK_LAZYFP) {
+                qemu_log_mask(CPU_LOG_INT,
+                              "...SecureFault with SFSR.LSPERR "
+                              "during lazy stacking\n");
+                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
+            } else {
+                qemu_log_mask(CPU_LOG_INT,
+                              "...SecureFault with SFSR.AUVIOL "
+                              "during stacking\n");
+                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+            }
+            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
+            env->v7m.sfar = addr;
+            exc = ARMV7M_EXCP_SECURE;
+            exc_secure = false;
+        } else {
+            if (mode == STACK_LAZYFP) {
+                qemu_log_mask(CPU_LOG_INT,
+                              "...MemManageFault with CFSR.MLSPERR\n");
+                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
+            } else {
+                qemu_log_mask(CPU_LOG_INT,
+                              "...MemManageFault with CFSR.MSTKERR\n");
+                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
+            }
+            exc = ARMV7M_EXCP_MEM;
+            exc_secure = secure;
+        }
+        goto pend_fault;
+    }
+    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
+                         attrs, &txres);
+    if (txres != MEMTX_OK) {
+        /* BusFault trying to write the data */
+        if (mode == STACK_LAZYFP) {
+            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
+            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
+        } else {
+            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
+            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
+        }
+        exc = ARMV7M_EXCP_BUS;
+        exc_secure = false;
+        goto pend_fault;
+    }
+    return true;
+
+pend_fault:
+    /*
+     * By pending the exception at this point we are making
+     * the IMPDEF choice "overridden exceptions pended" (see the
+     * MergeExcInfo() pseudocode). The other choice would be to not
+     * pend them now and then make a choice about which to throw away
+     * later if we have two derived exceptions.
+     * The only case when we must not pend the exception but instead
+     * throw it away is if we are doing the push of the callee registers
+     * and we've already generated a derived exception (this is indicated
+     * by the caller passing STACK_IGNFAULTS). Even in this case we will
+     * still update the fault status registers.
+     */
+    switch (mode) {
+    case STACK_NORMAL:
+        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
+        break;
+    case STACK_LAZYFP:
+        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
+        break;
+    case STACK_IGNFAULTS:
+        break;
+    }
+    return false;
+}
+
+static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
+                           ARMMMUIdx mmu_idx)
+{
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+    ARMMMUFaultInfo fi = {};
+    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
+    int exc;
+    bool exc_secure;
+    uint32_t value;
+
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
+                      &attrs, &prot, &page_size, &fi, NULL)) {
+        /* MPU/SAU lookup failed */
+        if (fi.type == ARMFault_QEMU_SFault) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...SecureFault with SFSR.AUVIOL during unstack\n");
+            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+            env->v7m.sfar = addr;
+            exc = ARMV7M_EXCP_SECURE;
+            exc_secure = false;
+        } else {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...MemManageFault with CFSR.MUNSTKERR\n");
+            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
+            exc = ARMV7M_EXCP_MEM;
+            exc_secure = secure;
+        }
+        goto pend_fault;
+    }
+
+    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
+                              attrs, &txres);
+    if (txres != MEMTX_OK) {
+        /* BusFault trying to read the data */
+        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
+        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
+        exc = ARMV7M_EXCP_BUS;
+        exc_secure = false;
+        goto pend_fault;
+    }
+
+    *dest = value;
+    return true;
+
+pend_fault:
+    /*
+     * By pending the exception at this point we are making
+     * the IMPDEF choice "overridden exceptions pended" (see the
+     * MergeExcInfo() pseudocode). The other choice would be to not
+     * pend them now and then make a choice about which to throw away
+     * later if we have two derived exceptions.
+     */
+    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
+    return false;
+}
+
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
+{
+    /*
+     * Preserve FP state (because LSPACT was set and we are about
+     * to execute an FP instruction). This corresponds to the
+     * PreserveFPState() pseudocode.
+     * We may throw an exception if the stacking fails.
+     */
+    ARMCPU *cpu = env_archcpu(env);
+    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
+    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
+    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
+    uint32_t fpcar = env->v7m.fpcar[is_secure];
+    bool stacked_ok = true;
+    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
+    bool take_exception;
+
+    /* Take the iothread lock as we are going to touch the NVIC */
+    qemu_mutex_lock_iothread();
+
+    /* Check the background context had access to the FPU */
+    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
+        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
+        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
+        stacked_ok = false;
+    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
+        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+        stacked_ok = false;
+    }
+
+    if (!splimviol && stacked_ok) {
+        /* We only stack if the stack limit wasn't violated */
+        int i;
+        ARMMMUIdx mmu_idx;
+
+        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
+        for (i = 0; i < (ts ? 32 : 16); i += 2) {
+            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+            uint32_t faddr = fpcar + 4 * i;
+            uint32_t slo = extract64(dn, 0, 32);
+            uint32_t shi = extract64(dn, 32, 32);
+
+            if (i >= 16) {
+                faddr += 8; /* skip the slot for the FPSCR */
+            }
+            stacked_ok = stacked_ok &&
+                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
+                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
+        }
+
+        stacked_ok = stacked_ok &&
+            v7m_stack_write(cpu, fpcar + 0x40,
+                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
+    }
+
+    /*
+     * We definitely pended an exception, but it's possible that it
+     * might not be able to be taken now. If its priority permits us
+     * to take it now, then we must not update the LSPACT or FP regs,
+     * but instead jump out to take the exception immediately.
+     * If it's just pending and won't be taken until the current
+     * handler exits, then we do update LSPACT and the FP regs.
+     */
+    take_exception = !stacked_ok &&
+        armv7m_nvic_can_take_pending_exception(env->nvic);
+
+    qemu_mutex_unlock_iothread();
+
+    if (take_exception) {
+        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
+    }
+
+    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
+
+    if (ts) {
+        /* Clear s0 to s31 and the FPSCR */
+        int i;
+
+        for (i = 0; i < 32; i += 2) {
+            *aa32_vfp_dreg(env, i / 2) = 0;
+        }
+        vfp_set_fpscr(env, 0);
+    }
+    /*
+     * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
+     * unchanged.
+     */
+}
+
+/*
+ * Write to v7M CONTROL.SPSEL bit for the specified security bank.
+ * This may change the current stack pointer between Main and Process
+ * stack pointers if it is done for the CONTROL register for the current
+ * security state.
+ */
+static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
+                                                 bool new_spsel,
+                                                 bool secstate)
+{
+    bool old_is_psp = v7m_using_psp(env);
+
+    env->v7m.control[secstate] =
+        deposit32(env->v7m.control[secstate],
+                  R_V7M_CONTROL_SPSEL_SHIFT,
+                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
+
+    if (secstate == env->v7m.secure) {
+        bool new_is_psp = v7m_using_psp(env);
+        uint32_t tmp;
+
+        if (old_is_psp != new_is_psp) {
+            tmp = env->v7m.other_sp;
+            env->v7m.other_sp = env->regs[13];
+            env->regs[13] = tmp;
+        }
+    }
+}
+
+/*
+ * Write to v7M CONTROL.SPSEL bit. This may change the current
+ * stack pointer between Main and Process stack pointers.
+ */
+static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
+{
+    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
+}
+
+void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
+{
+    /*
+     * Write a new value to v7m.exception, thus transitioning into or out
+     * of Handler mode; this may result in a change of active stack pointer.
+     */
+    bool new_is_psp, old_is_psp = v7m_using_psp(env);
+    uint32_t tmp;
+
+    env->v7m.exception = new_exc;
+
+    new_is_psp = v7m_using_psp(env);
+
+    if (old_is_psp != new_is_psp) {
+        tmp = env->v7m.other_sp;
+        env->v7m.other_sp = env->regs[13];
+        env->regs[13] = tmp;
+    }
+}
+
+/* Switch M profile security state between NS and S */
+static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
+{
+    uint32_t new_ss_msp, new_ss_psp;
+
+    if (env->v7m.secure == new_secstate) {
+        return;
+    }
+
+    /*
+     * All the banked state is accessed by looking at env->v7m.secure
+     * except for the stack pointer; rearrange the SP appropriately.
+     */
+    new_ss_msp = env->v7m.other_ss_msp;
+    new_ss_psp = env->v7m.other_ss_psp;
+
+    if (v7m_using_psp(env)) {
+        env->v7m.other_ss_psp = env->regs[13];
+        env->v7m.other_ss_msp = env->v7m.other_sp;
+    } else {
+        env->v7m.other_ss_msp = env->regs[13];
+        env->v7m.other_ss_psp = env->v7m.other_sp;
+    }
+
+    env->v7m.secure = new_secstate;
+
+    if (v7m_using_psp(env)) {
+        env->regs[13] = new_ss_psp;
+        env->v7m.other_sp = new_ss_msp;
+    } else {
+        env->regs[13] = new_ss_msp;
+        env->v7m.other_sp = new_ss_psp;
+    }
+}
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+    /*
+     * Handle v7M BXNS:
+     *  - if the return value is a magic value, do exception return (like BX)
+     *  - otherwise bit 0 of the return value is the target security state
+     */
+    uint32_t min_magic;
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        /* Covers FNC_RETURN and EXC_RETURN magic */
+        min_magic = FNC_RETURN_MIN_MAGIC;
+    } else {
+        /* EXC_RETURN magic only */
+        min_magic = EXC_RETURN_MIN_MAGIC;
+    }
+
+    if (dest >= min_magic) {
+        /*
+         * This is an exception return magic value; put it where
+         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
+         * Note that if we ever add gen_ss_advance() singlestep support to
+         * M profile this should count as an "instruction execution complete"
+         * event (compare gen_bx_excret_final_code()).
+         */
+        env->regs[15] = dest & ~1;
+        env->thumb = dest & 1;
+        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
+        /* notreached */
+    }
+
+    /* translate.c should have made BXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    if (!(dest & 1)) {
+        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    }
+    switch_v7m_security_state(env, dest & 1);
+    env->thumb = 1;
+    env->regs[15] = dest & ~1;
+}
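
[A quick sketch of how the BXNS destination is classified, for orientation.
The two MIN_MAGIC thresholds are assumed here to have the values defined
in target/arm/internals.h; the enum and function names are illustrative
only, not part of this patch.]

    #include <stdint.h>

    #define EXC_RETURN_MIN_MAGIC 0xff000000  /* EXC_RETURN: 0xff000000 and up */
    #define FNC_RETURN_MIN_MAGIC 0xfefffffe  /* FNC_RETURN: just below that */

    enum bxns_kind { BXNS_EXC_RETURN, BXNS_FNC_RETURN, BXNS_BRANCH };

    /* Sketch: mirror the min_magic comparison in the helper above */
    static enum bxns_kind classify_bxns(uint32_t dest, int have_security_ext)
    {
        if (dest >= EXC_RETURN_MIN_MAGIC) {
            return BXNS_EXC_RETURN;
        }
        if (have_security_ext && dest >= FNC_RETURN_MIN_MAGIC) {
            return BXNS_FNC_RETURN;
        }
        return BXNS_BRANCH;  /* bit 0 selects the target security state */
    }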
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /*
+     * Handle v7M BLXNS:
+     *  - bit 0 of the destination address is the target security state
+     */
+
+    /* At this point regs[15] is the address just after the BLXNS */
+    uint32_t nextinst = env->regs[15] | 1;
+    uint32_t sp = env->regs[13] - 8;
+    uint32_t saved_psr;
+
+    /* translate.c will have made BLXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    if (dest & 1) {
+        /*
+         * Target is Secure, so this is just a normal BLX,
+         * except that the low bit doesn't indicate Thumb/not.
+         */
+        env->regs[14] = nextinst;
+        env->thumb = 1;
+        env->regs[15] = dest & ~1;
+        return;
+    }
+
+    /* Target is non-secure: first push a stack frame */
+    if (!QEMU_IS_ALIGNED(sp, 8)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
+    }
+
+    if (sp < v7m_sp_limit(env)) {
+        raise_exception(env, EXCP_STKOF, 0, 1);
+    }
+
+    saved_psr = env->v7m.exception;
+    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+        saved_psr |= XPSR_SFPA;
+    }
+
+    /* Note that these stores can throw exceptions on MPU faults */
+    cpu_stl_data(env, sp, nextinst);
+    cpu_stl_data(env, sp + 4, saved_psr);
+
+    env->regs[13] = sp;
+    env->regs[14] = 0xfeffffff;
+    if (arm_v7m_is_handler_mode(env)) {
+        /*
+         * Write a dummy value to IPSR, to avoid leaking the current secure
+         * exception number to non-secure code. This is guaranteed not
+         * to cause write_v7m_exception() to actually change stacks.
+         */
+        write_v7m_exception(env, 1);
+    }
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    switch_v7m_security_state(env, 0);
+    env->thumb = 1;
+    env->regs[15] = dest;
+}
+
+static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
+                                bool spsel)
+{
+    /*
+     * Return a pointer to the location where we currently store the
+     * stack pointer for the requested security state and thread mode.
+     * This pointer will become invalid if the CPU state is updated
+     * such that the stack pointers are switched around (eg changing
+     * the SPSEL control bit).
+     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
+     * Unlike that pseudocode, we require the caller to pass us in the
+     * SPSEL control bit value; this is because we also use this
+     * function in handling of pushing of the callee-saves registers
+     * part of the v8M stack frame (pseudocode PushCalleeStack()),
+     * and in the tailchain codepath the SPSEL bit comes from the exception
+     * return magic LR value from the previous exception. The pseudocode
+     * opencodes the stack-selection in PushCalleeStack(), but we prefer
+     * to make this utility function generic enough to do the job.
+     */
+    bool want_psp = threadmode && spsel;
+
+    if (secure == env->v7m.secure) {
+        if (want_psp == v7m_using_psp(env)) {
+            return &env->regs[13];
+        } else {
+            return &env->v7m.other_sp;
+        }
+    } else {
+        if (want_psp) {
+            return &env->v7m.other_ss_psp;
+        } else {
+            return &env->v7m.other_ss_msp;
+        }
+    }
+}
+
+static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
+                                uint32_t *pvec)
+{
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxResult result;
+    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
+    uint32_t vector_entry;
+    MemTxAttrs attrs = {};
+    ARMMMUIdx mmu_idx;
+    bool exc_secure;
+
+    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
+
+    /*
+     * We don't do a get_phys_addr() here because the rules for vector
+     * loads are special: they always use the default memory map, and
+     * the default memory map permits reads from all addresses.
+     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
+     * that we want this special case which would always say "yes",
+     * we just do the SAU lookup here followed by a direct physical load.
+     */
+    attrs.secure = targets_secure;
+    attrs.user = false;
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        V8M_SAttributes sattrs = {};
+
+        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+        if (sattrs.ns) {
+            attrs.secure = false;
+        } else if (!targets_secure) {
+            /* NS access to S memory */
+            goto load_fail;
+        }
+    }
+
+    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
+                                     attrs, &result);
+    if (result != MEMTX_OK) {
+        goto load_fail;
+    }
+    *pvec = vector_entry;
+    return true;
+
+load_fail:
+    /*
+     * All vector table fetch fails are reported as HardFault, with
+     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
+     * technically the underlying exception is a MemManage or BusFault
+     * that is escalated to HardFault.) This is a terminal exception,
+     * so we will either take the HardFault immediately or else enter
+     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
+     */
+    exc_secure = targets_secure ||
+        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
+    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
+    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
+    return false;
+}
+
+static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
+{
+    /*
+     * Return the integrity signature value for the callee-saves
+     * stack frame section. @lr is the exception return payload/LR value
+     * whose FType bit forms bit 0 of the signature if FP is present.
+     */
+    uint32_t sig = 0xfefa125a;
+
+    if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
+        sig |= 1;
+    }
+    return sig;
+}
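
[In other words, the signature word written to the callee-saves frame is
one of exactly two values; a minimal sketch derived from the function
above (the variable names are illustrative only):]

    /* Bit 0 mirrors EXCRET.FType: clear when the standard FP frame was
     * stacked, set when no FP state was stacked or the CPU has no FPU.
     */
    uint32_t sig_with_fp_frame    = 0xfefa125a;  /* FType == 0 */
    uint32_t sig_without_fp_frame = 0xfefa125b;  /* FType == 1, or no FPU */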
+
+static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+                                  bool ignore_faults)
+{
+    /*
+     * For v8M, push the callee-saves register part of the stack frame.
+     * Compare the v8M pseudocode PushCalleeStack().
+     * In the tailchaining case this may not be the current stack.
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t *frame_sp_p;
+    uint32_t frameptr;
+    ARMMMUIdx mmu_idx;
+    bool stacked_ok;
+    uint32_t limit;
+    bool want_psp;
+    uint32_t sig;
+    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
+
+    if (dotailchain) {
+        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
+        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
+            !mode;
+
+        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
+        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
+                                    lr & R_V7M_EXCRET_SPSEL_MASK);
+        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
+        if (want_psp) {
+            limit = env->v7m.psplim[M_REG_S];
+        } else {
+            limit = env->v7m.msplim[M_REG_S];
+        }
+    } else {
+        mmu_idx = arm_mmu_idx(env);
+        frame_sp_p = &env->regs[13];
+        limit = v7m_sp_limit(env);
+    }
+
+    frameptr = *frame_sp_p - 0x28;
+    if (frameptr < limit) {
+        /*
+         * Stack limit failure: set SP to the limit value, and generate
+         * STKOF UsageFault. Stack pushes below the limit must not be
+         * performed. It is IMPDEF whether pushes above the limit are
+         * performed; we choose not to.
+         */
+        qemu_log_mask(CPU_LOG_INT,
+                      "...STKOF during callee-saves register stacking\n");
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                env->v7m.secure);
+        *frame_sp_p = limit;
+        return true;
+    }
+
+    /*
+     * Write as much of the stack frame as we can. A write failure may
+     * cause us to pend a derived exception.
+     */
+    sig = v7m_integrity_sig(env, lr);
+    stacked_ok =
+        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
+
+    /* Update SP regardless of whether any of the stack accesses failed. */
+    *frame_sp_p = frameptr;
+
+    return !stacked_ok;
+}
+
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+                                bool ignore_stackfaults)
+{
+    /*
+     * Do the "take the exception" parts of exception entry,
+     * but not the pushing of state to the stack. This is
+     * similar to the pseudocode ExceptionTaken() function.
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t addr;
+    bool targets_secure;
+    int exc;
+    bool push_failed = false;
+
+    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
+    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
+                  targets_secure ? "secure" : "nonsecure", exc);
+
+    if (dotailchain) {
+        /* Sanitize LR FType and PREFIX bits */
+        if (!arm_feature(env, ARM_FEATURE_VFP)) {
+            lr |= R_V7M_EXCRET_FTYPE_MASK;
+        }
+        lr = deposit32(lr, 24, 8, 0xff);
+    }
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+            (lr & R_V7M_EXCRET_S_MASK)) {
+            /*
+             * The background code (the owner of the registers in the
+             * exception frame) is Secure. This means it may either already
+             * have or now needs to push callee-saves registers.
+             */
+            if (targets_secure) {
+                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
+                    /*
+                     * We took an exception from Secure to NonSecure
+                     * (which means the callee-saved registers got stacked)
+                     * and are now tailchaining to a Secure exception.
+                     * Clear DCRS so eventual return from this Secure
+                     * exception unstacks the callee-saved registers.
+                     */
+                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
+                }
+            } else {
+                /*
+                 * We're going to a non-secure exception; push the
+                 * callee-saves registers to the stack now, if they're
+                 * not already saved.
+                 */
+                if (lr & R_V7M_EXCRET_DCRS_MASK &&
+                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
+                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
+                                                        ignore_stackfaults);
+                }
+                lr |= R_V7M_EXCRET_DCRS_MASK;
+            }
+        }
+
+        lr &= ~R_V7M_EXCRET_ES_MASK;
+        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            lr |= R_V7M_EXCRET_ES_MASK;
+        }
+        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
+        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
+            lr |= R_V7M_EXCRET_SPSEL_MASK;
+        }
+
+        /*
+         * Clear registers if necessary to prevent non-secure exception
+         * code being able to see register values from secure code.
+         * Where register values become architecturally UNKNOWN we leave
+         * them with their previous values.
+         */
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            if (!targets_secure) {
+                /*
+                 * Always clear the caller-saved registers (they have been
+                 * pushed to the stack earlier in v7m_push_stack()).
+                 * Clear callee-saved registers if the background code is
+                 * Secure (in which case these regs were saved in
+                 * v7m_push_callee_stack()).
+                 */
+                int i;
+
+                for (i = 0; i < 13; i++) {
+                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
+                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
+                        env->regs[i] = 0;
+                    }
+                }
+                /* Clear EAPSR */
+                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
+            }
+        }
+    }
+
+    if (push_failed && !ignore_stackfaults) {
+        /*
+         * Derived exception on callee-saves register stacking:
+         * we might now want to take a different exception which
+         * targets a different security state, so try again from the top.
+         */
+        qemu_log_mask(CPU_LOG_INT,
+                      "...derived exception on callee-saves register stacking");
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
+    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
+        /* Vector load failed: derived exception */
+        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
+    /*
+     * Now we've done everything that might cause a derived exception
+     * we can go ahead and activate whichever exception we're going to
+     * take (which might now be the derived exception).
+     */
+    armv7m_nvic_acknowledge_irq(env->nvic);
+
+    /* Switch to target security state -- must do this before writing SPSEL */
+    switch_v7m_security_state(env, targets_secure);
+    write_v7m_control_spsel(env, 0);
+    arm_clear_exclusive(env);
+    /* Clear SFPA and FPCA (has no effect if no FPU) */
+    env->v7m.control[M_REG_S] &=
+        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
+    /* Clear IT bits */
+    env->condexec_bits = 0;
+    env->regs[14] = lr;
+    env->regs[15] = addr & 0xfffffffe;
+    env->thumb = addr & 1;
+}
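
[For orientation, the EXC_RETURN payload bits that v7m_exception_taken()
adjusts before writing LR are laid out as sketched below. The positions
follow the v8M architectural layout that the R_V7M_EXCRET_* masks encode;
the macro names here are illustrative, not QEMU's.]

    /* Sketch of the v8M EXC_RETURN bit layout */
    #define EXCRET_ES    (1U << 0)  /* exception is taken to Secure state */
    #define EXCRET_SPSEL (1U << 2)  /* background code used the process stack */
    #define EXCRET_MODE  (1U << 3)  /* return to Thread (not Handler) mode */
    #define EXCRET_FTYPE (1U << 4)  /* 1: no FP state was stacked */
    #define EXCRET_DCRS  (1U << 5)  /* 0: callee-saves regs are on the frame */
    #define EXCRET_S     (1U << 6)  /* frame registers belong to Secure code */
    /* bits [31:24] are the 0xff prefix, cf. deposit32(lr, 24, 8, 0xff) above */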
+
+static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
+                             bool apply_splim)
+{
+    /*
+     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
+     * that we will need later in order to do lazy FP reg stacking.
+     */
+    bool is_secure = env->v7m.secure;
+    void *nvic = env->nvic;
+    /*
+     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
+     * are banked and we want to update the bit in the bank for the
+     * current security state; and in one case we want to specifically
+     * update the NS banked version of a bit even if we are secure.
+     */
+    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
+    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
+    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
+    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
+
+    env->v7m.fpcar[is_secure] = frameptr & ~0x7;
+
+    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
+        bool splimviol;
+        uint32_t splim = v7m_sp_limit(env);
+        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
+            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
+
+        splimviol = !ign && frameptr < splim;
+        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
+    }
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
+
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
+                        !arm_v7m_is_handler_mode(env));
+
+    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
+
+    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
+
+    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
+
+    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
+    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
+
+    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
+        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
+
+        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
+        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
+    }
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
+    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
+
+    assert(env->v7m.secure);
+
+    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        return;
+    }
+
+    /* Check access to the coprocessor is permitted */
+    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+    }
+
+    if (lspact) {
+        /* LSPACT should not be active when there is active FP state */
+        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
+    }
+
+    if (fptr & 7) {
+        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+    }
+
+    /*
+     * Note that we do not use v7m_stack_write() here, because the
+     * accesses should not set the FSR bits for stacking errors if they
+     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
+     * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
+     * and longjmp out.
+     */
+    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+        int i;
+
+        for (i = 0; i < (ts ? 32 : 16); i += 2) {
+            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+            uint32_t faddr = fptr + 4 * i;
+            uint32_t slo = extract64(dn, 0, 32);
+            uint32_t shi = extract64(dn, 32, 32);
+
+            if (i >= 16) {
+                faddr += 8; /* skip the slot for the FPSCR */
+            }
+            cpu_stl_data(env, faddr, slo);
+            cpu_stl_data(env, faddr + 4, shi);
+        }
+        cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
+
+        /*
+         * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
+         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
+         */
+        if (ts) {
+            for (i = 0; i < 32; i += 2) {
+                *aa32_vfp_dreg(env, i / 2) = 0;
+            }
+            vfp_set_fpscr(env, 0);
+        }
+    } else {
+        v7m_update_fpccr(env, fptr, false);
+    }
+
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+}
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
+    assert(env->v7m.secure);
+
+    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        return;
+    }
+
+    /* Check access to the coprocessor is permitted */
+    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+    }
+
+    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
+        /* State in FP is still valid */
+        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
+    } else {
+        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+        int i;
+        uint32_t fpscr;
+
+        if (fptr & 7) {
+            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+        }
+
+        for (i = 0; i < (ts ? 32 : 16); i += 2) {
+            uint32_t slo, shi;
+            uint64_t dn;
+            uint32_t faddr = fptr + 4 * i;
+
+            if (i >= 16) {
+                faddr += 8; /* skip the slot for the FPSCR */
+            }
+
+            slo = cpu_ldl_data(env, faddr);
+            shi = cpu_ldl_data(env, faddr + 4);
+
+            dn = (uint64_t) shi << 32 | slo;
+            *aa32_vfp_dreg(env, i / 2) = dn;
+        }
+        fpscr = cpu_ldl_data(env, fptr + 0x40);
+        vfp_set_fpscr(env, fpscr);
+    }
+
+    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
+}
+
+static bool v7m_push_stack(ARMCPU *cpu)
+{
+    /*
+     * Do the "set up stack frame" part of exception entry,
+     * similar to pseudocode PushStack().
+     * Return true if we generate a derived exception (and so
+     * should ignore further stack faults trying to process
+     * that derived exception.)
+     */
+    bool stacked_ok = true, limitviol = false;
+    CPUARMState *env = &cpu->env;
+    uint32_t xpsr = xpsr_read(env);
+    uint32_t frameptr = env->regs[13];
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+    uint32_t framesize;
+    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
+
+    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
+        (env->v7m.secure || nsacr_cp10)) {
+        if (env->v7m.secure &&
+            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
+            framesize = 0xa8;
+        } else {
+            framesize = 0x68;
+        }
+    } else {
+        framesize = 0x20;
+    }
+
+    /* Align stack pointer if the guest wants that */
+    if ((frameptr & 4) &&
+        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
+        frameptr -= 4;
+        xpsr |= XPSR_SPREALIGN;
+    }
+
+    xpsr &= ~XPSR_SFPA;
+    if (env->v7m.secure &&
+        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        xpsr |= XPSR_SFPA;
+    }
+
+    frameptr -= framesize;
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        uint32_t limit = v7m_sp_limit(env);
+
+        if (frameptr < limit) {
+            /*
+             * Stack limit failure: set SP to the limit value, and generate
+             * STKOF UsageFault. Stack pushes below the limit must not be
+             * performed. It is IMPDEF whether pushes above the limit are
+             * performed; we choose not to.
+             */
+            qemu_log_mask(CPU_LOG_INT,
+                          "...STKOF during stacking\n");
+            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                    env->v7m.secure);
+            env->regs[13] = limit;
+            /*
+             * We won't try to perform any further memory accesses but
+             * we must continue through the following code to check for
+             * permission faults during FPU state preservation, and we
+             * must update FPCCR if lazy stacking is enabled.
+             */
+            limitviol = true;
+            stacked_ok = false;
+        }
+    }
+
+    /*
+     * Write as much of the stack frame as we can. If we fail a stack
+     * write this will result in a derived exception being pended
+     * (which may be taken in preference to the one we started with
+     * if it has higher priority).
+     */
+    stacked_ok = stacked_ok &&
+        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
+
+    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
+        /* FPU is active, try to save its registers */
+        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
+
+        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...SecureFault because LSPACT and FPCA both set\n");
+            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        } else if (!env->v7m.secure && !nsacr_cp10) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...Secure UsageFault with CFSR.NOCP because "
+                          "NSACR.CP10 prevents stacking FP regs\n");
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+        } else {
+            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
3907
+ /* Lazy stacking disabled, save registers now */
3908
+ int i;
3909
+ bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
3910
+ arm_current_el(env) != 0);
3911
+
3912
+ if (stacked_ok && !cpacr_pass) {
3913
+ /*
3914
+ * Take UsageFault if CPACR forbids access. The pseudocode
3915
+ * here does a full CheckCPEnabled() but we know the NSACR
3916
+ * check can never fail as we have already handled that.
3917
+ */
3918
+ qemu_log_mask(CPU_LOG_INT,
3919
+ "...UsageFault with CFSR.NOCP because "
3920
+ "CPACR.CP10 prevents stacking FP regs\n");
3921
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
3922
+ env->v7m.secure);
3923
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
3924
+ stacked_ok = false;
3925
+ }
3926
+
3927
+ for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
3928
+ uint64_t dn = *aa32_vfp_dreg(env, i / 2);
3929
+ uint32_t faddr = frameptr + 0x20 + 4 * i;
3930
+ uint32_t slo = extract64(dn, 0, 32);
3931
+ uint32_t shi = extract64(dn, 32, 32);
3932
+
3933
+ if (i >= 16) {
3934
+ faddr += 8; /* skip the slot for the FPSCR */
3935
+ }
3936
+ stacked_ok = stacked_ok &&
3937
+ v7m_stack_write(cpu, faddr, slo,
3938
+ mmu_idx, STACK_NORMAL) &&
3939
+ v7m_stack_write(cpu, faddr + 4, shi,
3940
+ mmu_idx, STACK_NORMAL);
3941
+ }
3942
+ stacked_ok = stacked_ok &&
3943
+ v7m_stack_write(cpu, frameptr + 0x60,
3944
+ vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
3945
+ if (cpacr_pass) {
3946
+ for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
3947
+ *aa32_vfp_dreg(env, i / 2) = 0;
3948
+ }
3949
+ vfp_set_fpscr(env, 0);
3950
+ }
3951
+ } else {
3952
+ /* Lazy stacking enabled, save necessary info to stack later */
3953
+ v7m_update_fpccr(env, frameptr + 0x20, true);
3954
+ }
3955
+ }
3956
+ }
3957
+
3958
+ /*
3959
+ * If we broke a stack limit then SP was already updated earlier;
3960
+ * otherwise we update SP regardless of whether any of the stack
3961
+ * accesses failed or we took some other kind of fault.
3962
+ */
3963
+ if (!limitviol) {
3964
+ env->regs[13] = frameptr;
3965
+ }
3966
+
3967
+ return !stacked_ok;
3968
+}
3969
+
3970
+static void do_v7m_exception_exit(ARMCPU *cpu)
3971
+{
3972
+ CPUARMState *env = &cpu->env;
3973
+ uint32_t excret;
3974
+ uint32_t xpsr, xpsr_mask;
3975
+ bool ufault = false;
3976
+ bool sfault = false;
3977
+ bool return_to_sp_process;
3978
+ bool return_to_handler;
3979
+ bool rettobase = false;
3980
+ bool exc_secure = false;
3981
+ bool return_to_secure;
3982
+ bool ftype;
3983
+ bool restore_s16_s31;
3984
+
3985
+ /*
3986
+ * If we're not in Handler mode then jumps to magic exception-exit
3987
+ * addresses don't have magic behaviour. However for the v8M
3988
+ * security extensions the magic secure-function-return has to
3989
+ * work in thread mode too, so to avoid doing an extra check in
3990
+ * the generated code we allow exception-exit magic to also cause the
3991
+ * internal exception and bring us here in thread mode. Correct code
3992
+ * will never try to do this (the following insn fetch will always
3993
+ * fault) so the overhead of having taken an unnecessary exception
3994
+ * doesn't matter.
3995
+ */
3996
+ if (!arm_v7m_is_handler_mode(env)) {
3997
+ return;
3998
+ }
3999
+
4000
+ /*
4001
+ * In the spec pseudocode ExceptionReturn() is called directly
4002
+ * from BXWritePC() and gets the full target PC value including
4003
+ * bit zero. In QEMU's implementation we treat it as a normal
4004
+ * jump-to-register (which is then caught later on), and so split
4005
+ * the target value up between env->regs[15] and env->thumb in
4006
+ * gen_bx(). Reconstitute it.
4007
+ */
4008
+ excret = env->regs[15];
4009
+ if (env->thumb) {
4010
+ excret |= 1;
4011
+ }
4012
+
4013
+ qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
4014
+ " previous exception %d\n",
4015
+ excret, env->v7m.exception);
4016
+
4017
+ if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
4018
+ qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
4019
+ "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
4020
+ excret);
4021
+ }
4022
+
4023
+ ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
4024
+
4025
+ if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
4026
+ qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
4027
+ "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
4028
+ "if FPU not present\n",
4029
+ excret);
4030
+ ftype = true;
4031
+ }
4032
+
4033
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4034
+ /*
4035
+ * EXC_RETURN.ES validation check (R_SMFL). We must do this before
4036
+ * we pick which FAULTMASK to clear.
4037
+ */
4038
+ if (!env->v7m.secure &&
4039
+ ((excret & R_V7M_EXCRET_ES_MASK) ||
4040
+ !(excret & R_V7M_EXCRET_DCRS_MASK))) {
4041
+ sfault = true;
4042
+ /* For all other purposes, treat ES as 0 (R_HXSR) */
4043
+ excret &= ~R_V7M_EXCRET_ES_MASK;
4044
+ }
4045
+ exc_secure = excret & R_V7M_EXCRET_ES_MASK;
4046
+ }
4047
+
4048
+ if (env->v7m.exception != ARMV7M_EXCP_NMI) {
4049
+ /*
4050
+ * Auto-clear FAULTMASK on return from other than NMI.
4051
+ * If the security extension is implemented then this only
4052
+ * happens if the raw execution priority is >= 0; the
4053
+ * value of the ES bit in the exception return value indicates
4054
+ * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
4055
+ */
4056
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4057
+ if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
4058
+ env->v7m.faultmask[exc_secure] = 0;
4059
+ }
4060
+ } else {
4061
+ env->v7m.faultmask[M_REG_NS] = 0;
4062
+ }
4063
+ }
4064
+
4065
+ switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
4066
+ exc_secure)) {
4067
+ case -1:
4068
+ /* attempt to exit an exception that isn't active */
4069
+ ufault = true;
4070
+ break;
4071
+ case 0:
4072
+ /* still an irq active now */
4073
+ break;
4074
+ case 1:
4075
+ /*
4076
+ * We returned to base exception level, no nesting.
4077
+ * (In the pseudocode this is written using "NestedActivation != 1"
4078
+ * where we have 'rettobase == false'.)
4079
+ */
4080
+ rettobase = true;
4081
+ break;
4082
+ default:
4083
+ g_assert_not_reached();
4084
+ }
4085
+
4086
+ return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
4087
+ return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
4088
+ return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
4089
+ (excret & R_V7M_EXCRET_S_MASK);
4090
+
4091
+ if (arm_feature(env, ARM_FEATURE_V8)) {
4092
+ if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4093
+ /*
4094
+ * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
4095
+ * we choose to take the UsageFault.
4096
+ */
4097
+ if ((excret & R_V7M_EXCRET_S_MASK) ||
4098
+ (excret & R_V7M_EXCRET_ES_MASK) ||
4099
+ !(excret & R_V7M_EXCRET_DCRS_MASK)) {
4100
+ ufault = true;
4101
+ }
4102
+ }
4103
+ if (excret & R_V7M_EXCRET_RES0_MASK) {
4104
+ ufault = true;
4105
+ }
4106
+ } else {
4107
+ /* For v7M we only recognize certain combinations of the low bits */
4108
+ switch (excret & 0xf) {
4109
+ case 1: /* Return to Handler */
4110
+ break;
4111
+ case 13: /* Return to Thread using Process stack */
4112
+ case 9: /* Return to Thread using Main stack */
4113
+ /*
4114
+ * We only need to check NONBASETHRDENA for v7M, because in
4115
+ * v8M this bit does not exist (it is RES1).
4116
+ */
4117
+ if (!rettobase &&
4118
+ !(env->v7m.ccr[env->v7m.secure] &
4119
+ R_V7M_CCR_NONBASETHRDENA_MASK)) {
4120
+ ufault = true;
4121
+ }
4122
+ break;
4123
+ default:
4124
+ ufault = true;
4125
+ }
4126
+ }
4127
+
4128
+ /*
4129
+ * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
4130
+ * Handler mode (and will be until we write the new XPSR.Interrupt
4131
+ * field) this does not switch around the current stack pointer.
4132
+ * We must do this before we do any kind of tailchaining, including
4133
+ * for the derived exceptions on integrity check failures, or we will
4134
+ * give the guest an incorrect EXCRET.SPSEL value on exception entry.
4135
+ */
4136
+ write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
4137
+
4138
+ /*
4139
+ * Clear scratch FP values left in caller saved registers; this
4140
+ * must happen before any kind of tail chaining.
4141
+ */
4142
+ if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
4143
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
4144
+ if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
4145
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
4146
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4147
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
4148
+ "stackframe: error during lazy state deactivation\n");
4149
+ v7m_exception_taken(cpu, excret, true, false);
4150
+ return;
4151
+ } else {
4152
+ /* Clear s0..s15 and FPSCR */
4153
+ int i;
4154
+
4155
+ for (i = 0; i < 16; i += 2) {
4156
+ *aa32_vfp_dreg(env, i / 2) = 0;
4157
+ }
4158
+ vfp_set_fpscr(env, 0);
4159
+ }
4160
+ }
4161
+
4162
+ if (sfault) {
4163
+ env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
4164
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4165
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
4166
+ "stackframe: failed EXC_RETURN.ES validity check\n");
4167
+ v7m_exception_taken(cpu, excret, true, false);
4168
+ return;
4169
+ }
4170
+
4171
+ if (ufault) {
4172
+ /*
4173
+ * Bad exception return: instead of popping the exception
4174
+ * stack, directly take a usage fault on the current stack.
4175
+ */
4176
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
4177
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4178
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
4179
+ "stackframe: failed exception return integrity check\n");
4180
+ v7m_exception_taken(cpu, excret, true, false);
4181
+ return;
4182
+ }
4183
+
4184
+ /*
4185
+ * Tailchaining: if there is currently a pending exception that
4186
+ * is high enough priority to preempt execution at the level we're
4187
+ * about to return to, then just directly take that exception now,
4188
+ * avoiding an unstack-and-then-stack. Note that now we have
4189
+ * deactivated the previous exception by calling armv7m_nvic_complete_irq()
4190
+ * our current execution priority is already the execution priority we are
4191
+ * returning to -- none of the state we would unstack or set based on
4192
+ * the EXCRET value affects it.
4193
+ */
4194
+ if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
4195
+ qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
4196
+ v7m_exception_taken(cpu, excret, true, false);
4197
+ return;
4198
+ }
4199
+
4200
+ switch_v7m_security_state(env, return_to_secure);
4201
+
4202
+ {
4203
+ /*
4204
+ * The stack pointer we should be reading the exception frame from
4205
+ * depends on bits in the magic exception return type value (and
4206
+ * for v8M isn't necessarily the stack pointer we will eventually
4207
+ * end up resuming execution with). Get a pointer to the location
4208
+ * in the CPU state struct where the SP we need is currently being
4209
+ * stored; we will use and modify it in place.
4210
+ * We use this limited C variable scope so we don't accidentally
4211
+ * use 'frame_sp_p' after we do something that makes it invalid.
4212
+ */
4213
+ uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
4214
+ return_to_secure,
4215
+ !return_to_handler,
4216
+ return_to_sp_process);
4217
+ uint32_t frameptr = *frame_sp_p;
4218
+ bool pop_ok = true;
4219
+ ARMMMUIdx mmu_idx;
4220
+ bool return_to_priv = return_to_handler ||
4221
+ !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
4222
+
4223
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
4224
+ return_to_priv);
4225
+
4226
+ if (!QEMU_IS_ALIGNED(frameptr, 8) &&
4227
+ arm_feature(env, ARM_FEATURE_V8)) {
4228
+ qemu_log_mask(LOG_GUEST_ERROR,
4229
+ "M profile exception return with non-8-aligned SP "
4230
+ "for destination state is UNPREDICTABLE\n");
4231
+ }
4232
+
4233
+ /* Do we need to pop callee-saved registers? */
4234
+ if (return_to_secure &&
4235
+ ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
4236
+ (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
4237
+ uint32_t actual_sig;
4238
+
4239
+ pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
4240
+
4241
+ if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
4242
+ /* Take a SecureFault on the current stack */
4243
+ env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
4244
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4245
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
4246
+ "stackframe: failed exception return integrity "
4247
+ "signature check\n");
4248
+ v7m_exception_taken(cpu, excret, true, false);
4249
+ return;
4250
+ }
4251
+
4252
+ pop_ok = pop_ok &&
4253
+ v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
4254
+ v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
4255
+ v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
4256
+ v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
4257
+ v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
4258
+ v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
4259
+ v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
4260
+ v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
4261
+
4262
+ frameptr += 0x28;
4263
+ }
4264
+
4265
+ /* Pop registers */
4266
+ pop_ok = pop_ok &&
4267
+ v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
4268
+ v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
4269
+ v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
4270
+ v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
4271
+ v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
4272
+ v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
4273
+ v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
4274
+ v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
4275
+
4276
+ if (!pop_ok) {
4277
+ /*
4278
+ * v7m_stack_read() pended a fault, so take it (as a tail
4279
+ * chained exception on the same stack frame)
4280
+ */
4281
+ qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
4282
+ v7m_exception_taken(cpu, excret, true, false);
4283
+ return;
4284
+ }
4285
+
4286
+ /*
4287
+ * Returning from an exception with a PC with bit 0 set is defined
4288
+ * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
4289
+ * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
4290
+ * the lsbit, and there are several RTOSes out there which incorrectly
4291
+ * assume the r15 in the stack frame should be a Thumb-style "lsbit
4292
+ * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
4293
+ * complain about the badly behaved guest.
4294
+ */
4295
+ if (env->regs[15] & 1) {
4296
+ env->regs[15] &= ~1U;
4297
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
4298
+ qemu_log_mask(LOG_GUEST_ERROR,
4299
+ "M profile return from interrupt with misaligned "
4300
+ "PC is UNPREDICTABLE on v7M\n");
4301
+ }
4302
+ }
4303
+
4304
+ if (arm_feature(env, ARM_FEATURE_V8)) {
4305
+ /*
4306
+ * For v8M we have to check whether the xPSR exception field
4307
+ * matches the EXCRET value for return to handler/thread
4308
+ * before we commit to changing the SP and xPSR.
4309
+ */
4310
+ bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
4311
+ if (return_to_handler != will_be_handler) {
4312
+ /*
4313
+ * Take an INVPC UsageFault on the current stack.
4314
+ * By this point we will have switched to the security state
4315
+ * for the background state, so this UsageFault will target
4316
+ * that state.
4317
+ */
4318
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
4319
+ env->v7m.secure);
4320
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
4321
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
4322
+ "stackframe: failed exception return integrity "
4323
+ "check\n");
4324
+ v7m_exception_taken(cpu, excret, true, false);
4325
+ return;
4326
+ }
4327
+ }
4328
+
4329
+ if (!ftype) {
4330
+ /* FP present and we need to handle it */
4331
+ if (!return_to_secure &&
4332
+ (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
4333
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4334
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
4335
+ qemu_log_mask(CPU_LOG_INT,
4336
+ "...taking SecureFault on existing stackframe: "
4337
+ "Secure LSPACT set but exception return is "
4338
+ "not to secure state\n");
4339
+ v7m_exception_taken(cpu, excret, true, false);
4340
+ return;
4341
+ }
4342
+
4343
+ restore_s16_s31 = return_to_secure &&
4344
+ (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
4345
+
4346
+ if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
4347
+ /* State in FPU is still valid, just clear LSPACT */
4348
+ env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
4349
+ } else {
4350
+ int i;
4351
+ uint32_t fpscr;
4352
+ bool cpacr_pass, nsacr_pass;
4353
+
4354
+ cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
4355
+ return_to_priv);
4356
+ nsacr_pass = return_to_secure ||
4357
+ extract32(env->v7m.nsacr, 10, 1);
4358
+
4359
+ if (!cpacr_pass) {
4360
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
4361
+ return_to_secure);
4362
+ env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
4363
+ qemu_log_mask(CPU_LOG_INT,
4364
+ "...taking UsageFault on existing "
4365
+ "stackframe: CPACR.CP10 prevents unstacking "
4366
+ "FP regs\n");
4367
+ v7m_exception_taken(cpu, excret, true, false);
4368
+ return;
4369
+ } else if (!nsacr_pass) {
4370
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
4371
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
4372
+ qemu_log_mask(CPU_LOG_INT,
4373
+ "...taking Secure UsageFault on existing "
4374
+ "stackframe: NSACR.CP10 prevents unstacking "
4375
+ "FP regs\n");
4376
+ v7m_exception_taken(cpu, excret, true, false);
4377
+ return;
4378
+ }
4379
+
4380
+ for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
4381
+ uint32_t slo, shi;
4382
+ uint64_t dn;
4383
+ uint32_t faddr = frameptr + 0x20 + 4 * i;
4384
+
4385
+ if (i >= 16) {
4386
+ faddr += 8; /* Skip the slot for the FPSCR */
4387
+ }
4388
+
4389
+ pop_ok = pop_ok &&
4390
+ v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
4391
+ v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
4392
+
4393
+ if (!pop_ok) {
4394
+ break;
4395
+ }
4396
+
4397
+ dn = (uint64_t)shi << 32 | slo;
4398
+ *aa32_vfp_dreg(env, i / 2) = dn;
4399
+ }
4400
+ pop_ok = pop_ok &&
4401
+ v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
4402
+ if (pop_ok) {
4403
+ vfp_set_fpscr(env, fpscr);
4404
+ }
4405
+ if (!pop_ok) {
4406
+ /*
4407
+ * These regs are 0 if the security extension is present;
+ * otherwise they are merely UNKNOWN. We always choose to zero them.
4409
+ */
4410
+ for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
4411
+ *aa32_vfp_dreg(env, i / 2) = 0;
4412
+ }
4413
+ vfp_set_fpscr(env, 0);
4414
+ }
4415
+ }
4416
+ }
4417
+ env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
4418
+ V7M_CONTROL, FPCA, !ftype);
4419
+
4420
+ /* Commit to consuming the stack frame */
4421
+ frameptr += 0x20;
4422
+ if (!ftype) {
4423
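+ /* 0x48 is s0..s15 plus FPSCR and the reserved word; s16..s31 take another 0x40 */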
+ frameptr += 0x48;
4424
+ if (restore_s16_s31) {
4425
+ frameptr += 0x40;
4426
+ }
4427
+ }
4428
+ /*
4429
+ * Undo stack alignment (the SPREALIGN bit indicates that the original
4430
+ * pre-exception SP was not 8-aligned and we added a padding word to
4431
+ * align it, so we undo this by ORing in the bit that increases it
4432
+ * from the current 8-aligned value to the 8-unaligned value. (Adding 4
4433
+ * would work too but a logical OR is how the pseudocode specifies it.)
4434
+ */
4435
+ if (xpsr & XPSR_SPREALIGN) {
4436
+ frameptr |= 4;
4437
+ }
4438
+ *frame_sp_p = frameptr;
4439
+ }
4440
+
4441
+ xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
4442
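+ /* The GE bits only exist if the DSP extension is implemented */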
+ if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
4443
+ xpsr_mask &= ~XPSR_GE;
4444
+ }
4445
+ /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
4446
+ xpsr_write(env, xpsr, xpsr_mask);
4447
+
4448
+ if (env->v7m.secure) {
4449
+ bool sfpa = xpsr & XPSR_SFPA;
4450
+
4451
+ env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
4452
+ V7M_CONTROL, SFPA, sfpa);
4453
+ }
4454
+
4455
+ /*
4456
+ * The restored xPSR exception field will be zero if we're
4457
+ * resuming in Thread mode. If that doesn't match what the
4458
+ * exception return excret specified then this is a UsageFault.
4459
+ * v7M requires we make this check here; v8M did it earlier.
4460
+ */
4461
+ if (return_to_handler != arm_v7m_is_handler_mode(env)) {
4462
+ /*
4463
+ * Take an INVPC UsageFault by pushing the stack again;
4464
+ * we know we're v7M so this is never a Secure UsageFault.
4465
+ */
4466
+ bool ignore_stackfaults;
4467
+
4468
+ assert(!arm_feature(env, ARM_FEATURE_V8));
4469
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
4470
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
4471
+ ignore_stackfaults = v7m_push_stack(cpu);
4472
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
4473
+ "failed exception return integrity check\n");
4474
+ v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
4475
+ return;
4476
+ }
4477
+
4478
+ /* Otherwise, we have a successful exception exit. */
4479
+ arm_clear_exclusive(env);
4480
+ qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
4481
+}
4482
+
4483
+static bool do_v7m_function_return(ARMCPU *cpu)
4484
+{
4485
+ /*
4486
+ * v8M security extensions magic function return.
4487
+ * We may either:
4488
+ * (1) throw an exception (longjmp)
4489
+ * (2) return true if we successfully handled the function return
4490
+ * (3) return false if we failed a consistency check and have
4491
+ * pended a UsageFault that needs to be taken now
4492
+ *
4493
+ * At this point the magic return value is split between env->regs[15]
4494
+ * and env->thumb. We don't bother to reconstitute it because we don't
4495
+ * need it (all values are handled the same way).
4496
+ */
4497
+ CPUARMState *env = &cpu->env;
4498
+ uint32_t newpc, newpsr, newpsr_exc;
4499
+
4500
+ qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
4501
+
4502
+ {
4503
+ bool threadmode, spsel;
4504
+ TCGMemOpIdx oi;
4505
+ ARMMMUIdx mmu_idx;
4506
+ uint32_t *frame_sp_p;
4507
+ uint32_t frameptr;
4508
+
4509
+ /* Pull the return address and IPSR from the Secure stack */
4510
+ threadmode = !arm_v7m_is_handler_mode(env);
4511
+ spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
4512
+
4513
+ frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
4514
+ frameptr = *frame_sp_p;
4515
+
4516
+ /*
4517
+ * These loads may throw an exception (for MPU faults). We want to
4518
+ * do them as secure, so work out what MMU index that is.
4519
+ */
4520
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
4521
+ oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
4522
+ newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
4523
+ newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
4524
+
4525
+ /* Consistency checks on new IPSR */
4526
+ newpsr_exc = newpsr & XPSR_EXCP;
4527
+ if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
4528
+ (env->v7m.exception == 1 && newpsr_exc != 0))) {
4529
+ /* Pend the fault and tell our caller to take it */
4530
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
4531
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
4532
+ env->v7m.secure);
4533
+ qemu_log_mask(CPU_LOG_INT,
4534
+ "...taking INVPC UsageFault: "
4535
+ "IPSR consistency check failed\n");
4536
+ return false;
4537
+ }
4538
+
4539
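+ /* Commit the SP: the function-return frame is two words (return address and partial xPSR) */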
+ *frame_sp_p = frameptr + 8;
4540
+ }
4541
+
4542
+ /* This invalidates frame_sp_p */
4543
+ switch_v7m_security_state(env, true);
4544
+ env->v7m.exception = newpsr_exc;
4545
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
4546
+ if (newpsr & XPSR_SFPA) {
4547
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
4548
+ }
4549
+ xpsr_write(env, 0, XPSR_IT);
4550
+ env->thumb = newpc & 1;
4551
+ env->regs[15] = newpc & ~1;
4552
+
4553
+ qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
4554
+ return true;
4555
+}
4556
+
4557
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
4558
+ uint32_t addr, uint16_t *insn)
4559
+{
4560
+ /*
4561
+ * Load a 16-bit portion of a v7M instruction, returning true on success,
4562
+ * or false on failure (in which case we will have pended the appropriate
4563
+ * exception).
4564
+ * We need to do the instruction fetch's MPU and SAU checks
4565
+ * like this because there is no MMU index that would allow
4566
+ * doing the load with a single function call. Instead we must
4567
+ * first check that the security attributes permit the load
4568
+ * and that they don't mismatch on the two halves of the instruction,
4569
+ * and then we do the load as a secure load (ie using the security
4570
+ * attributes of the address, not the CPU, as architecturally required).
4571
+ */
4572
+ CPUState *cs = CPU(cpu);
4573
+ CPUARMState *env = &cpu->env;
4574
+ V8M_SAttributes sattrs = {};
4575
+ MemTxAttrs attrs = {};
4576
+ ARMMMUFaultInfo fi = {};
4577
+ MemTxResult txres;
4578
+ target_ulong page_size;
4579
+ hwaddr physaddr;
4580
+ int prot;
4581
+
4582
+ v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
4583
+ if (!sattrs.nsc || sattrs.ns) {
4584
+ /*
4585
+ * This must be the second half of the insn, and it straddles a
4586
+ * region boundary with the second half not being S&NSC.
4587
+ */
4588
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
4589
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4590
+ qemu_log_mask(CPU_LOG_INT,
4591
+ "...really SecureFault with SFSR.INVEP\n");
4592
+ return false;
4593
+ }
4594
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
4595
+ &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
4596
+ /* the MPU lookup failed */
4597
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
4598
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
4599
+ qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
4600
+ return false;
4601
+ }
4602
+ *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
4603
+ attrs, &txres);
4604
+ if (txres != MEMTX_OK) {
4605
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
4606
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
4607
+ qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
4608
+ return false;
4609
+ }
4610
+ return true;
4611
+}
4612
+
4613
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
4614
+{
4615
+ /*
4616
+ * Check whether this attempt to execute code in a Secure & NS-Callable
4617
+ * memory region is for an SG instruction; if so, then emulate the
4618
+ * effect of the SG instruction and return true. Otherwise pend
4619
+ * the correct kind of exception and return false.
4620
+ */
4621
+ CPUARMState *env = &cpu->env;
4622
+ ARMMMUIdx mmu_idx;
4623
+ uint16_t insn;
4624
+
4625
+ /*
4626
+ * We should never get here unless get_phys_addr_pmsav8() caused
4627
+ * an exception for NS executing in S&NSC memory.
4628
+ */
4629
+ assert(!env->v7m.secure);
4630
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
4631
+
4632
+ /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
4633
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
4634
+
4635
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
4636
+ return false;
4637
+ }
4638
+
4639
+ if (!env->thumb) {
4640
+ goto gen_invep;
4641
+ }
4642
+
4643
+ if (insn != 0xe97f) {
4644
+ /*
4645
+ * Not an SG instruction first half (we choose the IMPDEF
4646
+ * early-SG-check option).
4647
+ */
4648
+ goto gen_invep;
4649
+ }
4650
+
4651
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
4652
+ return false;
4653
+ }
4654
+
4655
+ if (insn != 0xe97f) {
4656
+ /*
4657
+ * Not an SG instruction second half (yes, both halves of the SG
4658
+ * insn have the same hex value)
4659
+ */
4660
+ goto gen_invep;
4661
+ }
4662
+
4663
+ /*
4664
+ * OK, we have confirmed that we really have an SG instruction.
4665
+ * We know we're NS in S memory so don't need to repeat those checks.
4666
+ */
4667
+ qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
4668
+ ", executing it\n", env->regs[15]);
4669
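+ /*
+ * The SG effects: clear LR bit 0 and CONTROL.SFPA, switch to the
+ * Secure state, clear the IT bits and step past the 4-byte insn.
+ */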
+ env->regs[14] &= ~1;
4670
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
4671
+ switch_v7m_security_state(env, true);
4672
+ xpsr_write(env, 0, XPSR_IT);
4673
+ env->regs[15] += 4;
4674
+ return true;
4675
+
4676
+gen_invep:
4677
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
4678
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4679
+ qemu_log_mask(CPU_LOG_INT,
4680
+ "...really SecureFault with SFSR.INVEP\n");
4681
+ return false;
4682
+}
4683
+
4684
+void arm_v7m_cpu_do_interrupt(CPUState *cs)
4685
+{
4686
+ ARMCPU *cpu = ARM_CPU(cs);
4687
+ CPUARMState *env = &cpu->env;
4688
+ uint32_t lr;
4689
+ bool ignore_stackfaults;
4690
+
4691
+ arm_log_exception(cs->exception_index);
4692
+
4693
+ /*
4694
+ * For exceptions we just mark as pending on the NVIC, and let that
4695
+ * handle it.
4696
+ */
4697
+ switch (cs->exception_index) {
4698
+ case EXCP_UDEF:
4699
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4700
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
4701
+ break;
4702
+ case EXCP_NOCP:
4703
+ {
4704
+ /*
4705
+ * NOCP might be directed to something other than the current
4706
+ * security state if this fault is because of NSACR; we indicate
4707
+ * the target security state using exception.target_el.
4708
+ */
4709
+ int target_secstate;
4710
+
4711
+ if (env->exception.target_el == 3) {
4712
+ target_secstate = M_REG_S;
4713
+ } else {
4714
+ target_secstate = env->v7m.secure;
4715
+ }
4716
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
4717
+ env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
4718
+ break;
4719
+ }
4720
+ case EXCP_INVSTATE:
4721
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4722
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
4723
+ break;
4724
+ case EXCP_STKOF:
4725
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4726
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
4727
+ break;
4728
+ case EXCP_LSERR:
4729
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4730
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
4731
+ break;
4732
+ case EXCP_UNALIGNED:
4733
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4734
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
4735
+ break;
4736
+ case EXCP_SWI:
4737
+ /* The PC already points to the next instruction. */
4738
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
4739
+ break;
4740
+ case EXCP_PREFETCH_ABORT:
4741
+ case EXCP_DATA_ABORT:
4742
+ /*
4743
+ * Note that for M profile we don't have a guest facing FSR, but
4744
+ * the env->exception.fsr will be populated by the code that
4745
+ * raises the fault, in the A profile short-descriptor format.
4746
+ */
4747
+ switch (env->exception.fsr & 0xf) {
4748
+ case M_FAKE_FSR_NSC_EXEC:
4749
+ /*
4750
+ * Exception generated when we try to execute code at an address
4751
+ * which is marked as Secure & Non-Secure Callable and the CPU
4752
+ * is in the Non-Secure state. The only instruction which can
4753
+ * be executed like this is SG (and that only if both halves of
4754
+ * the SG instruction have the same security attributes.)
4755
+ * Everything else must generate an INVEP SecureFault, so we
4756
+ * emulate the SG instruction here.
4757
+ */
4758
+ if (v7m_handle_execute_nsc(cpu)) {
4759
+ return;
4760
+ }
4761
+ break;
4762
+ case M_FAKE_FSR_SFAULT:
4763
+ /*
4764
+ * Various flavours of SecureFault for attempts to execute or
4765
+ * access data in the wrong security state.
4766
+ */
4767
+ switch (cs->exception_index) {
4768
+ case EXCP_PREFETCH_ABORT:
4769
+ if (env->v7m.secure) {
4770
+ env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
4771
+ qemu_log_mask(CPU_LOG_INT,
4772
+ "...really SecureFault with SFSR.INVTRAN\n");
4773
+ } else {
4774
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
4775
+ qemu_log_mask(CPU_LOG_INT,
4776
+ "...really SecureFault with SFSR.INVEP\n");
4777
+ }
4778
+ break;
4779
+ case EXCP_DATA_ABORT:
4780
+ /* This must be an NS access to S memory */
4781
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
4782
+ qemu_log_mask(CPU_LOG_INT,
4783
+ "...really SecureFault with SFSR.AUVIOL\n");
4784
+ break;
4785
+ }
4786
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4787
+ break;
4788
+ case 0x8: /* External Abort */
4789
+ switch (cs->exception_index) {
4790
+ case EXCP_PREFETCH_ABORT:
4791
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
4792
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
4793
+ break;
4794
+ case EXCP_DATA_ABORT:
4795
+ env->v7m.cfsr[M_REG_NS] |=
4796
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
4797
+ env->v7m.bfar = env->exception.vaddress;
4798
+ qemu_log_mask(CPU_LOG_INT,
4799
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
4800
+ env->v7m.bfar);
4801
+ break;
4802
+ }
4803
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
4804
+ break;
4805
+ default:
4806
+ /*
4807
+ * All other FSR values are either MPU faults or "can't happen
4808
+ * for M profile" cases.
4809
+ */
4810
+ switch (cs->exception_index) {
4811
+ case EXCP_PREFETCH_ABORT:
4812
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
4813
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
4814
+ break;
4815
+ case EXCP_DATA_ABORT:
4816
+ env->v7m.cfsr[env->v7m.secure] |=
4817
+ (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
4818
+ env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
4819
+ qemu_log_mask(CPU_LOG_INT,
4820
+ "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
4821
+ env->v7m.mmfar[env->v7m.secure]);
4822
+ break;
4823
+ }
4824
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
4825
+ env->v7m.secure);
4826
+ break;
4827
+ }
4828
+ break;
4829
+ case EXCP_BKPT:
4830
+ if (semihosting_enabled()) {
4831
+ int nr;
4832
+ nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
4833
+ if (nr == 0xab) {
4834
+ env->regs[15] += 2;
4835
+ qemu_log_mask(CPU_LOG_INT,
4836
+ "...handling as semihosting call 0x%x\n",
4837
+ env->regs[0]);
4838
+ env->regs[0] = do_arm_semihosting(env);
4839
+ return;
4840
+ }
4841
+ }
4842
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
4843
+ break;
4844
+ case EXCP_IRQ:
4845
+ break;
4846
+ case EXCP_EXCEPTION_EXIT:
4847
+ if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
4848
+ /* Must be v8M security extension function return */
4849
+ assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
4850
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
4851
+ if (do_v7m_function_return(cpu)) {
4852
+ return;
4853
+ }
4854
+ } else {
4855
+ do_v7m_exception_exit(cpu);
4856
+ return;
4857
+ }
4858
+ break;
4859
+ case EXCP_LAZYFP:
4860
+ /*
4861
+ * We already pended the specific exception in the NVIC in the
4862
+ * v7m_preserve_fp_state() helper function.
4863
+ */
4864
+ break;
4865
+ default:
4866
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
4867
+ return; /* Never happens. Keep compiler happy. */
4868
+ }
4869
+
4870
+ if (arm_feature(env, ARM_FEATURE_V8)) {
4871
+ lr = R_V7M_EXCRET_RES1_MASK |
4872
+ R_V7M_EXCRET_DCRS_MASK;
4873
+ /*
4874
+ * The S bit indicates whether we should return to Secure
4875
+ * or NonSecure (ie our current state).
4876
+ * The ES bit indicates whether we're taking this exception
4877
+ * to Secure or NonSecure (ie our target state). We set it
4878
+ * later, in v7m_exception_taken().
4879
+ * The SPSEL bit is also set in v7m_exception_taken() for v8M.
4880
+ * This corresponds to the ARM ARM pseudocode for v8M setting
4881
+ * some LR bits in PushStack() and some in ExceptionTaken();
4882
+ * the distinction matters for the tailchain cases where we
4883
+ * can take an exception without pushing the stack.
4884
+ */
4885
+ if (env->v7m.secure) {
4886
+ lr |= R_V7M_EXCRET_S_MASK;
4887
+ }
4888
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
4889
+ lr |= R_V7M_EXCRET_FTYPE_MASK;
4890
+ }
4891
+ } else {
4892
+ lr = R_V7M_EXCRET_RES1_MASK |
4893
+ R_V7M_EXCRET_S_MASK |
4894
+ R_V7M_EXCRET_DCRS_MASK |
4895
+ R_V7M_EXCRET_FTYPE_MASK |
4896
+ R_V7M_EXCRET_ES_MASK;
4897
+ if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
4898
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
4899
+ }
4900
+ }
4901
+ if (!arm_v7m_is_handler_mode(env)) {
4902
+ lr |= R_V7M_EXCRET_MODE_MASK;
4903
+ }
4904
+
4905
+ ignore_stackfaults = v7m_push_stack(cpu);
4906
+ v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
4907
+}
4908
+
4909
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
4910
+{
4911
+ uint32_t mask;
4912
+ unsigned el = arm_current_el(env);
4913
+
4914
+ /* First handle registers which unprivileged can read */
4915
+
4916
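+ /* 'reg' here is the SYSm field of the MRS insn; values with bit 7 set are the _NS aliases */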
+ switch (reg) {
4917
+ case 0 ... 7: /* xPSR sub-fields */
4918
+ mask = 0;
4919
+ if ((reg & 1) && el) {
4920
+ mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
4921
+ }
4922
+ if (!(reg & 4)) {
4923
+ mask |= XPSR_NZCV | XPSR_Q; /* APSR */
4924
+ if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
4925
+ mask |= XPSR_GE;
4926
+ }
4927
+ }
4928
+ /* EPSR reads as zero */
4929
+ return xpsr_read(env) & mask;
4931
+ case 20: /* CONTROL */
4932
+ {
4933
+ uint32_t value = env->v7m.control[env->v7m.secure];
4934
+ if (!env->v7m.secure) {
4935
+ /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
4936
+ value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
4937
+ }
4938
+ return value;
4939
+ }
4940
+ case 0x94: /* CONTROL_NS */
4941
+ /*
4942
+ * We have to handle this here because unprivileged Secure code
4943
+ * can read the NS CONTROL register.
4944
+ */
4945
+ if (!env->v7m.secure) {
4946
+ return 0;
4947
+ }
4948
+ return env->v7m.control[M_REG_NS] |
4949
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
4950
+ }
4951
+
4952
+ if (el == 0) {
4953
+ return 0; /* unprivileged reads others as zero */
4954
+ }
4955
+
4956
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4957
+ switch (reg) {
4958
+ case 0x88: /* MSP_NS */
4959
+ if (!env->v7m.secure) {
4960
+ return 0;
4961
+ }
4962
+ return env->v7m.other_ss_msp;
4963
+ case 0x89: /* PSP_NS */
4964
+ if (!env->v7m.secure) {
4965
+ return 0;
4966
+ }
4967
+ return env->v7m.other_ss_psp;
4968
+ case 0x8a: /* MSPLIM_NS */
4969
+ if (!env->v7m.secure) {
4970
+ return 0;
4971
+ }
4972
+ return env->v7m.msplim[M_REG_NS];
4973
+ case 0x8b: /* PSPLIM_NS */
4974
+ if (!env->v7m.secure) {
4975
+ return 0;
4976
+ }
4977
+ return env->v7m.psplim[M_REG_NS];
4978
+ case 0x90: /* PRIMASK_NS */
4979
+ if (!env->v7m.secure) {
4980
+ return 0;
4981
+ }
4982
+ return env->v7m.primask[M_REG_NS];
4983
+ case 0x91: /* BASEPRI_NS */
4984
+ if (!env->v7m.secure) {
4985
+ return 0;
4986
+ }
4987
+ return env->v7m.basepri[M_REG_NS];
4988
+ case 0x93: /* FAULTMASK_NS */
4989
+ if (!env->v7m.secure) {
4990
+ return 0;
4991
+ }
4992
+ return env->v7m.faultmask[M_REG_NS];
4993
+ case 0x98: /* SP_NS */
4994
+ {
4995
+ /*
4996
+ * This gives the non-secure SP selected based on whether we're
4997
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
4998
+ */
4999
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
5000
+
5001
+ if (!env->v7m.secure) {
5002
+ return 0;
5003
+ }
5004
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
5005
+ return env->v7m.other_ss_psp;
5006
+ } else {
5007
+ return env->v7m.other_ss_msp;
5008
+ }
5009
+ }
5010
+ default:
5011
+ break;
5012
+ }
5013
+ }
5014
+
5015
+ switch (reg) {
5016
+ case 8: /* MSP */
5017
+ return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
5018
+ case 9: /* PSP */
5019
+ return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
5020
+ case 10: /* MSPLIM */
5021
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5022
+ goto bad_reg;
5023
+ }
5024
+ return env->v7m.msplim[env->v7m.secure];
5025
+ case 11: /* PSPLIM */
5026
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5027
+ goto bad_reg;
5028
+ }
5029
+ return env->v7m.psplim[env->v7m.secure];
5030
+ case 16: /* PRIMASK */
5031
+ return env->v7m.primask[env->v7m.secure];
5032
+ case 17: /* BASEPRI */
5033
+ case 18: /* BASEPRI_MAX */
5034
+ return env->v7m.basepri[env->v7m.secure];
5035
+ case 19: /* FAULTMASK */
5036
+ return env->v7m.faultmask[env->v7m.secure];
5037
+ default:
5038
+ bad_reg:
5039
+ qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
5040
+ " register %d\n", reg);
5041
+ return 0;
5042
+ }
5043
+}
5044
+
5045
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
5046
+{
5047
+ /*
5048
+ * We're passed bits [11..0] of the instruction; extract
5049
+ * SYSm and the mask bits.
5050
+ * Invalid combinations of SYSm and mask are UNPREDICTABLE;
5051
+ * we choose to treat them as if the mask bits were valid.
5052
+ * NB that the pseudocode 'mask' variable is bits [11..10],
5053
+ * whereas ours is [11..8].
5054
+ */
5055
+ uint32_t mask = extract32(maskreg, 8, 4);
5056
+ uint32_t reg = extract32(maskreg, 0, 8);
5057
+ int cur_el = arm_current_el(env);
5058
+
5059
+ if (cur_el == 0 && reg > 7 && reg != 20) {
5060
+ /*
5061
+ * only xPSR sub-fields and CONTROL.SFPA may be written by
5062
+ * unprivileged code
5063
+ */
5064
+ return;
5065
+ }
5066
+
5067
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
5068
+ switch (reg) {
5069
+ case 0x88: /* MSP_NS */
5070
+ if (!env->v7m.secure) {
5071
+ return;
5072
+ }
5073
+ env->v7m.other_ss_msp = val;
5074
+ return;
5075
+ case 0x89: /* PSP_NS */
5076
+ if (!env->v7m.secure) {
5077
+ return;
5078
+ }
5079
+ env->v7m.other_ss_psp = val;
5080
+ return;
5081
+ case 0x8a: /* MSPLIM_NS */
5082
+ if (!env->v7m.secure) {
5083
+ return;
5084
+ }
5085
+ env->v7m.msplim[M_REG_NS] = val & ~7;
5086
+ return;
5087
+ case 0x8b: /* PSPLIM_NS */
5088
+ if (!env->v7m.secure) {
5089
+ return;
5090
+ }
5091
+ env->v7m.psplim[M_REG_NS] = val & ~7;
5092
+ return;
5093
+ case 0x90: /* PRIMASK_NS */
5094
+ if (!env->v7m.secure) {
5095
+ return;
5096
+ }
5097
+ env->v7m.primask[M_REG_NS] = val & 1;
5098
+ return;
5099
+ case 0x91: /* BASEPRI_NS */
5100
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
5101
+ return;
5102
+ }
5103
+ env->v7m.basepri[M_REG_NS] = val & 0xff;
5104
+ return;
5105
+ case 0x93: /* FAULTMASK_NS */
5106
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
5107
+ return;
5108
+ }
5109
+ env->v7m.faultmask[M_REG_NS] = val & 1;
5110
+ return;
5111
+ case 0x94: /* CONTROL_NS */
5112
+ if (!env->v7m.secure) {
5113
+ return;
5114
+ }
5115
+ write_v7m_control_spsel_for_secstate(env,
5116
+ val & R_V7M_CONTROL_SPSEL_MASK,
5117
+ M_REG_NS);
5118
+ if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
5119
+ env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
5120
+ env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
5121
+ }
5122
+ /*
5123
+ * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
5124
+ * RES0 if the FPU is not present, and is stored in the S bank
5125
+ */
5126
+ if (arm_feature(env, ARM_FEATURE_VFP) &&
5127
+ extract32(env->v7m.nsacr, 10, 1)) {
5128
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
5129
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
5130
+ }
5131
+ return;
5132
+ case 0x98: /* SP_NS */
5133
+ {
5134
+ /*
5135
+ * This gives the non-secure SP selected based on whether we're
5136
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
5137
+ */
5138
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
5139
+ bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
5140
+ uint32_t limit;
5141
+
5142
+ if (!env->v7m.secure) {
5143
+ return;
5144
+ }
5145
+
5146
+ limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
5147
+
5148
+ if (val < limit) {
5149
+ CPUState *cs = env_cpu(env);
5150
+
5151
+ cpu_restore_state(cs, GETPC(), true);
5152
+ raise_exception(env, EXCP_STKOF, 0, 1);
5153
+ }
5154
+
5155
+ if (is_psp) {
5156
+ env->v7m.other_ss_psp = val;
5157
+ } else {
5158
+ env->v7m.other_ss_msp = val;
5159
+ }
5160
+ return;
5161
+ }
5162
+ default:
5163
+ break;
5164
+ }
5165
+ }
5166
+
5167
+ switch (reg) {
5168
+ case 0 ... 7: /* xPSR sub-fields */
5169
+ /* only APSR is actually writable */
5170
+ if (!(reg & 4)) {
5171
+ uint32_t apsrmask = 0;
5172
+
5173
+ if (mask & 8) {
5174
+ apsrmask |= XPSR_NZCV | XPSR_Q;
5175
+ }
5176
+ if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
5177
+ apsrmask |= XPSR_GE;
5178
+ }
5179
+ xpsr_write(env, val, apsrmask);
5180
+ }
5181
+ break;
5182
+ case 8: /* MSP */
5183
+ if (v7m_using_psp(env)) {
5184
+ env->v7m.other_sp = val;
5185
+ } else {
5186
+ env->regs[13] = val;
5187
+ }
5188
+ break;
5189
+ case 9: /* PSP */
5190
+ if (v7m_using_psp(env)) {
5191
+ env->regs[13] = val;
5192
+ } else {
5193
+ env->v7m.other_sp = val;
5194
+ }
5195
+ break;
5196
+ case 10: /* MSPLIM */
5197
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5198
+ goto bad_reg;
5199
+ }
5200
+ env->v7m.msplim[env->v7m.secure] = val & ~7;
5201
+ break;
5202
+ case 11: /* PSPLIM */
5203
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5204
+ goto bad_reg;
5205
+ }
5206
+ env->v7m.psplim[env->v7m.secure] = val & ~7;
5207
+ break;
5208
+ case 16: /* PRIMASK */
5209
+ env->v7m.primask[env->v7m.secure] = val & 1;
5210
+ break;
5211
+ case 17: /* BASEPRI */
5212
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5213
+ goto bad_reg;
5214
+ }
5215
+ env->v7m.basepri[env->v7m.secure] = val & 0xff;
5216
+ break;
5217
+ case 18: /* BASEPRI_MAX */
5218
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5219
+ goto bad_reg;
5220
+ }
5221
+ val &= 0xff;
5222
+ if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
5223
+ || env->v7m.basepri[env->v7m.secure] == 0)) {
5224
+ env->v7m.basepri[env->v7m.secure] = val;
5225
+ }
5226
+ break;
5227
+ case 19: /* FAULTMASK */
5228
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5229
+ goto bad_reg;
5230
+ }
5231
+ env->v7m.faultmask[env->v7m.secure] = val & 1;
5232
+ break;
5233
+ case 20: /* CONTROL */
5234
+ /*
5235
+ * Writing to the SPSEL bit only has an effect if we are in
5236
+ * thread mode; other bits can be updated by any privileged code.
5237
+ * write_v7m_control_spsel() deals with updating the SPSEL bit in
5238
+ * env->v7m.control, so we only need update the others.
5239
+ * For v7M, we must just ignore explicit writes to SPSEL in handler
5240
+ * mode; for v8M the write is permitted but will have no effect.
5241
+ * All these bits are writes-ignored from non-privileged code,
5242
+ * except for SFPA.
5243
+ */
5244
+ if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
5245
+ !arm_v7m_is_handler_mode(env))) {
5246
+ write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
5247
+ }
5248
+ if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
5249
+ env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
5250
+ env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
5251
+ }
5252
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
5253
+ /*
5254
+ * SFPA is RAZ/WI from NS or if no FPU.
5255
+ * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
5256
+ * Both are stored in the S bank.
5257
+ */
5258
+ if (env->v7m.secure) {
5259
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
5260
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
5261
+ }
5262
+ if (cur_el > 0 &&
5263
+ (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
5264
+ extract32(env->v7m.nsacr, 10, 1))) {
5265
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
5266
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
5267
+ }
5268
+ }
5269
+ break;
5270
+ default:
5271
+ bad_reg:
5272
+ qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
5273
+ " register %d\n", reg);
5274
+ return;
5275
+ }
5276
+}
5277
+
5278
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
5279
+{
5280
+ /* Implement the TT instruction. op is bits [7:6] of the insn. */
5281
+ bool forceunpriv = op & 1;
5282
+ bool alt = op & 2;
5283
+ V8M_SAttributes sattrs = {};
5284
+ uint32_t tt_resp;
5285
+ bool r, rw, nsr, nsrw, mrvalid;
5286
+ int prot;
5287
+ ARMMMUFaultInfo fi = {};
5288
+ MemTxAttrs attrs = {};
5289
+ hwaddr phys_addr;
5290
+ ARMMMUIdx mmu_idx;
5291
+ uint32_t mregion;
5292
+ bool targetpriv;
5293
+ bool targetsec = env->v7m.secure;
5294
+ bool is_subpage;
5295
+
5296
+ /*
5297
+ * Work out what the security state and privilege level we're
5298
+ * interested in is...
5299
+ */
5300
+ if (alt) {
5301
+ targetsec = !targetsec;
5302
+ }
5303
+
5304
+ if (forceunpriv) {
5305
+ targetpriv = false;
5306
+ } else {
5307
+ targetpriv = arm_v7m_is_handler_mode(env) ||
5308
+ !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
5309
+ }
5310
+
5311
+ /* ...and then figure out which MMU index this is */
5312
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
5313
+
5314
+ /*
5315
+ * We know that the MPU and SAU don't care about the access type
5316
+ * for our purposes beyond that we don't want to claim to be
5317
+ * an insn fetch, so we arbitrarily call this a read.
5318
+ */
5319
+
5320
+ /*
5321
+ * MPU region info only available for privileged or if
5322
+ * inspecting the other MPU state.
5323
+ */
5324
+ if (arm_current_el(env) != 0 || alt) {
5325
+ /* We can ignore the return value as prot is always set */
5326
+ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
5327
+ &phys_addr, &attrs, &prot, &is_subpage,
5328
+ &fi, &mregion);
5329
+ if (mregion == -1) {
5330
+ mrvalid = false;
5331
+ mregion = 0;
5332
+ } else {
5333
+ mrvalid = true;
5334
+ }
5335
+ r = prot & PAGE_READ;
5336
+ rw = prot & PAGE_WRITE;
5337
+ } else {
5338
+ r = false;
5339
+ rw = false;
5340
+ mrvalid = false;
5341
+ mregion = 0;
5342
+ }
5343
+
5344
+ if (env->v7m.secure) {
5345
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
5346
+ nsr = sattrs.ns && r;
5347
+ nsrw = sattrs.ns && rw;
5348
+ } else {
5349
+ sattrs.ns = true;
5350
+ nsr = false;
5351
+ nsrw = false;
5352
+ }
5353
+
5354
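+ /*
+ * Pack the TT response word: IREGION, IRVALID, S, NSRW, NSR, RW, R,
+ * SRVALID, MRVALID, SREGION and MREGION, from high bits to low.
+ */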
+ tt_resp = (sattrs.iregion << 24) |
5355
+ (sattrs.irvalid << 23) |
5356
+ ((!sattrs.ns) << 22) |
5357
+ (nsrw << 21) |
5358
+ (nsr << 20) |
5359
+ (rw << 19) |
5360
+ (r << 18) |
5361
+ (sattrs.srvalid << 17) |
5362
+ (mrvalid << 16) |
5363
+ (sattrs.sregion << 8) |
5364
+ mregion;
5365
+
5366
+ return tt_resp;
5367
+}
5368
+
5369
+#endif /* !CONFIG_USER_ONLY */
5370
+
5371
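+/*
+ * An M-profile MMU index encodes the security state, privilege level
+ * and negative-priority status as independent bits.
+ */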
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
5372
+ bool secstate, bool priv, bool negpri)
5373
+{
5374
+ ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
5375
+
5376
+ if (priv) {
5377
+ mmu_idx |= ARM_MMU_IDX_M_PRIV;
5378
+ }
5379
+
5380
+ if (negpri) {
5381
+ mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
5382
+ }
5383
+
5384
+ if (secstate) {
5385
+ mmu_idx |= ARM_MMU_IDX_M_S;
5386
+ }
5387
+
5388
+ return mmu_idx;
5389
+}
5390
+
5391
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
5392
+ bool secstate, bool priv)
5393
+{
5394
+ bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
5395
+
5396
+ return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
5397
+}
5398
+
5399
+/* Return the MMU index for a v7M CPU in the specified security state */
5400
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
5401
+{
5402
+ bool priv = arm_current_el(env) != 0;
5403
+
5404
+ return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
5405
+}
--
2.20.1

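As an aside for readers tracing the MMU-index functions moved above, here is a
minimal standalone sketch of how the three components combine into one index;
the IDX_* names and bit positions are illustrative stand-ins, not the real
ARM_MMU_IDX_M_* encoding:

    #include <stdbool.h>

    /* Illustrative flags only; mirrors the shape of arm_v7m_mmu_idx_all() */
    enum {
        IDX_M_PRIV   = 1 << 0, /* privileged execution */
        IDX_M_NEGPRI = 1 << 1, /* running at negative priority */
        IDX_M_S      = 1 << 2, /* Secure state */
    };

    static int make_m_mmu_idx(bool secstate, bool priv, bool negpri)
    {
        int mmu_idx = 0;

        if (priv) {
            mmu_idx |= IDX_M_PRIV;
        }
        if (negpri) {
            mmu_idx |= IDX_M_NEGPRI;
        }
        if (secstate) {
            mmu_idx |= IDX_M_S;
        }
        return mmu_idx;
    }

With three independent bits there are eight possible M-profile translation
regimes, which is why the NVIC's negative-priority state has to feed into the
index via armv7m_nvic_neg_prio_requested().
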
From tags/pull-target-arm-20180323:

If the GIC has the security extension support enabled, then a
non-secure access to ICC_PMR must take account of the non-secure
view of interrupt priorities, where real priorities 0x00..0x7f
are secure-only and not visible to the non-secure guest, and
priorities 0x80..0xff are shown to the guest as if they were
0x00..0xff. We had the logic here wrong:
 * on reads, the priority is in the secure range if bit 7
   is clear, not if it is set
 * on writes, we want to set bit 7, not mask everything else

Our ICC_RPR read code had the same error as ICC_PMR.

(Compare the GICv3 spec pseudocode functions ICC_RPR_EL1
and ICC_PMR_EL1.)

Fixes: https://bugs.launchpad.net/qemu/+bug/1748434
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Message-id: 20180315133441.24149-1-peter.maydell@linaro.org
---
 hw/intc/arm_gicv3_cpuif.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@ static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
         /* NS access and Group 0 is inaccessible to NS: return the
          * NS view of the current priority
          */
-        if (value & 0x80) {
+        if ((value & 0x80) == 0) {
             /* Secure priorities not visible to NS */
             value = 0;
         } else if (value != 0xff) {
@@ -XXX,XX +XXX,XX @@ static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
             /* Current PMR in the secure range, don't allow NS to change it */
             return;
         }
-        value = (value >> 1) & 0x80;
+        value = (value >> 1) | 0x80;
     }
     cs->icc_pmr_el1 = value;
     gicv3_cpuif_update(cs);
@@ -XXX,XX +XXX,XX @@ static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
     if (arm_feature(env, ARM_FEATURE_EL3) &&
         !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
         /* NS GIC access and Group 0 is inaccessible to NS */
-        if (prio & 0x80) {
+        if ((prio & 0x80) == 0) {
             /* NS mustn't see priorities in the Secure half of the range */
             prio = 0;
         } else if (prio != 0xff) {
--
2.16.2

From tags/pull-target-arm-20190704:

To prevent execution priority remaining negative if the guest
returns from an NMI or HardFault with a corrupted IPSR, the
v8M interrupt deactivation process forces the HardFault and NMI
to inactive based on the current raw execution priority,
even if the interrupt the guest is trying to deactivate
is something else. In the pseudocode this is done in the
Deactivate() function.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-3-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 40 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 5 deletions(-)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_get_pending_irq_info(void *opaque,
 int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
 {
     NVICState *s = (NVICState *)opaque;
-    VecInfo *vec;
+    VecInfo *vec = NULL;
     int ret;

     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

-    if (secure && exc_is_banked(irq)) {
-        vec = &s->sec_vectors[irq];
-    } else {
-        vec = &s->vectors[irq];
+    /*
+     * For negative priorities, v8M will forcibly deactivate the appropriate
+     * NMI or HardFault regardless of what interrupt we're being asked to
+     * deactivate (compare the DeActivate() pseudocode). This is a guard
+     * against software returning from NMI or HardFault with a corrupted
+     * IPSR and leaving the CPU in a negative-priority state.
+     * v7M does not do this, but simply deactivates the requested interrupt.
+     */
+    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
+        switch (armv7m_nvic_raw_execution_priority(s)) {
+        case -1:
+            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+                vec = &s->vectors[ARMV7M_EXCP_HARD];
+            } else {
+                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            }
+            break;
+        case -2:
+            vec = &s->vectors[ARMV7M_EXCP_NMI];
+            break;
+        case -3:
+            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            break;
+        default:
+            break;
+        }
+    }
+
+    if (!vec) {
+        if (secure && exc_is_banked(irq)) {
+            vec = &s->sec_vectors[irq];
+        } else {
+            vec = &s->vectors[irq];
+        }
     }

     trace_nvic_complete_irq(irq, secure);
--
2.20.1
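The Secure/Non-secure priority mapping that the GIC patch above corrects can be illustrated in isolation. This is only a sketch restating the transformation (the function names are invented; in QEMU the logic lives in icc_pmr_read() and icc_pmr_write()):

    #include <stdint.h>

    /* NS view of a GICv3 priority: 0x00..0x7f are Secure-only and
     * hidden; 0x80..0xff appear to the NS side stretched across the
     * whole range. 0xff (idle priority) is shown unchanged.
     */
    static uint32_t gic_prio_ns_view(uint32_t real)
    {
        if ((real & 0x80) == 0) {
            return 0;              /* Secure half: invisible to NS */
        }
        if (real == 0xff) {
            return 0xff;
        }
        return (real << 1) & 0xff;
    }

    /* Inverse direction: an NS write must always land in the
     * 0x80..0xff half, hence (value >> 1) | 0x80, not & 0x80.
     */
    static uint32_t gic_prio_from_ns(uint32_t ns)
    {
        return (ns >> 1) | 0x80;
    }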
From tags/pull-target-arm-20180323:

The BCM2836 uses a Cortex-A7, not a Cortex-A15. Update the device to
use the correct CPU.
https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2836/QA7_rev3.4.pdf

When the BCM2836 was introduced (bad5623690b) the Cortex-A7 was not
available, so the very similar Cortex-A15 was used. Since dcf578ed8ce
we can model the correct core.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alistair Francis <alistair@alistair23.me>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180319110215.16755-1-peter.maydell@linaro.org
---
 hw/arm/bcm2836.c | 2 +-
 hw/arm/raspi.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/bcm2836.c
+++ b/hw/arm/bcm2836.c
@@ -XXX,XX +XXX,XX @@ struct BCM283XInfo {
 static const BCM283XInfo bcm283x_socs[] = {
     {
         .name = TYPE_BCM2836,
-        .cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"),
+        .cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"),
         .clusterid = 0xf,
     },
 #ifdef TARGET_AARCH64
diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/raspi.c
+++ b/hw/arm/raspi.c
@@ -XXX,XX +XXX,XX @@ static void raspi2_machine_init(MachineClass *mc)
     mc->no_parallel = 1;
     mc->no_floppy = 1;
     mc->no_cdrom = 1;
-    mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15");
+    mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7");
     mc->max_cpus = BCM283X_NCPUS;
     mc->min_cpus = BCM283X_NCPUS;
     mc->default_cpus = BCM283X_NCPUS;
--
2.16.2

From tags/pull-target-arm-20190704:

In v8M, an attempt to return from an exception which is not
active is an illegal exception return. For this purpose,
exceptions which can configurably target either Secure or
NonSecure are not considered to be active if they are
configured for the opposite security state from the one
we're trying to return from (e.g. an attempt to return from
an NS NMI while NMI targets Secure). In the pseudocode this
is handled by IsActiveForState().

Detect this case rather than counting an active exception
possibly of the wrong security state as being sufficient.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-4-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
         return -1;
     }

-    ret = nvic_rettobase(s);
+    /*
+     * If this is a configurable exception and it is currently
+     * targeting the opposite security state from the one we're trying
+     * to complete it for, this counts as an illegal exception return.
+     * We still need to deactivate whatever vector the logic above has
+     * selected, though, as it might not be the same as the one for the
+     * requested exception number.
+     */
+    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
+        ret = -1;
+    } else {
+        ret = nvic_rettobase(s);
+    }

     vec->active = 0;
     if (vec->level) {
--
2.20.1
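Schematically, the new check implements the pseudocode's IsActiveForState() notion. A sketch, assuming the armv7m_nvic.c context (exc_is_banked() and exc_targets_secure() are the real helpers the patch uses; the wrapper itself is invented for illustration):

    /* Sketch: is exception 'irq' active with respect to security
     * state 'secure'? Banked exceptions keep per-state active flags,
     * so only non-banked, configurably-targeted exceptions can be
     * "active for the wrong state".
     */
    static bool exc_active_for_state(NVICState *s, int irq, bool secure)
    {
        if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
            return false;  /* targets the other state: illegal return */
        }
        return true;
    }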
From tags/pull-target-arm-20180323:

From: Paolo Bonzini <pbonzini@redhat.com>

Detected by Coverity (CID 1386072, 1386073, 1386076, 1386077). local_err
was unused, and this made the static analyzer unhappy.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20180320151355.25854-1-pbonzini@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/sd/sdhci.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/sd/sdhci.c
+++ b/hw/sd/sdhci.c
@@ -XXX,XX +XXX,XX @@ static void sdhci_pci_realize(PCIDevice *dev, Error **errp)
     Error *local_err = NULL;

     sdhci_initfn(s);
-    sdhci_common_realize(s, errp);
+    sdhci_common_realize(s, &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
         return;
@@ -XXX,XX +XXX,XX @@ static void sdhci_sysbus_realize(DeviceState *dev, Error ** errp)
     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
     Error *local_err = NULL;

-    sdhci_common_realize(s, errp);
+    sdhci_common_realize(s, &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
         return;
--
2.16.2

From tags/pull-target-arm-20190704:

In the various helper functions for v7M/v8M instructions, use
the _ra versions of cpu_stl_data() and friends. Otherwise we
may get wrong behaviour or an assert() due to not being able
to locate the TB if there is an exception on the memory access
or if it performs an IO operation when in icount mode.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-5-peter.maydell@linaro.org
---
 target/arm/m_helper.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
     }

     /* Note that these stores can throw exceptions on MPU faults */
-    cpu_stl_data(env, sp, nextinst);
-    cpu_stl_data(env, sp + 4, saved_psr);
+    cpu_stl_data_ra(env, sp, nextinst, GETPC());
+    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

     env->regs[13] = sp;
     env->regs[14] = 0xfeffffff;
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
     /* fptr is the value of Rn, the frame pointer we store the FP regs to */
     bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
     bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
+    uintptr_t ra = GETPC();

     assert(env->v7m.secure);

@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
      * Note that we do not use v7m_stack_write() here, because the
      * accesses should not set the FSR bits for stacking errors if they
      * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
-     * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
+     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
      * and longjmp out.
      */
     if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
             if (i >= 16) {
                 faddr += 8; /* skip the slot for the FPSCR */
             }
-            cpu_stl_data(env, faddr, slo);
-            cpu_stl_data(env, faddr + 4, shi);
+            cpu_stl_data_ra(env, faddr, slo, ra);
+            cpu_stl_data_ra(env, faddr + 4, shi, ra);
         }
-        cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
+        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);

         /*
          * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)

 void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
 {
+    uintptr_t ra = GETPC();
+
     /* fptr is the value of Rn, the frame pointer we load the FP regs from */
     assert(env->v7m.secure);

@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
             faddr += 8; /* skip the slot for the FPSCR */
         }

-        slo = cpu_ldl_data(env, faddr);
-        shi = cpu_ldl_data(env, faddr + 4);
+        slo = cpu_ldl_data_ra(env, faddr, ra);
+        shi = cpu_ldl_data_ra(env, faddr + 4, ra);

         dn = (uint64_t) shi << 32 | slo;
         *aa32_vfp_dreg(env, i / 2) = dn;
     }
-    fpscr = cpu_ldl_data(env, fptr + 0x40);
+    fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
     vfp_set_fpscr(env, fpscr);
 }

--
2.20.1
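The sdhci fix is an instance of the standard QEMU Error idiom: a function that wants to test whether a callee failed must collect the error into a local variable rather than passing its caller's errp straight through, because errp may be NULL and cannot be inspected. A minimal sketch of the idiom, with invented device names:

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        Error *local_err = NULL;

        /* Collect the callee's error locally so we can test it... */
        mydev_common_realize(dev, &local_err);
        if (local_err) {
            /* ...then hand it on; error_propagate() copes with
             * errp being NULL.
             */
            error_propagate(errp, local_err);
            return;
        }
        /* rest of realize */
    }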
From tags/pull-target-arm-20180323:

When a debug exception is taken to AArch32, it appears as a Prefetch
Abort, and the Instruction Fault Status Register (IFSR) must be set.
The IFSR has two possible formats, depending on whether LPAE is in
use. Factor out the code in arm_debug_excp_handler() which picks
an FSR value into its own utility function, update it to use
arm_fi_to_lfsc() and arm_fi_to_sfsc() rather than hard-coded constants,
and use the correct condition to select long or short format.

In particular this fixes a bug where we could select the short
format because we're at EL0 and the EL1 translation regime is
not using LPAE, but then route the debug exception to EL2 because
of MDCR_EL2.TDE and hand EL2 the wrong format FSR.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180320134114.30418-3-peter.maydell@linaro.org
---
 target/arm/internals.h | 25 +++++++++++++++++++++++++
 target/arm/op_helper.c | 12 ++----------
 2 files changed, 27 insertions(+), 10 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
     }
 }

+/* Return the FSR value for a debug exception (watchpoint, hardware
+ * breakpoint or BKPT insn) targeting the specified exception level.
+ */
+static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
+{
+    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
+    int target_el = arm_debug_target_el(env);
+    bool using_lpae = false;
+
+    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
+        using_lpae = true;
+    } else {
+        if (arm_feature(env, ARM_FEATURE_LPAE) &&
+            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
+            using_lpae = true;
+        }
+    }
+
+    if (using_lpae) {
+        return arm_fi_to_lfsc(&fi);
+    } else {
+        return arm_fi_to_sfsc(&fi);
+    }
+}
+
 #endif
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void arm_debug_excp_handler(CPUState *cs)

         cs->watchpoint_hit = NULL;

-        if (extended_addresses_enabled(env)) {
-            env->exception.fsr = (1 << 9) | 0x22;
-        } else {
-            env->exception.fsr = 0x2;
-        }
+        env->exception.fsr = arm_debug_exception_fsr(env);
         env->exception.vaddress = wp_hit->hitaddr;
         raise_exception(env, EXCP_DATA_ABORT,
                         syn_watchpoint(same_el, 0, wnr),
@@ -XXX,XX +XXX,XX @@ void arm_debug_excp_handler(CPUState *cs)
             return;
         }

-        if (extended_addresses_enabled(env)) {
-            env->exception.fsr = (1 << 9) | 0x22;
-        } else {
-            env->exception.fsr = 0x2;
-        }
+        env->exception.fsr = arm_debug_exception_fsr(env);
         /* FAR is UNKNOWN, so doesn't need setting */
         raise_exception(env, EXCP_PREFETCH_ABORT,
                         syn_breakpoint(same_el),
--
2.16.2

From tags/pull-target-arm-20190704:

Like most of the v7M memory mapped system registers, the systick
registers are accessible to privileged code only and user accesses
must generate a BusFault. We implement that for registers in
the NVIC proper already, but missed it for systick since we
implement it as a separate device. Correct the omission.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190617175317.27557-6-peter.maydell@linaro.org
---
 hw/timer/armv7m_systick.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/timer/armv7m_systick.c
+++ b/hw/timer/armv7m_systick.c
@@ -XXX,XX +XXX,XX @@ static void systick_timer_tick(void *opaque)
     }
 }

-static uint64_t systick_read(void *opaque, hwaddr addr, unsigned size)
+static MemTxResult systick_read(void *opaque, hwaddr addr, uint64_t *data,
+                                unsigned size, MemTxAttrs attrs)
 {
     SysTickState *s = opaque;
     uint32_t val;

+    if (attrs.user) {
+        /* Generate BusFault for unprivileged accesses */
+        return MEMTX_ERROR;
+    }
+
     switch (addr) {
     case 0x0: /* SysTick Control and Status. */
         val = s->control;
@@ -XXX,XX +XXX,XX @@ static uint64_t systick_read(void *opaque, hwaddr addr, unsigned size)
     }

     trace_systick_read(addr, val, size);
-    return val;
+    *data = val;
+    return MEMTX_OK;
 }

-static void systick_write(void *opaque, hwaddr addr,
-                          uint64_t value, unsigned size)
+static MemTxResult systick_write(void *opaque, hwaddr addr,
+                                 uint64_t value, unsigned size,
+                                 MemTxAttrs attrs)
 {
     SysTickState *s = opaque;

+    if (attrs.user) {
+        /* Generate BusFault for unprivileged accesses */
+        return MEMTX_ERROR;
+    }
+
     trace_systick_write(addr, value, size);

     switch (addr) {
@@ -XXX,XX +XXX,XX @@ static void systick_write(void *opaque, hwaddr addr,
         qemu_log_mask(LOG_GUEST_ERROR,
                       "SysTick: Bad write offset 0x%" HWADDR_PRIx "\n", addr);
     }
+    return MEMTX_OK;
 }

 static const MemoryRegionOps systick_ops = {
-    .read = systick_read,
-    .write = systick_write,
+    .read_with_attrs = systick_read,
+    .write_with_attrs = systick_write,
     .endianness = DEVICE_NATIVE_ENDIAN,
     .valid.min_access_size = 4,
     .valid.max_access_size = 4,
--
2.20.1
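The systick change follows the usual recipe for making an MMIO region privilege-sensitive: switch the MemoryRegionOps to the _with_attrs callbacks and fail the transaction when MemTxAttrs marks the access as unprivileged. Distilled into a generic sketch (the device name is invented):

    static MemTxResult mydev_read(void *opaque, hwaddr addr, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
    {
        if (attrs.user) {
            return MEMTX_ERROR;  /* the v7M core turns this into a BusFault */
        }
        *data = 0;  /* real register decode would go here */
        return MEMTX_OK;
    }

    static const MemoryRegionOps mydev_ops = {
        .read_with_attrs = mydev_read,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 4,
        .valid.max_access_size = 4,
    };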
From tags/pull-target-arm-20180323:

The MDCR_EL2.TDE bit allows the exception level targeted by debug
exceptions to be set to EL2 for code executing at EL0. We handle
this in the arm_debug_target_el() function, but this is only used for
hardware breakpoint and watchpoint exceptions, not for the exception
generated when the guest executes an AArch32 BKPT or AArch64 BRK
instruction. We don't have enough information for a translate-time
equivalent of arm_debug_target_el(), so instead make BKPT and BRK
call a special purpose helper which can do the routing, rather than
the generic exception_with_syndrome helper.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180320134114.30418-2-peter.maydell@linaro.org
---
 target/arm/helper.h | 1 +
 target/arm/op_helper.c | 8 ++++++++
 target/arm/translate-a64.c | 15 +++++++++++++--
 target/arm/translate.c | 19 ++++++++++++++-----
 4 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
                    i32, i32, i32, i32)
 DEF_HELPER_2(exception_internal, void, env, i32)
 DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
+DEF_HELPER_2(exception_bkpt_insn, void, env, i32)
 DEF_HELPER_1(setend, void, env)
 DEF_HELPER_2(wfi, void, env, i32)
 DEF_HELPER_1(wfe, void, env)
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
     raise_exception(env, excp, syndrome, target_el);
 }

+/* Raise an EXCP_BKPT with the specified syndrome register value,
+ * targeting the correct exception level for debug exceptions.
+ */
+void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
+{
+    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
+}
+
 uint32_t HELPER(cpsr_read)(CPUARMState *env)
 {
     return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
     s->base.is_jmp = DISAS_NORETURN;
 }

+static void gen_exception_bkpt_insn(DisasContext *s, int offset,
+                                    uint32_t syndrome)
+{
+    TCGv_i32 tcg_syn;
+
+    gen_a64_set_pc_im(s->pc - offset);
+    tcg_syn = tcg_const_i32(syndrome);
+    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
+    tcg_temp_free_i32(tcg_syn);
+    s->base.is_jmp = DISAS_NORETURN;
+}
+
 static void gen_ss_advance(DisasContext *s)
 {
     /* If the singlestep state is Active-not-pending, advance to
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
             break;
         }
         /* BRK */
-        gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16),
-                           default_exception_el(s));
+        gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
         break;
     case 2:
         if (op2_ll != 0) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
     s->base.is_jmp = DISAS_NORETURN;
 }

+static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
+{
+    TCGv_i32 tcg_syn;
+
+    gen_set_condexec(s);
+    gen_set_pc_im(s, s->pc - offset);
+    tcg_syn = tcg_const_i32(syn);
+    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
+    tcg_temp_free_i32(tcg_syn);
+    s->base.is_jmp = DISAS_NORETURN;
+}
+
 /* Force a TB lookup after an instruction that changes the CPU state. */
 static inline void gen_lookup_tb(DisasContext *s)
 {
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
         case 1:
             /* bkpt */
             ARCH(5);
-            gen_exception_insn(s, 4, EXCP_BKPT,
-                               syn_aa32_bkpt(imm16, false),
-                               default_exception_el(s));
+            gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
             break;
         case 2:
             /* Hypervisor call (v7) */
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
         {
             int imm8 = extract32(insn, 0, 8);
             ARCH(5);
-            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
-                               default_exception_el(s));
+            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
             break;
--
2.16.2

From tags/pull-target-arm-20190704:

Thumb instructions in an IT block are set up to be conditionally
executed depending on a set of condition bits encoded into the IT
bits of the CPSR/XPSR. The architecture specifies that if the
condition bits are 0b1111 this means "always execute" (like 0b1110),
not "never execute"; we were treating it as "never execute". (See
the ConditionHolds() pseudocode in both the A-profile and M-profile
Arm ARM.)

This is a bit of an obscure corner case, because the only legal
way to get to an 0b1111 set of condbits is to do an exception
return which sets the XPSR/CPSR up that way. An IT instruction
which encodes a condition sequence that would include an 0b1111 is
UNPREDICTABLE, and for v8A the CONSTRAINED UNPREDICTABLE choices
for such an IT insn are to NOP, UNDEF, or treat 0b1111 like 0b1110.
Add a comment noting that we take the latter option.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-7-peter.maydell@linaro.org
---
 target/arm/translate.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
             gen_nop_hint(s, (insn >> 4) & 0xf);
             break;
         }
-        /* If Then. */
+        /*
+         * IT (If-Then)
+         *
+         * Combinations of firstcond and mask which set up an 0b1111
+         * condition are UNPREDICTABLE; we take the CONSTRAINED
+         * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
+         * i.e. both meaning "execute always".
+         */
         s->condexec_cond = (insn >> 4) & 0xe;
         s->condexec_mask = insn & 0x1f;
         /* No actual code generated for this insn, just setup state. */
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
         uint32_t cond = dc->condexec_cond;

-        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
+        /*
+         * Conditionally skip the insn. Note that both 0xe and 0xf mean
+         * "always"; 0xf is not "never".
+         */
+        if (cond < 0x0e) {
             arm_skip_unless(dc, cond);
         }
     }
--
2.20.1
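The behaviour the Thumb patch implements matches the ConditionHolds() pseudocode: condition bits cond<3:1> pick the base test, cond<0> inverts it, except that 0b111x is unconditionally "execute". A sketch of that rule (basic_cond_test() is an assumed helper evaluating cond<3:1> against the NZCV flags, not a real QEMU function):

    #include <stdbool.h>
    #include <stdint.h>

    static bool basic_cond_test(uint32_t cond_31);  /* assumed: EQ/CS/MI/... */

    static bool condition_holds(uint32_t cond)
    {
        if ((cond >> 1) == 0x7) {
            /* 0b1110 (AL) and 0b1111 both mean "always execute";
             * 0b1111 is NOT "never".
             */
            return true;
        }
        bool result = basic_cond_test(cond >> 1);
        if (cond & 1) {
            result = !result;  /* odd encodings are the inverted test */
        }
        return result;
    }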
From tags/pull-target-arm-20180323:

From: Wei Huang <wei@redhat.com>

Instead of using "1.0" as the system version of SMBIOS, we should use
mc->name for the mach-virt machine type to be consistent with other
architectures. With this patch, "dmidecode -t 1" (e.g., "-M
virt-2.12,accel=kvm") will show:

  Handle 0x0100, DMI type 1, 27 bytes
  System Information
      Manufacturer: QEMU
      Product Name: KVM Virtual Machine
      Version: virt-2.12
      Serial Number: Not Specified
      ...

instead of:

  Handle 0x0100, DMI type 1, 27 bytes
  System Information
      Manufacturer: QEMU
      Product Name: KVM Virtual Machine
      Version: 1.0
      Serial Number: Not Specified
      ...

For backward compatibility, we allow older machine types to keep "1.0"
as the default system version.

Signed-off-by: Wei Huang <wei@redhat.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Message-id: 20180322212318.7182-1-wei@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/virt.h | 1 +
 hw/arm/virt.c | 8 +++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -XXX,XX +XXX,XX @@ typedef struct {
     bool no_its;
     bool no_pmu;
     bool claim_edge_triggered_timers;
+    bool smbios_old_sys_ver;
 } VirtMachineClass;

 typedef struct {
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@ static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)

 static void virt_build_smbios(VirtMachineState *vms)
 {
+    MachineClass *mc = MACHINE_GET_CLASS(vms);
+    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
     uint8_t *smbios_tables, *smbios_anchor;
     size_t smbios_tables_len, smbios_anchor_len;
     const char *product = "QEMU Virtual Machine";
@@ -XXX,XX +XXX,XX @@ static void virt_build_smbios(VirtMachineState *vms)
     }

     smbios_set_defaults("QEMU", product,
-                        "1.0", false, true, SMBIOS_ENTRY_POINT_30);
+                        vmc->smbios_old_sys_ver ? "1.0" : mc->name, false,
+                        true, SMBIOS_ENTRY_POINT_30);

     smbios_get_tables(NULL, 0, &smbios_tables, &smbios_tables_len,
                       &smbios_anchor, &smbios_anchor_len);
@@ -XXX,XX +XXX,XX @@ static void virt_2_11_instance_init(Object *obj)

 static void virt_machine_2_11_options(MachineClass *mc)
 {
+    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
     virt_machine_2_12_options(mc);
     SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_11);
+    vmc->smbios_old_sys_ver = true;
 }
 DEFINE_VIRT_MACHINE(2, 11)
--
2.16.2

From tags/pull-target-arm-20190704:

Coverity points out (CID 1402195) that the loop in trans_VMOV_imm_dp()
that iterates over the destination registers in a short-vector VMOV
accidentally throws away the returned updated register number
from vfp_advance_dreg(). Add the missing assignment. (We got this
correct in trans_VMOV_imm_sp().)

Fixes: 18cf951af9a27ae573a
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190702105115.9465-1-peter.maydell@linaro.org
---
 target/arm/translate-vfp.inc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)

         /* Set up the operands for the next iteration */
         veclen--;
-        vfp_advance_dreg(vd, delta_d);
+        vd = vfp_advance_dreg(vd, delta_d);
     }

     tcg_temp_free_i64(fd);
--
2.20.1
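The VMOV_imm_dp bug is a classic pure-function pitfall: vfp_advance_dreg() returns the next register number rather than mutating its argument, so a loop that discards the result writes the same register on every iteration. Reduced to a sketch (write_dreg() is an invented stand-in for the real register store, and the loop shape is simplified from trans_VMOV_imm_dp()):

    /* Sketch of a short-vector store loop; helpers are placeholders. */
    static void write_dreg(int reg, uint64_t val);
    static int vfp_advance_dreg(int reg, int delta);

    static void vmov_imm_shortvec(int vd, int delta_d, int veclen,
                                  uint64_t imm)
    {
        for (;;) {
            write_dreg(vd, imm);
            if (veclen == 0) {
                break;
            }
            /* Set up the operand for the next iteration */
            veclen--;
            vd = vfp_advance_dreg(vd, delta_d);  /* keep the result */
        }
    }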