A last collection of patches to squeeze in before rc0.
The patches from me are all bugfixes. Philippe's are just
code-movement, but I wanted to get these into 4.1 because
that kind of patch is so painful to have to rebase.
(The diffstat is huge but it's just code moving from file to file.)

thanks
-- PMM

The following changes since commit 234e256511e588680300600ce087c5185d68cf2a:

  Merge remote-tracking branch 'remotes/armbru/tags/pull-build-2019-07-02-v2' into staging (2019-07-04 15:58:46 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20190704

for you to fetch changes up to b75f3735802b5b33f10e4bfe374d4b17bb86d29a:

  target/arm: Correct VMOV_imm_dp handling of short vectors (2019-07-04 16:52:05 +0100)

----------------------------------------------------------------
target-arm queue:
 * more code-movement to separate TCG-only functions into their own files
 * Correct VMOV_imm_dp handling of short vectors
 * Execute Thumb instructions when their condbits are 0xf
 * armv7m_systick: Forbid non-privileged accesses
 * Use _ra versions of cpu_stl_data() in v7M helpers
 * v8M: Check state of exception being returned from
 * v8M: Forcibly clear negative-priority exceptions on deactivate

----------------------------------------------------------------
Peter Maydell (6):
      arm v8M: Forcibly clear negative-priority exceptions on deactivate
      target/arm: v8M: Check state of exception being returned from
      target/arm: Use _ra versions of cpu_stl_data() in v7M helpers
      hw/timer/armv7m_systick: Forbid non-privileged accesses
      target/arm: Execute Thumb instructions when their condbits are 0xf
      target/arm: Correct VMOV_imm_dp handling of short vectors

Philippe Mathieu-Daudé (3):
      target/arm: Move debug routines to debug_helper.c
      target/arm: Restrict semi-hosting to TCG
      target/arm/helper: Move M profile routines to m_helper.c

 target/arm/Makefile.objs       |    5 +-
 target/arm/cpu.h               |    7 +
 hw/intc/armv7m_nvic.c          |   54 +-
 hw/timer/armv7m_systick.c      |   26 +-
 target/arm/cpu.c               |    9 +-
 target/arm/debug_helper.c      |  311 +++++
 target/arm/helper.c            | 2646 +--------------------------------------
 target/arm/m_helper.c          | 2679 ++++++++++++++++++++++++++++++++++++++++
 target/arm/op_helper.c         |  295 -----
 target/arm/translate-vfp.inc.c |    2 +-
 target/arm/translate.c         |   15 +-
 11 files changed, 3096 insertions(+), 2953 deletions(-)
 create mode 100644 target/arm/debug_helper.c
 create mode 100644 target/arm/m_helper.c


Some arm patches before softfreeze. These are all bug fixes.

-- PMM

The following changes since commit 0ebf76aae58324b8f7bf6af798696687f5f4c2a9:

  Merge tag 'nvme-next-pull-request' of git://git.infradead.org/qemu-nvme into staging (2022-07-15 15:38:13 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220718

for you to fetch changes up to 004c8a8bc569c8b18fca6fc90ffe3223daaf17b7:

  Align Raspberry Pi DMA interrupts with Linux DTS (2022-07-18 13:25:13 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/intc/armv7m_nvic: ICPRn must not unpend an IRQ that is being held high
 * target/arm: Fill in VL for tbflags when SME enabled and SVE disabled
 * target/arm: Fix aarch64_sve_change_el for SME
 * linux-user/aarch64: Do not clear PROT_MTE on mprotect
 * target/arm: Honour VTCR_EL2 bits in Secure EL2
 * hw/adc: Fix CONV bit in NPCM7XX ADC CON register
 * hw/adc: Make adci[*] R/W in NPCM7XX ADC
 * target/arm: Don't set syndrome ISS for loads and stores with writeback
 * Align Raspberry Pi DMA interrupts with Linux DTS

----------------------------------------------------------------
Andrey Makarov (1):
      Align Raspberry Pi DMA interrupts with Linux DTS

Hao Wu (2):
      hw/adc: Fix CONV bit in NPCM7XX ADC CON register
      hw/adc: Make adci[*] R/W in NPCM7XX ADC

Peter Maydell (9):
      hw/intc/armv7m_nvic: ICPRn must not unpend an IRQ that is being held high
      target/arm: Define and use new regime_tcr_value() function
      target/arm: Calculate mask/base_mask in get_level1_table_address()
      target/arm: Fold regime_tcr() and regime_tcr_value() together
      target/arm: Fix big-endian host handling of VTCR
      target/arm: Store VTCR_EL2, VSTCR_EL2 registers as uint64_t
      target/arm: Store TCR_EL* registers as uint64_t
      target/arm: Honour VTCR_EL2 bits in Secure EL2
      target/arm: Don't set syndrome ISS for loads and stores with writeback

Richard Henderson (3):
      target/arm: Fill in VL for tbflags when SME enabled and SVE disabled
      target/arm: Fix aarch64_sve_change_el for SME
      linux-user/aarch64: Do not clear PROT_MTE on mprotect

 include/hw/arm/bcm2835_peripherals.h |   2 +
 target/arm/cpu.h                     |  38 +++++---
 target/arm/internals.h               |  34 +++----
 accel/tcg/translate-all.c            |  13 ++-
 hw/adc/npcm7xx_adc.c                 |   4 +-
 hw/arm/bcm2835_peripherals.c         |  26 +++++-
 hw/intc/armv7m_nvic.c                |   9 ++-
 target/arm/cpu.c                     |   2 +-
 target/arm/debug_helper.c            |   2 +-
 target/arm/helper.c                  | 128 ++++++++++++++-------------
 target/arm/ptw.c                     |  38 ++++-----
 target/arm/tlb_helper.c              |   2 +-
 target/arm/translate-a64.c           |   4 +-
 tests/qtest/bcm2835-dma-test.c       | 118 ++++++++++++++++++++++++++
 tests/qtest/npcm7xx_adc-test.c       |   2 +-
 tests/qtest/meson.build              |   3 +-
 16 files changed, 306 insertions(+), 119 deletions(-)
 create mode 100644 tests/qtest/bcm2835-dma-test.c

To prevent execution priority remaining negative if the guest
returns from an NMI or HardFault with a corrupted IPSR, the
v8M interrupt deactivation process forces the HardFault and NMI
to inactive based on the current raw execution priority,
even if the interrupt the guest is trying to deactivate
is something else. In the pseudocode this is done in the
Deactivate() function.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-3-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 40 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 5 deletions(-)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_get_pending_irq_info(void *opaque,
 int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
 {
     NVICState *s = (NVICState *)opaque;
-    VecInfo *vec;
+    VecInfo *vec = NULL;
     int ret;
 
     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
 
-    if (secure && exc_is_banked(irq)) {
-        vec = &s->sec_vectors[irq];
-    } else {
-        vec = &s->vectors[irq];
+    /*
+     * For negative priorities, v8M will forcibly deactivate the appropriate
+     * NMI or HardFault regardless of what interrupt we're being asked to
+     * deactivate (compare the DeActivate() pseudocode). This is a guard
+     * against software returning from NMI or HardFault with a corrupted
+     * IPSR and leaving the CPU in a negative-priority state.
+     * v7M does not do this, but simply deactivates the requested interrupt.
+     */
+    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
+        switch (armv7m_nvic_raw_execution_priority(s)) {
+        case -1:
+            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+                vec = &s->vectors[ARMV7M_EXCP_HARD];
+            } else {
+                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            }
+            break;
+        case -2:
+            vec = &s->vectors[ARMV7M_EXCP_NMI];
+            break;
+        case -3:
+            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            break;
+        default:
+            break;
+        }
+    }
+
+    if (!vec) {
+        if (secure && exc_is_banked(irq)) {
+            vec = &s->sec_vectors[irq];
+        } else {
+            vec = &s->vectors[irq];
+        }
     }
 
     trace_nvic_complete_irq(irq, secure);
--
2.20.1


In the M-profile Arm ARM, rule R_CVJS defines when an interrupt should
be set to the Pending state:
 A) when the input line is high and the interrupt is not Active
 B) when the input line transitions from low to high and the interrupt
    is Active
(Note that the first of these is an ongoing condition, and the
second is a point-in-time event.)

This can be rephrased as:
 1 when the line goes from low to high, set Pending
 2 when Active goes from 1 to 0, if line is high then set Pending
 3 ignore attempts to clear Pending when the line is high
   and Active is 0

where 1 covers both B and one of the "transition into condition A"
cases, 2 deals with the other "transition into condition A"
possibility, and 3 is "don't drop Pending if we're already in
condition A". Transitions out of condition A don't affect Pending
state.

We handle case 1 in set_irq_level(). For an interrupt (as opposed
to other kinds of exception) the only place where we clear Active
is in armv7m_nvic_complete_irq(), where we handle case 2 by
checking for whether we need to re-pend the exception. For case 3,
the only places where we clear Pending state on an interrupt are in
armv7m_nvic_acknowledge_irq() (where we are setting Active so it
doesn't count) and for writes to NVIC_ICPRn.

It is the "write to NVIC_ICPRn" case that we missed: we must ignore
this if the input line is high and the interrupt is not Active.
(This required behaviour is differently and perhaps more clearly
stated in the v7M Arm ARM, which has pseudocode in section B3.4.1
that implies it.)

Reported-by: Igor Kotrasiński <i.kotrasinsk@samsung.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20220628154724.3297442-1-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
         startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
 
         for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+            /*
+             * Note that if the input line is still held high and the interrupt
+             * is not active then rule R_CVJS requires that the Pending state
+             * remains set; in that case we mustn't let it be cleared.
+             */
             if (value & (1 << i) &&
-                (attrs.secure || s->itns[startvec + i])) {
+                (attrs.secure || s->itns[startvec + i]) &&
+                !(setval == 0 && s->vectors[startvec + i].level &&
+                  !s->vectors[startvec + i].active)) {
                 s->vectors[startvec + i].pending = setval;
             }
         }
--
2.25.1
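
As a footnote to the R_CVJS discussion above, the three rephrased rules
can be modelled in a few lines of standalone C. This is an illustrative
sketch only, not QEMU code: the irq_model structure and the function
names are invented for the example, and rule 3's guard mirrors the
condition this patch adds to the NVIC_ICPRn write path.

    #include <stdbool.h>

    struct irq_model {
        bool level;    /* current state of the input line */
        bool active;   /* interrupt Active state */
        bool pending;  /* interrupt Pending state */
    };

    /* Rule 1: a low-to-high transition on the line always sets Pending. */
    static void set_line(struct irq_model *irq, bool new_level)
    {
        if (!irq->level && new_level) {
            irq->pending = true;
        }
        irq->level = new_level;
    }

    /* Rule 2: when Active falls, re-pend if the line is still high. */
    static void complete_irq(struct irq_model *irq)
    {
        irq->active = false;
        if (irq->level) {
            irq->pending = true;
        }
    }

    /*
     * Rule 3: a write to ICPRn must not unpend while the line is high
     * and the interrupt is not Active (the case the patch fixes).
     */
    static void write_icpr(struct irq_model *irq)
    {
        if (irq->level && !irq->active) {
            return; /* condition A still holds: Pending stays set */
        }
        irq->pending = false;
    }

In this model, write_icpr() returning early is exactly rule 3; the
patch implements the same test with the .level and .active fields of
s->vectors[startvec + i].
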
From: Richard Henderson <richard.henderson@linaro.org>

When PSTATE.SM is set, VL = SVL even if SVE is disabled.
This is visible in kselftest ssve-test.

Reported-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220713045848.217364-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     }
     if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
         int sme_el = sme_exception_el(env, el);
+        bool sm = FIELD_EX64(env->svcr, SVCR, SM);
 
         DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
         if (sme_el == 0) {
             /* Similarly, do not compute SVL if SME is disabled. */
-            DP_TBFLAG_A64(flags, SVL, sve_vqm1_for_el_sm(env, el, true));
+            int svl = sve_vqm1_for_el_sm(env, el, true);
+            DP_TBFLAG_A64(flags, SVL, svl);
+            if (sm) {
+                /* If SVE is disabled, we will not have set VL above. */
+                DP_TBFLAG_A64(flags, VL, svl);
+            }
         }
-        if (FIELD_EX64(env->svcr, SVCR, SM)) {
+        if (sm) {
             DP_TBFLAG_A64(flags, PSTATE_SM, 1);
             DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
         }
--
2.25.1
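
The invariant this patch restores can be stated compactly: with
PSTATE.SM set, the vector length used for translation is the streaming
vector length (SVL), whether or not non-streaming SVE is usable at the
current EL. A minimal sketch of that rule, with a hypothetical helper
name (effective_vq_m1 is not a QEMU function; lengths are encoded as
quadwords minus one, as in the VL and SVL tbflags fields):

    #include <stdbool.h>

    /*
     * Illustrative restatement of the rule, not QEMU code: when
     * streaming mode is active the SME streaming length governs,
     * otherwise the plain SVE length does.
     */
    static int effective_vq_m1(bool pstate_sm, int sve_vq_m1, int sme_svq_m1)
    {
        return pstate_sm ? sme_svq_m1 : sve_vq_m1;
    }
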
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
In preparation for supporting TCG disablement on ARM, we move most
3
We were only checking for SVE disabled and not taking into
4
of TCG related v7m/v8m helpers and APIs into their own file.
4
account PSTATE.SM to check SME disabled, which resulted in
5
vectors being incorrectly truncated.
5
6
6
Note: It is easier to review this commit using the 'histogram'
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
diff algorithm:
8
Message-id: 20220713045848.217364-3-richard.henderson@linaro.org
8
9
$ git diff --diff-algorithm=histogram ...
10
or
11
$ git diff --histogram ...
12
13
Suggested-by: Samuel Ortiz <sameo@linux.intel.com>
14
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
15
Message-id: 20190702144335.10717-2-philmd@redhat.com
16
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
11
---
19
target/arm/Makefile.objs | 1 +
12
target/arm/helper.c | 31 +++++++++++++++++++++++++------
20
target/arm/helper.c | 2638 +------------------------------------
13
1 file changed, 25 insertions(+), 6 deletions(-)
21
target/arm/m_helper.c | 2676 ++++++++++++++++++++++++++++++++++++++
22
3 files changed, 2681 insertions(+), 2634 deletions(-)
23
create mode 100644 target/arm/m_helper.c
24
14
25
diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/Makefile.objs
28
+++ b/target/arm/Makefile.objs
29
@@ -XXX,XX +XXX,XX @@ obj-y += tlb_helper.o debug_helper.o
30
obj-y += translate.o op_helper.o
31
obj-y += crypto_helper.o
32
obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
33
+obj-y += m_helper.o
34
35
obj-$(CONFIG_SOFTMMU) += psci.o
36
37
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
38
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/helper.c
17
--- a/target/arm/helper.c
40
+++ b/target/arm/helper.c
18
+++ b/target/arm/helper.c
41
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
42
#include "qemu/crc32c.h"
20
}
43
#include "qemu/qemu-print.h"
21
}
44
#include "exec/exec-all.h"
22
45
-#include "exec/cpu_ldst.h"
23
+static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
46
#include <zlib.h> /* For crc32 */
24
+{
47
#include "hw/semihosting/semihost.h"
25
+ int exc_el;
48
#include "sysemu/cpus.h"
26
+
49
@@ -XXX,XX +XXX,XX @@
27
+ if (sm) {
50
#include "qemu/guest-random.h"
28
+ exc_el = sme_exception_el(env, el);
51
#ifdef CONFIG_TCG
29
+ } else {
52
#include "arm_ldst.h"
30
+ exc_el = sve_exception_el(env, el);
53
+#include "exec/cpu_ldst.h"
31
+ }
54
#endif
32
+ if (exc_el) {
55
33
+ return 0; /* disabled */
56
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
34
+ }
57
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rbit)(uint32_t x)
35
+ return sve_vqm1_for_el_sm(env, el, sm);
58
36
+}
59
#ifdef CONFIG_USER_ONLY
37
+
60
38
/*
61
-/* These should probably raise undefined insn exceptions. */
39
* Notice a change in SVE vector size when changing EL.
62
-void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
40
*/
63
-{
41
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
64
- ARMCPU *cpu = env_archcpu(env);
65
-
66
- cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
67
-}
68
-
69
-uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
70
-{
71
- ARMCPU *cpu = env_archcpu(env);
72
-
73
- cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
74
- return 0;
75
-}
76
-
77
-void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
78
-{
79
- /* translate.c should never generate calls here in user-only mode */
80
- g_assert_not_reached();
81
-}
82
-
83
-void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
84
-{
85
- /* translate.c should never generate calls here in user-only mode */
86
- g_assert_not_reached();
87
-}
88
-
89
-void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
90
-{
91
- /* translate.c should never generate calls here in user-only mode */
92
- g_assert_not_reached();
93
-}
94
-
95
-void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
96
-{
97
- /* translate.c should never generate calls here in user-only mode */
98
- g_assert_not_reached();
99
-}
100
-
101
-void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
102
-{
103
- /* translate.c should never generate calls here in user-only mode */
104
- g_assert_not_reached();
105
-}
106
-
107
-uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
108
-{
109
- /*
110
- * The TT instructions can be used by unprivileged code, but in
111
- * user-only emulation we don't have the MPU.
112
- * Luckily since we know we are NonSecure unprivileged (and that in
113
- * turn means that the A flag wasn't specified), all the bits in the
114
- * register must be zero:
115
- * IREGION: 0 because IRVALID is 0
116
- * IRVALID: 0 because NS
117
- * S: 0 because NS
118
- * NSRW: 0 because NS
119
- * NSR: 0 because NS
120
- * RW: 0 because unpriv and A flag not set
121
- * R: 0 because unpriv and A flag not set
122
- * SRVALID: 0 because NS
123
- * MRVALID: 0 because unpriv and A flag not set
124
- * SREGION: 0 becaus SRVALID is 0
125
- * MREGION: 0 because MRVALID is 0
126
- */
127
- return 0;
128
-}
129
-
130
static void switch_mode(CPUARMState *env, int mode)
131
{
42
{
132
ARMCPU *cpu = env_archcpu(env);
43
ARMCPU *cpu = env_archcpu(env);
133
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(int idx)
44
int old_len, new_len;
45
- bool old_a64, new_a64;
46
+ bool old_a64, new_a64, sm;
47
48
/* Nothing to do if no SVE. */
49
if (!cpu_isar_feature(aa64_sve, cpu)) {
50
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
51
* invoke ResetSVEState when taking an exception from, or
52
* returning to, AArch32 state when PSTATE.SM is enabled.
53
*/
54
- if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) {
55
+ sm = FIELD_EX64(env->svcr, SVCR, SM);
56
+ if (old_a64 != new_a64 && sm) {
57
arm_reset_sve_state(env);
58
return;
134
}
59
}
135
}
60
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
136
61
* we already have the correct register contents when encountering the
137
-/*
62
* vq0->vq0 transition between EL0->EL1.
138
- * What kind of stack write are we doing? This affects how exceptions
63
*/
139
- * generated during the stacking are treated.
64
- old_len = (old_a64 && !sve_exception_el(env, old_el)
140
- */
65
- ? sve_vqm1_for_el(env, old_el) : 0);
141
-typedef enum StackingMode {
66
- new_len = (new_a64 && !sve_exception_el(env, new_el)
142
- STACK_NORMAL,
67
- ? sve_vqm1_for_el(env, new_el) : 0);
143
- STACK_IGNFAULTS,
68
+ old_len = new_len = 0;
144
- STACK_LAZYFP,
69
+ if (old_a64) {
145
-} StackingMode;
70
+ old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
146
-
147
-static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
148
- ARMMMUIdx mmu_idx, StackingMode mode)
149
-{
150
- CPUState *cs = CPU(cpu);
151
- CPUARMState *env = &cpu->env;
152
- MemTxAttrs attrs = {};
153
- MemTxResult txres;
154
- target_ulong page_size;
155
- hwaddr physaddr;
156
- int prot;
157
- ARMMMUFaultInfo fi = {};
158
- bool secure = mmu_idx & ARM_MMU_IDX_M_S;
159
- int exc;
160
- bool exc_secure;
161
-
162
- if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
163
- &attrs, &prot, &page_size, &fi, NULL)) {
164
- /* MPU/SAU lookup failed */
165
- if (fi.type == ARMFault_QEMU_SFault) {
166
- if (mode == STACK_LAZYFP) {
167
- qemu_log_mask(CPU_LOG_INT,
168
- "...SecureFault with SFSR.LSPERR "
169
- "during lazy stacking\n");
170
- env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
171
- } else {
172
- qemu_log_mask(CPU_LOG_INT,
173
- "...SecureFault with SFSR.AUVIOL "
174
- "during stacking\n");
175
- env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
176
- }
177
- env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
178
- env->v7m.sfar = addr;
179
- exc = ARMV7M_EXCP_SECURE;
180
- exc_secure = false;
181
- } else {
182
- if (mode == STACK_LAZYFP) {
183
- qemu_log_mask(CPU_LOG_INT,
184
- "...MemManageFault with CFSR.MLSPERR\n");
185
- env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
186
- } else {
187
- qemu_log_mask(CPU_LOG_INT,
188
- "...MemManageFault with CFSR.MSTKERR\n");
189
- env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
190
- }
191
- exc = ARMV7M_EXCP_MEM;
192
- exc_secure = secure;
193
- }
194
- goto pend_fault;
195
- }
196
- address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
197
- attrs, &txres);
198
- if (txres != MEMTX_OK) {
199
- /* BusFault trying to write the data */
200
- if (mode == STACK_LAZYFP) {
201
- qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
202
- env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
203
- } else {
204
- qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
205
- env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
206
- }
207
- exc = ARMV7M_EXCP_BUS;
208
- exc_secure = false;
209
- goto pend_fault;
210
- }
211
- return true;
212
-
213
-pend_fault:
214
- /*
215
- * By pending the exception at this point we are making
216
- * the IMPDEF choice "overridden exceptions pended" (see the
217
- * MergeExcInfo() pseudocode). The other choice would be to not
218
- * pend them now and then make a choice about which to throw away
219
- * later if we have two derived exceptions.
220
- * The only case when we must not pend the exception but instead
221
- * throw it away is if we are doing the push of the callee registers
222
- * and we've already generated a derived exception (this is indicated
223
- * by the caller passing STACK_IGNFAULTS). Even in this case we will
224
- * still update the fault status registers.
225
- */
226
- switch (mode) {
227
- case STACK_NORMAL:
228
- armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
229
- break;
230
- case STACK_LAZYFP:
231
- armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
232
- break;
233
- case STACK_IGNFAULTS:
234
- break;
235
- }
236
- return false;
237
-}
238
-
239
-static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
240
- ARMMMUIdx mmu_idx)
241
-{
242
- CPUState *cs = CPU(cpu);
243
- CPUARMState *env = &cpu->env;
244
- MemTxAttrs attrs = {};
245
- MemTxResult txres;
246
- target_ulong page_size;
247
- hwaddr physaddr;
248
- int prot;
249
- ARMMMUFaultInfo fi = {};
250
- bool secure = mmu_idx & ARM_MMU_IDX_M_S;
251
- int exc;
252
- bool exc_secure;
253
- uint32_t value;
254
-
255
- if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
256
- &attrs, &prot, &page_size, &fi, NULL)) {
257
- /* MPU/SAU lookup failed */
258
- if (fi.type == ARMFault_QEMU_SFault) {
259
- qemu_log_mask(CPU_LOG_INT,
260
- "...SecureFault with SFSR.AUVIOL during unstack\n");
261
- env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
262
- env->v7m.sfar = addr;
263
- exc = ARMV7M_EXCP_SECURE;
264
- exc_secure = false;
265
- } else {
266
- qemu_log_mask(CPU_LOG_INT,
267
- "...MemManageFault with CFSR.MUNSTKERR\n");
268
- env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
269
- exc = ARMV7M_EXCP_MEM;
270
- exc_secure = secure;
271
- }
272
- goto pend_fault;
273
- }
274
-
275
- value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
276
- attrs, &txres);
277
- if (txres != MEMTX_OK) {
278
- /* BusFault trying to read the data */
279
- qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
280
- env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
281
- exc = ARMV7M_EXCP_BUS;
282
- exc_secure = false;
283
- goto pend_fault;
284
- }
285
-
286
- *dest = value;
287
- return true;
288
-
289
-pend_fault:
290
- /*
291
- * By pending the exception at this point we are making
292
- * the IMPDEF choice "overridden exceptions pended" (see the
293
- * MergeExcInfo() pseudocode). The other choice would be to not
294
- * pend them now and then make a choice about which to throw away
295
- * later if we have two derived exceptions.
296
- */
297
- armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
298
- return false;
299
-}
300
-
301
-void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
302
-{
303
- /*
304
- * Preserve FP state (because LSPACT was set and we are about
305
- * to execute an FP instruction). This corresponds to the
306
- * PreserveFPState() pseudocode.
307
- * We may throw an exception if the stacking fails.
308
- */
309
- ARMCPU *cpu = env_archcpu(env);
310
- bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
311
- bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
312
- bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
313
- bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
314
- uint32_t fpcar = env->v7m.fpcar[is_secure];
315
- bool stacked_ok = true;
316
- bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
317
- bool take_exception;
318
-
319
- /* Take the iothread lock as we are going to touch the NVIC */
320
- qemu_mutex_lock_iothread();
321
-
322
- /* Check the background context had access to the FPU */
323
- if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
324
- armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
325
- env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
326
- stacked_ok = false;
327
- } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
328
- armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
329
- env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
330
- stacked_ok = false;
331
- }
332
-
333
- if (!splimviol && stacked_ok) {
334
- /* We only stack if the stack limit wasn't violated */
335
- int i;
336
- ARMMMUIdx mmu_idx;
337
-
338
- mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
339
- for (i = 0; i < (ts ? 32 : 16); i += 2) {
340
- uint64_t dn = *aa32_vfp_dreg(env, i / 2);
341
- uint32_t faddr = fpcar + 4 * i;
342
- uint32_t slo = extract64(dn, 0, 32);
343
- uint32_t shi = extract64(dn, 32, 32);
344
-
345
- if (i >= 16) {
346
- faddr += 8; /* skip the slot for the FPSCR */
347
- }
348
- stacked_ok = stacked_ok &&
349
- v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
350
- v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
351
- }
352
-
353
- stacked_ok = stacked_ok &&
354
- v7m_stack_write(cpu, fpcar + 0x40,
355
- vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
356
- }
357
-
358
- /*
359
- * We definitely pended an exception, but it's possible that it
360
- * might not be able to be taken now. If its priority permits us
361
- * to take it now, then we must not update the LSPACT or FP regs,
362
- * but instead jump out to take the exception immediately.
363
- * If it's just pending and won't be taken until the current
364
- * handler exits, then we do update LSPACT and the FP regs.
365
- */
366
- take_exception = !stacked_ok &&
367
- armv7m_nvic_can_take_pending_exception(env->nvic);
368
-
369
- qemu_mutex_unlock_iothread();
370
-
371
- if (take_exception) {
372
- raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
373
- }
374
-
375
- env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
376
-
377
- if (ts) {
378
- /* Clear s0 to s31 and the FPSCR */
379
- int i;
380
-
381
- for (i = 0; i < 32; i += 2) {
382
- *aa32_vfp_dreg(env, i / 2) = 0;
383
- }
384
- vfp_set_fpscr(env, 0);
385
- }
386
- /*
387
- * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
388
- * unchanged.
389
- */
390
-}
391
-
392
-/*
393
- * Write to v7M CONTROL.SPSEL bit for the specified security bank.
394
- * This may change the current stack pointer between Main and Process
395
- * stack pointers if it is done for the CONTROL register for the current
396
- * security state.
397
- */
398
-static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
399
- bool new_spsel,
400
- bool secstate)
401
-{
402
- bool old_is_psp = v7m_using_psp(env);
403
-
404
- env->v7m.control[secstate] =
405
- deposit32(env->v7m.control[secstate],
406
- R_V7M_CONTROL_SPSEL_SHIFT,
407
- R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
408
-
409
- if (secstate == env->v7m.secure) {
410
- bool new_is_psp = v7m_using_psp(env);
411
- uint32_t tmp;
412
-
413
- if (old_is_psp != new_is_psp) {
414
- tmp = env->v7m.other_sp;
415
- env->v7m.other_sp = env->regs[13];
416
- env->regs[13] = tmp;
417
- }
418
- }
419
-}
420
-
421
-/*
422
- * Write to v7M CONTROL.SPSEL bit. This may change the current
423
- * stack pointer between Main and Process stack pointers.
424
- */
425
-static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
426
-{
427
- write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
428
-}
429
-
430
-void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
431
-{
432
- /*
433
- * Write a new value to v7m.exception, thus transitioning into or out
434
- * of Handler mode; this may result in a change of active stack pointer.
435
- */
436
- bool new_is_psp, old_is_psp = v7m_using_psp(env);
437
- uint32_t tmp;
438
-
439
- env->v7m.exception = new_exc;
440
-
441
- new_is_psp = v7m_using_psp(env);
442
-
443
- if (old_is_psp != new_is_psp) {
444
- tmp = env->v7m.other_sp;
445
- env->v7m.other_sp = env->regs[13];
446
- env->regs[13] = tmp;
447
- }
448
-}
449
-
450
-/* Switch M profile security state between NS and S */
451
-static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
452
-{
453
- uint32_t new_ss_msp, new_ss_psp;
454
-
455
- if (env->v7m.secure == new_secstate) {
456
- return;
457
- }
458
-
459
- /*
460
- * All the banked state is accessed by looking at env->v7m.secure
461
- * except for the stack pointer; rearrange the SP appropriately.
462
- */
463
- new_ss_msp = env->v7m.other_ss_msp;
464
- new_ss_psp = env->v7m.other_ss_psp;
465
-
466
- if (v7m_using_psp(env)) {
467
- env->v7m.other_ss_psp = env->regs[13];
468
- env->v7m.other_ss_msp = env->v7m.other_sp;
469
- } else {
470
- env->v7m.other_ss_msp = env->regs[13];
471
- env->v7m.other_ss_psp = env->v7m.other_sp;
472
- }
473
-
474
- env->v7m.secure = new_secstate;
475
-
476
- if (v7m_using_psp(env)) {
477
- env->regs[13] = new_ss_psp;
478
- env->v7m.other_sp = new_ss_msp;
479
- } else {
480
- env->regs[13] = new_ss_msp;
481
- env->v7m.other_sp = new_ss_psp;
482
- }
483
-}
484
-
485
-void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
486
-{
487
- /*
488
- * Handle v7M BXNS:
489
- * - if the return value is a magic value, do exception return (like BX)
490
- * - otherwise bit 0 of the return value is the target security state
491
- */
492
- uint32_t min_magic;
493
-
494
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
495
- /* Covers FNC_RETURN and EXC_RETURN magic */
496
- min_magic = FNC_RETURN_MIN_MAGIC;
497
- } else {
498
- /* EXC_RETURN magic only */
499
- min_magic = EXC_RETURN_MIN_MAGIC;
500
- }
501
-
502
- if (dest >= min_magic) {
503
- /*
504
- * This is an exception return magic value; put it where
505
- * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
506
- * Note that if we ever add gen_ss_advance() singlestep support to
507
- * M profile this should count as an "instruction execution complete"
508
- * event (compare gen_bx_excret_final_code()).
509
- */
510
- env->regs[15] = dest & ~1;
511
- env->thumb = dest & 1;
512
- HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
513
- /* notreached */
514
- }
515
-
516
- /* translate.c should have made BXNS UNDEF unless we're secure */
517
- assert(env->v7m.secure);
518
-
519
- if (!(dest & 1)) {
520
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
521
- }
522
- switch_v7m_security_state(env, dest & 1);
523
- env->thumb = 1;
524
- env->regs[15] = dest & ~1;
525
-}
526
-
527
-void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
528
-{
529
- /*
530
- * Handle v7M BLXNS:
531
- * - bit 0 of the destination address is the target security state
532
- */
533
-
534
- /* At this point regs[15] is the address just after the BLXNS */
535
- uint32_t nextinst = env->regs[15] | 1;
536
- uint32_t sp = env->regs[13] - 8;
537
- uint32_t saved_psr;
538
-
539
- /* translate.c will have made BLXNS UNDEF unless we're secure */
540
- assert(env->v7m.secure);
541
-
542
- if (dest & 1) {
543
- /*
544
- * Target is Secure, so this is just a normal BLX,
545
- * except that the low bit doesn't indicate Thumb/not.
546
- */
547
- env->regs[14] = nextinst;
548
- env->thumb = 1;
549
- env->regs[15] = dest & ~1;
550
- return;
551
- }
552
-
553
- /* Target is non-secure: first push a stack frame */
554
- if (!QEMU_IS_ALIGNED(sp, 8)) {
555
- qemu_log_mask(LOG_GUEST_ERROR,
556
- "BLXNS with misaligned SP is UNPREDICTABLE\n");
557
- }
558
-
559
- if (sp < v7m_sp_limit(env)) {
560
- raise_exception(env, EXCP_STKOF, 0, 1);
561
- }
562
-
563
- saved_psr = env->v7m.exception;
564
- if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
565
- saved_psr |= XPSR_SFPA;
566
- }
567
-
568
- /* Note that these stores can throw exceptions on MPU faults */
569
- cpu_stl_data(env, sp, nextinst);
570
- cpu_stl_data(env, sp + 4, saved_psr);
571
-
572
- env->regs[13] = sp;
573
- env->regs[14] = 0xfeffffff;
574
- if (arm_v7m_is_handler_mode(env)) {
575
- /*
576
- * Write a dummy value to IPSR, to avoid leaking the current secure
577
- * exception number to non-secure code. This is guaranteed not
578
- * to cause write_v7m_exception() to actually change stacks.
579
- */
580
- write_v7m_exception(env, 1);
581
- }
582
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
583
- switch_v7m_security_state(env, 0);
584
- env->thumb = 1;
585
- env->regs[15] = dest;
586
-}
587
-
588
-static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
589
- bool spsel)
590
-{
591
- /*
592
- * Return a pointer to the location where we currently store the
593
- * stack pointer for the requested security state and thread mode.
594
- * This pointer will become invalid if the CPU state is updated
595
- * such that the stack pointers are switched around (eg changing
596
- * the SPSEL control bit).
597
- * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
598
- * Unlike that pseudocode, we require the caller to pass us in the
599
- * SPSEL control bit value; this is because we also use this
600
- * function in handling of pushing of the callee-saves registers
601
- * part of the v8M stack frame (pseudocode PushCalleeStack()),
602
- * and in the tailchain codepath the SPSEL bit comes from the exception
603
- * return magic LR value from the previous exception. The pseudocode
604
- * opencodes the stack-selection in PushCalleeStack(), but we prefer
605
- * to make this utility function generic enough to do the job.
606
- */
607
- bool want_psp = threadmode && spsel;
608
-
609
- if (secure == env->v7m.secure) {
610
- if (want_psp == v7m_using_psp(env)) {
611
- return &env->regs[13];
612
- } else {
613
- return &env->v7m.other_sp;
614
- }
615
- } else {
616
- if (want_psp) {
617
- return &env->v7m.other_ss_psp;
618
- } else {
619
- return &env->v7m.other_ss_msp;
620
- }
621
- }
622
-}
623
-
624
-static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
625
- uint32_t *pvec)
626
-{
627
- CPUState *cs = CPU(cpu);
628
- CPUARMState *env = &cpu->env;
629
- MemTxResult result;
630
- uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
631
- uint32_t vector_entry;
632
- MemTxAttrs attrs = {};
633
- ARMMMUIdx mmu_idx;
634
- bool exc_secure;
635
-
636
- mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
637
-
638
- /*
639
- * We don't do a get_phys_addr() here because the rules for vector
640
- * loads are special: they always use the default memory map, and
641
- * the default memory map permits reads from all addresses.
642
- * Since there's no easy way to pass through to pmsav8_mpu_lookup()
643
- * that we want this special case which would always say "yes",
644
- * we just do the SAU lookup here followed by a direct physical load.
645
- */
646
- attrs.secure = targets_secure;
647
- attrs.user = false;
648
-
649
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
650
- V8M_SAttributes sattrs = {};
651
-
652
- v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
653
- if (sattrs.ns) {
654
- attrs.secure = false;
655
- } else if (!targets_secure) {
656
- /* NS access to S memory */
657
- goto load_fail;
658
- }
659
- }
660
-
661
- vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
662
- attrs, &result);
663
- if (result != MEMTX_OK) {
664
- goto load_fail;
665
- }
666
- *pvec = vector_entry;
667
- return true;
668
-
669
-load_fail:
670
- /*
671
- * All vector table fetch fails are reported as HardFault, with
672
- * HFSR.VECTTBL and .FORCED set. (FORCED is set because
673
- * technically the underlying exception is a MemManage or BusFault
674
- * that is escalated to HardFault.) This is a terminal exception,
675
- * so we will either take the HardFault immediately or else enter
676
- * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
677
- */
678
- exc_secure = targets_secure ||
679
- !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
680
- env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
681
- armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
682
- return false;
683
-}
684
-
685
-static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
686
-{
687
- /*
688
- * Return the integrity signature value for the callee-saves
689
- * stack frame section. @lr is the exception return payload/LR value
690
- * whose FType bit forms bit 0 of the signature if FP is present.
691
- */
692
- uint32_t sig = 0xfefa125a;
693
-
694
- if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
695
- sig |= 1;
696
- }
697
- return sig;
698
-}
699
-
700
-static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
701
- bool ignore_faults)
702
-{
703
- /*
704
- * For v8M, push the callee-saves register part of the stack frame.
705
- * Compare the v8M pseudocode PushCalleeStack().
706
- * In the tailchaining case this may not be the current stack.
707
- */
708
- CPUARMState *env = &cpu->env;
709
- uint32_t *frame_sp_p;
710
- uint32_t frameptr;
711
- ARMMMUIdx mmu_idx;
712
- bool stacked_ok;
713
- uint32_t limit;
714
- bool want_psp;
715
- uint32_t sig;
716
- StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
717
-
718
- if (dotailchain) {
719
- bool mode = lr & R_V7M_EXCRET_MODE_MASK;
720
- bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
721
- !mode;
722
-
723
- mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
724
- frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
725
- lr & R_V7M_EXCRET_SPSEL_MASK);
726
- want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
727
- if (want_psp) {
728
- limit = env->v7m.psplim[M_REG_S];
729
- } else {
730
- limit = env->v7m.msplim[M_REG_S];
731
- }
732
- } else {
733
- mmu_idx = arm_mmu_idx(env);
734
- frame_sp_p = &env->regs[13];
735
- limit = v7m_sp_limit(env);
736
- }
737
-
738
- frameptr = *frame_sp_p - 0x28;
739
- if (frameptr < limit) {
740
- /*
741
- * Stack limit failure: set SP to the limit value, and generate
742
- * STKOF UsageFault. Stack pushes below the limit must not be
743
- * performed. It is IMPDEF whether pushes above the limit are
744
- * performed; we choose not to.
745
- */
746
- qemu_log_mask(CPU_LOG_INT,
747
- "...STKOF during callee-saves register stacking\n");
748
- env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
749
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
750
- env->v7m.secure);
751
- *frame_sp_p = limit;
752
- return true;
753
- }
754
-
755
- /*
756
- * Write as much of the stack frame as we can. A write failure may
757
- * cause us to pend a derived exception.
758
- */
759
- sig = v7m_integrity_sig(env, lr);
760
- stacked_ok =
761
- v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
762
- v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
763
- v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
764
- v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
765
- v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
766
- v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
767
- v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
768
- v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
769
- v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
770
-
771
- /* Update SP regardless of whether any of the stack accesses failed. */
772
- *frame_sp_p = frameptr;
773
-
774
- return !stacked_ok;
775
-}
776
-
777
-static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
778
- bool ignore_stackfaults)
779
-{
780
- /*
781
- * Do the "take the exception" parts of exception entry,
782
- * but not the pushing of state to the stack. This is
783
- * similar to the pseudocode ExceptionTaken() function.
784
- */
785
- CPUARMState *env = &cpu->env;
786
- uint32_t addr;
787
- bool targets_secure;
788
- int exc;
789
- bool push_failed = false;
790
-
791
- armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
792
- qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
793
- targets_secure ? "secure" : "nonsecure", exc);
794
-
795
- if (dotailchain) {
796
- /* Sanitize LR FType and PREFIX bits */
797
- if (!arm_feature(env, ARM_FEATURE_VFP)) {
798
- lr |= R_V7M_EXCRET_FTYPE_MASK;
799
- }
800
- lr = deposit32(lr, 24, 8, 0xff);
801
- }
802
-
803
- if (arm_feature(env, ARM_FEATURE_V8)) {
804
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
805
- (lr & R_V7M_EXCRET_S_MASK)) {
806
- /*
807
- * The background code (the owner of the registers in the
808
- * exception frame) is Secure. This means it may either already
809
- * have or now needs to push callee-saves registers.
810
- */
811
- if (targets_secure) {
812
- if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
813
- /*
814
- * We took an exception from Secure to NonSecure
815
- * (which means the callee-saved registers got stacked)
816
- * and are now tailchaining to a Secure exception.
817
- * Clear DCRS so eventual return from this Secure
818
- * exception unstacks the callee-saved registers.
819
- */
820
- lr &= ~R_V7M_EXCRET_DCRS_MASK;
821
- }
822
- } else {
823
- /*
824
- * We're going to a non-secure exception; push the
825
- * callee-saves registers to the stack now, if they're
826
- * not already saved.
827
- */
828
- if (lr & R_V7M_EXCRET_DCRS_MASK &&
829
- !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
830
- push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
831
- ignore_stackfaults);
832
- }
833
- lr |= R_V7M_EXCRET_DCRS_MASK;
834
- }
835
- }
836
-
837
- lr &= ~R_V7M_EXCRET_ES_MASK;
838
- if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
839
- lr |= R_V7M_EXCRET_ES_MASK;
840
- }
841
- lr &= ~R_V7M_EXCRET_SPSEL_MASK;
842
- if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
843
- lr |= R_V7M_EXCRET_SPSEL_MASK;
844
- }
845
-
846
- /*
847
- * Clear registers if necessary to prevent non-secure exception
848
- * code being able to see register values from secure code.
849
- * Where register values become architecturally UNKNOWN we leave
850
- * them with their previous values.
851
- */
852
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
853
- if (!targets_secure) {
854
- /*
855
- * Always clear the caller-saved registers (they have been
856
- * pushed to the stack earlier in v7m_push_stack()).
857
- * Clear callee-saved registers if the background code is
858
- * Secure (in which case these regs were saved in
859
- * v7m_push_callee_stack()).
860
- */
861
- int i;
862
-
863
- for (i = 0; i < 13; i++) {
864
- /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
865
- if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
866
- env->regs[i] = 0;
867
- }
868
- }
869
- /* Clear EAPSR */
870
- xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
871
- }
872
- }
873
- }
874
-
875
- if (push_failed && !ignore_stackfaults) {
876
- /*
877
- * Derived exception on callee-saves register stacking:
878
- * we might now want to take a different exception which
879
- * targets a different security state, so try again from the top.
880
- */
881
- qemu_log_mask(CPU_LOG_INT,
882
- "...derived exception on callee-saves register stacking");
883
- v7m_exception_taken(cpu, lr, true, true);
884
- return;
885
- }
886
-
887
- if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
888
- /* Vector load failed: derived exception */
889
- qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
890
- v7m_exception_taken(cpu, lr, true, true);
891
- return;
892
- }
893
-
894
- /*
895
- * Now we've done everything that might cause a derived exception
896
- * we can go ahead and activate whichever exception we're going to
897
- * take (which might now be the derived exception).
898
- */
899
- armv7m_nvic_acknowledge_irq(env->nvic);
900
-
901
- /* Switch to target security state -- must do this before writing SPSEL */
902
- switch_v7m_security_state(env, targets_secure);
903
- write_v7m_control_spsel(env, 0);
904
- arm_clear_exclusive(env);
905
- /* Clear SFPA and FPCA (has no effect if no FPU) */
906
- env->v7m.control[M_REG_S] &=
907
- ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
908
- /* Clear IT bits */
909
- env->condexec_bits = 0;
910
- env->regs[14] = lr;
911
- env->regs[15] = addr & 0xfffffffe;
912
- env->thumb = addr & 1;
913
-}
914
-
915
-static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
916
- bool apply_splim)
917
-{
918
- /*
919
- * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
920
- * that we will need later in order to do lazy FP reg stacking.
921
- */
922
- bool is_secure = env->v7m.secure;
923
- void *nvic = env->nvic;
924
- /*
925
- * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
926
- * are banked and we want to update the bit in the bank for the
927
- * current security state; and in one case we want to specifically
928
- * update the NS banked version of a bit even if we are secure.
929
- */
930
- uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
931
- uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
932
- uint32_t *fpccr = &env->v7m.fpccr[is_secure];
933
- bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
934
-
935
- env->v7m.fpcar[is_secure] = frameptr & ~0x7;
936
-
937
- if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
938
- bool splimviol;
939
- uint32_t splim = v7m_sp_limit(env);
940
- bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
941
- (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
942
-
943
- splimviol = !ign && frameptr < splim;
944
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
945
- }
946
-
947
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
948
-
949
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
950
-
951
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
952
-
953
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
954
- !arm_v7m_is_handler_mode(env));
955
-
956
- hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
957
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
958
-
959
- bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
960
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
961
-
962
- mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
963
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
964
-
965
- ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
966
- *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
967
-
968
- monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
969
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
970
-
971
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
972
- s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
973
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
974
-
975
- sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
976
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
977
- }
978
-}
979
-
980
-void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
981
-{
982
- /* fptr is the value of Rn, the frame pointer we store the FP regs to */
983
- bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
984
- bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
985
-
986
- assert(env->v7m.secure);
987
-
988
- if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
989
- return;
990
- }
991
-
992
- /* Check access to the coprocessor is permitted */
993
- if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
994
- raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
995
- }
996
-
997
- if (lspact) {
998
- /* LSPACT should not be active when there is active FP state */
999
- raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
1000
- }
1001
-
1002
- if (fptr & 7) {
1003
- raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
1004
- }
1005
-
1006
- /*
1007
- * Note that we do not use v7m_stack_write() here, because the
1008
- * accesses should not set the FSR bits for stacking errors if they
1009
- * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
1010
- * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
1011
- * and longjmp out.
1012
- */
1013
- if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1014
- bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1015
- int i;
1016
-
1017
- for (i = 0; i < (ts ? 32 : 16); i += 2) {
1018
- uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1019
- uint32_t faddr = fptr + 4 * i;
1020
- uint32_t slo = extract64(dn, 0, 32);
1021
- uint32_t shi = extract64(dn, 32, 32);
1022
-
1023
- if (i >= 16) {
1024
- faddr += 8; /* skip the slot for the FPSCR */
1025
- }
1026
- cpu_stl_data(env, faddr, slo);
1027
- cpu_stl_data(env, faddr + 4, shi);
1028
- }
1029
- cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
1030
-
1031
- /*
1032
- * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
1033
- * leave them unchanged, matching our choice in v7m_preserve_fp_state.
1034
- */
1035
- if (ts) {
1036
- for (i = 0; i < 32; i += 2) {
1037
- *aa32_vfp_dreg(env, i / 2) = 0;
1038
- }
1039
- vfp_set_fpscr(env, 0);
1040
- }
1041
- } else {
1042
- v7m_update_fpccr(env, fptr, false);
1043
- }
1044
-
1045
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
1046
-}
1047
-
1048
-void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
1049
-{
1050
- /* fptr is the value of Rn, the frame pointer we load the FP regs from */
1051
- assert(env->v7m.secure);
1052
-
1053
- if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1054
- return;
1055
- }
1056
-
1057
- /* Check access to the coprocessor is permitted */
1058
- if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
1059
- raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
1060
- }
1061
-
1062
- if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1063
- /* State in FP is still valid */
1064
- env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
1065
- } else {
1066
- bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1067
- int i;
1068
- uint32_t fpscr;
1069
-
1070
- if (fptr & 7) {
1071
- raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
1072
- }
1073
-
1074
- for (i = 0; i < (ts ? 32 : 16); i += 2) {
1075
- uint32_t slo, shi;
1076
- uint64_t dn;
1077
- uint32_t faddr = fptr + 4 * i;
1078
-
1079
- if (i >= 16) {
1080
- faddr += 8; /* skip the slot for the FPSCR */
1081
- }
1082
-
1083
- slo = cpu_ldl_data(env, faddr);
1084
- shi = cpu_ldl_data(env, faddr + 4);
1085
-
1086
- dn = (uint64_t) shi << 32 | slo;
1087
- *aa32_vfp_dreg(env, i / 2) = dn;
1088
- }
1089
- fpscr = cpu_ldl_data(env, fptr + 0x40);
1090
- vfp_set_fpscr(env, fpscr);
1091
- }
1092
-
1093
- env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
1094
-}
1095
-
1096
-static bool v7m_push_stack(ARMCPU *cpu)
1097
-{
1098
- /*
1099
- * Do the "set up stack frame" part of exception entry,
1100
- * similar to pseudocode PushStack().
1101
- * Return true if we generate a derived exception (and so
1102
- * should ignore further stack faults trying to process
1103
- * that derived exception.)
1104
- */
1105
- bool stacked_ok = true, limitviol = false;
1106
- CPUARMState *env = &cpu->env;
1107
- uint32_t xpsr = xpsr_read(env);
1108
- uint32_t frameptr = env->regs[13];
1109
-    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
-    uint32_t framesize;
-    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
-
-    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
-        (env->v7m.secure || nsacr_cp10)) {
-        if (env->v7m.secure &&
-            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
-            framesize = 0xa8;
-        } else {
-            framesize = 0x68;
-        }
-    } else {
-        framesize = 0x20;
-    }
-
-    /* Align stack pointer if the guest wants that */
-    if ((frameptr & 4) &&
-        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
-        frameptr -= 4;
-        xpsr |= XPSR_SPREALIGN;
-    }
-
-    xpsr &= ~XPSR_SFPA;
-    if (env->v7m.secure &&
-        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
-        xpsr |= XPSR_SFPA;
-    }
-
-    frameptr -= framesize;
-
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        uint32_t limit = v7m_sp_limit(env);
-
-        if (frameptr < limit) {
-            /*
-             * Stack limit failure: set SP to the limit value, and generate
-             * STKOF UsageFault. Stack pushes below the limit must not be
-             * performed. It is IMPDEF whether pushes above the limit are
-             * performed; we choose not to.
-             */
-            qemu_log_mask(CPU_LOG_INT,
-                          "...STKOF during stacking\n");
-            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                    env->v7m.secure);
-            env->regs[13] = limit;
-            /*
-             * We won't try to perform any further memory accesses but
-             * we must continue through the following code to check for
-             * permission faults during FPU state preservation, and we
-             * must update FPCCR if lazy stacking is enabled.
-             */
-            limitviol = true;
-            stacked_ok = false;
-        }
-    }
-
-    /*
-     * Write as much of the stack frame as we can. If we fail a stack
-     * write this will result in a derived exception being pended
-     * (which may be taken in preference to the one we started with
-     * if it has higher priority).
-     */
-    stacked_ok = stacked_ok &&
-        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
-        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
-                        mmu_idx, STACK_NORMAL) &&
-        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
-                        mmu_idx, STACK_NORMAL) &&
-        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
-                        mmu_idx, STACK_NORMAL) &&
-        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
-                        mmu_idx, STACK_NORMAL) &&
-        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
-                        mmu_idx, STACK_NORMAL) &&
-        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
-                        mmu_idx, STACK_NORMAL) &&
-        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
-
-    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
-        /* FPU is active, try to save its registers */
-        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
-        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
-
-        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            qemu_log_mask(CPU_LOG_INT,
-                          "...SecureFault because LSPACT and FPCA both set\n");
-            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        } else if (!env->v7m.secure && !nsacr_cp10) {
-            qemu_log_mask(CPU_LOG_INT,
-                          "...Secure UsageFault with CFSR.NOCP because "
-                          "NSACR.CP10 prevents stacking FP regs\n");
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
-            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
-        } else {
-            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
-                /* Lazy stacking disabled, save registers now */
-                int i;
-                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
-                                                 arm_current_el(env) != 0);
-
-                if (stacked_ok && !cpacr_pass) {
-                    /*
-                     * Take UsageFault if CPACR forbids access. The pseudocode
-                     * here does a full CheckCPEnabled() but we know the NSACR
-                     * check can never fail as we have already handled that.
-                     */
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...UsageFault with CFSR.NOCP because "
-                                  "CPACR.CP10 prevents stacking FP regs\n");
-                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                            env->v7m.secure);
-                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
-                    stacked_ok = false;
-                }
-
-                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
-                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
-                    uint32_t faddr = frameptr + 0x20 + 4 * i;
-                    uint32_t slo = extract64(dn, 0, 32);
-                    uint32_t shi = extract64(dn, 32, 32);
-
-                    if (i >= 16) {
-                        faddr += 8; /* skip the slot for the FPSCR */
-                    }
-                    stacked_ok = stacked_ok &&
-                        v7m_stack_write(cpu, faddr, slo,
-                                        mmu_idx, STACK_NORMAL) &&
-                        v7m_stack_write(cpu, faddr + 4, shi,
-                                        mmu_idx, STACK_NORMAL);
-                }
-                stacked_ok = stacked_ok &&
-                    v7m_stack_write(cpu, frameptr + 0x60,
-                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
-                if (cpacr_pass) {
-                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
-                        *aa32_vfp_dreg(env, i / 2) = 0;
-                    }
-                    vfp_set_fpscr(env, 0);
-                }
-            } else {
-                /* Lazy stacking enabled, save necessary info to stack later */
-                v7m_update_fpccr(env, frameptr + 0x20, true);
-            }
-        }
-    }
-
-    /*
-     * If we broke a stack limit then SP was already updated earlier;
-     * otherwise we update SP regardless of whether any of the stack
-     * accesses failed or we took some other kind of fault.
-     */
-    if (!limitviol) {
-        env->regs[13] = frameptr;
-    }
-
-    return !stacked_ok;
-}
-
-static void do_v7m_exception_exit(ARMCPU *cpu)
-{
-    CPUARMState *env = &cpu->env;
-    uint32_t excret;
-    uint32_t xpsr, xpsr_mask;
-    bool ufault = false;
-    bool sfault = false;
-    bool return_to_sp_process;
-    bool return_to_handler;
-    bool rettobase = false;
-    bool exc_secure = false;
-    bool return_to_secure;
-    bool ftype;
-    bool restore_s16_s31;
-
-    /*
-     * If we're not in Handler mode then jumps to magic exception-exit
-     * addresses don't have magic behaviour. However for the v8M
-     * security extensions the magic secure-function-return has to
-     * work in thread mode too, so to avoid doing an extra check in
-     * the generated code we allow exception-exit magic to also cause the
-     * internal exception and bring us here in thread mode. Correct code
-     * will never try to do this (the following insn fetch will always
-     * fault) so the overhead of having taken an unnecessary exception
-     * doesn't matter.
-     */
-    if (!arm_v7m_is_handler_mode(env)) {
-        return;
-    }
-
-    /*
-     * In the spec pseudocode ExceptionReturn() is called directly
-     * from BXWritePC() and gets the full target PC value including
-     * bit zero. In QEMU's implementation we treat it as a normal
-     * jump-to-register (which is then caught later on), and so split
-     * the target value up between env->regs[15] and env->thumb in
-     * gen_bx(). Reconstitute it.
-     */
-    excret = env->regs[15];
-    if (env->thumb) {
-        excret |= 1;
-    }
-
-    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
-                  " previous exception %d\n",
-                  excret, env->v7m.exception);
-
-    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
-        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
-                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
-                      excret);
-    }
-
-    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
-
-    if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
-        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
-                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
-                      "if FPU not present\n",
-                      excret);
-        ftype = true;
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        /*
-         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
-         * we pick which FAULTMASK to clear.
-         */
-        if (!env->v7m.secure &&
-            ((excret & R_V7M_EXCRET_ES_MASK) ||
-             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
-            sfault = 1;
-            /* For all other purposes, treat ES as 0 (R_HXSR) */
-            excret &= ~R_V7M_EXCRET_ES_MASK;
-        }
-        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
-    }
-
-    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
-        /*
-         * Auto-clear FAULTMASK on return from other than NMI.
-         * If the security extension is implemented then this only
-         * happens if the raw execution priority is >= 0; the
-         * value of the ES bit in the exception return value indicates
-         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
-         */
-        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
-                env->v7m.faultmask[exc_secure] = 0;
-            }
-        } else {
-            env->v7m.faultmask[M_REG_NS] = 0;
-        }
-    }
-
-    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
-                                     exc_secure)) {
-    case -1:
-        /* attempt to exit an exception that isn't active */
-        ufault = true;
-        break;
-    case 0:
-        /* still an irq active now */
-        break;
-    case 1:
-        /*
-         * We returned to base exception level, no nesting.
-         * (In the pseudocode this is written using "NestedActivation != 1"
-         * where we have 'rettobase == false'.)
-         */
-        rettobase = true;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
-    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
-    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
-        (excret & R_V7M_EXCRET_S_MASK);
-
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            /*
-             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
-             * we choose to take the UsageFault.
-             */
-            if ((excret & R_V7M_EXCRET_S_MASK) ||
-                (excret & R_V7M_EXCRET_ES_MASK) ||
-                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
-                ufault = true;
-            }
-        }
-        if (excret & R_V7M_EXCRET_RES0_MASK) {
-            ufault = true;
-        }
-    } else {
-        /* For v7M we only recognize certain combinations of the low bits */
-        switch (excret & 0xf) {
-        case 1: /* Return to Handler */
-            break;
-        case 13: /* Return to Thread using Process stack */
-        case 9: /* Return to Thread using Main stack */
-            /*
-             * We only need to check NONBASETHRDENA for v7M, because in
-             * v8M this bit does not exist (it is RES1).
-             */
-            if (!rettobase &&
-                !(env->v7m.ccr[env->v7m.secure] &
-                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
-                ufault = true;
-            }
-            break;
-        default:
-            ufault = true;
-        }
-    }
-
-    /*
-     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
-     * Handler mode (and will be until we write the new XPSR.Interrupt
-     * field) this does not switch around the current stack pointer.
-     * We must do this before we do any kind of tailchaining, including
-     * for the derived exceptions on integrity check failures, or we will
-     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
-     */
-    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
-
-    /*
-     * Clear scratch FP values left in caller saved registers; this
-     * must happen before any kind of tail chaining.
-     */
-    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
-        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
-        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
-            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                          "stackframe: error during lazy state deactivation\n");
-            v7m_exception_taken(cpu, excret, true, false);
-            return;
-        } else {
-            /* Clear s0..s15 and FPSCR */
-            int i;
-
-            for (i = 0; i < 16; i += 2) {
-                *aa32_vfp_dreg(env, i / 2) = 0;
-            }
-            vfp_set_fpscr(env, 0);
-        }
-    }
-
-    if (sfault) {
-        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                      "stackframe: failed EXC_RETURN.ES validity check\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    if (ufault) {
-        /*
-         * Bad exception return: instead of popping the exception
-         * stack, directly take a usage fault on the current stack.
-         */
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
-                      "stackframe: failed exception return integrity check\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    /*
-     * Tailchaining: if there is currently a pending exception that
-     * is high enough priority to preempt execution at the level we're
-     * about to return to, then just directly take that exception now,
-     * avoiding an unstack-and-then-stack. Note that now we have
-     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
-     * our current execution priority is already the execution priority we are
-     * returning to -- none of the state we would unstack or set based on
-     * the EXCRET value affects it.
-     */
-    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
-        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    switch_v7m_security_state(env, return_to_secure);
-
-    {
-        /*
-         * The stack pointer we should be reading the exception frame from
-         * depends on bits in the magic exception return type value (and
-         * for v8M isn't necessarily the stack pointer we will eventually
-         * end up resuming execution with). Get a pointer to the location
-         * in the CPU state struct where the SP we need is currently being
-         * stored; we will use and modify it in place.
-         * We use this limited C variable scope so we don't accidentally
-         * use 'frame_sp_p' after we do something that makes it invalid.
-         */
-        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
-                                              return_to_secure,
-                                              !return_to_handler,
-                                              return_to_sp_process);
-        uint32_t frameptr = *frame_sp_p;
-        bool pop_ok = true;
-        ARMMMUIdx mmu_idx;
-        bool return_to_priv = return_to_handler ||
-            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
-
-        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
-                                                        return_to_priv);
-
-        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
-            arm_feature(env, ARM_FEATURE_V8)) {
-            qemu_log_mask(LOG_GUEST_ERROR,
-                          "M profile exception return with non-8-aligned SP "
-                          "for destination state is UNPREDICTABLE\n");
-        }
-
-        /* Do we need to pop callee-saved registers? */
-        if (return_to_secure &&
-            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
-             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
-            uint32_t actual_sig;
-
-            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
-
-            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
-                /* Take a SecureFault on the current stack */
-                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                              "stackframe: failed exception return integrity "
-                              "signature check\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-
-            pop_ok = pop_ok &&
-                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
-
-            frameptr += 0x28;
-        }
-
-        /* Pop registers */
-        pop_ok = pop_ok &&
-            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
-            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
-
-        if (!pop_ok) {
-            /*
-             * v7m_stack_read() pended a fault, so take it (as a tail
-             * chained exception on the same stack frame)
-             */
-            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
-            v7m_exception_taken(cpu, excret, true, false);
-            return;
-        }
-
-        /*
-         * Returning from an exception with a PC with bit 0 set is defined
-         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
-         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
-         * the lsbit, and there are several RTOSes out there which incorrectly
-         * assume the r15 in the stack frame should be a Thumb-style "lsbit
-         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
-         * complain about the badly behaved guest.
-         */
-        if (env->regs[15] & 1) {
-            env->regs[15] &= ~1U;
-            if (!arm_feature(env, ARM_FEATURE_V8)) {
-                qemu_log_mask(LOG_GUEST_ERROR,
-                              "M profile return from interrupt with misaligned "
-                              "PC is UNPREDICTABLE on v7M\n");
-            }
-        }
-
-        if (arm_feature(env, ARM_FEATURE_V8)) {
-            /*
-             * For v8M we have to check whether the xPSR exception field
-             * matches the EXCRET value for return to handler/thread
-             * before we commit to changing the SP and xPSR.
-             */
-            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
-            if (return_to_handler != will_be_handler) {
-                /*
-                 * Take an INVPC UsageFault on the current stack.
-                 * By this point we will have switched to the security state
-                 * for the background state, so this UsageFault will target
-                 * that state.
-                 */
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                        env->v7m.secure);
-                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
-                              "stackframe: failed exception return integrity "
-                              "check\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-        }
-
-        if (!ftype) {
-            /* FP present and we need to handle it */
-            if (!return_to_secure &&
-                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...taking SecureFault on existing stackframe: "
-                              "Secure LSPACT set but exception return is "
-                              "not to secure state\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-
-            restore_s16_s31 = return_to_secure &&
-                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
-
-            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
-                /* State in FPU is still valid, just clear LSPACT */
-                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
-            } else {
-                int i;
-                uint32_t fpscr;
-                bool cpacr_pass, nsacr_pass;
-
-                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
-                                            return_to_priv);
-                nsacr_pass = return_to_secure ||
-                    extract32(env->v7m.nsacr, 10, 1);
-
-                if (!cpacr_pass) {
-                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                            return_to_secure);
-                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...taking UsageFault on existing "
-                                  "stackframe: CPACR.CP10 prevents unstacking "
-                                  "FP regs\n");
-                    v7m_exception_taken(cpu, excret, true, false);
-                    return;
-                } else if (!nsacr_pass) {
-                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
-                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...taking Secure UsageFault on existing "
-                                  "stackframe: NSACR.CP10 prevents unstacking "
-                                  "FP regs\n");
-                    v7m_exception_taken(cpu, excret, true, false);
-                    return;
-                }
-
-                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
-                    uint32_t slo, shi;
-                    uint64_t dn;
-                    uint32_t faddr = frameptr + 0x20 + 4 * i;
-
-                    if (i >= 16) {
-                        faddr += 8; /* Skip the slot for the FPSCR */
-                    }
-
-                    pop_ok = pop_ok &&
-                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
-                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
-
-                    if (!pop_ok) {
-                        break;
-                    }
-
-                    dn = (uint64_t)shi << 32 | slo;
-                    *aa32_vfp_dreg(env, i / 2) = dn;
-                }
-                pop_ok = pop_ok &&
-                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
-                if (pop_ok) {
-                    vfp_set_fpscr(env, fpscr);
-                }
-                if (!pop_ok) {
-                    /*
-                     * These regs are 0 if security extension present;
-                     * otherwise merely UNKNOWN. We zero always.
-                     */
-                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
-                        *aa32_vfp_dreg(env, i / 2) = 0;
-                    }
-                    vfp_set_fpscr(env, 0);
-                }
-            }
-        }
-        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
-                                               V7M_CONTROL, FPCA, !ftype);
-
-        /* Commit to consuming the stack frame */
-        frameptr += 0x20;
-        if (!ftype) {
-            frameptr += 0x48;
-            if (restore_s16_s31) {
-                frameptr += 0x40;
-            }
-        }
-        /*
-         * Undo stack alignment (the SPREALIGN bit indicates that the original
-         * pre-exception SP was not 8-aligned and we added a padding word to
-         * align it, so we undo this by ORing in the bit that increases it
-         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
-         * would work too but a logical OR is how the pseudocode specifies it.)
-         */
-        if (xpsr & XPSR_SPREALIGN) {
-            frameptr |= 4;
-        }
-        *frame_sp_p = frameptr;
-    }
-
-    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
-    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
-        xpsr_mask &= ~XPSR_GE;
-    }
-    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
-    xpsr_write(env, xpsr, xpsr_mask);
-
-    if (env->v7m.secure) {
-        bool sfpa = xpsr & XPSR_SFPA;
-
-        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
-                                               V7M_CONTROL, SFPA, sfpa);
-    }
-
-    /*
-     * The restored xPSR exception field will be zero if we're
-     * resuming in Thread mode. If that doesn't match what the
-     * exception return excret specified then this is a UsageFault.
-     * v7M requires we make this check here; v8M did it earlier.
-     */
-    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
-        /*
-         * Take an INVPC UsageFault by pushing the stack again;
-         * we know we're v7M so this is never a Secure UsageFault.
-         */
-        bool ignore_stackfaults;
-
-        assert(!arm_feature(env, ARM_FEATURE_V8));
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-        ignore_stackfaults = v7m_push_stack(cpu);
-        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
-                      "failed exception return integrity check\n");
-        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
-        return;
-    }
-
-    /* Otherwise, we have a successful exception exit. */
-    arm_clear_exclusive(env);
-    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
-}
-
-static bool do_v7m_function_return(ARMCPU *cpu)
-{
-    /*
-     * v8M security extensions magic function return.
-     * We may either:
-     *  (1) throw an exception (longjump)
-     *  (2) return true if we successfully handled the function return
-     *  (3) return false if we failed a consistency check and have
-     *      pended a UsageFault that needs to be taken now
-     *
-     * At this point the magic return value is split between env->regs[15]
-     * and env->thumb. We don't bother to reconstitute it because we don't
-     * need it (all values are handled the same way).
-     */
-    CPUARMState *env = &cpu->env;
-    uint32_t newpc, newpsr, newpsr_exc;
-
-    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
-
-    {
-        bool threadmode, spsel;
-        TCGMemOpIdx oi;
-        ARMMMUIdx mmu_idx;
-        uint32_t *frame_sp_p;
-        uint32_t frameptr;
-
-        /* Pull the return address and IPSR from the Secure stack */
-        threadmode = !arm_v7m_is_handler_mode(env);
-        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
-
-        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
-        frameptr = *frame_sp_p;
-
-        /*
-         * These loads may throw an exception (for MPU faults). We want to
-         * do them as secure, so work out what MMU index that is.
-         */
-        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
-        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
-        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
-
-        /* Consistency checks on new IPSR */
-        newpsr_exc = newpsr & XPSR_EXCP;
-        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
-              (env->v7m.exception == 1 && newpsr_exc != 0))) {
-            /* Pend the fault and tell our caller to take it */
-            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                    env->v7m.secure);
-            qemu_log_mask(CPU_LOG_INT,
-                          "...taking INVPC UsageFault: "
-                          "IPSR consistency check failed\n");
-            return false;
-        }
-
-        *frame_sp_p = frameptr + 8;
-    }
-
-    /* This invalidates frame_sp_p */
-    switch_v7m_security_state(env, true);
-    env->v7m.exception = newpsr_exc;
-    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-    if (newpsr & XPSR_SFPA) {
-        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
-    }
-    xpsr_write(env, 0, XPSR_IT);
-    env->thumb = newpc & 1;
-    env->regs[15] = newpc & ~1;
-
-    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
-    return true;
-}
-
-static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
-                               uint32_t addr, uint16_t *insn)
-{
-    /*
-     * Load a 16-bit portion of a v7M instruction, returning true on success,
-     * or false on failure (in which case we will have pended the appropriate
-     * exception).
-     * We need to do the instruction fetch's MPU and SAU checks
-     * like this because there is no MMU index that would allow
-     * doing the load with a single function call. Instead we must
-     * first check that the security attributes permit the load
-     * and that they don't mismatch on the two halves of the instruction,
-     * and then we do the load as a secure load (ie using the security
-     * attributes of the address, not the CPU, as architecturally required).
-     */
-    CPUState *cs = CPU(cpu);
-    CPUARMState *env = &cpu->env;
-    V8M_SAttributes sattrs = {};
-    MemTxAttrs attrs = {};
-    ARMMMUFaultInfo fi = {};
-    MemTxResult txres;
-    target_ulong page_size;
-    hwaddr physaddr;
-    int prot;
-
-    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
-    if (!sattrs.nsc || sattrs.ns) {
-        /*
-         * This must be the second half of the insn, and it straddles a
-         * region boundary with the second half not being S&NSC.
-         */
-        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        qemu_log_mask(CPU_LOG_INT,
-                      "...really SecureFault with SFSR.INVEP\n");
-        return false;
-    }
-    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
-                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
-        /* the MPU lookup failed */
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
-        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
-        return false;
-    }
-    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
-                                  attrs, &txres);
-    if (txres != MEMTX_OK) {
-        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
-        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
-        return false;
-    }
-    return true;
-}
-
-static bool v7m_handle_execute_nsc(ARMCPU *cpu)
-{
-    /*
-     * Check whether this attempt to execute code in a Secure & NS-Callable
-     * memory region is for an SG instruction; if so, then emulate the
-     * effect of the SG instruction and return true. Otherwise pend
-     * the correct kind of exception and return false.
-     */
-    CPUARMState *env = &cpu->env;
-    ARMMMUIdx mmu_idx;
-    uint16_t insn;
-
-    /*
-     * We should never get here unless get_phys_addr_pmsav8() caused
-     * an exception for NS executing in S&NSC memory.
-     */
-    assert(!env->v7m.secure);
-    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
-
-    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
-    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-
-    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
-        return false;
-    }
-
-    if (!env->thumb) {
-        goto gen_invep;
-    }
-
-    if (insn != 0xe97f) {
-        /*
-         * Not an SG instruction first half (we choose the IMPDEF
-         * early-SG-check option).
-         */
-        goto gen_invep;
-    }
-
-    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
-        return false;
-    }
-
-    if (insn != 0xe97f) {
-        /*
-         * Not an SG instruction second half (yes, both halves of the SG
-         * insn have the same hex value)
-         */
-        goto gen_invep;
-    }
-
-    /*
-     * OK, we have confirmed that we really have an SG instruction.
-     * We know we're NS in S memory so don't need to repeat those checks.
-     */
-    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
-                  ", executing it\n", env->regs[15]);
-    env->regs[14] &= ~1;
-    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-    switch_v7m_security_state(env, true);
-    xpsr_write(env, 0, XPSR_IT);
-    env->regs[15] += 4;
-    return true;
-
-gen_invep:
-    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-    qemu_log_mask(CPU_LOG_INT,
-                  "...really SecureFault with SFSR.INVEP\n");
-    return false;
-}
-
-void arm_v7m_cpu_do_interrupt(CPUState *cs)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    uint32_t lr;
-    bool ignore_stackfaults;
-
-    arm_log_exception(cs->exception_index);
-
-    /*
-     * For exceptions we just mark as pending on the NVIC, and let that
-     * handle it.
-     */
-    switch (cs->exception_index) {
-    case EXCP_UDEF:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
-        break;
-    case EXCP_NOCP:
-    {
-        /*
-         * NOCP might be directed to something other than the current
-         * security state if this fault is because of NSACR; we indicate
-         * the target security state using exception.target_el.
-         */
-        int target_secstate;
-
-        if (env->exception.target_el == 3) {
-            target_secstate = M_REG_S;
-        } else {
-            target_secstate = env->v7m.secure;
-        }
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
-        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
-        break;
-    }
-    case EXCP_INVSTATE:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
-        break;
-    case EXCP_STKOF:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
-        break;
-    case EXCP_LSERR:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-        break;
-    case EXCP_UNALIGNED:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
-        break;
-    case EXCP_SWI:
-        /* The PC already points to the next instruction. */
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
-        break;
-    case EXCP_PREFETCH_ABORT:
-    case EXCP_DATA_ABORT:
-        /*
-         * Note that for M profile we don't have a guest facing FSR, but
-         * the env->exception.fsr will be populated by the code that
-         * raises the fault, in the A profile short-descriptor format.
-         */
-        switch (env->exception.fsr & 0xf) {
-        case M_FAKE_FSR_NSC_EXEC:
-            /*
-             * Exception generated when we try to execute code at an address
-             * which is marked as Secure & Non-Secure Callable and the CPU
-             * is in the Non-Secure state. The only instruction which can
-             * be executed like this is SG (and that only if both halves of
-             * the SG instruction have the same security attributes.)
-             * Everything else must generate an INVEP SecureFault, so we
-             * emulate the SG instruction here.
-             */
-            if (v7m_handle_execute_nsc(cpu)) {
-                return;
-            }
-            break;
-        case M_FAKE_FSR_SFAULT:
-            /*
-             * Various flavours of SecureFault for attempts to execute or
-             * access data in the wrong security state.
-             */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                if (env->v7m.secure) {
-                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...really SecureFault with SFSR.INVTRAN\n");
-                } else {
-                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...really SecureFault with SFSR.INVEP\n");
-                }
-                break;
-            case EXCP_DATA_ABORT:
-                /* This must be an NS access to S memory */
-                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...really SecureFault with SFSR.AUVIOL\n");
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            break;
-        case 0x8: /* External Abort */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
-                break;
-            case EXCP_DATA_ABORT:
-                env->v7m.cfsr[M_REG_NS] |=
-                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
-                env->v7m.bfar = env->exception.vaddress;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
-                              env->v7m.bfar);
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
-            break;
-        default:
-            /*
-             * All other FSR values are either MPU faults or "can't happen
-             * for M profile" cases.
-             */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
-                break;
-            case EXCP_DATA_ABORT:
-                env->v7m.cfsr[env->v7m.secure] |=
-                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
-                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
-                              env->v7m.mmfar[env->v7m.secure]);
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
-                                    env->v7m.secure);
-            break;
-        }
-        break;
-    case EXCP_BKPT:
-        if (semihosting_enabled()) {
-            int nr;
-            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
-            if (nr == 0xab) {
-                env->regs[15] += 2;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...handling as semihosting call 0x%x\n",
-                              env->regs[0]);
-                env->regs[0] = do_arm_semihosting(env);
-                return;
-            }
-        }
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
-        break;
-    case EXCP_IRQ:
-        break;
-    case EXCP_EXCEPTION_EXIT:
-        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
-            /* Must be v8M security extension function return */
-            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
-            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
-            if (do_v7m_function_return(cpu)) {
-                return;
-            }
-        } else {
-            do_v7m_exception_exit(cpu);
-            return;
-        }
-        break;
-    case EXCP_LAZYFP:
-        /*
-         * We already pended the specific exception in the NVIC in the
-         * v7m_preserve_fp_state() helper function.
-         */
-        break;
-    default:
-        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
-        return; /* Never happens. Keep compiler happy. */
-    }
-
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        lr = R_V7M_EXCRET_RES1_MASK |
-            R_V7M_EXCRET_DCRS_MASK;
-        /*
-         * The S bit indicates whether we should return to Secure
-         * or NonSecure (ie our current state).
-         * The ES bit indicates whether we're taking this exception
-         * to Secure or NonSecure (ie our target state). We set it
-         * later, in v7m_exception_taken().
-         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
-         * This corresponds to the ARM ARM pseudocode for v8M setting
-         * some LR bits in PushStack() and some in ExceptionTaken();
-         * the distinction matters for the tailchain cases where we
-         * can take an exception without pushing the stack.
-         */
-        if (env->v7m.secure) {
-            lr |= R_V7M_EXCRET_S_MASK;
-        }
-        if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
-            lr |= R_V7M_EXCRET_FTYPE_MASK;
-        }
-    } else {
-        lr = R_V7M_EXCRET_RES1_MASK |
-            R_V7M_EXCRET_S_MASK |
-            R_V7M_EXCRET_DCRS_MASK |
-            R_V7M_EXCRET_FTYPE_MASK |
-            R_V7M_EXCRET_ES_MASK;
-        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
-            lr |= R_V7M_EXCRET_SPSEL_MASK;
-        }
-    }
-    if (!arm_v7m_is_handler_mode(env)) {
-        lr |= R_V7M_EXCRET_MODE_MASK;
-    }
-
-    ignore_stackfaults = v7m_push_stack(cpu);
-    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
-}
-
 /*
  * Function used to synchronize QEMU's AArch64 register set with AArch32
  * register set. This is necessary when switching between AArch32 and AArch64
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     return phys_addr;
 }

-uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
-{
-    uint32_t mask;
-    unsigned el = arm_current_el(env);
-
-    /* First handle registers which unprivileged can read */
-
-    switch (reg) {
-    case 0 ... 7: /* xPSR sub-fields */
-        mask = 0;
-        if ((reg & 1) && el) {
-            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
-        }
-        if (!(reg & 4)) {
-            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
-            if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
-                mask |= XPSR_GE;
-            }
-        }
-        /* EPSR reads as zero */
-        return xpsr_read(env) & mask;
-        break;
-    case 20: /* CONTROL */
-    {
-        uint32_t value = env->v7m.control[env->v7m.secure];
-        if (!env->v7m.secure) {
-            /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
-            value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
-        }
-        return value;
-    }
-    case 0x94: /* CONTROL_NS */
-        /*
-         * We have to handle this here because unprivileged Secure code
-         * can read the NS CONTROL register.
-         */
-        if (!env->v7m.secure) {
-            return 0;
-        }
-        return env->v7m.control[M_REG_NS] |
-            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
-    }
-
-    if (el == 0) {
-        return 0; /* unprivileged reads others as zero */
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        switch (reg) {
-        case 0x88: /* MSP_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.other_ss_msp;
-        case 0x89: /* PSP_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.other_ss_psp;
-        case 0x8a: /* MSPLIM_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.msplim[M_REG_NS];
-        case 0x8b: /* PSPLIM_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.psplim[M_REG_NS];
-        case 0x90: /* PRIMASK_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.primask[M_REG_NS];
-        case 0x91: /* BASEPRI_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.basepri[M_REG_NS];
-        case 0x93: /* FAULTMASK_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.faultmask[M_REG_NS];
-        case 0x98: /* SP_NS */
-        {
-            /*
-             * This gives the non-secure SP selected based on whether we're
-             * currently in handler mode or not, using the NS CONTROL.SPSEL.
-             */
-            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
-
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            if (!arm_v7m_is_handler_mode(env) && spsel) {
-                return env->v7m.other_ss_psp;
-            } else {
-                return env->v7m.other_ss_msp;
-            }
-        }
-        default:
-            break;
-        }
-    }
-
-    switch (reg) {
-    case 8: /* MSP */
-        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
-    case 9: /* PSP */
-        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
-    case 10: /* MSPLIM */
-        if (!arm_feature(env, ARM_FEATURE_V8)) {
-            goto bad_reg;
-        }
-        return env->v7m.msplim[env->v7m.secure];
-    case 11: /* PSPLIM */
-        if (!arm_feature(env, ARM_FEATURE_V8)) {
-            goto bad_reg;
-        }
-        return env->v7m.psplim[env->v7m.secure];
-    case 16: /* PRIMASK */
-        return env->v7m.primask[env->v7m.secure];
-    case 17: /* BASEPRI */
-    case 18: /* BASEPRI_MAX */
-        return env->v7m.basepri[env->v7m.secure];
-    case 19: /* FAULTMASK */
-        return env->v7m.faultmask[env->v7m.secure];
-    default:
-    bad_reg:
-        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
-                      " register %d\n", reg);
-        return 0;
-    }
-}
-
-void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
-{
-    /*
-     * We're passed bits [11..0] of the instruction; extract
-     * SYSm and the mask bits.
-     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
-     * we choose to treat them as if the mask bits were valid.
-     * NB that the pseudocode 'mask' variable is bits [11..10],
-     * whereas ours is [11..8].
-     */
-    uint32_t mask = extract32(maskreg, 8, 4);
-    uint32_t reg = extract32(maskreg, 0, 8);
-    int cur_el = arm_current_el(env);
-
-    if (cur_el == 0 && reg > 7 && reg != 20) {
-        /*
-         * only xPSR sub-fields and CONTROL.SFPA may be written by
-         * unprivileged code
-         */
-        return;
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        switch (reg) {
-        case 0x88: /* MSP_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.other_ss_msp = val;
-            return;
-        case 0x89: /* PSP_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.other_ss_psp = val;
-            return;
-        case 0x8a: /* MSPLIM_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.msplim[M_REG_NS] = val & ~7;
-            return;
-        case 0x8b: /* PSPLIM_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.psplim[M_REG_NS] = val & ~7;
-            return;
-        case 0x90: /* PRIMASK_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.primask[M_REG_NS] = val & 1;
-            return;
-        case 0x91: /* BASEPRI_NS */
-            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
-                return;
-            }
-            env->v7m.basepri[M_REG_NS] = val & 0xff;
-            return;
-        case 0x93: /* FAULTMASK_NS */
-            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
-                return;
-            }
-            env->v7m.faultmask[M_REG_NS] = val & 1;
-            return;
-        case 0x94: /* CONTROL_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            write_v7m_control_spsel_for_secstate(env,
-                                                 val & R_V7M_CONTROL_SPSEL_MASK,
-                                                 M_REG_NS);
-            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
-                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
-                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
-            }
-            /*
-             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
-             * RES0 if the FPU is not present, and is stored in the S bank
-             */
-            if (arm_feature(env, ARM_FEATURE_VFP) &&
-                extract32(env->v7m.nsacr, 10, 1)) {
-                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
-                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
-            }
-            return;
-        case 0x98: /* SP_NS */
-        {
-            /*
-             * This gives the non-secure SP selected based on whether we're
-             * currently in handler mode or not, using the NS CONTROL.SPSEL.
-             */
-            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
-            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
-            uint32_t limit;
-
-            if (!env->v7m.secure) {
-                return;
-            }
-
-            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
-
-            if (val < limit) {
-                CPUState *cs = env_cpu(env);
-
-                cpu_restore_state(cs, GETPC(), true);
-                raise_exception(env, EXCP_STKOF, 0, 1);
-            }
-
-            if (is_psp) {
-                env->v7m.other_ss_psp = val;
-            } else {
-                env->v7m.other_ss_msp = val;
-            }
-            return;
-        }
-        default:
-            break;
-        }
-    }
-
-    switch (reg) {
-    case 0 ... 7: /* xPSR sub-fields */
-        /* only APSR is actually writable */
-        if (!(reg & 4)) {
-            uint32_t apsrmask = 0;
-
-            if (mask & 8) {
-                apsrmask |= XPSR_NZCV | XPSR_Q;
-            }
-            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
-                apsrmask |= XPSR_GE;
-            }
-            xpsr_write(env, val, apsrmask);
-        }
-        break;
-    case 8: /* MSP */
-        if (v7m_using_psp(env)) {
-            env->v7m.other_sp = val;
-        } else {
-            env->regs[13] = val;
-        }
-        break;
-    case 9: /* PSP */
-        if (v7m_using_psp(env)) {
-            env->regs[13] = val;
-        } else {
-            env->v7m.other_sp = val;
-        }
-        break;
-    case 10: /* MSPLIM */
-        if (!arm_feature(env, ARM_FEATURE_V8)) {
-            goto bad_reg;
-        }
-        env->v7m.msplim[env->v7m.secure] = val & ~7;
-        break;
-    case 11: /* PSPLIM */
-        if (!arm_feature(env, ARM_FEATURE_V8)) {
-            goto bad_reg;
-        }
-        env->v7m.psplim[env->v7m.secure] = val & ~7;
-        break;
-    case 16: /* PRIMASK */
-        env->v7m.primask[env->v7m.secure] = val & 1;
-        break;
-    case 17: /* BASEPRI */
-        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
-            goto bad_reg;
-        }
-        env->v7m.basepri[env->v7m.secure] = val & 0xff;
-        break;
-    case 18: /* BASEPRI_MAX */
-        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
-            goto bad_reg;
-        }
-        val &= 0xff;
-        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
-                         || env->v7m.basepri[env->v7m.secure] == 0)) {
-            env->v7m.basepri[env->v7m.secure] = val;
-        }
-        break;
-    case 19: /* FAULTMASK */
-        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
-            goto bad_reg;
-        }
-        env->v7m.faultmask[env->v7m.secure] = val & 1;
-        break;
-    case 20: /* CONTROL */
-        /*
-         * Writing to the SPSEL bit only has an effect if we are in
-         * thread mode; other bits can be updated by any privileged code.
-         * write_v7m_control_spsel() deals with updating the SPSEL bit in
-         * env->v7m.control, so we only need update the others.
-         * For v7M, we must just ignore explicit writes to SPSEL in handler
-         * mode; for v8M the write is permitted but will have no effect.
-         * All these bits are writes-ignored from non-privileged code,
-         * except for SFPA.
-         */
-        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
-                           !arm_v7m_is_handler_mode(env))) {
-            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
-        }
-        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
-            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
-            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
-        }
-        if (arm_feature(env, ARM_FEATURE_VFP)) {
-            /*
-             * SFPA is RAZ/WI from NS or if no FPU.
-             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
-             * Both are stored in the S bank.
-             */
-            if (env->v7m.secure) {
-                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
-            }
-            if (cur_el > 0 &&
-                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
-                 extract32(env->v7m.nsacr, 10, 1))) {
-                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
-                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
-            }
-        }
-        break;
-    default:
-    bad_reg:
-        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
-                      " register %d\n", reg);
-        return;
-    }
-}
-
-uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
-{
-    /* Implement the TT instruction. op is bits [7:6] of the insn. */
-    bool forceunpriv = op & 1;
-    bool alt = op & 2;
-    V8M_SAttributes sattrs = {};
-    uint32_t tt_resp;
-    bool r, rw, nsr, nsrw, mrvalid;
-    int prot;
-    ARMMMUFaultInfo fi = {};
-    MemTxAttrs attrs = {};
-    hwaddr phys_addr;
-    ARMMMUIdx mmu_idx;
-    uint32_t mregion;
-    bool targetpriv;
-    bool targetsec = env->v7m.secure;
-    bool is_subpage;
-
-    /*
-     * Work out what the security state and privilege level we're
-     * interested in is...
-     */
-    if (alt) {
-        targetsec = !targetsec;
-    }
-
-    if (forceunpriv) {
-        targetpriv = false;
-    } else {
-        targetpriv = arm_v7m_is_handler_mode(env) ||
-            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
-    }
-
-    /* ...and then figure out which MMU index this is */
-    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
-
-    /*
-     * We know that the MPU and SAU don't care about the access type
-     * for our purposes beyond that we don't want to claim to be
-     * an insn fetch, so we arbitrarily call this a read.
-     */
-
-    /*
-     * MPU region info only available for privileged or if
-     * inspecting the other MPU state.
-     */
-    if (arm_current_el(env) != 0 || alt) {
-        /* We can ignore the return value as prot is always set */
-        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
-                          &phys_addr, &attrs, &prot, &is_subpage,
-                          &fi, &mregion);
-        if (mregion == -1) {
-            mrvalid = false;
-            mregion = 0;
-        } else {
-            mrvalid = true;
-        }
-        r = prot & PAGE_READ;
-        rw = prot & PAGE_WRITE;
-    } else {
-        r = false;
-        rw = false;
-        mrvalid = false;
-        mregion = 0;
-    }
-
-    if (env->v7m.secure) {
-        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
-        nsr = sattrs.ns && r;
-        nsrw = sattrs.ns && rw;
-    } else {
-        sattrs.ns = true;
-        nsr = false;
-        nsrw = false;
-    }
-
-    tt_resp = (sattrs.iregion << 24) |
-        (sattrs.irvalid << 23) |
-        ((!sattrs.ns) << 22) |
-        (nsrw << 21) |
-        (nsr << 20) |
-        (rw << 19) |
-        (r << 18) |
-        (sattrs.srvalid << 17) |
-        (mrvalid << 16) |
-        (sattrs.sregion << 8) |
-        mregion;
-
-    return tt_resp;
-}
-
 #endif

 /* Note that signed overflow is undefined in C. The following routines are
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
     return 0;
 }

-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
-                              bool secstate, bool priv, bool negpri)
-{
-    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
-
-    if (priv) {
-        mmu_idx |= ARM_MMU_IDX_M_PRIV;
-    }
-
-    if (negpri) {
-        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
-    }
-
-    if (secstate) {
-        mmu_idx |= ARM_MMU_IDX_M_S;
-    }
-
-    return mmu_idx;
-}
-
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
-                                                bool secstate, bool priv)
-{
-    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
-
-    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
-}
-
-/* Return the MMU index for a v7M CPU in the specified security state */
+#ifndef CONFIG_TCG
 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
 {
-    bool priv = arm_current_el(env) != 0;
-
-    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
+    g_assert_not_reached();
 }
+#endif

 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
 {
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * ARM generic helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "target/arm/idau.h"
+#include "trace.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "sysemu/sysemu.h"
+#include "qemu/bitops.h"
+#include "qemu/crc32c.h"
+#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
+#include <zlib.h> /* For crc32 */
+#include "hw/semihosting/semihost.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "qemu/range.h"
+#include "qapi/qapi-commands-target.h"
+#include "qapi/error.h"
+#include "qemu/guest-random.h"
+#ifdef CONFIG_TCG
+#include "arm_ldst.h"
+#include "exec/cpu_ldst.h"
+#endif
+
+#ifdef CONFIG_USER_ONLY
+
+/* These should probably raise undefined insn exceptions. */
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
+{
+    ARMCPU *cpu = env_archcpu(env);
+
+    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
+}
+
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
+{
+    ARMCPU *cpu = env_archcpu(env);
+
+    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
+    return 0;
+}
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+    /*
+     * The TT instructions can be used by unprivileged code, but in
+     * user-only emulation we don't have the MPU.
+     * Luckily since we know we are NonSecure unprivileged (and that in
+     * turn means that the A flag wasn't specified), all the bits in the
+     * register must be zero:
+     *  IREGION: 0 because IRVALID is 0
+     *  IRVALID: 0 because NS
+     *  S: 0 because NS
+     *  NSRW: 0 because NS
+     *  NSR: 0 because NS
+     *  RW: 0 because unpriv and A flag not set
+     *  R: 0 because unpriv and A flag not set
+     *  SRVALID: 0 because NS
+     *  MRVALID: 0 because unpriv and A flag not set
+     *  SREGION: 0 because SRVALID is 0
+     *  MREGION: 0 because MRVALID is 0
+     */
+    return 0;
+}
+
+#else
+
+/*
+ * What kind of stack write are we doing? This affects how exceptions
+ * generated during the stacking are treated.
+ */
+typedef enum StackingMode {
+    STACK_NORMAL,
+    STACK_IGNFAULTS,
+    STACK_LAZYFP,
+} StackingMode;
+
+static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
+                            ARMMMUIdx mmu_idx, StackingMode mode)
+{
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+    ARMMMUFaultInfo fi = {};
+    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
+    int exc;
+    bool exc_secure;
+
+    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
+                      &attrs, &prot, &page_size, &fi, NULL)) {
+        /* MPU/SAU lookup failed */
+        if (fi.type == ARMFault_QEMU_SFault) {
+            if (mode == STACK_LAZYFP) {
+                qemu_log_mask(CPU_LOG_INT,
+                              "...SecureFault with SFSR.LSPERR "
+                              "during lazy stacking\n");
+                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
+            } else {
+                qemu_log_mask(CPU_LOG_INT,
+                              "...SecureFault with SFSR.AUVIOL "
+                              "during stacking\n");
+                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+            }
+            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
+            env->v7m.sfar = addr;
+            exc = ARMV7M_EXCP_SECURE;
+            exc_secure = false;
+        } else {
+            if (mode == STACK_LAZYFP) {
+                qemu_log_mask(CPU_LOG_INT,
+                              "...MemManageFault with CFSR.MLSPERR\n");
+                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
+            } else {
+                qemu_log_mask(CPU_LOG_INT,
+                              "...MemManageFault with CFSR.MSTKERR\n");
+                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
+            }
+            exc = ARMV7M_EXCP_MEM;
+            exc_secure = secure;
+        }
+        goto pend_fault;
+ }
71
+ }
2896
+ address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
72
+ if (new_a64) {
2897
+ attrs, &txres);
73
+ new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
2898
+ if (txres != MEMTX_OK) {
2899
+ /* BusFault trying to write the data */
2900
+ if (mode == STACK_LAZYFP) {
2901
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
2902
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
2903
+ } else {
2904
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
2905
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
2906
+ }
2907
+ exc = ARMV7M_EXCP_BUS;
2908
+ exc_secure = false;
2909
+ goto pend_fault;
2910
+ }
74
+ }
2911
+ return true;
75
2912
+
76
/* When changing vector length, clear inaccessible state. */
2913
+pend_fault:
77
if (new_len < old_len) {
2914
+ /*
2915
+ * By pending the exception at this point we are making
2916
+ * the IMPDEF choice "overridden exceptions pended" (see the
2917
+ * MergeExcInfo() pseudocode). The other choice would be to not
2918
+ * pend them now and then make a choice about which to throw away
2919
+ * later if we have two derived exceptions.
2920
+ * The only case when we must not pend the exception but instead
2921
+ * throw it away is if we are doing the push of the callee registers
2922
+ * and we've already generated a derived exception (this is indicated
2923
+ * by the caller passing STACK_IGNFAULTS). Even in this case we will
2924
+ * still update the fault status registers.
2925
+ */
2926
+ switch (mode) {
2927
+ case STACK_NORMAL:
2928
+ armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
2929
+ break;
2930
+ case STACK_LAZYFP:
2931
+ armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
2932
+ break;
2933
+ case STACK_IGNFAULTS:
2934
+ break;
2935
+ }
2936
+ return false;
2937
+}
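+
+/*
+ * Callers chain v7m_stack_write() calls with &&, so the first failing
+ * store both pends the derived exception above and short-circuits the
+ * remaining stores, e.g. (from v7m_preserve_fp_state() below):
+ *   stacked_ok = stacked_ok &&
+ *       v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
+ *       v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
+ */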
+
+static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
+                           ARMMMUIdx mmu_idx)
+{
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+    ARMMMUFaultInfo fi = {};
+    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
+    int exc;
+    bool exc_secure;
+    uint32_t value;
+
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
+                      &attrs, &prot, &page_size, &fi, NULL)) {
+        /* MPU/SAU lookup failed */
+        if (fi.type == ARMFault_QEMU_SFault) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...SecureFault with SFSR.AUVIOL during unstack\n");
+            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+            env->v7m.sfar = addr;
+            exc = ARMV7M_EXCP_SECURE;
+            exc_secure = false;
+        } else {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...MemManageFault with CFSR.MUNSTKERR\n");
+            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
+            exc = ARMV7M_EXCP_MEM;
+            exc_secure = secure;
+        }
+        goto pend_fault;
+    }
+
+    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
+                              attrs, &txres);
+    if (txres != MEMTX_OK) {
+        /* BusFault trying to read the data */
+        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
+        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
+        exc = ARMV7M_EXCP_BUS;
+        exc_secure = false;
+        goto pend_fault;
+    }
+
+    *dest = value;
+    return true;
+
+pend_fault:
+    /*
+     * By pending the exception at this point we are making
+     * the IMPDEF choice "overridden exceptions pended" (see the
+     * MergeExcInfo() pseudocode). The other choice would be to not
+     * pend them now and then make a choice about which to throw away
+     * later if we have two derived exceptions.
+     */
+    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
+    return false;
+}
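+
+/*
+ * v7m_stack_read() is the unstacking counterpart: do_v7m_exception_exit()
+ * below chains it the same way when popping the exception frame, and a
+ * failed read is then taken as a tail-chained derived exception on the
+ * same stack frame.
+ */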
+
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
+{
+    /*
+     * Preserve FP state (because LSPACT was set and we are about
+     * to execute an FP instruction). This corresponds to the
+     * PreserveFPState() pseudocode.
+     * We may throw an exception if the stacking fails.
+     */
+    ARMCPU *cpu = env_archcpu(env);
+    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
+    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
+    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
+    uint32_t fpcar = env->v7m.fpcar[is_secure];
+    bool stacked_ok = true;
+    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
+    bool take_exception;
+
+    /* Take the iothread lock as we are going to touch the NVIC */
+    qemu_mutex_lock_iothread();
+
+    /* Check the background context had access to the FPU */
+    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
+        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
+        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
+        stacked_ok = false;
+    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
+        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+        stacked_ok = false;
+    }
+
+    if (!splimviol && stacked_ok) {
+        /* We only stack if the stack limit wasn't violated */
+        int i;
+        ARMMMUIdx mmu_idx;
+
+        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
+        for (i = 0; i < (ts ? 32 : 16); i += 2) {
+            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+            uint32_t faddr = fpcar + 4 * i;
+            uint32_t slo = extract64(dn, 0, 32);
+            uint32_t shi = extract64(dn, 32, 32);
+
+            if (i >= 16) {
+                faddr += 8; /* skip the slot for the FPSCR */
+            }
+            stacked_ok = stacked_ok &&
+                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
+                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
+        }
+
+        stacked_ok = stacked_ok &&
+            v7m_stack_write(cpu, fpcar + 0x40,
+                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
+    }
+
+    /*
+     * We definitely pended an exception, but it's possible that it
+     * might not be able to be taken now. If its priority permits us
+     * to take it now, then we must not update the LSPACT or FP regs,
+     * but instead jump out to take the exception immediately.
+     * If it's just pending and won't be taken until the current
+     * handler exits, then we do update LSPACT and the FP regs.
+     */
+    take_exception = !stacked_ok &&
+        armv7m_nvic_can_take_pending_exception(env->nvic);
+
+    qemu_mutex_unlock_iothread();
+
+    if (take_exception) {
+        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
+    }
+
+    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
+
+    if (ts) {
+        /* Clear s0 to s31 and the FPSCR */
+        int i;
+
+        for (i = 0; i < 32; i += 2) {
+            *aa32_vfp_dreg(env, i / 2) = 0;
+        }
+        vfp_set_fpscr(env, 0);
+    }
+    /*
+     * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
+     * unchanged.
+     */
+}
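+
+/*
+ * Lazy state preservation in practice: an exception is taken while
+ * FPCCR.LSPEN is set, so only FPCAR/FPCCR are updated at entry time
+ * (see v7m_update_fpccr() below); the helper above then runs when the
+ * handler executes its first FP instruction while LSPACT is still set.
+ */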
+
+/*
+ * Write to v7M CONTROL.SPSEL bit for the specified security bank.
+ * This may change the current stack pointer between Main and Process
+ * stack pointers if it is done for the CONTROL register for the current
+ * security state.
+ */
+static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
+                                                 bool new_spsel,
+                                                 bool secstate)
+{
+    bool old_is_psp = v7m_using_psp(env);
+
+    env->v7m.control[secstate] =
+        deposit32(env->v7m.control[secstate],
+                  R_V7M_CONTROL_SPSEL_SHIFT,
+                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
+
+    if (secstate == env->v7m.secure) {
+        bool new_is_psp = v7m_using_psp(env);
+        uint32_t tmp;
+
+        if (old_is_psp != new_is_psp) {
+            tmp = env->v7m.other_sp;
+            env->v7m.other_sp = env->regs[13];
+            env->regs[13] = tmp;
+        }
+    }
+}
+
+/*
+ * Write to v7M CONTROL.SPSEL bit. This may change the current
+ * stack pointer between Main and Process stack pointers.
+ */
+static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
+{
+    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
+}
+
+void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
+{
+    /*
+     * Write a new value to v7m.exception, thus transitioning into or out
+     * of Handler mode; this may result in a change of active stack pointer.
+     */
+    bool new_is_psp, old_is_psp = v7m_using_psp(env);
+    uint32_t tmp;
+
+    env->v7m.exception = new_exc;
+
+    new_is_psp = v7m_using_psp(env);
+
+    if (old_is_psp != new_is_psp) {
+        tmp = env->v7m.other_sp;
+        env->v7m.other_sp = env->regs[13];
+        env->regs[13] = tmp;
+    }
+}
+
+/* Switch M profile security state between NS and S */
+static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
+{
+    uint32_t new_ss_msp, new_ss_psp;
+
+    if (env->v7m.secure == new_secstate) {
+        return;
+    }
+
+    /*
+     * All the banked state is accessed by looking at env->v7m.secure
+     * except for the stack pointer; rearrange the SP appropriately.
+     */
+    new_ss_msp = env->v7m.other_ss_msp;
+    new_ss_psp = env->v7m.other_ss_psp;
+
+    if (v7m_using_psp(env)) {
+        env->v7m.other_ss_psp = env->regs[13];
+        env->v7m.other_ss_msp = env->v7m.other_sp;
+    } else {
+        env->v7m.other_ss_msp = env->regs[13];
+        env->v7m.other_ss_psp = env->v7m.other_sp;
+    }
+
+    env->v7m.secure = new_secstate;
+
+    if (v7m_using_psp(env)) {
+        env->regs[13] = new_ss_psp;
+        env->v7m.other_sp = new_ss_msp;
+    } else {
+        env->regs[13] = new_ss_msp;
+        env->v7m.other_sp = new_ss_psp;
+    }
+}
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+    /*
+     * Handle v7M BXNS:
+     *  - if the return value is a magic value, do exception return (like BX)
+     *  - otherwise bit 0 of the return value is the target security state
+     */
+    uint32_t min_magic;
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        /* Covers FNC_RETURN and EXC_RETURN magic */
+        min_magic = FNC_RETURN_MIN_MAGIC;
+    } else {
+        /* EXC_RETURN magic only */
+        min_magic = EXC_RETURN_MIN_MAGIC;
+    }
+
+    if (dest >= min_magic) {
+        /*
+         * This is an exception return magic value; put it where
+         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
+         * Note that if we ever add gen_ss_advance() singlestep support to
+         * M profile this should count as an "instruction execution complete"
+         * event (compare gen_bx_excret_final_code()).
+         */
+        env->regs[15] = dest & ~1;
+        env->thumb = dest & 1;
+        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
+        /* notreached */
+    }
+
+    /* translate.c should have made BXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    if (!(dest & 1)) {
+        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    }
+    switch_v7m_security_state(env, dest & 1);
+    env->thumb = 1;
+    env->regs[15] = dest & ~1;
+}
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /*
+     * Handle v7M BLXNS:
+     *  - bit 0 of the destination address is the target security state
+     */
+
+    /* At this point regs[15] is the address just after the BLXNS */
+    uint32_t nextinst = env->regs[15] | 1;
+    uint32_t sp = env->regs[13] - 8;
+    uint32_t saved_psr;
+
+    /* translate.c will have made BLXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    if (dest & 1) {
+        /*
+         * Target is Secure, so this is just a normal BLX,
+         * except that the low bit doesn't indicate Thumb/not.
+         */
+        env->regs[14] = nextinst;
+        env->thumb = 1;
+        env->regs[15] = dest & ~1;
+        return;
+    }
+
+    /* Target is non-secure: first push a stack frame */
+    if (!QEMU_IS_ALIGNED(sp, 8)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
+    }
+
+    if (sp < v7m_sp_limit(env)) {
+        raise_exception(env, EXCP_STKOF, 0, 1);
+    }
+
+    saved_psr = env->v7m.exception;
+    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+        saved_psr |= XPSR_SFPA;
+    }
+
+    /* Note that these stores can throw exceptions on MPU faults */
+    cpu_stl_data(env, sp, nextinst);
+    cpu_stl_data(env, sp + 4, saved_psr);
+
+    env->regs[13] = sp;
+    env->regs[14] = 0xfeffffff;
+    if (arm_v7m_is_handler_mode(env)) {
+        /*
+         * Write a dummy value to IPSR, to avoid leaking the current secure
+         * exception number to non-secure code. This is guaranteed not
+         * to cause write_v7m_exception() to actually change stacks.
+         */
+        write_v7m_exception(env, 1);
+    }
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    switch_v7m_security_state(env, 0);
+    env->thumb = 1;
+    env->regs[15] = dest;
+}
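+
+/*
+ * The two-word frame pushed by BLXNS above is therefore:
+ *   sp + 4: partial saved PSR (IPSR, plus SFPA if it was set)
+ *   sp + 0: return address with bit 0 set (Thumb)
+ * and LR is loaded with 0xfeffffff, which falls in the FNC_RETURN magic
+ * range that v7m_bxns() recognises for the eventual function return.
+ */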
+
+static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
+                                bool spsel)
+{
+    /*
+     * Return a pointer to the location where we currently store the
+     * stack pointer for the requested security state and thread mode.
+     * This pointer will become invalid if the CPU state is updated
+     * such that the stack pointers are switched around (eg changing
+     * the SPSEL control bit).
+     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
+     * Unlike that pseudocode, we require the caller to pass us in the
+     * SPSEL control bit value; this is because we also use this
+     * function in handling of pushing of the callee-saves registers
+     * part of the v8M stack frame (pseudocode PushCalleeStack()),
+     * and in the tailchain codepath the SPSEL bit comes from the exception
+     * return magic LR value from the previous exception. The pseudocode
+     * opencodes the stack-selection in PushCalleeStack(), but we prefer
+     * to make this utility function generic enough to do the job.
+     */
+    bool want_psp = threadmode && spsel;
+
+    if (secure == env->v7m.secure) {
+        if (want_psp == v7m_using_psp(env)) {
+            return &env->regs[13];
+        } else {
+            return &env->v7m.other_sp;
+        }
+    } else {
+        if (want_psp) {
+            return &env->v7m.other_ss_psp;
+        } else {
+            return &env->v7m.other_ss_msp;
+        }
+    }
+}
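+
+/*
+ * For example, get_v7m_sp_ptr(env, true, true, true) with the CPU
+ * currently Secure and using PSP returns &env->regs[13] (the live SP),
+ * while the same query made from the NonSecure state returns
+ * &env->v7m.other_ss_psp.
+ */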
+
+static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
+                                uint32_t *pvec)
+{
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxResult result;
+    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
+    uint32_t vector_entry;
+    MemTxAttrs attrs = {};
+    ARMMMUIdx mmu_idx;
+    bool exc_secure;
+
+    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
+
+    /*
+     * We don't do a get_phys_addr() here because the rules for vector
+     * loads are special: they always use the default memory map, and
+     * the default memory map permits reads from all addresses.
+     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
+     * that we want this special case which would always say "yes",
+     * we just do the SAU lookup here followed by a direct physical load.
+     */
+    attrs.secure = targets_secure;
+    attrs.user = false;
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        V8M_SAttributes sattrs = {};
+
+        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+        if (sattrs.ns) {
+            attrs.secure = false;
+        } else if (!targets_secure) {
+            /* NS access to S memory */
+            goto load_fail;
+        }
+    }
+
+    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
+                                     attrs, &result);
+    if (result != MEMTX_OK) {
+        goto load_fail;
+    }
+    *pvec = vector_entry;
+    return true;
+
+load_fail:
+    /*
+     * All vector table fetch fails are reported as HardFault, with
+     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
+     * technically the underlying exception is a MemManage or BusFault
+     * that is escalated to HardFault.) This is a terminal exception,
+     * so we will either take the HardFault immediately or else enter
+     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
+     */
+    exc_secure = targets_secure ||
+        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
+    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
+    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
+    return false;
+}
+
+static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
+{
+    /*
+     * Return the integrity signature value for the callee-saves
+     * stack frame section. @lr is the exception return payload/LR value
+     * whose FType bit forms bit 0 of the signature if FP is present.
+     */
+    uint32_t sig = 0xfefa125a;
+
+    if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
+        sig |= 1;
+    }
+    return sig;
+}
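+
+/*
+ * Concretely this yields 0xfefa125a when the stacked frame also covers
+ * FP state (FType 0 with VFP present) and 0xfefa125b otherwise;
+ * do_v7m_exception_exit() checks the popped word against this same
+ * function before trusting the rest of the callee-saves frame.
+ */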
+
+static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+                                  bool ignore_faults)
+{
+    /*
+     * For v8M, push the callee-saves register part of the stack frame.
+     * Compare the v8M pseudocode PushCalleeStack().
+     * In the tailchaining case this may not be the current stack.
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t *frame_sp_p;
+    uint32_t frameptr;
+    ARMMMUIdx mmu_idx;
+    bool stacked_ok;
+    uint32_t limit;
+    bool want_psp;
+    uint32_t sig;
+    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
+
+    if (dotailchain) {
+        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
+        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
+            !mode;
+
+        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
+        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
+                                    lr & R_V7M_EXCRET_SPSEL_MASK);
+        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
+        if (want_psp) {
+            limit = env->v7m.psplim[M_REG_S];
+        } else {
+            limit = env->v7m.msplim[M_REG_S];
+        }
+    } else {
+        mmu_idx = arm_mmu_idx(env);
+        frame_sp_p = &env->regs[13];
+        limit = v7m_sp_limit(env);
+    }
+
+    frameptr = *frame_sp_p - 0x28;
+    if (frameptr < limit) {
+        /*
+         * Stack limit failure: set SP to the limit value, and generate
+         * STKOF UsageFault. Stack pushes below the limit must not be
+         * performed. It is IMPDEF whether pushes above the limit are
+         * performed; we choose not to.
+         */
+        qemu_log_mask(CPU_LOG_INT,
+                      "...STKOF during callee-saves register stacking\n");
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                env->v7m.secure);
+        *frame_sp_p = limit;
+        return true;
+    }
+
+    /*
+     * Write as much of the stack frame as we can. A write failure may
+     * cause us to pend a derived exception.
+     */
+    sig = v7m_integrity_sig(env, lr);
+    stacked_ok =
+        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
+
+    /* Update SP regardless of whether any of the stack accesses failed. */
+    *frame_sp_p = frameptr;
+
+    return !stacked_ok;
+}
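+
+/*
+ * Layout of the 0x28-byte callee-saves frame written above:
+ *   +0x00 integrity signature    +0x08..+0x24 r4-r11
+ * (offset +0x04, the reserved word of the v8M callee-saves frame,
+ * is deliberately not written).
+ */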
+
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+                                bool ignore_stackfaults)
+{
+    /*
+     * Do the "take the exception" parts of exception entry,
+     * but not the pushing of state to the stack. This is
+     * similar to the pseudocode ExceptionTaken() function.
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t addr;
+    bool targets_secure;
+    int exc;
+    bool push_failed = false;
+
+    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
+    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
+                  targets_secure ? "secure" : "nonsecure", exc);
+
+    if (dotailchain) {
+        /* Sanitize LR FType and PREFIX bits */
+        if (!arm_feature(env, ARM_FEATURE_VFP)) {
+            lr |= R_V7M_EXCRET_FTYPE_MASK;
+        }
+        lr = deposit32(lr, 24, 8, 0xff);
+    }
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+            (lr & R_V7M_EXCRET_S_MASK)) {
+            /*
+             * The background code (the owner of the registers in the
+             * exception frame) is Secure. This means it may either already
+             * have or now needs to push callee-saves registers.
+             */
+            if (targets_secure) {
+                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
+                    /*
+                     * We took an exception from Secure to NonSecure
+                     * (which means the callee-saved registers got stacked)
+                     * and are now tailchaining to a Secure exception.
+                     * Clear DCRS so eventual return from this Secure
+                     * exception unstacks the callee-saved registers.
+                     */
+                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
+                }
+            } else {
+                /*
+                 * We're going to a non-secure exception; push the
+                 * callee-saves registers to the stack now, if they're
+                 * not already saved.
+                 */
+                if (lr & R_V7M_EXCRET_DCRS_MASK &&
+                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
+                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
+                                                        ignore_stackfaults);
+                }
+                lr |= R_V7M_EXCRET_DCRS_MASK;
+            }
+        }
+
+        lr &= ~R_V7M_EXCRET_ES_MASK;
+        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            lr |= R_V7M_EXCRET_ES_MASK;
+        }
+        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
+        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
+            lr |= R_V7M_EXCRET_SPSEL_MASK;
+        }
+
+        /*
+         * Clear registers if necessary to prevent non-secure exception
+         * code being able to see register values from secure code.
+         * Where register values become architecturally UNKNOWN we leave
+         * them with their previous values.
+         */
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            if (!targets_secure) {
+                /*
+                 * Always clear the caller-saved registers (they have been
+                 * pushed to the stack earlier in v7m_push_stack()).
+                 * Clear callee-saved registers if the background code is
+                 * Secure (in which case these regs were saved in
+                 * v7m_push_callee_stack()).
+                 */
+                int i;
+
+                for (i = 0; i < 13; i++) {
+                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
+                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
+                        env->regs[i] = 0;
+                    }
+                }
+                /* Clear EAPSR */
+                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
+            }
+        }
+    }
+
+    if (push_failed && !ignore_stackfaults) {
+        /*
+         * Derived exception on callee-saves register stacking:
+         * we might now want to take a different exception which
+         * targets a different security state, so try again from the top.
+         */
+        qemu_log_mask(CPU_LOG_INT,
+                      "...derived exception on callee-saves register stacking");
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
+    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
+        /* Vector load failed: derived exception */
+        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
+    /*
+     * Now we've done everything that might cause a derived exception
+     * we can go ahead and activate whichever exception we're going to
+     * take (which might now be the derived exception).
+     */
+    armv7m_nvic_acknowledge_irq(env->nvic);
+
+    /* Switch to target security state -- must do this before writing SPSEL */
+    switch_v7m_security_state(env, targets_secure);
+    write_v7m_control_spsel(env, 0);
+    arm_clear_exclusive(env);
+    /* Clear SFPA and FPCA (has no effect if no FPU) */
+    env->v7m.control[M_REG_S] &=
+        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
+    /* Clear IT bits */
+    env->condexec_bits = 0;
+    env->regs[14] = lr;
+    env->regs[15] = addr & 0xfffffffe;
+    env->thumb = addr & 1;
+}
+
+static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
+                             bool apply_splim)
+{
+    /*
+     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
+     * that we will need later in order to do lazy FP reg stacking.
+     */
+    bool is_secure = env->v7m.secure;
+    void *nvic = env->nvic;
+    /*
+     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
+     * are banked and we want to update the bit in the bank for the
+     * current security state; and in one case we want to specifically
+     * update the NS banked version of a bit even if we are secure.
+     */
+    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
+    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
+    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
+    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
+
+    env->v7m.fpcar[is_secure] = frameptr & ~0x7;
+
+    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
+        bool splimviol;
+        uint32_t splim = v7m_sp_limit(env);
+        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
+            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
+
+        splimviol = !ign && frameptr < splim;
+        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
+    }
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
+
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
+                        !arm_v7m_is_handler_mode(env));
+
+    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
+
+    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
+
+    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
+
+    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
+    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
+
+    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
+        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
+
+        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
+        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
+    }
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
+    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
+
+    assert(env->v7m.secure);
+
+    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        return;
+    }
+
+    /* Check access to the coprocessor is permitted */
+    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+    }
+
+    if (lspact) {
+        /* LSPACT should not be active when there is active FP state */
+        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
+    }
+
+    if (fptr & 7) {
+        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+    }
+
+    /*
+     * Note that we do not use v7m_stack_write() here, because the
+     * accesses should not set the FSR bits for stacking errors if they
+     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
+     * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
+     * and longjmp out.
+     */
+    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+        int i;
+
+        for (i = 0; i < (ts ? 32 : 16); i += 2) {
+            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+            uint32_t faddr = fptr + 4 * i;
+            uint32_t slo = extract64(dn, 0, 32);
+            uint32_t shi = extract64(dn, 32, 32);
+
+            if (i >= 16) {
+                faddr += 8; /* skip the slot for the FPSCR */
+            }
+            cpu_stl_data(env, faddr, slo);
+            cpu_stl_data(env, faddr + 4, shi);
+        }
+        cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
+
+        /*
+         * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
+         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
+         */
+        if (ts) {
+            for (i = 0; i < 32; i += 2) {
+                *aa32_vfp_dreg(env, i / 2) = 0;
+            }
+            vfp_set_fpscr(env, 0);
+        }
+    } else {
+        v7m_update_fpccr(env, fptr, false);
+    }
+
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+}
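+
+/*
+ * VLSTM (above) and VLLDM (below) are the save/restore pair: VLSTM
+ * either writes the FP state out through fptr or merely arms lazy
+ * stacking via v7m_update_fpccr(), and VLLDM undoes whichever of the
+ * two actually happened, keyed on FPCCR.LSPACT.
+ */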
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
+    assert(env->v7m.secure);
+
+    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        return;
+    }
+
+    /* Check access to the coprocessor is permitted */
+    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+    }
+
+    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
+        /* State in FP is still valid */
+        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
+    } else {
+        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+        int i;
+        uint32_t fpscr;
+
+        if (fptr & 7) {
+            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+        }
+
+        for (i = 0; i < (ts ? 32 : 16); i += 2) {
+            uint32_t slo, shi;
+            uint64_t dn;
+            uint32_t faddr = fptr + 4 * i;
+
+            if (i >= 16) {
+                faddr += 8; /* skip the slot for the FPSCR */
+            }
+
+            slo = cpu_ldl_data(env, faddr);
+            shi = cpu_ldl_data(env, faddr + 4);
+
+            dn = (uint64_t) shi << 32 | slo;
+            *aa32_vfp_dreg(env, i / 2) = dn;
+        }
+        fpscr = cpu_ldl_data(env, fptr + 0x40);
+        vfp_set_fpscr(env, fpscr);
+    }
+
+    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
+}
+
+static bool v7m_push_stack(ARMCPU *cpu)
+{
+    /*
+     * Do the "set up stack frame" part of exception entry,
+     * similar to pseudocode PushStack().
+     * Return true if we generate a derived exception (and so
+     * should ignore further stack faults trying to process
+     * that derived exception.)
+     */
+    bool stacked_ok = true, limitviol = false;
+    CPUARMState *env = &cpu->env;
+    uint32_t xpsr = xpsr_read(env);
+    uint32_t frameptr = env->regs[13];
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+    uint32_t framesize;
+    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
+
+    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
+        (env->v7m.secure || nsacr_cp10)) {
+        if (env->v7m.secure &&
+            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
+            framesize = 0xa8;
+        } else {
+            framesize = 0x68;
+        }
+    } else {
+        framesize = 0x20;
+    }
+
+    /* Align stack pointer if the guest wants that */
+    if ((frameptr & 4) &&
+        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
+        frameptr -= 4;
+        xpsr |= XPSR_SPREALIGN;
+    }
+
+    xpsr &= ~XPSR_SFPA;
+    if (env->v7m.secure &&
+        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        xpsr |= XPSR_SFPA;
+    }
+
+    frameptr -= framesize;
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        uint32_t limit = v7m_sp_limit(env);
+
+        if (frameptr < limit) {
+            /*
+             * Stack limit failure: set SP to the limit value, and generate
+             * STKOF UsageFault. Stack pushes below the limit must not be
+             * performed. It is IMPDEF whether pushes above the limit are
+             * performed; we choose not to.
+             */
+            qemu_log_mask(CPU_LOG_INT,
+                          "...STKOF during stacking\n");
+            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                    env->v7m.secure);
+            env->regs[13] = limit;
+            /*
+             * We won't try to perform any further memory accesses but
+             * we must continue through the following code to check for
+             * permission faults during FPU state preservation, and we
+             * must update FPCCR if lazy stacking is enabled.
+             */
+            limitviol = true;
+            stacked_ok = false;
+        }
+    }
+
+    /*
+     * Write as much of the stack frame as we can. If we fail a stack
+     * write this will result in a derived exception being pended
+     * (which may be taken in preference to the one we started with
+     * if it has higher priority).
+     */
+    stacked_ok = stacked_ok &&
+        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
+
+    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
+        /* FPU is active, try to save its registers */
+        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
+
+        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...SecureFault because LSPACT and FPCA both set\n");
+            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        } else if (!env->v7m.secure && !nsacr_cp10) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...Secure UsageFault with CFSR.NOCP because "
+                          "NSACR.CP10 prevents stacking FP regs\n");
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+        } else {
+            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+                /* Lazy stacking disabled, save registers now */
+                int i;
+                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
+                                                 arm_current_el(env) != 0);
+
+                if (stacked_ok && !cpacr_pass) {
+                    /*
+                     * Take UsageFault if CPACR forbids access. The pseudocode
+                     * here does a full CheckCPEnabled() but we know the NSACR
+                     * check can never fail as we have already handled that.
+                     */
+                    qemu_log_mask(CPU_LOG_INT,
+                                  "...UsageFault with CFSR.NOCP because "
+                                  "CPACR.CP10 prevents stacking FP regs\n");
+                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                            env->v7m.secure);
+                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
+                    stacked_ok = false;
+                }
+
+                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+                    uint32_t faddr = frameptr + 0x20 + 4 * i;
+                    uint32_t slo = extract64(dn, 0, 32);
+                    uint32_t shi = extract64(dn, 32, 32);
+
+                    if (i >= 16) {
+                        faddr += 8; /* skip the slot for the FPSCR */
+                    }
+                    stacked_ok = stacked_ok &&
+                        v7m_stack_write(cpu, faddr, slo,
+                                        mmu_idx, STACK_NORMAL) &&
+                        v7m_stack_write(cpu, faddr + 4, shi,
+                                        mmu_idx, STACK_NORMAL);
+                }
+                stacked_ok = stacked_ok &&
+                    v7m_stack_write(cpu, frameptr + 0x60,
+                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
+                if (cpacr_pass) {
+                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+                        *aa32_vfp_dreg(env, i / 2) = 0;
+                    }
+                    vfp_set_fpscr(env, 0);
+                }
+            } else {
+                /* Lazy stacking enabled, save necessary info to stack later */
+                v7m_update_fpccr(env, frameptr + 0x20, true);
+            }
+        }
+    }
+
+    /*
+     * If we broke a stack limit then SP was already updated earlier;
+     * otherwise we update SP regardless of whether any of the stack
+     * accesses failed or we took some other kind of fault.
+     */
+    if (!limitviol) {
+        env->regs[13] = frameptr;
+    }
+
+    return !stacked_ok;
+}
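+
+/*
+ * Resulting basic frame layout (framesize 0x20), as written above:
+ *   +0x00 r0   +0x04 r1   +0x08 r2   +0x0c r3
+ *   +0x10 r12  +0x14 lr   +0x18 pc   +0x1c xpsr
+ * With FP state the frame grows to 0x68 bytes (s0-s15 and FPSCR at
+ * +0x20..+0x60), or 0xa8 when FPCCR.TS also stacks s16-s31.
+ */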
3969
+
3970
+static void do_v7m_exception_exit(ARMCPU *cpu)
3971
+{
3972
+ CPUARMState *env = &cpu->env;
3973
+ uint32_t excret;
3974
+ uint32_t xpsr, xpsr_mask;
3975
+ bool ufault = false;
3976
+ bool sfault = false;
3977
+ bool return_to_sp_process;
3978
+ bool return_to_handler;
3979
+ bool rettobase = false;
3980
+ bool exc_secure = false;
3981
+ bool return_to_secure;
3982
+ bool ftype;
3983
+ bool restore_s16_s31;
3984
+
3985
+ /*
3986
+ * If we're not in Handler mode then jumps to magic exception-exit
3987
+ * addresses don't have magic behaviour. However for the v8M
3988
+ * security extensions the magic secure-function-return has to
3989
+ * work in thread mode too, so to avoid doing an extra check in
3990
+ * the generated code we allow exception-exit magic to also cause the
3991
+ * internal exception and bring us here in thread mode. Correct code
3992
+ * will never try to do this (the following insn fetch will always
3993
+ * fault) so we the overhead of having taken an unnecessary exception
3994
+ * doesn't matter.
3995
+ */
3996
+ if (!arm_v7m_is_handler_mode(env)) {
3997
+ return;
3998
+ }
3999
+
4000
+ /*
4001
+ * In the spec pseudocode ExceptionReturn() is called directly
4002
+ * from BXWritePC() and gets the full target PC value including
4003
+ * bit zero. In QEMU's implementation we treat it as a normal
4004
+ * jump-to-register (which is then caught later on), and so split
4005
+ * the target value up between env->regs[15] and env->thumb in
4006
+ * gen_bx(). Reconstitute it.
4007
+ */
4008
+ excret = env->regs[15];
4009
+ if (env->thumb) {
4010
+ excret |= 1;
4011
+ }
4012
+
4013
+ qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
4014
+ " previous exception %d\n",
4015
+ excret, env->v7m.exception);
4016
+
4017
+ if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
4018
+ qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
4019
+ "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
4020
+ excret);
4021
+ }
4022
+
4023
+ ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
4024
+
4025
+ if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
4026
+ qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
4027
+ "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
4028
+ "if FPU not present\n",
4029
+ excret);
4030
+ ftype = true;
4031
+ }
4032
+
4033
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4034
+ /*
4035
+ * EXC_RETURN.ES validation check (R_SMFL). We must do this before
4036
+ * we pick which FAULTMASK to clear.
4037
+ */
4038
+ if (!env->v7m.secure &&
4039
+ ((excret & R_V7M_EXCRET_ES_MASK) ||
4040
+ !(excret & R_V7M_EXCRET_DCRS_MASK))) {
4041
+ sfault = 1;
4042
+ /* For all other purposes, treat ES as 0 (R_HXSR) */
4043
+ excret &= ~R_V7M_EXCRET_ES_MASK;
4044
+ }
4045
+ exc_secure = excret & R_V7M_EXCRET_ES_MASK;
4046
+ }
4047
+
4048
+ if (env->v7m.exception != ARMV7M_EXCP_NMI) {
4049
+ /*
4050
+ * Auto-clear FAULTMASK on return from other than NMI.
4051
+ * If the security extension is implemented then this only
4052
+ * happens if the raw execution priority is >= 0; the
4053
+ * value of the ES bit in the exception return value indicates
4054
+ * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
4055
+ */
4056
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4057
+ if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
4058
+ env->v7m.faultmask[exc_secure] = 0;
4059
+ }
4060
+ } else {
4061
+ env->v7m.faultmask[M_REG_NS] = 0;
4062
+ }
4063
+ }
4064
+
4065
+ switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
4066
+ exc_secure)) {
4067
+ case -1:
4068
+ /* attempt to exit an exception that isn't active */
4069
+ ufault = true;
4070
+ break;
4071
+ case 0:
4072
+ /* still an irq active now */
4073
+ break;
4074
+ case 1:
4075
+ /*
4076
+ * We returned to base exception level, no nesting.
4077
+ * (In the pseudocode this is written using "NestedActivation != 1"
4078
+ * where we have 'rettobase == false'.)
4079
+ */
4080
+ rettobase = true;
4081
+ break;
4082
+ default:
4083
+ g_assert_not_reached();
4084
+ }
4085
+
4086
+ return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
4087
+ return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
4088
+ return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
4089
+ (excret & R_V7M_EXCRET_S_MASK);
4090
+
4091
+ if (arm_feature(env, ARM_FEATURE_V8)) {
4092
+ if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4093
+ /*
4094
+ * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
4095
+ * we choose to take the UsageFault.
4096
+ */
4097
+ if ((excret & R_V7M_EXCRET_S_MASK) ||
4098
+ (excret & R_V7M_EXCRET_ES_MASK) ||
4099
+ !(excret & R_V7M_EXCRET_DCRS_MASK)) {
4100
+ ufault = true;
4101
+ }
4102
+ }
4103
+ if (excret & R_V7M_EXCRET_RES0_MASK) {
4104
+ ufault = true;
4105
+ }
4106
+ } else {
4107
+ /* For v7M we only recognize certain combinations of the low bits */
4108
+ switch (excret & 0xf) {
4109
+ case 1: /* Return to Handler */
4110
+ break;
4111
+ case 13: /* Return to Thread using Process stack */
4112
+ case 9: /* Return to Thread using Main stack */
4113
+ /*
4114
+ * We only need to check NONBASETHRDENA for v7M, because in
4115
+ * v8M this bit does not exist (it is RES1).
4116
+ */
4117
+ if (!rettobase &&
4118
+ !(env->v7m.ccr[env->v7m.secure] &
4119
+ R_V7M_CCR_NONBASETHRDENA_MASK)) {
4120
+ ufault = true;
4121
+ }
4122
+ break;
4123
+ default:
4124
+ ufault = true;
4125
+ }
4126
+ }
4127
+
4128
+ /*
4129
+ * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
4130
+ * Handler mode (and will be until we write the new XPSR.Interrupt
4131
+ * field) this does not switch around the current stack pointer.
4132
+ * We must do this before we do any kind of tailchaining, including
4133
+ * for the derived exceptions on integrity check failures, or we will
4134
+ * give the guest an incorrect EXCRET.SPSEL value on exception entry.
4135
+ */
4136
+ write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
4137
+
4138
+ /*
4139
+ * Clear scratch FP values left in caller saved registers; this
4140
+ * must happen before any kind of tail chaining.
4141
+ */
4142
+ if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
4143
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
4144
+ if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
4145
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
4146
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4147
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
4148
+ "stackframe: error during lazy state deactivation\n");
4149
+ v7m_exception_taken(cpu, excret, true, false);
4150
+ return;
4151
+ } else {
4152
+ /* Clear s0..s15 and FPSCR */
4153
+ int i;
4154
+
4155
+ for (i = 0; i < 16; i += 2) {
4156
+ *aa32_vfp_dreg(env, i / 2) = 0;
4157
+ }
4158
+ vfp_set_fpscr(env, 0);
4159
+ }
4160
+ }
4161
+
4162
+ if (sfault) {
4163
+ env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
4164
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4165
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
4166
+ "stackframe: failed EXC_RETURN.ES validity check\n");
4167
+ v7m_exception_taken(cpu, excret, true, false);
4168
+ return;
4169
+ }
4170
+
4171
+ if (ufault) {
4172
+ /*
4173
+ * Bad exception return: instead of popping the exception
4174
+ * stack, directly take a usage fault on the current stack.
4175
+ */
4176
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
4177
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4178
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
4179
+ "stackframe: failed exception return integrity check\n");
4180
+ v7m_exception_taken(cpu, excret, true, false);
4181
+ return;
4182
+ }
4183
+
4184
+ /*
4185
+ * Tailchaining: if there is currently a pending exception that
4186
+ * is high enough priority to preempt execution at the level we're
4187
+ * about to return to, then just directly take that exception now,
4188
+ * avoiding an unstack-and-then-stack. Note that now we have
4189
+ * deactivated the previous exception by calling armv7m_nvic_complete_irq()
4190
+ * our current execution priority is already the execution priority we are
4191
+ * returning to -- none of the state we would unstack or set based on
4192
+ * the EXCRET value affects it.
4193
+ */
4194
+ if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
4195
+ qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
4196
+ v7m_exception_taken(cpu, excret, true, false);
4197
+ return;
4198
+ }
4199
+
4200
+ switch_v7m_security_state(env, return_to_secure);
4201
+
4202
+ {
4203
+ /*
4204
+ * The stack pointer we should be reading the exception frame from
4205
+ * depends on bits in the magic exception return type value (and
4206
+ * for v8M isn't necessarily the stack pointer we will eventually
4207
+ * end up resuming execution with). Get a pointer to the location
4208
+ * in the CPU state struct where the SP we need is currently being
4209
+ * stored; we will use and modify it in place.
4210
+ * We use this limited C variable scope so we don't accidentally
4211
+ * use 'frame_sp_p' after we do something that makes it invalid.
4212
+ */
4213
+ uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
4214
+ return_to_secure,
4215
+ !return_to_handler,
4216
+ return_to_sp_process);
4217
+ uint32_t frameptr = *frame_sp_p;
4218
+ bool pop_ok = true;
4219
+ ARMMMUIdx mmu_idx;
4220
+ bool return_to_priv = return_to_handler ||
4221
+ !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
4222
+
4223
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
4224
+ return_to_priv);
4225
+
4226
+ if (!QEMU_IS_ALIGNED(frameptr, 8) &&
4227
+ arm_feature(env, ARM_FEATURE_V8)) {
4228
+ qemu_log_mask(LOG_GUEST_ERROR,
4229
+ "M profile exception return with non-8-aligned SP "
4230
+ "for destination state is UNPREDICTABLE\n");
4231
+ }
4232
+
4233
+ /* Do we need to pop callee-saved registers? */
4234
+ if (return_to_secure &&
4235
+ ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
4236
+ (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
4237
+ uint32_t actual_sig;
4238
+
4239
+ pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
4240
+
4241
+ if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
4242
+ /* Take a SecureFault on the current stack */
4243
+ env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
4244
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4245
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
4246
+ "stackframe: failed exception return integrity "
4247
+ "signature check\n");
4248
+ v7m_exception_taken(cpu, excret, true, false);
4249
+ return;
4250
+ }
4251
+
4252
+ pop_ok = pop_ok &&
4253
+ v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
4254
+ v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
4255
+ v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
4256
+ v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
4257
+ v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
4258
+ v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
4259
+ v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
4260
+ v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
4261
+
4262
+ frameptr += 0x28;
4263
+ }
4264
+
4265
+ /* Pop registers */
4266
+ pop_ok = pop_ok &&
4267
+ v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
4268
+ v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
4269
+ v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
4270
+ v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
4271
+ v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
4272
+ v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
4273
+ v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
4274
+ v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
4275
+
4276
+ if (!pop_ok) {
4277
+ /*
4278
+ * v7m_stack_read() pended a fault, so take it (as a tail
4279
+ * chained exception on the same stack frame)
4280
+ */
4281
+ qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
4282
+ v7m_exception_taken(cpu, excret, true, false);
4283
+ return;
4284
+ }
4285
+
4286
+ /*
4287
+ * Returning from an exception with a PC with bit 0 set is defined
4288
+ * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
4289
+ * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
4290
+ * the lsbit, and there are several RTOSes out there which incorrectly
4291
+ * assume the r15 in the stack frame should be a Thumb-style "lsbit
4292
+ * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
4293
+ * complain about the badly behaved guest.
4294
+ */
4295
+ if (env->regs[15] & 1) {
4296
+ env->regs[15] &= ~1U;
4297
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
4298
+ qemu_log_mask(LOG_GUEST_ERROR,
4299
+ "M profile return from interrupt with misaligned "
4300
+ "PC is UNPREDICTABLE on v7M\n");
4301
+ }
4302
+ }
4303
+
4304
+ if (arm_feature(env, ARM_FEATURE_V8)) {
4305
+ /*
4306
+ * For v8M we have to check whether the xPSR exception field
4307
+ * matches the EXCRET value for return to handler/thread
4308
+ * before we commit to changing the SP and xPSR.
4309
+ */
4310
+ bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
4311
+ if (return_to_handler != will_be_handler) {
4312
+ /*
4313
+ * Take an INVPC UsageFault on the current stack.
4314
+ * By this point we will have switched to the security state
4315
+ * for the background state, so this UsageFault will target
4316
+ * that state.
4317
+ */
4318
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
4319
+ env->v7m.secure);
4320
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
4321
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
4322
+ "stackframe: failed exception return integrity "
4323
+ "check\n");
4324
+ v7m_exception_taken(cpu, excret, true, false);
4325
+ return;
4326
+ }
4327
+ }
4328
+
4329
+ if (!ftype) {
4330
+ /* FP present and we need to handle it */
4331
+ if (!return_to_secure &&
4332
+ (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
4333
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4334
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
4335
+ qemu_log_mask(CPU_LOG_INT,
4336
+ "...taking SecureFault on existing stackframe: "
4337
+ "Secure LSPACT set but exception return is "
4338
+ "not to secure state\n");
4339
+ v7m_exception_taken(cpu, excret, true, false);
4340
+ return;
4341
+ }
4342
+
4343
+ restore_s16_s31 = return_to_secure &&
4344
+ (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
4345
+
4346
+ if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
4347
+ /* State in FPU is still valid, just clear LSPACT */
4348
+ env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
4349
+ } else {
4350
+ int i;
4351
+ uint32_t fpscr;
4352
+ bool cpacr_pass, nsacr_pass;
4353
+
4354
+ cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
4355
+ return_to_priv);
4356
+ nsacr_pass = return_to_secure ||
4357
+ extract32(env->v7m.nsacr, 10, 1);
4358
+
4359
+ if (!cpacr_pass) {
4360
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
4361
+ return_to_secure);
4362
+ env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
4363
+ qemu_log_mask(CPU_LOG_INT,
4364
+ "...taking UsageFault on existing "
4365
+ "stackframe: CPACR.CP10 prevents unstacking "
4366
+ "FP regs\n");
4367
+ v7m_exception_taken(cpu, excret, true, false);
4368
+ return;
4369
+ } else if (!nsacr_pass) {
4370
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
4371
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
4372
+ qemu_log_mask(CPU_LOG_INT,
4373
+ "...taking Secure UsageFault on existing "
4374
+ "stackframe: NSACR.CP10 prevents unstacking "
4375
+ "FP regs\n");
4376
+ v7m_exception_taken(cpu, excret, true, false);
4377
+ return;
4378
+ }
4379
+
4380
+ for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
4381
+ uint32_t slo, shi;
4382
+ uint64_t dn;
4383
+ uint32_t faddr = frameptr + 0x20 + 4 * i;
4384
+
4385
+ if (i >= 16) {
4386
+ faddr += 8; /* Skip the slot for the FPSCR */
4387
+ }
4388
+
4389
+ pop_ok = pop_ok &&
4390
+ v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
4391
+ v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
4392
+
4393
+ if (!pop_ok) {
4394
+ break;
4395
+ }
4396
+
4397
+ dn = (uint64_t)shi << 32 | slo;
4398
+ *aa32_vfp_dreg(env, i / 2) = dn;
4399
+ }
4400
+ pop_ok = pop_ok &&
4401
+ v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
4402
+ if (pop_ok) {
4403
+ vfp_set_fpscr(env, fpscr);
4404
+ }
4405
+ if (!pop_ok) {
4406
+ /*
4407
+ * These regs are 0 if the security extension is present;
+ * otherwise they are merely UNKNOWN. We always zero them.
4409
+ */
4410
+ for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
4411
+ *aa32_vfp_dreg(env, i / 2) = 0;
4412
+ }
4413
+ vfp_set_fpscr(env, 0);
4414
+ }
4415
+ }
4416
+ }
4417
+ env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
4418
+ V7M_CONTROL, FPCA, !ftype);
4419
+
4420
+ /* Commit to consuming the stack frame */
4421
+ frameptr += 0x20;
4422
+ if (!ftype) {
4423
+ frameptr += 0x48;
4424
+ if (restore_s16_s31) {
4425
+ frameptr += 0x40;
4426
+ }
4427
+ }
4428
+ /*
4429
+ * Undo stack alignment (the SPREALIGN bit indicates that the original
4430
+ * pre-exception SP was not 8-aligned and we added a padding word to
4431
+ * align it, so we undo this by ORing in the bit that increases it
4432
+ * from the current 8-aligned value to the 8-unaligned value. (Adding 4
4433
+ * would work too but a logical OR is how the pseudocode specifies it.)
4434
+ */
4435
+ if (xpsr & XPSR_SPREALIGN) {
4436
+ frameptr |= 4;
4437
+ }
4438
+ *frame_sp_p = frameptr;
4439
+ }
4440
+
4441
+ xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
4442
+ if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
4443
+ xpsr_mask &= ~XPSR_GE;
4444
+ }
4445
+ /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
4446
+ xpsr_write(env, xpsr, xpsr_mask);
4447
+
4448
+ if (env->v7m.secure) {
4449
+ bool sfpa = xpsr & XPSR_SFPA;
4450
+
4451
+ env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
4452
+ V7M_CONTROL, SFPA, sfpa);
4453
+ }
4454
+
4455
+ /*
4456
+ * The restored xPSR exception field will be zero if we're
4457
+ * resuming in Thread mode. If that doesn't match what the
4458
+ * exception return excret specified then this is a UsageFault.
4459
+ * v7M requires we make this check here; v8M did it earlier.
4460
+ */
4461
+ if (return_to_handler != arm_v7m_is_handler_mode(env)) {
4462
+ /*
4463
+ * Take an INVPC UsageFault by pushing the stack again;
4464
+ * we know we're v7M so this is never a Secure UsageFault.
4465
+ */
4466
+ bool ignore_stackfaults;
4467
+
4468
+ assert(!arm_feature(env, ARM_FEATURE_V8));
4469
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
4470
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
4471
+ ignore_stackfaults = v7m_push_stack(cpu);
4472
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
4473
+ "failed exception return integrity check\n");
4474
+ v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
4475
+ return;
4476
+ }
4477
+
4478
+ /* Otherwise, we have a successful exception exit. */
4479
+ arm_clear_exclusive(env);
4480
+ qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
4481
+}
4482
+
4483
+static bool do_v7m_function_return(ARMCPU *cpu)
4484
+{
4485
+ /*
4486
+ * v8M security extensions magic function return.
4487
+ * We may either:
4488
+ * (1) throw an exception (longjump)
4489
+ * (2) return true if we successfully handled the function return
4490
+ * (3) return false if we failed a consistency check and have
4491
+ * pended a UsageFault that needs to be taken now
4492
+ *
4493
+ * At this point the magic return value is split between env->regs[15]
4494
+ * and env->thumb. We don't bother to reconstitute it because we don't
4495
+ * need it (all values are handled the same way).
4496
+ */
4497
+ CPUARMState *env = &cpu->env;
4498
+ uint32_t newpc, newpsr, newpsr_exc;
4499
+
4500
+ qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
4501
+
4502
+ {
4503
+ bool threadmode, spsel;
4504
+ TCGMemOpIdx oi;
4505
+ ARMMMUIdx mmu_idx;
4506
+ uint32_t *frame_sp_p;
4507
+ uint32_t frameptr;
4508
+
4509
+ /* Pull the return address and IPSR from the Secure stack */
4510
+ threadmode = !arm_v7m_is_handler_mode(env);
4511
+ spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
4512
+
4513
+ frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
4514
+ frameptr = *frame_sp_p;
4515
+
4516
+ /*
4517
+ * These loads may throw an exception (for MPU faults). We want to
4518
+ * do them as secure, so work out what MMU index that is.
4519
+ */
4520
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
4521
+ oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
4522
+ newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
4523
+ newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
4524
+
4525
+ /* Consistency checks on new IPSR */
4526
+ newpsr_exc = newpsr & XPSR_EXCP;
4527
+ if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
4528
+ (env->v7m.exception == 1 && newpsr_exc != 0))) {
4529
+ /* Pend the fault and tell our caller to take it */
4530
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
4531
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
4532
+ env->v7m.secure);
4533
+ qemu_log_mask(CPU_LOG_INT,
4534
+ "...taking INVPC UsageFault: "
4535
+ "IPSR consistency check failed\n");
4536
+ return false;
4537
+ }
4538
+
4539
+ *frame_sp_p = frameptr + 8;
4540
+ }
4541
+
4542
+ /* This invalidates frame_sp_p */
4543
+ switch_v7m_security_state(env, true);
4544
+ env->v7m.exception = newpsr_exc;
4545
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
4546
+ if (newpsr & XPSR_SFPA) {
4547
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
4548
+ }
4549
+ xpsr_write(env, 0, XPSR_IT);
4550
+ env->thumb = newpc & 1;
4551
+ env->regs[15] = newpc & ~1;
4552
+
4553
+ qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
4554
+ return true;
4555
+}
4556
+
4557
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
4558
+ uint32_t addr, uint16_t *insn)
4559
+{
4560
+ /*
4561
+ * Load a 16-bit portion of a v7M instruction, returning true on success,
4562
+ * or false on failure (in which case we will have pended the appropriate
4563
+ * exception).
4564
+ * We need to do the instruction fetch's MPU and SAU checks
4565
+ * like this because there is no MMU index that would allow
4566
+ * doing the load with a single function call. Instead we must
4567
+ * first check that the security attributes permit the load
4568
+ * and that they don't mismatch on the two halves of the instruction,
4569
+ * and then we do the load as a secure load (ie using the security
4570
+ * attributes of the address, not the CPU, as architecturally required).
4571
+ */
4572
+ CPUState *cs = CPU(cpu);
4573
+ CPUARMState *env = &cpu->env;
4574
+ V8M_SAttributes sattrs = {};
4575
+ MemTxAttrs attrs = {};
4576
+ ARMMMUFaultInfo fi = {};
4577
+ MemTxResult txres;
4578
+ target_ulong page_size;
4579
+ hwaddr physaddr;
4580
+ int prot;
4581
+
4582
+ v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
4583
+ if (!sattrs.nsc || sattrs.ns) {
4584
+ /*
4585
+ * This must be the second half of the insn, and it straddles a
4586
+ * region boundary with the second half not being S&NSC.
4587
+ */
4588
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
4589
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4590
+ qemu_log_mask(CPU_LOG_INT,
4591
+ "...really SecureFault with SFSR.INVEP\n");
4592
+ return false;
4593
+ }
4594
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
4595
+ &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
4596
+ /* the MPU lookup failed */
4597
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
4598
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
4599
+ qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
4600
+ return false;
4601
+ }
4602
+ *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
4603
+ attrs, &txres);
4604
+ if (txres != MEMTX_OK) {
4605
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
4606
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
4607
+ qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
4608
+ return false;
4609
+ }
4610
+ return true;
4611
+}
4612
+
4613
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
4614
+{
4615
+ /*
4616
+ * Check whether this attempt to execute code in a Secure & NS-Callable
4617
+ * memory region is for an SG instruction; if so, then emulate the
4618
+ * effect of the SG instruction and return true. Otherwise pend
4619
+ * the correct kind of exception and return false.
4620
+ */
4621
+ CPUARMState *env = &cpu->env;
4622
+ ARMMMUIdx mmu_idx;
4623
+ uint16_t insn;
4624
+
4625
+ /*
4626
+ * We should never get here unless get_phys_addr_pmsav8() caused
4627
+ * an exception for NS executing in S&NSC memory.
4628
+ */
4629
+ assert(!env->v7m.secure);
4630
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
4631
+
4632
+ /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
4633
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
4634
+
4635
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
4636
+ return false;
4637
+ }
4638
+
4639
+ if (!env->thumb) {
4640
+ goto gen_invep;
4641
+ }
4642
+
4643
+ if (insn != 0xe97f) {
4644
+ /*
4645
+ * Not an SG instruction first half (we choose the IMPDEF
4646
+ * early-SG-check option).
4647
+ */
4648
+ goto gen_invep;
4649
+ }
4650
+
4651
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
4652
+ return false;
4653
+ }
4654
+
4655
+ if (insn != 0xe97f) {
4656
+ /*
4657
+ * Not an SG instruction second half (yes, both halves of the SG
4658
+ * insn have the same hex value)
4659
+ */
4660
+ goto gen_invep;
4661
+ }
4662
+
4663
+ /*
4664
+ * OK, we have confirmed that we really have an SG instruction.
4665
+ * We know we're NS in S memory so don't need to repeat those checks.
4666
+ */
4667
+ qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
4668
+ ", executing it\n", env->regs[15]);
4669
+ env->regs[14] &= ~1;
4670
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
4671
+ switch_v7m_security_state(env, true);
4672
+ xpsr_write(env, 0, XPSR_IT);
4673
+ env->regs[15] += 4;
4674
+ return true;
4675
+
4676
+gen_invep:
4677
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
4678
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4679
+ qemu_log_mask(CPU_LOG_INT,
4680
+ "...really SecureFault with SFSR.INVEP\n");
4681
+ return false;
4682
+}
4683
+
4684
+void arm_v7m_cpu_do_interrupt(CPUState *cs)
4685
+{
4686
+ ARMCPU *cpu = ARM_CPU(cs);
4687
+ CPUARMState *env = &cpu->env;
4688
+ uint32_t lr;
4689
+ bool ignore_stackfaults;
4690
+
4691
+ arm_log_exception(cs->exception_index);
4692
+
4693
+ /*
4694
+ * For exceptions we just mark as pending on the NVIC, and let that
4695
+ * handle it.
4696
+ */
4697
+ switch (cs->exception_index) {
4698
+ case EXCP_UDEF:
4699
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4700
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
4701
+ break;
4702
+ case EXCP_NOCP:
4703
+ {
4704
+ /*
4705
+ * NOCP might be directed to something other than the current
4706
+ * security state if this fault is because of NSACR; we indicate
4707
+ * the target security state using exception.target_el.
4708
+ */
4709
+ int target_secstate;
4710
+
4711
+ if (env->exception.target_el == 3) {
4712
+ target_secstate = M_REG_S;
4713
+ } else {
4714
+ target_secstate = env->v7m.secure;
4715
+ }
4716
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
4717
+ env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
4718
+ break;
4719
+ }
4720
+ case EXCP_INVSTATE:
4721
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4722
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
4723
+ break;
4724
+ case EXCP_STKOF:
4725
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4726
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
4727
+ break;
4728
+ case EXCP_LSERR:
4729
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4730
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
4731
+ break;
4732
+ case EXCP_UNALIGNED:
4733
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4734
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
4735
+ break;
4736
+ case EXCP_SWI:
4737
+ /* The PC already points to the next instruction. */
4738
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
4739
+ break;
4740
+ case EXCP_PREFETCH_ABORT:
4741
+ case EXCP_DATA_ABORT:
4742
+ /*
4743
+ * Note that for M profile we don't have a guest facing FSR, but
4744
+ * the env->exception.fsr will be populated by the code that
4745
+ * raises the fault, in the A profile short-descriptor format.
4746
+ */
4747
+ switch (env->exception.fsr & 0xf) {
4748
+ case M_FAKE_FSR_NSC_EXEC:
4749
+ /*
4750
+ * Exception generated when we try to execute code at an address
4751
+ * which is marked as Secure & Non-Secure Callable and the CPU
4752
+ * is in the Non-Secure state. The only instruction which can
4753
+ * be executed like this is SG (and that only if both halves of
4754
+ * the SG instruction have the same security attributes.)
4755
+ * Everything else must generate an INVEP SecureFault, so we
4756
+ * emulate the SG instruction here.
4757
+ */
4758
+ if (v7m_handle_execute_nsc(cpu)) {
4759
+ return;
4760
+ }
4761
+ break;
4762
+ case M_FAKE_FSR_SFAULT:
4763
+ /*
4764
+ * Various flavours of SecureFault for attempts to execute or
4765
+ * access data in the wrong security state.
4766
+ */
4767
+ switch (cs->exception_index) {
4768
+ case EXCP_PREFETCH_ABORT:
4769
+ if (env->v7m.secure) {
4770
+ env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
4771
+ qemu_log_mask(CPU_LOG_INT,
4772
+ "...really SecureFault with SFSR.INVTRAN\n");
4773
+ } else {
4774
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
4775
+ qemu_log_mask(CPU_LOG_INT,
4776
+ "...really SecureFault with SFSR.INVEP\n");
4777
+ }
4778
+ break;
4779
+ case EXCP_DATA_ABORT:
4780
+ /* This must be an NS access to S memory */
4781
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
4782
+ qemu_log_mask(CPU_LOG_INT,
4783
+ "...really SecureFault with SFSR.AUVIOL\n");
4784
+ break;
4785
+ }
4786
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4787
+ break;
4788
+ case 0x8: /* External Abort */
4789
+ switch (cs->exception_index) {
4790
+ case EXCP_PREFETCH_ABORT:
4791
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
4792
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
4793
+ break;
4794
+ case EXCP_DATA_ABORT:
4795
+ env->v7m.cfsr[M_REG_NS] |=
4796
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
4797
+ env->v7m.bfar = env->exception.vaddress;
4798
+ qemu_log_mask(CPU_LOG_INT,
4799
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
4800
+ env->v7m.bfar);
4801
+ break;
4802
+ }
4803
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
4804
+ break;
4805
+ default:
4806
+ /*
4807
+ * All other FSR values are either MPU faults or "can't happen
4808
+ * for M profile" cases.
4809
+ */
4810
+ switch (cs->exception_index) {
4811
+ case EXCP_PREFETCH_ABORT:
4812
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
4813
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
4814
+ break;
4815
+ case EXCP_DATA_ABORT:
4816
+ env->v7m.cfsr[env->v7m.secure] |=
4817
+ (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
4818
+ env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
4819
+ qemu_log_mask(CPU_LOG_INT,
4820
+ "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
4821
+ env->v7m.mmfar[env->v7m.secure]);
4822
+ break;
4823
+ }
4824
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
4825
+ env->v7m.secure);
4826
+ break;
4827
+ }
4828
+ break;
4829
+ case EXCP_BKPT:
4830
+ if (semihosting_enabled()) {
4831
+ int nr;
4832
+ nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
4833
+ if (nr == 0xab) {
4834
+ env->regs[15] += 2;
4835
+ qemu_log_mask(CPU_LOG_INT,
4836
+ "...handling as semihosting call 0x%x\n",
4837
+ env->regs[0]);
4838
+ env->regs[0] = do_arm_semihosting(env);
4839
+ return;
4840
+ }
4841
+ }
4842
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
4843
+ break;
4844
+ case EXCP_IRQ:
4845
+ break;
4846
+ case EXCP_EXCEPTION_EXIT:
4847
+ if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
4848
+ /* Must be v8M security extension function return */
4849
+ assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
4850
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
4851
+ if (do_v7m_function_return(cpu)) {
4852
+ return;
4853
+ }
4854
+ } else {
4855
+ do_v7m_exception_exit(cpu);
4856
+ return;
4857
+ }
4858
+ break;
4859
+ case EXCP_LAZYFP:
4860
+ /*
4861
+ * We already pended the specific exception in the NVIC in the
4862
+ * v7m_preserve_fp_state() helper function.
4863
+ */
4864
+ break;
4865
+ default:
4866
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
4867
+ return; /* Never happens. Keep compiler happy. */
4868
+ }
4869
+
4870
+ if (arm_feature(env, ARM_FEATURE_V8)) {
4871
+ lr = R_V7M_EXCRET_RES1_MASK |
4872
+ R_V7M_EXCRET_DCRS_MASK;
4873
+ /*
4874
+ * The S bit indicates whether we should return to Secure
4875
+ * or NonSecure (ie our current state).
4876
+ * The ES bit indicates whether we're taking this exception
4877
+ * to Secure or NonSecure (ie our target state). We set it
4878
+ * later, in v7m_exception_taken().
4879
+ * The SPSEL bit is also set in v7m_exception_taken() for v8M.
4880
+ * This corresponds to the ARM ARM pseudocode for v8M setting
4881
+ * some LR bits in PushStack() and some in ExceptionTaken();
4882
+ * the distinction matters for the tailchain cases where we
4883
+ * can take an exception without pushing the stack.
4884
+ */
4885
+ if (env->v7m.secure) {
4886
+ lr |= R_V7M_EXCRET_S_MASK;
4887
+ }
4888
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
4889
+ lr |= R_V7M_EXCRET_FTYPE_MASK;
4890
+ }
4891
+ } else {
4892
+ lr = R_V7M_EXCRET_RES1_MASK |
4893
+ R_V7M_EXCRET_S_MASK |
4894
+ R_V7M_EXCRET_DCRS_MASK |
4895
+ R_V7M_EXCRET_FTYPE_MASK |
4896
+ R_V7M_EXCRET_ES_MASK;
4897
+ if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
4898
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
4899
+ }
4900
+ }
4901
+ if (!arm_v7m_is_handler_mode(env)) {
4902
+ lr |= R_V7M_EXCRET_MODE_MASK;
4903
+ }
4904
+
4905
+ ignore_stackfaults = v7m_push_stack(cpu);
4906
+ v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
4907
+}
4908
+
4909
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
4910
+{
4911
+ uint32_t mask;
4912
+ unsigned el = arm_current_el(env);
4913
+
4914
+ /* First handle registers which unprivileged can read */
4915
+
4916
+ switch (reg) {
4917
+ case 0 ... 7: /* xPSR sub-fields */
4918
+ mask = 0;
4919
+ if ((reg & 1) && el) {
4920
+ mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
4921
+ }
4922
+ if (!(reg & 4)) {
4923
+ mask |= XPSR_NZCV | XPSR_Q; /* APSR */
4924
+ if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
4925
+ mask |= XPSR_GE;
4926
+ }
4927
+ }
4928
+ /* EPSR reads as zero */
4929
+ return xpsr_read(env) & mask;
4930
+ break;
4931
+ case 20: /* CONTROL */
4932
+ {
4933
+ uint32_t value = env->v7m.control[env->v7m.secure];
4934
+ if (!env->v7m.secure) {
4935
+ /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
4936
+ value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
4937
+ }
4938
+ return value;
4939
+ }
4940
+ case 0x94: /* CONTROL_NS */
4941
+ /*
4942
+ * We have to handle this here because unprivileged Secure code
4943
+ * can read the NS CONTROL register.
4944
+ */
4945
+ if (!env->v7m.secure) {
4946
+ return 0;
4947
+ }
4948
+ return env->v7m.control[M_REG_NS] |
4949
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
4950
+ }
4951
+
4952
+ if (el == 0) {
4953
+ return 0; /* unprivileged reads others as zero */
4954
+ }
4955
+
4956
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4957
+ switch (reg) {
4958
+ case 0x88: /* MSP_NS */
4959
+ if (!env->v7m.secure) {
4960
+ return 0;
4961
+ }
4962
+ return env->v7m.other_ss_msp;
4963
+ case 0x89: /* PSP_NS */
4964
+ if (!env->v7m.secure) {
4965
+ return 0;
4966
+ }
4967
+ return env->v7m.other_ss_psp;
4968
+ case 0x8a: /* MSPLIM_NS */
4969
+ if (!env->v7m.secure) {
4970
+ return 0;
4971
+ }
4972
+ return env->v7m.msplim[M_REG_NS];
4973
+ case 0x8b: /* PSPLIM_NS */
4974
+ if (!env->v7m.secure) {
4975
+ return 0;
4976
+ }
4977
+ return env->v7m.psplim[M_REG_NS];
4978
+ case 0x90: /* PRIMASK_NS */
4979
+ if (!env->v7m.secure) {
4980
+ return 0;
4981
+ }
4982
+ return env->v7m.primask[M_REG_NS];
4983
+ case 0x91: /* BASEPRI_NS */
4984
+ if (!env->v7m.secure) {
4985
+ return 0;
4986
+ }
4987
+ return env->v7m.basepri[M_REG_NS];
4988
+ case 0x93: /* FAULTMASK_NS */
4989
+ if (!env->v7m.secure) {
4990
+ return 0;
4991
+ }
4992
+ return env->v7m.faultmask[M_REG_NS];
4993
+ case 0x98: /* SP_NS */
4994
+ {
4995
+ /*
4996
+ * This gives the non-secure SP selected based on whether we're
4997
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
4998
+ */
4999
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
5000
+
5001
+ if (!env->v7m.secure) {
5002
+ return 0;
5003
+ }
5004
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
5005
+ return env->v7m.other_ss_psp;
5006
+ } else {
5007
+ return env->v7m.other_ss_msp;
5008
+ }
5009
+ }
5010
+ default:
5011
+ break;
5012
+ }
5013
+ }
5014
+
5015
+ switch (reg) {
5016
+ case 8: /* MSP */
5017
+ return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
5018
+ case 9: /* PSP */
5019
+ return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
5020
+ case 10: /* MSPLIM */
5021
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5022
+ goto bad_reg;
5023
+ }
5024
+ return env->v7m.msplim[env->v7m.secure];
5025
+ case 11: /* PSPLIM */
5026
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5027
+ goto bad_reg;
5028
+ }
5029
+ return env->v7m.psplim[env->v7m.secure];
5030
+ case 16: /* PRIMASK */
5031
+ return env->v7m.primask[env->v7m.secure];
5032
+ case 17: /* BASEPRI */
5033
+ case 18: /* BASEPRI_MAX */
5034
+ return env->v7m.basepri[env->v7m.secure];
5035
+ case 19: /* FAULTMASK */
5036
+ return env->v7m.faultmask[env->v7m.secure];
5037
+ default:
5038
+ bad_reg:
5039
+ qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
5040
+ " register %d\n", reg);
5041
+ return 0;
5042
+ }
5043
+}
5044
+
5045
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
5046
+{
5047
+ /*
5048
+ * We're passed bits [11..0] of the instruction; extract
5049
+ * SYSm and the mask bits.
5050
+ * Invalid combinations of SYSm and mask are UNPREDICTABLE;
5051
+ * we choose to treat them as if the mask bits were valid.
5052
+ * NB that the pseudocode 'mask' variable is bits [11..10],
5053
+ * whereas ours is [11..8].
5054
+ */
5055
+ uint32_t mask = extract32(maskreg, 8, 4);
5056
+ uint32_t reg = extract32(maskreg, 0, 8);
5057
+ int cur_el = arm_current_el(env);
5058
+
5059
+ if (cur_el == 0 && reg > 7 && reg != 20) {
5060
+ /*
5061
+ * only xPSR sub-fields and CONTROL.SFPA may be written by
5062
+ * unprivileged code
5063
+ */
5064
+ return;
5065
+ }
5066
+
5067
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
5068
+ switch (reg) {
5069
+ case 0x88: /* MSP_NS */
5070
+ if (!env->v7m.secure) {
5071
+ return;
5072
+ }
5073
+ env->v7m.other_ss_msp = val;
5074
+ return;
5075
+ case 0x89: /* PSP_NS */
5076
+ if (!env->v7m.secure) {
5077
+ return;
5078
+ }
5079
+ env->v7m.other_ss_psp = val;
5080
+ return;
5081
+ case 0x8a: /* MSPLIM_NS */
5082
+ if (!env->v7m.secure) {
5083
+ return;
5084
+ }
5085
+ env->v7m.msplim[M_REG_NS] = val & ~7;
5086
+ return;
5087
+ case 0x8b: /* PSPLIM_NS */
5088
+ if (!env->v7m.secure) {
5089
+ return;
5090
+ }
5091
+ env->v7m.psplim[M_REG_NS] = val & ~7;
5092
+ return;
5093
+ case 0x90: /* PRIMASK_NS */
5094
+ if (!env->v7m.secure) {
5095
+ return;
5096
+ }
5097
+ env->v7m.primask[M_REG_NS] = val & 1;
5098
+ return;
5099
+ case 0x91: /* BASEPRI_NS */
5100
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
5101
+ return;
5102
+ }
5103
+ env->v7m.basepri[M_REG_NS] = val & 0xff;
5104
+ return;
5105
+ case 0x93: /* FAULTMASK_NS */
5106
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
5107
+ return;
5108
+ }
5109
+ env->v7m.faultmask[M_REG_NS] = val & 1;
5110
+ return;
5111
+ case 0x94: /* CONTROL_NS */
5112
+ if (!env->v7m.secure) {
5113
+ return;
5114
+ }
5115
+ write_v7m_control_spsel_for_secstate(env,
5116
+ val & R_V7M_CONTROL_SPSEL_MASK,
5117
+ M_REG_NS);
5118
+ if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
5119
+ env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
5120
+ env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
5121
+ }
5122
+ /*
5123
+ * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
5124
+ * RES0 if the FPU is not present, and is stored in the S bank
5125
+ */
5126
+ if (arm_feature(env, ARM_FEATURE_VFP) &&
5127
+ extract32(env->v7m.nsacr, 10, 1)) {
5128
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
5129
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
5130
+ }
5131
+ return;
5132
+ case 0x98: /* SP_NS */
5133
+ {
5134
+ /*
5135
+ * This gives the non-secure SP selected based on whether we're
5136
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
5137
+ */
5138
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
5139
+ bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
5140
+ uint32_t limit;
5141
+
5142
+ if (!env->v7m.secure) {
5143
+ return;
5144
+ }
5145
+
5146
+ limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
5147
+
5148
+ if (val < limit) {
5149
+ CPUState *cs = env_cpu(env);
5150
+
5151
+ cpu_restore_state(cs, GETPC(), true);
5152
+ raise_exception(env, EXCP_STKOF, 0, 1);
5153
+ }
5154
+
5155
+ if (is_psp) {
5156
+ env->v7m.other_ss_psp = val;
5157
+ } else {
5158
+ env->v7m.other_ss_msp = val;
5159
+ }
5160
+ return;
5161
+ }
5162
+ default:
5163
+ break;
5164
+ }
5165
+ }
5166
+
5167
+ switch (reg) {
5168
+ case 0 ... 7: /* xPSR sub-fields */
5169
+ /* only APSR is actually writable */
5170
+ if (!(reg & 4)) {
5171
+ uint32_t apsrmask = 0;
5172
+
5173
+ if (mask & 8) {
5174
+ apsrmask |= XPSR_NZCV | XPSR_Q;
5175
+ }
5176
+ if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
5177
+ apsrmask |= XPSR_GE;
5178
+ }
5179
+ xpsr_write(env, val, apsrmask);
5180
+ }
5181
+ break;
5182
+ case 8: /* MSP */
5183
+ if (v7m_using_psp(env)) {
5184
+ env->v7m.other_sp = val;
5185
+ } else {
5186
+ env->regs[13] = val;
5187
+ }
5188
+ break;
5189
+ case 9: /* PSP */
5190
+ if (v7m_using_psp(env)) {
5191
+ env->regs[13] = val;
5192
+ } else {
5193
+ env->v7m.other_sp = val;
5194
+ }
5195
+ break;
5196
+ case 10: /* MSPLIM */
5197
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5198
+ goto bad_reg;
5199
+ }
5200
+ env->v7m.msplim[env->v7m.secure] = val & ~7;
5201
+ break;
5202
+ case 11: /* PSPLIM */
5203
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5204
+ goto bad_reg;
5205
+ }
5206
+ env->v7m.psplim[env->v7m.secure] = val & ~7;
5207
+ break;
5208
+ case 16: /* PRIMASK */
5209
+ env->v7m.primask[env->v7m.secure] = val & 1;
5210
+ break;
5211
+ case 17: /* BASEPRI */
5212
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5213
+ goto bad_reg;
5214
+ }
5215
+ env->v7m.basepri[env->v7m.secure] = val & 0xff;
5216
+ break;
5217
+ case 18: /* BASEPRI_MAX */
5218
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5219
+ goto bad_reg;
5220
+ }
5221
+ val &= 0xff;
5222
+ if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
5223
+ || env->v7m.basepri[env->v7m.secure] == 0)) {
5224
+ env->v7m.basepri[env->v7m.secure] = val;
5225
+ }
5226
+ break;
5227
+ case 19: /* FAULTMASK */
5228
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5229
+ goto bad_reg;
5230
+ }
5231
+ env->v7m.faultmask[env->v7m.secure] = val & 1;
5232
+ break;
5233
+ case 20: /* CONTROL */
5234
+ /*
5235
+ * Writing to the SPSEL bit only has an effect if we are in
5236
+ * thread mode; other bits can be updated by any privileged code.
5237
+ * write_v7m_control_spsel() deals with updating the SPSEL bit in
5238
+ * env->v7m.control, so we only need update the others.
5239
+ * For v7M, we must just ignore explicit writes to SPSEL in handler
5240
+ * mode; for v8M the write is permitted but will have no effect.
5241
+ * All these bits are writes-ignored from non-privileged code,
5242
+ * except for SFPA.
5243
+ */
5244
+ if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
5245
+ !arm_v7m_is_handler_mode(env))) {
5246
+ write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
5247
+ }
5248
+ if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
5249
+ env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
5250
+ env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
5251
+ }
5252
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
5253
+ /*
5254
+ * SFPA is RAZ/WI from NS or if no FPU.
5255
+ * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
5256
+ * Both are stored in the S bank.
5257
+ */
5258
+ if (env->v7m.secure) {
5259
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
5260
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
5261
+ }
5262
+ if (cur_el > 0 &&
5263
+ (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
5264
+ extract32(env->v7m.nsacr, 10, 1))) {
5265
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
5266
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
5267
+ }
5268
+ }
5269
+ break;
5270
+ default:
5271
+ bad_reg:
5272
+ qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
5273
+ " register %d\n", reg);
5274
+ return;
5275
+ }
5276
+}
5277
+
5278
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
5279
+{
5280
+ /* Implement the TT instruction. op is bits [7:6] of the insn. */
5281
+ bool forceunpriv = op & 1;
5282
+ bool alt = op & 2;
5283
+ V8M_SAttributes sattrs = {};
5284
+ uint32_t tt_resp;
5285
+ bool r, rw, nsr, nsrw, mrvalid;
5286
+ int prot;
5287
+ ARMMMUFaultInfo fi = {};
5288
+ MemTxAttrs attrs = {};
5289
+ hwaddr phys_addr;
5290
+ ARMMMUIdx mmu_idx;
5291
+ uint32_t mregion;
5292
+ bool targetpriv;
5293
+ bool targetsec = env->v7m.secure;
5294
+ bool is_subpage;
5295
+
5296
+ /*
5297
+ * Work out which security state and privilege level we're
+ * interested in...
5299
+ */
5300
+ if (alt) {
5301
+ targetsec = !targetsec;
5302
+ }
5303
+
5304
+ if (forceunpriv) {
5305
+ targetpriv = false;
5306
+ } else {
5307
+ targetpriv = arm_v7m_is_handler_mode(env) ||
5308
+ !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
5309
+ }
5310
+
5311
+ /* ...and then figure out which MMU index this is */
5312
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
5313
+
5314
+ /*
5315
+ * We know that the MPU and SAU don't care about the access type
5316
+ * for our purposes beyond that we don't want to claim to be
5317
+ * an insn fetch, so we arbitrarily call this a read.
5318
+ */
5319
+
5320
+ /*
5321
+ * MPU region info only available for privileged or if
5322
+ * inspecting the other MPU state.
5323
+ */
5324
+ if (arm_current_el(env) != 0 || alt) {
5325
+ /* We can ignore the return value as prot is always set */
5326
+ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
5327
+ &phys_addr, &attrs, &prot, &is_subpage,
5328
+ &fi, &mregion);
5329
+ if (mregion == -1) {
5330
+ mrvalid = false;
5331
+ mregion = 0;
5332
+ } else {
5333
+ mrvalid = true;
5334
+ }
5335
+ r = prot & PAGE_READ;
5336
+ rw = prot & PAGE_WRITE;
5337
+ } else {
5338
+ r = false;
5339
+ rw = false;
5340
+ mrvalid = false;
5341
+ mregion = 0;
5342
+ }
5343
+
5344
+ if (env->v7m.secure) {
5345
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
5346
+ nsr = sattrs.ns && r;
5347
+ nsrw = sattrs.ns && rw;
5348
+ } else {
5349
+ sattrs.ns = true;
5350
+ nsr = false;
5351
+ nsrw = false;
5352
+ }
5353
+
5354
+ tt_resp = (sattrs.iregion << 24) |
5355
+ (sattrs.irvalid << 23) |
5356
+ ((!sattrs.ns) << 22) |
5357
+ (nsrw << 21) |
5358
+ (nsr << 20) |
5359
+ (rw << 19) |
5360
+ (r << 18) |
5361
+ (sattrs.srvalid << 17) |
5362
+ (mrvalid << 16) |
5363
+ (sattrs.sregion << 8) |
5364
+ mregion;
5365
+
5366
+ return tt_resp;
5367
+}
5368
+
5369
+#endif /* !CONFIG_USER_ONLY */
5370
+
5371
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
5372
+ bool secstate, bool priv, bool negpri)
5373
+{
5374
+ ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
5375
+
5376
+ if (priv) {
5377
+ mmu_idx |= ARM_MMU_IDX_M_PRIV;
5378
+ }
5379
+
5380
+ if (negpri) {
5381
+ mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
5382
+ }
5383
+
5384
+ if (secstate) {
5385
+ mmu_idx |= ARM_MMU_IDX_M_S;
5386
+ }
5387
+
5388
+ return mmu_idx;
5389
+}
5390
+
5391
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
5392
+ bool secstate, bool priv)
5393
+{
5394
+ bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
5395
+
5396
+ return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
5397
+}
5398
+
5399
+/* Return the MMU index for a v7M CPU in the specified security state */
5400
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
5401
+{
5402
+ bool priv = arm_current_el(env) != 0;
5403
+
5404
+ return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
5405
+}
5406
--
2.25.1
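The EXC_RETURN integrity check implemented in do_v7m_exception_exit()
above reduces to a single comparison. A minimal standalone sketch,
using illustrative constants from the v8-M architecture rather than
QEMU's register-field macros:

#include <stdbool.h>
#include <stdint.h>

#define EXCRET_MODE_MASK (1u << 3) /* EXC_RETURN.Mode: 1 = Thread mode */
#define XPSR_EXCP_MASK   0x1ffu    /* xPSR exception number (IPSR) */

/*
 * Returns true if the EXC_RETURN value and the xPSR popped from the
 * stack agree about handler vs thread mode; a mismatch must raise an
 * INVPC UsageFault, as the code above does.
 */
bool excret_integrity_ok(uint32_t excret, uint32_t stacked_xpsr)
{
    bool return_to_handler = !(excret & EXCRET_MODE_MASK);
    bool will_be_handler = (stacked_xpsr & XPSR_EXCP_MASK) != 0;

    return return_to_handler == will_be_handler;
}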
From: Richard Henderson <richard.henderson@linaro.org>

The documentation for PROT_MTE says that it cannot be cleared
by mprotect. Further, the implementation of the VM_ARCH_CLEAR bit
contains PROT_BTI, confirming that bit should be cleared.

Introduce PAGE_TARGET_STICKY to allow target/arch/cpu.h to control
which bits may be reset during page_set_flags. This is sort of the
opposite of VM_ARCH_CLEAR, but works better with qemu's PAGE_* bits
that are separate from PROT_* bits.

Reported-by: Vitaly Buka <vitalybuka@google.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220711031420.17820-1-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h          |  7 +++++--
 accel/tcg/translate-all.c | 13 +++++++++++--
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
 
 /*
  * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
+ * Note that with the Linux kernel, PROT_MTE may not be cleared by
+ * mprotect but PROT_BTI may be cleared. C.f. the kernel's VM_ARCH_CLEAR.
  */
-#define PAGE_BTI  PAGE_TARGET_1
-#define PAGE_MTE  PAGE_TARGET_2
+#define PAGE_BTI            PAGE_TARGET_1
+#define PAGE_MTE            PAGE_TARGET_2
+#define PAGE_TARGET_STICKY  PAGE_MTE
 
 #ifdef TARGET_TAGGED_ADDRESSES
 /**
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ int page_get_flags(target_ulong address)
     return p->flags;
 }
 
+/*
+ * Allow the target to decide if PAGE_TARGET_[12] may be reset.
+ * By default, they are not kept.
+ */
+#ifndef PAGE_TARGET_STICKY
+#define PAGE_TARGET_STICKY 0
+#endif
+#define PAGE_STICKY (PAGE_ANON | PAGE_TARGET_STICKY)
+
 /* Modify the flags of a page and invalidate the code if necessary.
    The flag PAGE_WRITE_ORG is positioned automatically depending
    on PAGE_WRITE. The mmap_lock should already be held. */
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
         p->target_data = NULL;
         p->flags = flags;
     } else {
-        /* Using mprotect on a page does not change MAP_ANON. */
-        p->flags = (p->flags & PAGE_ANON) | flags;
+        /* Using mprotect on a page does not change sticky bits. */
+        p->flags = (p->flags & PAGE_STICKY) | flags;
     }
 }
--
2.25.1
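The new page_set_flags() rule above is small enough to test
standalone. A minimal sketch with illustrative flag values, not
QEMU's actual PAGE_* constants:

#include <assert.h>

#define PAGE_ANON           0x0010
#define PAGE_TARGET_STICKY  0x0040   /* plays the role of PAGE_MTE */
#define PAGE_STICKY         (PAGE_ANON | PAGE_TARGET_STICKY)

static int update_page_flags(int old_flags, int new_flags)
{
    /* Using mprotect on a page does not change sticky bits */
    return (old_flags & PAGE_STICKY) | new_flags;
}

int main(void)
{
    int flags = PAGE_ANON | PAGE_TARGET_STICKY | 0x1 /* "read" */;

    flags = update_page_flags(flags, 0x3 /* "read|write" */);
    assert(flags & PAGE_TARGET_STICKY);  /* PROT_MTE-equivalent kept */
    assert(flags & PAGE_ANON);           /* anonymity kept too */
    return 0;
}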
The regime_tcr() function returns a pointer to a struct TCR
corresponding to the TCR controlling a translation regime. The
struct TCR has the raw value of the register, plus two fields mask
and base_mask which are used as a small optimization in the case of
32-bit short-descriptor lookups. Almost all callers of regime_tcr()
only want the raw register value. Define and use a new
regime_tcr_value() function which returns only the raw 64-bit
register value.

This is a preliminary to removing the 32-bit short descriptor
optimization -- it only saves a handful of bit operations, which is
tiny compared to the overhead of doing a page table walk at all, and
the TCR struct is awkward and makes fixing
https://gitlab.com/qemu-project/qemu/-/issues/1103 unnecessarily
difficult.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220714132303.1287193-2-peter.maydell@linaro.org
---
 target/arm/internals.h  | 6 ++++++
 target/arm/helper.c     | 6 +++---
 target/arm/ptw.c        | 8 ++++----
 target/arm/tlb_helper.c | 2 +-
 4 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
     return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
 }
 
+/* Return the raw value of the TCR controlling this translation regime */
+static inline uint64_t regime_tcr_value(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+    return regime_tcr(env, mmu_idx)->raw_tcr;
+}
+
 /**
  * arm_num_brps: Return number of implemented breakpoints.
  * Note that the ID register BRPS field is "number of bps - 1",
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static int vae1_tlbmask(CPUARMState *env)
 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
                               uint64_t addr)
 {
-    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+    uint64_t tcr = regime_tcr_value(env, mmu_idx);
     int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
     int select = extract64(addr, 55, 1);
 
@@ -XXX,XX +XXX,XX @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                    ARMMMUIdx mmu_idx, bool data)
 {
-    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+    uint64_t tcr = regime_tcr_value(env, mmu_idx);
     bool epd, hpd, using16k, using64k, tsz_oob, ds;
     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
     ARMCPU *cpu = env_archcpu(env);
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
 {
     CPUARMTBFlags flags = {};
     ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
-    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+    uint64_t tcr = regime_tcr_value(env, mmu_idx);
     uint64_t sctlr;
     int tbii, tbid;
 
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                           ARMMMUIdx mmu_idx)
 {
-    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+    uint64_t tcr = regime_tcr_value(env, mmu_idx);
     uint32_t el = regime_el(env, mmu_idx);
     int select, tsz;
     bool epd, hpd;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
     uint32_t attrs;
     int32_t stride;
     int addrsize, inputsize, outputsize;
-    TCR *tcr = regime_tcr(env, mmu_idx);
+    uint64_t tcr = regime_tcr_value(env, mmu_idx);
     int ap, ns, xn, pxn;
     uint32_t el = regime_el(env, mmu_idx);
     uint64_t descaddrmask;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
          * For stage 2 translations the starting level is specified by the
          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
          */
-        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
-        uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
+        uint32_t sl0 = extract32(tcr, 6, 2);
+        uint32_t sl2 = extract64(tcr, 33, 1);
         uint32_t startlevel;
         bool ok;
 
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -XXX,XX +XXX,XX @@ bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
         return true;
     }
     if (arm_feature(env, ARM_FEATURE_LPAE)
-        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
+        && (regime_tcr_value(env, mmu_idx) & TTBCR_EAE)) {
         return true;
     }
     return false;
--
2.25.1
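The refactoring above is the classic accessor-narrowing move: expose
the one field most callers want instead of the whole struct. In
miniature, with stand-in types rather than QEMU's definitions:

#include <stdint.h>

typedef struct {
    uint64_t raw_tcr;   /* architectural register value */
    uint32_t mask;      /* cached values for the legacy 32-bit path */
    uint32_t base_mask;
} ToyTCR;

static ToyTCR toy_tcr;  /* stand-in for env->cp15.tcr_el[el] */

/* old style: callers get the struct and dereference ->raw_tcr */
static ToyTCR *toy_regime_tcr(void)
{
    return &toy_tcr;
}

/* new style: most callers only want the raw 64-bit value */
static uint64_t toy_regime_tcr_value(void)
{
    return toy_regime_tcr()->raw_tcr;
}

/* e.g. testing TTBCR.EAE (bit 31) no longer touches the struct */
int toy_eae_enabled(void)
{
    return (toy_regime_tcr_value() >> 31) & 1;
}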
In the various helper functions for v7M/v8M instructions, use
the _ra versions of cpu_stl_data() and friends. Otherwise we
may get wrong behaviour or an assert() due to not being able
to locate the TB if there is an exception on the memory access
or if it performs an IO operation when in icount mode.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-5-peter.maydell@linaro.org
---
 target/arm/m_helper.c | 21 ++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

In get_level1_table_address(), instead of using precalculated values
of mask and base_mask from the TCR struct, calculate them directly
(in the same way we currently do in vmsa_ttbcr_raw_write() to
populate the TCR struct fields).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220714132303.1287193-3-peter.maydell@linaro.org
---
 target/arm/ptw.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
13
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/m_helper.c
15
--- a/target/arm/ptw.c
17
+++ b/target/arm/m_helper.c
16
+++ b/target/arm/ptw.c
18
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
17
@@ -XXX,XX +XXX,XX @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
18
uint32_t *table, uint32_t address)
19
{
20
/* Note that we can only get here for an AArch32 PL0/PL1 lookup */
21
- TCR *tcr = regime_tcr(env, mmu_idx);
22
+ uint64_t tcr = regime_tcr_value(env, mmu_idx);
23
+ int maskshift = extract32(tcr, 0, 3);
24
+ uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
25
+ uint32_t base_mask;
26
27
- if (address & tcr->mask) {
28
- if (tcr->raw_tcr & TTBCR_PD1) {
29
+ if (address & mask) {
30
+ if (tcr & TTBCR_PD1) {
31
/* Translation table walk disabled for TTBR1 */
32
return false;
33
}
34
*table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
35
} else {
36
- if (tcr->raw_tcr & TTBCR_PD0) {
37
+ if (tcr & TTBCR_PD0) {
38
/* Translation table walk disabled for TTBR0 */
39
return false;
40
}
41
- *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
42
+ base_mask = ~((uint32_t)0x3fffu >> maskshift);
43
+ *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
19
}
44
}
20
45
*table |= (address >> 18) & 0x3ffc;
21
/* Note that these stores can throw exceptions on MPU faults */
46
return true;
22
- cpu_stl_data(env, sp, nextinst);
23
- cpu_stl_data(env, sp + 4, saved_psr);
24
+ cpu_stl_data_ra(env, sp, nextinst, GETPC());
25
+ cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());
26
27
env->regs[13] = sp;
28
env->regs[14] = 0xfeffffff;
29
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
30
/* fptr is the value of Rn, the frame pointer we store the FP regs to */
31
bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
32
bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
33
+ uintptr_t ra = GETPC();
34
35
assert(env->v7m.secure);
36
37
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
38
* Note that we do not use v7m_stack_write() here, because the
39
* accesses should not set the FSR bits for stacking errors if they
40
* fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
41
- * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
42
+ * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
43
* and longjmp out.
44
*/
45
if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
46
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
47
if (i >= 16) {
48
faddr += 8; /* skip the slot for the FPSCR */
49
}
50
- cpu_stl_data(env, faddr, slo);
51
- cpu_stl_data(env, faddr + 4, shi);
52
+ cpu_stl_data_ra(env, faddr, slo, ra);
53
+ cpu_stl_data_ra(env, faddr + 4, shi, ra);
54
}
55
- cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
56
+ cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
57
58
/*
59
* If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
60
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
61
62
void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
63
{
64
+ uintptr_t ra = GETPC();
65
+
66
/* fptr is the value of Rn, the frame pointer we load the FP regs from */
67
assert(env->v7m.secure);
68
69
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
70
faddr += 8; /* skip the slot for the FPSCR */
71
}
72
73
- slo = cpu_ldl_data(env, faddr);
74
- shi = cpu_ldl_data(env, faddr + 4);
75
+ slo = cpu_ldl_data_ra(env, faddr, ra);
76
+ shi = cpu_ldl_data_ra(env, faddr + 4, ra);
77
78
dn = (uint64_t) shi << 32 | slo;
79
*aa32_vfp_dreg(env, i / 2) = dn;
80
}
81
- fpscr = cpu_ldl_data(env, fptr + 0x40);
82
+ fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
83
vfp_set_fpscr(env, fpscr);
84
}
85
86
--
2.25.1
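The ptw.c hunks above recompute the masks that used to be cached in
the TCR struct. A quick self-contained check of that arithmetic in
plain C (not QEMU code), worked for TTBCR.N == 2:

#include <assert.h>
#include <stdint.h>

static void ttbcr_masks(uint32_t n, uint32_t *mask, uint32_t *base_mask)
{
    *mask = ~(0xffffffffu >> n);   /* which VAs are routed to TTBR1 */
    *base_mask = ~(0x3fffu >> n);  /* alignment mask for the TTBR0 base */
}

int main(void)
{
    uint32_t mask, base_mask;

    ttbcr_masks(2, &mask, &base_mask);
    assert(mask == 0xc0000000u);      /* VA[31:30] != 0 -> use TTBR1 */
    assert(base_mask == 0xfffff000u); /* TTBR0 base aligned to 4KB */
    return 0;
}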
Like most of the v7M memory mapped system registers, the systick
registers are accessible to privileged code only and user accesses
must generate a BusFault. We implement that for registers in
the NVIC proper already, but missed it for systick since we
implement it as a separate device. Correct the omission.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190617175317.27557-6-peter.maydell@linaro.org
---
 hw/timer/armv7m_systick.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

The only caller of regime_tcr() is now regime_tcr_value(); fold the
two together, and use the shorter and more natural 'regime_tcr'
name for the new function.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220714132303.1287193-4-peter.maydell@linaro.org
---
 target/arm/internals.h  | 16 +++++-----------
 target/arm/helper.c     |  6 +++---
 target/arm/ptw.c        |  6 +++---
 target/arm/tlb_helper.c |  2 +-
 4 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c
15
diff --git a/target/arm/internals.h b/target/arm/internals.h
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/timer/armv7m_systick.c
17
--- a/target/arm/internals.h
18
+++ b/hw/timer/armv7m_systick.c
18
+++ b/target/arm/internals.h
19
@@ -XXX,XX +XXX,XX @@ static void systick_timer_tick(void *opaque)
19
@@ -XXX,XX +XXX,XX @@ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
20
return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
21
}
22
23
-/* Return the TCR controlling this translation regime */
24
-static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
25
+/* Return the value of the TCR controlling this translation regime */
26
+static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
27
{
28
if (mmu_idx == ARMMMUIdx_Stage2) {
29
- return &env->cp15.vtcr_el2;
30
+ return env->cp15.vtcr_el2.raw_tcr;
20
}
31
}
32
if (mmu_idx == ARMMMUIdx_Stage2_S) {
33
/*
34
* Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
35
* those are not currently used by QEMU, so just return VSTCR_EL2.
36
*/
37
- return &env->cp15.vstcr_el2;
38
+ return env->cp15.vstcr_el2.raw_tcr;
39
}
40
- return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
41
-}
42
-
43
-/* Return the raw value of the TCR controlling this translation regime */
44
-static inline uint64_t regime_tcr_value(CPUARMState *env, ARMMMUIdx mmu_idx)
45
-{
46
- return regime_tcr(env, mmu_idx)->raw_tcr;
47
+ return env->cp15.tcr_el[regime_el(env, mmu_idx)].raw_tcr;
21
}
48
}
22
49
23
-static uint64_t systick_read(void *opaque, hwaddr addr, unsigned size)
50
/**
24
+static MemTxResult systick_read(void *opaque, hwaddr addr, uint64_t *data,
51
diff --git a/target/arm/helper.c b/target/arm/helper.c
25
+ unsigned size, MemTxAttrs attrs)
52
index XXXXXXX..XXXXXXX 100644
53
--- a/target/arm/helper.c
54
+++ b/target/arm/helper.c
55
@@ -XXX,XX +XXX,XX @@ static int vae1_tlbmask(CPUARMState *env)
56
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
57
uint64_t addr)
26
{
58
{
27
SysTickState *s = opaque;
59
- uint64_t tcr = regime_tcr_value(env, mmu_idx);
28
uint32_t val;
60
+ uint64_t tcr = regime_tcr(env, mmu_idx);
29
61
int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
30
+ if (attrs.user) {
62
int select = extract64(addr, 55, 1);
31
+ /* Generate BusFault for unprivileged accesses */
63
32
+ return MEMTX_ERROR;
64
@@ -XXX,XX +XXX,XX @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
33
+ }
65
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
34
+
66
ARMMMUIdx mmu_idx, bool data)
35
switch (addr) {
67
{
36
case 0x0: /* SysTick Control and Status. */
68
- uint64_t tcr = regime_tcr_value(env, mmu_idx);
37
val = s->control;
69
+ uint64_t tcr = regime_tcr(env, mmu_idx);
38
@@ -XXX,XX +XXX,XX @@ static uint64_t systick_read(void *opaque, hwaddr addr, unsigned size)
70
bool epd, hpd, using16k, using64k, tsz_oob, ds;
71
int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
72
ARMCPU *cpu = env_archcpu(env);
73
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
74
{
75
CPUARMTBFlags flags = {};
76
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
77
- uint64_t tcr = regime_tcr_value(env, mmu_idx);
78
+ uint64_t tcr = regime_tcr(env, mmu_idx);
79
uint64_t sctlr;
80
int tbii, tbid;
81
82
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/target/arm/ptw.c
85
+++ b/target/arm/ptw.c
86
@@ -XXX,XX +XXX,XX @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
87
uint32_t *table, uint32_t address)
88
{
89
/* Note that we can only get here for an AArch32 PL0/PL1 lookup */
90
- uint64_t tcr = regime_tcr_value(env, mmu_idx);
91
+ uint64_t tcr = regime_tcr(env, mmu_idx);
92
int maskshift = extract32(tcr, 0, 3);
93
uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
94
uint32_t base_mask;
95
@@ -XXX,XX +XXX,XX @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
96
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
97
ARMMMUIdx mmu_idx)
98
{
99
- uint64_t tcr = regime_tcr_value(env, mmu_idx);
100
+ uint64_t tcr = regime_tcr(env, mmu_idx);
101
uint32_t el = regime_el(env, mmu_idx);
102
int select, tsz;
103
bool epd, hpd;
104
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
105
uint32_t attrs;
106
int32_t stride;
107
int addrsize, inputsize, outputsize;
108
- uint64_t tcr = regime_tcr_value(env, mmu_idx);
109
+ uint64_t tcr = regime_tcr(env, mmu_idx);
110
int ap, ns, xn, pxn;
111
uint32_t el = regime_el(env, mmu_idx);
112
uint64_t descaddrmask;
113
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
114
index XXXXXXX..XXXXXXX 100644
115
--- a/target/arm/tlb_helper.c
116
+++ b/target/arm/tlb_helper.c
117
@@ -XXX,XX +XXX,XX @@ bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
118
return true;
39
}
119
}
40
120
if (arm_feature(env, ARM_FEATURE_LPAE)
41
trace_systick_read(addr, val, size);
121
- && (regime_tcr_value(env, mmu_idx) & TTBCR_EAE)) {
42
- return val;
122
+ && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
43
+ *data = val;
123
return true;
44
+ return MEMTX_OK;
45
}
46
47
-static void systick_write(void *opaque, hwaddr addr,
48
- uint64_t value, unsigned size)
49
+static MemTxResult systick_write(void *opaque, hwaddr addr,
50
+ uint64_t value, unsigned size,
51
+ MemTxAttrs attrs)
52
{
53
SysTickState *s = opaque;
54
55
+ if (attrs.user) {
56
+ /* Generate BusFault for unprivileged accesses */
57
+ return MEMTX_ERROR;
58
+ }
59
+
60
trace_systick_write(addr, value, size);
61
62
switch (addr) {
63
@@ -XXX,XX +XXX,XX @@ static void systick_write(void *opaque, hwaddr addr,
64
qemu_log_mask(LOG_GUEST_ERROR,
65
"SysTick: Bad write offset 0x%" HWADDR_PRIx "\n", addr);
66
}
124
}
67
+ return MEMTX_OK;
125
return false;
68
}
69
70
static const MemoryRegionOps systick_ops = {
71
- .read = systick_read,
72
- .write = systick_write,
73
+ .read_with_attrs = systick_read,
74
+ .write_with_attrs = systick_write,
75
.endianness = DEVICE_NATIVE_ENDIAN,
76
.valid.min_access_size = 4,
77
.valid.max_access_size = 4,
78
--
2.25.1
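The systick change above follows the usual QEMU pattern of switching
a MemoryRegionOps from read/write to read_with_attrs/write_with_attrs
so the handler can see who issued the access. A stripped-down sketch
of that pattern; the device struct is hypothetical and the
MemTxAttrs/MemTxResult definitions are simplified stand-ins for the
types in QEMU's headers:

#include <stdint.h>

typedef enum { MEMTX_OK = 0, MEMTX_ERROR = 1 } MemTxResult;

typedef struct {
    unsigned int user : 1;  /* set for an unprivileged access */
} MemTxAttrs;

typedef struct {
    uint32_t control;
} ToyTimerState;

MemTxResult toy_timer_read(ToyTimerState *s, uint64_t addr,
                           uint64_t *data, MemTxAttrs attrs)
{
    if (attrs.user) {
        /* unprivileged: fail the transaction; v7M turns this
         * into a BusFault */
        return MEMTX_ERROR;
    }
    *data = s->control;  /* privileged: normal register read */
    return MEMTX_OK;
}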
Coverity points out (CID 1402195) that the loop in trans_VMOV_imm_dp()
that iterates over the destination registers in a short-vector VMOV
accidentally throws away the returned updated register number
from vfp_advance_dreg(). Add the missing assignment. (We got this
correct in trans_VMOV_imm_sp().)

Fixes: 18cf951af9a27ae573a
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190702105115.9465-1-peter.maydell@linaro.org
---
 target/arm/translate-vfp.inc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

We have a bug in our handling of accesses to the AArch32 VTCR
register on big-endian hosts: we were not adjusting the part of the
uint64_t field within TCR that the generated code would access. That
can be done with offsetoflow32(), by using an ARM_CP_STATE_BOTH cpreg
struct, or by defining a full set of read/write/reset functions --
the various other TCR cpreg structs used one or another of those
strategies, but for VTCR we did not, so on a big-endian host VTCR
accesses would touch the wrong half of the register.

Use offsetoflow32() in the VTCR register struct. This works even
though the field in the CPU struct is currently a struct TCR, because
the first field in that struct is the uint64_t raw_tcr.

None of the other TCR registers have this bug -- either they are
AArch64 only, or else they define resetfn, writefn, etc, and
expect to be passed the full struct pointer.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220714132303.1287193-5-peter.maydell@linaro.org
---
 target/arm/helper.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
25
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-vfp.inc.c
27
--- a/target/arm/helper.c
18
+++ b/target/arm/translate-vfp.inc.c
28
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
29
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
20
30
.cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
21
/* Set up the operands for the next iteration */
31
.type = ARM_CP_ALIAS,
22
veclen--;
32
.access = PL2_RW, .accessfn = access_el3_aa32ns,
23
- vfp_advance_dreg(vd, delta_d);
33
- .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
24
+ vd = vfp_advance_dreg(vd, delta_d);
34
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
25
}
35
{ .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
26
36
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
27
tcg_temp_free_i64(fd);
37
.access = PL2_RW,
28
--
38
--
29
2.20.1
39
2.25.1
30
31
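The offsetoflow32() trick used above is easiest to see on a toy
struct: the low 32 bits of a uint64_t field sit at byte offset 0 on a
little-endian host but at byte offset 4 on a big-endian one. A sketch
mirroring the idea; QEMU's actual macro is written in terms of its
own host-endianness checks:

#include <stdint.h>
#include <stddef.h>

typedef struct {
    uint64_t raw_tcr;
} ToyTCR;

typedef struct {
    ToyTCR vtcr_el2;
} ToyCPUState;

/*
 * Offset of the low 32 bits of vtcr_el2.raw_tcr: +0 on a
 * little-endian host, +4 on a big-endian one. A 32-bit store
 * through this offset always hits the low half of the register.
 */
size_t vtcr_low32_offset(void)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return offsetof(ToyCPUState, vtcr_el2.raw_tcr) + 4;
#else
    return offsetof(ToyCPUState, vtcr_el2.raw_tcr);
#endif
}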
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Per Peter Maydell:

  Semihosting hooks either SVC or HLT instructions, and inside KVM
  both of those go to EL1, ie to the guest, and can't be trapped to
  KVM.

Let check_for_semihosting() return False when not running on TCG.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190701194942.10092-3-philmd@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/Makefile.objs | 2 +-
 target/arm/cpu.h         | 7 +++++++
 target/arm/helper.c      | 8 +++++++-
 3 files changed, 15 insertions(+), 2 deletions(-)

Change the representation of the VSTCR_EL2 and VTCR_EL2 registers in
the CPU state struct from struct TCR to uint64_t.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220714132303.1287193-6-peter.maydell@linaro.org
---
 target/arm/cpu.h       |  4 ++--
 target/arm/internals.h |  4 ++--
 target/arm/helper.c    |  4 +---
 target/arm/ptw.c       | 14 +++++++-------
 4 files changed, 12 insertions(+), 14 deletions(-)

22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/Makefile.objs
24
+++ b/target/arm/Makefile.objs
25
@@ -XXX,XX +XXX,XX @@
26
-obj-y += arm-semi.o
27
+obj-$(CONFIG_TCG) += arm-semi.o
28
obj-y += helper.o vfp_helper.o
29
obj-y += cpu.o gdbstub.o
30
obj-$(TARGET_AARCH64) += cpu64.o gdbstub64.o
31
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
32
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
33
--- a/target/arm/cpu.h
16
--- a/target/arm/cpu.h
34
+++ b/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
35
@@ -XXX,XX +XXX,XX @@ static inline void aarch64_sve_change_el(CPUARMState *env, int o,
18
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
36
{ }
19
uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
37
#endif
20
/* MMU translation table base control. */
38
21
TCR tcr_el[4];
39
+#if !defined(CONFIG_TCG)
22
- TCR vtcr_el2; /* Virtualization Translation Control. */
40
+static inline target_ulong do_arm_semihosting(CPUARMState *env)
23
- TCR vstcr_el2; /* Secure Virtualization Translation Control. */
41
+{
24
+ uint64_t vtcr_el2; /* Virtualization Translation Control. */
42
+ g_assert_not_reached();
25
+ uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
43
+}
26
uint32_t c2_data; /* MPU data cacheable bits. */
44
+#else
27
uint32_t c2_insn; /* MPU instruction cacheable bits. */
45
target_ulong do_arm_semihosting(CPUARMState *env);
28
union { /* MMU domain access control register
46
+#endif
29
diff --git a/target/arm/internals.h b/target/arm/internals.h
47
void aarch64_sync_32_to_64(CPUARMState *env);
30
index XXXXXXX..XXXXXXX 100644
48
void aarch64_sync_64_to_32(CPUARMState *env);
31
--- a/target/arm/internals.h
49
32
+++ b/target/arm/internals.h
33
@@ -XXX,XX +XXX,XX @@ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
34
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
35
{
36
if (mmu_idx == ARMMMUIdx_Stage2) {
37
- return env->cp15.vtcr_el2.raw_tcr;
38
+ return env->cp15.vtcr_el2;
39
}
40
if (mmu_idx == ARMMMUIdx_Stage2_S) {
41
/*
42
* Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
43
* those are not currently used by QEMU, so just return VSTCR_EL2.
44
*/
45
- return env->cp15.vstcr_el2.raw_tcr;
46
+ return env->cp15.vstcr_el2;
47
}
48
return env->cp15.tcr_el[regime_el(env, mmu_idx)].raw_tcr;
49
}
50
diff --git a/target/arm/helper.c b/target/arm/helper.c
50
diff --git a/target/arm/helper.c b/target/arm/helper.c
51
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/helper.c
52
--- a/target/arm/helper.c
53
+++ b/target/arm/helper.c
53
+++ b/target/arm/helper.c
54
@@ -XXX,XX +XXX,XX @@
54
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
55
#include "qemu/qemu-print.h"
55
{ .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
56
#include "exec/exec-all.h"
56
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
57
#include "exec/cpu_ldst.h"
57
.access = PL2_RW,
58
-#include "arm_ldst.h"
58
- /* no .writefn needed as this can't cause an ASID change;
59
#include <zlib.h> /* For crc32 */
59
- * no .raw_writefn or .resetfn needed as we never use mask/base_mask
60
#include "hw/semihosting/semihost.h"
60
- */
61
#include "sysemu/cpus.h"
61
+ /* no .writefn needed as this can't cause an ASID change */
62
@@ -XXX,XX +XXX,XX @@
62
.fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
63
#include "qapi/qapi-commands-machine-target.h"
63
{ .name = "VTTBR", .state = ARM_CP_STATE_AA32,
64
#include "qapi/error.h"
64
.cp = 15, .opc1 = 6, .crm = 2,
65
#include "qemu/guest-random.h"
65
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
66
+#ifdef CONFIG_TCG
66
index XXXXXXX..XXXXXXX 100644
67
+#include "arm_ldst.h"
67
--- a/target/arm/ptw.c
68
+#endif
68
+++ b/target/arm/ptw.c
69
69
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
70
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
70
if (arm_is_secure_below_el3(env)) {
71
71
/* Check if page table walk is to secure or non-secure PA space. */
72
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
72
if (*is_secure) {
73
73
- *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
74
static inline bool check_for_semihosting(CPUState *cs)
74
+ *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
75
{
75
} else {
76
+#ifdef CONFIG_TCG
76
- *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
77
/* Check whether this exception is a semihosting call; if so
77
+ *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
78
* then handle it and return true; otherwise return false.
78
}
79
*/
79
} else {
80
@@ -XXX,XX +XXX,XX @@ static inline bool check_for_semihosting(CPUState *cs)
80
assert(!*is_secure);
81
env->regs[0] = do_arm_semihosting(env);
81
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
82
return true;
82
ipa_secure = attrs->secure;
83
}
83
if (arm_is_secure_below_el3(env)) {
84
+#else
84
if (ipa_secure) {
85
+ return false;
85
- attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
86
+#endif
86
+ attrs->secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
87
}
87
} else {
88
88
- attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
89
/* Handle a CPU exception for A and R profile CPUs.
89
+ attrs->secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
90
}
91
} else {
92
assert(!ipa_secure);
93
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
94
if (arm_is_secure_below_el3(env)) {
95
if (ipa_secure) {
96
attrs->secure =
97
- !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
98
+ !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
99
} else {
100
attrs->secure =
101
- !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
102
- || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
103
+ !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
104
+ || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
105
}
106
}
107
return 0;
90
--
108
--
91
2.20.1
109
2.25.1
92
93
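The semihosting patch above uses a common idiom: when a feature is
configured out, provide a static inline stub that must never actually be
reached, so callers compile unchanged. A minimal standalone sketch of the
pattern (CONFIG_FOO and do_foo() are hypothetical stand-ins, and plain
assert() stands in for g_assert_not_reached()):

    #include <assert.h>
    #include <stdio.h>

    #ifdef CONFIG_FOO
    long do_foo(int nr);              /* real implementation elsewhere */
    #else
    static inline long do_foo(int nr)
    {
        (void)nr;
        /* Reaching this stub means a caller forgot its own guard. */
        assert(!"do_foo called but feature is compiled out");
        return 0;
    }
    #endif

    int main(void)
    {
    #ifdef CONFIG_FOO
        printf("%ld\n", do_foo(1));
    #else
        printf("feature compiled out; do_foo() is a trapping stub\n");
    #endif
        return 0;
    }

The guard in check_for_semihosting() plays the role of the caller-side
check: under KVM it returns false before the stub could ever be invoked.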
Thumb instructions in an IT block are set up to be conditionally
executed depending on a set of condition bits encoded into the IT
bits of the CPSR/XPSR. The architecture specifies that if the
condition bits are 0b1111 this means "always execute" (like 0b1110),
not "never execute"; we were treating it as "never execute". (See
the ConditionHolds() pseudocode in both the A-profile and M-profile
Arm ARM.)

This is a bit of an obscure corner case, because the only legal
way to get to an 0b1111 set of condbits is to do an exception
return which sets the XPSR/CPSR up that way. An IT instruction
which encodes a condition sequence that would include an 0b1111 is
UNPREDICTABLE, and for v8A the CONSTRAINED UNPREDICTABLE choices
for such an IT insn are to NOP, UNDEF, or treat 0b1111 like 0b1110.
Add a comment noting that we take the latter option.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-7-peter.maydell@linaro.org
---
 target/arm/translate.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
             gen_nop_hint(s, (insn >> 4) & 0xf);
             break;
         }
-        /* If Then. */
+        /*
+         * IT (If-Then)
+         *
+         * Combinations of firstcond and mask which set up an 0b1111
+         * condition are UNPREDICTABLE; we take the CONSTRAINED
+         * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
+         * i.e. both meaning "execute always".
+         */
         s->condexec_cond = (insn >> 4) & 0xe;
         s->condexec_mask = insn & 0x1f;
         /* No actual code generated for this insn, just setup state. */
@@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
         uint32_t cond = dc->condexec_cond;
 
-        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
+        /*
+         * Conditionally skip the insn. Note that both 0xe and 0xf mean
+         * "always"; 0xf is not "never".
+         */
+        if (cond < 0x0e) {
             arm_skip_unless(dc, cond);
         }
     }
--
2.20.1

Change the representation of the TCR_EL* registers in the CPU state
struct from struct TCR to uint64_t. This allows us to drop the
custom vmsa_ttbcr_raw_write() function, moving the "enforce RES0"
checks to their more usual location in the writefn
vmsa_ttbcr_write(). We also don't need the resetfn any more.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220714132303.1287193-7-peter.maydell@linaro.org
---
 target/arm/cpu.h          |  8 +----
 target/arm/internals.h    |  6 ++--
 target/arm/cpu.c          |  2 +-
 target/arm/debug_helper.c |  2 +-
 target/arm/helper.c       | 75 +++++++++++----------------------------
 target/arm/ptw.c          |  2 +-
 6 files changed, 27 insertions(+), 68 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct ARMGenericTimer {
 #define GTIMER_HYPVIRT 4
 #define NUM_GTIMERS    5
 
-typedef struct {
-    uint64_t raw_tcr;
-    uint32_t mask;
-    uint32_t base_mask;
-} TCR;
-
 #define VTCR_NSW (1u << 29)
 #define VTCR_NSA (1u << 30)
 #define VSTCR_SW VTCR_NSW
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
     uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
     uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
     /* MMU translation table base control. */
-    TCR tcr_el[4];
+    uint64_t tcr_el[4];
     uint64_t vtcr_el2; /* Virtualization Translation Control. */
     uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
     uint32_t c2_data; /* MPU data cacheable bits. */
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ unsigned int arm_pamax(ARMCPU *cpu);
  */
 static inline bool extended_addresses_enabled(CPUARMState *env)
 {
-    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
     return arm_el_is_aa64(env, 1) ||
-           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
+           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
 }
 
 /* Update a QEMU watchpoint based on the information the guest has set in the
@@ -XXX,XX +XXX,XX @@ static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
          */
         return env->cp15.vstcr_el2;
     }
-    return env->cp15.tcr_el[regime_el(env, mmu_idx)].raw_tcr;
+    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
 }
 
 /**
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
          * Enable TBI0 but not TBI1.
          * Note that this must match useronly_clean_ptr.
          */
-        env->cp15.tcr_el[1].raw_tcr = 5 | (1ULL << 37);
+        env->cp15.tcr_el[1] = 5 | (1ULL << 37);
 
         /* Enable MTE */
         if (cpu_isar_feature(aa64_mte, cpu)) {
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -XXX,XX +XXX,XX @@ static uint32_t arm_debug_exception_fsr(CPUARMState *env)
         using_lpae = true;
     } else {
         if (arm_feature(env, ARM_FEATURE_LPAE) &&
-            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
+            (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
             using_lpae = true;
         }
     }
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
 };
 
-static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                                 uint64_t value)
+static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
 {
-    TCR *tcr = raw_ptr(env, ri);
-    int maskshift = extract32(value, 0, 3);
+    ARMCPU *cpu = env_archcpu(env);
 
     if (!arm_feature(env, ARM_FEATURE_V8)) {
         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
-            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
-             * using Long-desciptor translation table format */
+            /*
+             * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
+             * using Long-descriptor translation table format
+             */
             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
-            /* In an implementation that includes the Security Extensions
+            /*
+             * In an implementation that includes the Security Extensions
              * TTBCR has additional fields PD0 [4] and PD1 [5] for
              * Short-descriptor translation table format.
              */
@@ -XXX,XX +XXX,XX @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
         }
     }
 
-    /* Update the masks corresponding to the TCR bank being written
-     * Note that we always calculate mask and base_mask, but
-     * they are only used for short-descriptor tables (ie if EAE is 0);
-     * for long-descriptor tables the TCR fields are used differently
-     * and the mask and base_mask values are meaningless.
-     */
-    tcr->raw_tcr = value;
-    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
-    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
-}
-
-static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                             uint64_t value)
-{
-    ARMCPU *cpu = env_archcpu(env);
-    TCR *tcr = raw_ptr(env, ri);
-
     if (arm_feature(env, ARM_FEATURE_LPAE)) {
         /* With LPAE the TTBCR could result in a change of ASID
          * via the TTBCR.A1 bit, so do a TLB flush.
          */
         tlb_flush(CPU(cpu));
     }
-    /* Preserve the high half of TCR_EL1, set via TTBCR2. */
-    value = deposit64(tcr->raw_tcr, 0, 32, value);
-    vmsa_ttbcr_raw_write(env, ri, value);
-}
-
-static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    TCR *tcr = raw_ptr(env, ri);
-
-    /* Reset both the TCR as well as the masks corresponding to the bank of
-     * the TCR being reset.
-     */
-    tcr->raw_tcr = 0;
-    tcr->mask = 0;
-    tcr->base_mask = 0xffffc000u;
+    raw_write(env, ri, value);
 }
 
 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
 {
     ARMCPU *cpu = env_archcpu(env);
-    TCR *tcr = raw_ptr(env, ri);
 
     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
     tlb_flush(CPU(cpu));
-    tcr->raw_tcr = value;
+    raw_write(env, ri, value);
 }
 
 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
       .access = PL1_RW, .accessfn = access_tvm_trvm,
       .writefn = vmsa_tcr_el12_write,
-      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+      .raw_writefn = raw_write,
+      .resetvalue = 0,
       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
       .access = PL1_RW, .accessfn = access_tvm_trvm,
       .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
-      .raw_writefn = vmsa_ttbcr_raw_write,
-      /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
-      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
-                             offsetof(CPUARMState, cp15.tcr_el[1])} },
+      .raw_writefn = raw_write,
+      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
+                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
 };
 
 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo ttbcr2_reginfo = {
     .access = PL1_RW, .accessfn = access_tvm_trvm,
     .type = ARM_CP_ALIAS,
     .bank_fieldoffsets = {
-        offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
-        offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
+        offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
+        offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
     },
 };
 
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
       .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
-      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
       .access = PL3_RW,
-      /* no .writefn needed as this can't cause an ASID change;
-       * we must provide a .raw_writefn and .resetfn because we handle
-       * reset and migration for the AArch32 TTBCR(S), which might be
-       * using mask and base_mask.
-       */
-      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
+      /* no .writefn needed as this can't cause an ASID change */
+      .resetvalue = 0,
       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
       .type = ARM_CP_ALIAS,
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         int r_el = regime_el(env, mmu_idx);
         if (arm_el_is_aa64(env, r_el)) {
             int pamax = arm_pamax(env_archcpu(env));
-            uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
+            uint64_t tcr = env->cp15.tcr_el[r_el];
             int addrtop, tbi;
 
             tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
--
2.25.1
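The IT-block fix above boils down to one comparison: condition code 0xe
(AL) and 0xf must both be treated as "always execute", so only codes
below 0xe need a conditional skip. A standalone C illustration of that
rule (the helper name is ours, not QEMU's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 0x0..0xd are genuinely conditional; 0xe and 0xf are both "always". */
    static bool cond_needs_skip(uint32_t cond)
    {
        return cond < 0x0e;
    }

    int main(void)
    {
        for (uint32_t cond = 0xc; cond <= 0xf; cond++) {
            printf("cond %#x: %s\n", cond,
                   cond_needs_skip(cond) ? "conditional" : "always executes");
        }
        return 0;
    }

The old test `cond != 0x0e` wrongly put 0xf on the "conditional" side,
which is why 0b1111 behaved as "never execute".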
In regime_tcr() we return the appropriate TCR register for the
translation regime. For Secure EL2, we return the VSTCR_EL2 value,
but in this translation regime some fields that control behaviour are
in VTCR_EL2. When this code was originally written (as the comment
notes), QEMU didn't care about any of those fields, but we have since
added support for features such as LPA2 which do need the values from
those fields.

Synthesize a TCR value by merging in the relevant VTCR_EL2 fields to
the VSTCR_EL2 value.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1103
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220714132303.1287193-8-peter.maydell@linaro.org
---
 target/arm/cpu.h       | 19 +++++++++++++++++++
 target/arm/internals.h | 22 +++++++++++++++++++---
 2 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(CPTR_EL3, TCPAC, 31, 1)
 #define TTBCR_SH1    (1U << 28)
 #define TTBCR_EAE    (1U << 31)
 
+FIELD(VTCR, T0SZ, 0, 6)
+FIELD(VTCR, SL0, 6, 2)
+FIELD(VTCR, IRGN0, 8, 2)
+FIELD(VTCR, ORGN0, 10, 2)
+FIELD(VTCR, SH0, 12, 2)
+FIELD(VTCR, TG0, 14, 2)
+FIELD(VTCR, PS, 16, 3)
+FIELD(VTCR, VS, 19, 1)
+FIELD(VTCR, HA, 21, 1)
+FIELD(VTCR, HD, 22, 1)
+FIELD(VTCR, HWU59, 25, 1)
+FIELD(VTCR, HWU60, 26, 1)
+FIELD(VTCR, HWU61, 27, 1)
+FIELD(VTCR, HWU62, 28, 1)
+FIELD(VTCR, NSW, 29, 1)
+FIELD(VTCR, NSA, 30, 1)
+FIELD(VTCR, DS, 32, 1)
+FIELD(VTCR, SL2, 33, 1)
+
 /* Bit definitions for ARMv8 SPSR (PSTATE) format.
  * Only these are valid when in AArch64 mode; in
  * AArch32 mode SPSRs are basically CPSR-format.
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
     return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
 }
 
+/*
+ * These are the fields in VTCR_EL2 which affect both the Secure stage 2
+ * and the Non-Secure stage 2 translation regimes (and hence which are
+ * not present in VSTCR_EL2).
+ */
+#define VTCR_SHARED_FIELD_MASK \
+    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
+     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
+     R_VTCR_DS_MASK)
+
 /* Return the value of the TCR controlling this translation regime */
 static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
@@ -XXX,XX +XXX,XX @@ static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
     }
     if (mmu_idx == ARMMMUIdx_Stage2_S) {
         /*
-         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
-         * those are not currently used by QEMU, so just return VSTCR_EL2.
+         * Secure stage 2 shares fields from VTCR_EL2. We merge those
+         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
+         * value so the callers don't need to special case this.
+         *
+         * If a future architecture change defines bits in VSTCR_EL2 that
+         * overlap with these VTCR_EL2 fields we may need to revisit this.
          */
-        return env->cp15.vstcr_el2;
+        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
+        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
+        return v;
     }
     return env->cp15.tcr_el[regime_el(env, mmu_idx)];
 }
--
2.25.1
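The merge regime_tcr() now performs is a plain mask-select: take
everything from VSTCR_EL2 except the shared fields, which come from
VTCR_EL2. A standalone C sketch of the same operation (the two-byte mask
here is a hypothetical stand-in for VTCR_SHARED_FIELD_MASK):

    #include <stdint.h>
    #include <stdio.h>

    #define SHARED_MASK 0x00ff0000u   /* hypothetical shared-field mask */

    static uint64_t synthesize_tcr(uint64_t vstcr, uint64_t vtcr)
    {
        /* Non-shared bits from VSTCR, shared bits from VTCR. */
        return (vstcr & ~(uint64_t)SHARED_MASK) | (vtcr & SHARED_MASK);
    }

    int main(void)
    {
        uint64_t v = synthesize_tcr(0x11111111, 0x22222222);
        printf("merged: %#llx\n", (unsigned long long)v); /* 0x11221111 */
        return 0;
    }

Because the result is in VTCR_EL2 format, callers such as the LPA2 code
can read fields like DS and PS without knowing which stage-2 regime they
are translating for.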
From: Hao Wu <wuhaotsh@google.com>

The correct bit for the CONV bit in NPCM7XX ADC is bit 13. This patch
fixes that in the module, and also lower the IRQ when the guest
is done handling an interrupt event from the ADC module.

Signed-off-by: Hao Wu <wuhaotsh@google.com>
Reviewed-by: Patrick Venture<venture@google.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220714182836.89602-4-wuhaotsh@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/adc/npcm7xx_adc.c           | 2 +-
 tests/qtest/npcm7xx_adc-test.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/adc/npcm7xx_adc.c
+++ b/hw/adc/npcm7xx_adc.c
@@ -XXX,XX +XXX,XX @@ REG32(NPCM7XX_ADC_DATA, 0x4)
 #define NPCM7XX_ADC_CON_INT     BIT(18)
 #define NPCM7XX_ADC_CON_EN      BIT(17)
 #define NPCM7XX_ADC_CON_RST     BIT(16)
-#define NPCM7XX_ADC_CON_CONV    BIT(14)
+#define NPCM7XX_ADC_CON_CONV    BIT(13)
 #define NPCM7XX_ADC_CON_DIV(rv) extract32(rv, 1, 8)
 
 #define NPCM7XX_ADC_MAX_RESULT 1023
diff --git a/tests/qtest/npcm7xx_adc-test.c b/tests/qtest/npcm7xx_adc-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/qtest/npcm7xx_adc-test.c
+++ b/tests/qtest/npcm7xx_adc-test.c
@@ -XXX,XX +XXX,XX @@
 #define CON_INT BIT(18)
 #define CON_EN BIT(17)
 #define CON_RST BIT(16)
-#define CON_CONV BIT(14)
+#define CON_CONV BIT(13)
 #define CON_DIV(rv) extract32(rv, 1, 8)
 
 #define FST_RDST BIT(1)
--
2.25.1
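The register layout in the ADC patch above is decoded with single-bit
BIT() masks plus extract32() for the multi-bit DIV field. A standalone
sketch of that decoding, with a self-contained equivalent of QEMU's
extract32() (semantics: return 'length' bits of 'value' starting at bit
'start'):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract32_sketch(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    int main(void)
    {
        /* CON register with DIV = 0x42 (bits [8:1]) and CONV (bit 13) set. */
        uint32_t con = (0x42u << 1) | (1u << 13);

        printf("DIV  = %#x\n", extract32_sketch(con, 1, 8));  /* 0x42 */
        printf("CONV = %u\n",  extract32_sketch(con, 13, 1)); /* 1 */
        return 0;
    }

With the old BIT(14) definition the model would have tested a bit the
guest driver never sets, which is exactly the kind of silent mismatch
the patch corrects.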
From: Hao Wu <wuhaotsh@google.com>

Our sensor test requires both reading and writing from a sensor's
QOM property. So we need to make the input of ADC module R/W instead
of write only for that to work.

Signed-off-by: Hao Wu <wuhaotsh@google.com>
Reviewed-by: Titus Rwantare <titusr@google.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220714182836.89602-5-wuhaotsh@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/adc/npcm7xx_adc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/adc/npcm7xx_adc.c
+++ b/hw/adc/npcm7xx_adc.c
@@ -XXX,XX +XXX,XX @@ static void npcm7xx_adc_init(Object *obj)
 
     for (i = 0; i < NPCM7XX_ADC_NUM_INPUTS; ++i) {
         object_property_add_uint32_ptr(obj, "adci[*]",
-                                       &s->adci[i], OBJ_PROP_FLAG_WRITE);
+                                       &s->adci[i], OBJ_PROP_FLAG_READWRITE);
     }
     object_property_add_uint32_ptr(obj, "vref",
                                    &s->vref, OBJ_PROP_FLAG_WRITE);
--
2.25.1
In v8M, an attempt to return from an exception which is not
active is an illegal exception return. For this purpose,
exceptions which can configurably target either Secure or
NonSecure are not considered to be active if they are
configured for the opposite security state for the one
we're trying to return from (eg attempt to return from
an NS NMI but NMI targets Secure). In the pseudocode this
is handled by IsActiveForState().

Detect this case rather than counting an active exception
possibly of the wrong security state as being sufficient.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-4-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
         return -1;
     }
 
-    ret = nvic_rettobase(s);
+    /*
+     * If this is a configurable exception and it is currently
+     * targeting the opposite security state from the one we're trying
+     * to complete it for, this counts as an illegal exception return.
+     * We still need to deactivate whatever vector the logic above has
+     * selected, though, as it might not be the same as the one for the
+     * requested exception number.
+     */
+    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
+        ret = -1;
+    } else {
+        ret = nvic_rettobase(s);
+    }
 
     vec->active = 0;
     if (vec->level) {
--
2.20.1

The architecture requires that for faults on loads and stores which
do writeback, the syndrome information does not have the ISS
instruction syndrome information (i.e. ISV is 0). We got this wrong
for the load and store instructions covered by disas_ldst_reg_imm9().
Calculate iss_valid correctly so that if the insn is a writeback one
it is false.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1057
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220715123323.1550983-1-peter.maydell@linaro.org
---
 target/arm/translate-a64.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     bool is_store = false;
     bool is_extended = false;
     bool is_unpriv = (idx == 2);
-    bool iss_valid = !is_vector;
+    bool iss_valid;
     bool post_index;
     bool writeback;
     int memidx;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
         g_assert_not_reached();
     }
 
+    iss_valid = !is_vector && !writeback;
+
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
--
2.25.1
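The NVIC patch above adds a single guard to the completion path: a
non-banked exception currently configured to target the other security
state cannot legally be completed from this one. A toy standalone model
of that decision (function and parameter names are illustrative, not
QEMU's):

    #include <stdbool.h>
    #include <stdio.h>

    static int complete_irq(bool banked, bool targets_secure, bool secure,
                            int rettobase)
    {
        if (!banked && targets_secure != secure) {
            return -1;          /* illegal exception return */
        }
        return rettobase;       /* normal completion */
    }

    int main(void)
    {
        /* NS caller completing an exception that targets Secure: illegal. */
        printf("%d\n", complete_irq(false, true, false, 0));   /* -1 */
        /* Matching security state: normal completion. */
        printf("%d\n", complete_irq(false, true, true, 0));    /* 0 */
        return 0;
    }

Banked exceptions skip the check because each security state has its own
independent instance of the exception.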
From: Philippe Mathieu-Daudé <philmd@redhat.com>

These routines are TCG specific.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190701194942.10092-2-philmd@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/Makefile.objs  |   2 +-
 target/arm/cpu.c          |   9 +-
 target/arm/debug_helper.c | 311 ++++++++++++++++++++++++++++++++++++++
 target/arm/op_helper.c    | 295 ------------------------------------
 4 files changed, 315 insertions(+), 302 deletions(-)
 create mode 100644 target/arm/debug_helper.c

diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -XXX,XX +XXX,XX @@ target/arm/translate-sve.o: target/arm/decode-sve.inc.c
 target/arm/translate.o: target/arm/decode-vfp.inc.c
 target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
 
-obj-y += tlb_helper.o
+obj-y += tlb_helper.o debug_helper.o
 obj-y += translate.o op_helper.o
 obj-y += crypto_helper.o
 obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_arch_name = arm_gdb_arch_name;
     cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
     cc->gdb_stop_before_watchpoint = true;
-    cc->debug_excp_handler = arm_debug_excp_handler;
-    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
-#if !defined(CONFIG_USER_ONLY)
-    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
-#endif
-
     cc->disas_set_info = arm_disas_set_info;
 #ifdef CONFIG_TCG
     cc->tcg_initialize = arm_translate_init;
     cc->tlb_fill = arm_cpu_tlb_fill;
+    cc->debug_excp_handler = arm_debug_excp_handler;
+    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
 #if !defined(CONFIG_USER_ONLY)
     cc->do_unaligned_access = arm_cpu_do_unaligned_access;
     cc->do_transaction_failed = arm_cpu_do_transaction_failed;
+    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
 #endif
 }
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/debug_helper.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * ARM debug helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/* Return true if the linked breakpoint entry lbn passes its checks */
+static bool linked_bp_matches(ARMCPU *cpu, int lbn)
+{
+    CPUARMState *env = &cpu->env;
+    uint64_t bcr = env->cp15.dbgbcr[lbn];
+    int brps = extract32(cpu->dbgdidr, 24, 4);
+    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
+    int bt;
+    uint32_t contextidr;
+
+    /*
+     * Links to unimplemented or non-context aware breakpoints are
+     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
+     * as if linked to an UNKNOWN context-aware breakpoint (in which
+     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
+     * We choose the former.
+     */
+    if (lbn > brps || lbn < (brps - ctx_cmps)) {
+        return false;
+    }
+
+    bcr = env->cp15.dbgbcr[lbn];
+
+    if (extract64(bcr, 0, 1) == 0) {
+        /* Linked breakpoint disabled : generate no events */
+        return false;
+    }
+
+    bt = extract64(bcr, 20, 4);
+
+    /*
+     * We match the whole register even if this is AArch32 using the
+     * short descriptor format (in which case it holds both PROCID and ASID),
+     * since we don't implement the optional v7 context ID masking.
+     */
+    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
+
+    switch (bt) {
+    case 3: /* linked context ID match */
+        if (arm_current_el(env) > 1) {
+            /* Context matches never fire in EL2 or (AArch64) EL3 */
+            return false;
+        }
+        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
+    case 5: /* linked address mismatch (reserved in AArch64) */
+    case 9: /* linked VMID match (reserved if no EL2) */
+    case 11: /* linked context ID and VMID match (reserved if no EL2) */
+    default:
+        /*
+         * Links to Unlinked context breakpoints must generate no
+         * events; we choose to do the same for reserved values too.
+         */
+        return false;
+    }
+
+    return false;
+}
+
+static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
+{
+    CPUARMState *env = &cpu->env;
+    uint64_t cr;
+    int pac, hmc, ssc, wt, lbn;
+    /*
+     * Note that for watchpoints the check is against the CPU security
+     * state, not the S/NS attribute on the offending data access.
+     */
+    bool is_secure = arm_is_secure(env);
+    int access_el = arm_current_el(env);
+
+    if (is_wp) {
+        CPUWatchpoint *wp = env->cpu_watchpoint[n];
+
+        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
+            return false;
+        }
+        cr = env->cp15.dbgwcr[n];
+        if (wp->hitattrs.user) {
+            /*
+             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
+             * match watchpoints as if they were accesses done at EL0, even if
+             * the CPU is at EL1 or higher.
+             */
+            access_el = 0;
+        }
+    } else {
+        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+
+        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
+            return false;
+        }
+        cr = env->cp15.dbgbcr[n];
+    }
+    /*
+     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+     * enabled and that the address and access type match; for breakpoints
+     * we know the address matched; check the remaining fields, including
+     * linked breakpoints. We rely on WCR and BCR having the same layout
+     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
+     * Note that some combinations of {PAC, HMC, SSC} are reserved and
+     * must act either like some valid combination or as if the watchpoint
+     * were disabled. We choose the former, and use this together with
+     * the fact that EL3 must always be Secure and EL2 must always be
+     * Non-Secure to simplify the code slightly compared to the full
+     * table in the ARM ARM.
+     */
+    pac = extract64(cr, 1, 2);
+    hmc = extract64(cr, 13, 1);
+    ssc = extract64(cr, 14, 2);
+
+    switch (ssc) {
+    case 0:
+        break;
+    case 1:
+    case 3:
+        if (is_secure) {
+            return false;
+        }
+        break;
+    case 2:
+        if (!is_secure) {
+            return false;
+        }
+        break;
+    }
+
+    switch (access_el) {
+    case 3:
+    case 2:
+        if (!hmc) {
+            return false;
+        }
+        break;
+    case 1:
+        if (extract32(pac, 0, 1) == 0) {
+            return false;
+        }
+        break;
+    case 0:
+        if (extract32(pac, 1, 1) == 0) {
+            return false;
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    wt = extract64(cr, 20, 1);
+    lbn = extract64(cr, 16, 4);
+
+    if (wt && !linked_bp_matches(cpu, lbn)) {
+        return false;
+    }
+
+    return true;
+}
+
+static bool check_watchpoints(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    int n;
+
+    /*
+     * If watchpoints are disabled globally or we can't take debug
+     * exceptions here then watchpoint firings are ignored.
+     */
+    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+        || !arm_generate_debug_exceptions(env)) {
+        return false;
+    }
+
+    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
+        if (bp_wp_matches(cpu, n, true)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static bool check_breakpoints(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    int n;
+
+    /*
+     * If breakpoints are disabled globally or we can't take debug
+     * exceptions here then breakpoint firings are ignored.
+     */
+    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+        || !arm_generate_debug_exceptions(env)) {
+        return false;
+    }
+
+    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
+        if (bp_wp_matches(cpu, n, false)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void HELPER(check_breakpoints)(CPUARMState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+
+    if (check_breakpoints(cpu)) {
+        HELPER(exception_internal(env, EXCP_DEBUG));
+    }
+}
+
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+    /*
+     * Called by core code when a CPU watchpoint fires; need to check if this
+     * is also an architectural watchpoint match.
+     */
+    ARMCPU *cpu = ARM_CPU(cs);
+
+    return check_watchpoints(cpu);
+}
+
+void arm_debug_excp_handler(CPUState *cs)
+{
+    /*
+     * Called by core code when a watchpoint or breakpoint fires;
+     * need to check which one and raise the appropriate exception.
+     */
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+    if (wp_hit) {
+        if (wp_hit->flags & BP_CPU) {
+            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+            bool same_el = arm_debug_target_el(env) == arm_current_el(env);
+
+            cs->watchpoint_hit = NULL;
+
+            env->exception.fsr = arm_debug_exception_fsr(env);
+            env->exception.vaddress = wp_hit->hitaddr;
+            raise_exception(env, EXCP_DATA_ABORT,
+                            syn_watchpoint(same_el, 0, wnr),
+                            arm_debug_target_el(env));
+        }
+    } else {
+        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
+
+        /*
+         * (1) GDB breakpoints should be handled first.
+         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
+         * since singlestep is also done by generating a debug internal
+         * exception.
+         */
+        if (cpu_breakpoint_test(cs, pc, BP_GDB)
+            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
+            return;
+        }
+
+        env->exception.fsr = arm_debug_exception_fsr(env);
+        /*
+         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+         * values to the guest that it shouldn't be able to see at its
+         * exception/security level.
+         */
+        env->exception.vaddress = 0;
+        raise_exception(env, EXCP_PREFETCH_ABORT,
+                        syn_breakpoint(same_el),
+                        arm_debug_target_el(env));
+    }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    /*
+     * In BE32 system mode, target memory is stored byteswapped (on a
+     * little-endian host system), and by the time we reach here (via an
+     * opcode helper) the addresses of subword accesses have been adjusted
+     * to account for that, which means that watchpoints will not match.
+     * Undo the adjustment here.
+     */
+    if (arm_sctlr_b(env)) {
+        if (len == 1) {
+            addr ^= 3;
+        } else if (len == 2) {
+            addr ^= 2;
+        }
+    }
+
+    return addr;
+}
+
+#endif
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
     }
 }
 
-/* Return true if the linked breakpoint entry lbn passes its checks */
-static bool linked_bp_matches(ARMCPU *cpu, int lbn)
-{
-    CPUARMState *env = &cpu->env;
-    uint64_t bcr = env->cp15.dbgbcr[lbn];
-    int brps = extract32(cpu->dbgdidr, 24, 4);
-    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
-    int bt;
-    uint32_t contextidr;
-
-    /*
-     * Links to unimplemented or non-context aware breakpoints are
-     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
-     * as if linked to an UNKNOWN context-aware breakpoint (in which
-     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
-     * We choose the former.
-     */
-    if (lbn > brps || lbn < (brps - ctx_cmps)) {
-        return false;
-    }
-
-    bcr = env->cp15.dbgbcr[lbn];
-
-    if (extract64(bcr, 0, 1) == 0) {
-        /* Linked breakpoint disabled : generate no events */
-        return false;
-    }
-
-    bt = extract64(bcr, 20, 4);
-
-    /*
-     * We match the whole register even if this is AArch32 using the
-     * short descriptor format (in which case it holds both PROCID and ASID),
-     * since we don't implement the optional v7 context ID masking.
-     */
-    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
-
-    switch (bt) {
-    case 3: /* linked context ID match */
-        if (arm_current_el(env) > 1) {
-            /* Context matches never fire in EL2 or (AArch64) EL3 */
-            return false;
-        }
-        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
-    case 5: /* linked address mismatch (reserved in AArch64) */
-    case 9: /* linked VMID match (reserved if no EL2) */
-    case 11: /* linked context ID and VMID match (reserved if no EL2) */
-    default:
-        /*
-         * Links to Unlinked context breakpoints must generate no
-         * events; we choose to do the same for reserved values too.
-         */
-        return false;
-    }
-
-    return false;
-}
-
-static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
-{
-    CPUARMState *env = &cpu->env;
-    uint64_t cr;
-    int pac, hmc, ssc, wt, lbn;
-    /*
-     * Note that for watchpoints the check is against the CPU security
-     * state, not the S/NS attribute on the offending data access.
-     */
-    bool is_secure = arm_is_secure(env);
-    int access_el = arm_current_el(env);
-
-    if (is_wp) {
-        CPUWatchpoint *wp = env->cpu_watchpoint[n];
-
-        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
-            return false;
-        }
-        cr = env->cp15.dbgwcr[n];
-        if (wp->hitattrs.user) {
-            /*
-             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
-             * match watchpoints as if they were accesses done at EL0, even if
-             * the CPU is at EL1 or higher.
-             */
-            access_el = 0;
-        }
-    } else {
-        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
-
-        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
-            return false;
-        }
-        cr = env->cp15.dbgbcr[n];
-    }
-    /*
-     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
-     * enabled and that the address and access type match; for breakpoints
-     * we know the address matched; check the remaining fields, including
-     * linked breakpoints. We rely on WCR and BCR having the same layout
-     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
-     * Note that some combinations of {PAC, HMC, SSC} are reserved and
-     * must act either like some valid combination or as if the watchpoint
-     * were disabled. We choose the former, and use this together with
-     * the fact that EL3 must always be Secure and EL2 must always be
-     * Non-Secure to simplify the code slightly compared to the full
-     * table in the ARM ARM.
-     */
-    pac = extract64(cr, 1, 2);
-    hmc = extract64(cr, 13, 1);
-    ssc = extract64(cr, 14, 2);
-
-    switch (ssc) {
-    case 0:
-        break;
-    case 1:
-    case 3:
-        if (is_secure) {
-            return false;
-        }
-        break;
-    case 2:
-        if (!is_secure) {
-            return false;
-        }
-        break;
-    }
-
-    switch (access_el) {
-    case 3:
-    case 2:
-        if (!hmc) {
-            return false;
-        }
-        break;
-    case 1:
-        if (extract32(pac, 0, 1) == 0) {
-            return false;
-        }
-        break;
-    case 0:
-        if (extract32(pac, 1, 1) == 0) {
-            return false;
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    wt = extract64(cr, 20, 1);
-    lbn = extract64(cr, 16, 4);
-
-    if (wt && !linked_bp_matches(cpu, lbn)) {
-        return false;
-    }
-
-    return true;
-}
-
-static bool check_watchpoints(ARMCPU *cpu)
-{
-    CPUARMState *env = &cpu->env;
-    int n;
-
-    /*
-     * If watchpoints are disabled globally or we can't take debug
-     * exceptions here then watchpoint firings are ignored.
-     */
-    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
-        || !arm_generate_debug_exceptions(env)) {
-        return false;
-    }
-
-    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
-        if (bp_wp_matches(cpu, n, true)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-static bool check_breakpoints(ARMCPU *cpu)
-{
-    CPUARMState *env = &cpu->env;
-    int n;
-
-    /*
-     * If breakpoints are disabled globally or we can't take debug
-     * exceptions here then breakpoint firings are ignored.
-     */
-    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
-        || !arm_generate_debug_exceptions(env)) {
-        return false;
-    }
-
-    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
-        if (bp_wp_matches(cpu, n, false)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-void HELPER(check_breakpoints)(CPUARMState *env)
-{
-    ARMCPU *cpu = env_archcpu(env);
-
-    if (check_breakpoints(cpu)) {
-        HELPER(exception_internal(env, EXCP_DEBUG));
-    }
-}
-
-bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
-{
-    /*
-     * Called by core code when a CPU watchpoint fires; need to check if this
-     * is also an architectural watchpoint match.
-     */
-    ARMCPU *cpu = ARM_CPU(cs);
-
-    return check_watchpoints(cpu);
-}
-
-vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-
-    /*
-     * In BE32 system mode, target memory is stored byteswapped (on a
-     * little-endian host system), and by the time we reach here (via an
-     * opcode helper) the addresses of subword accesses have been adjusted
-     * to account for that, which means that watchpoints will not match.
-     * Undo the adjustment here.
-     */
-    if (arm_sctlr_b(env)) {
-        if (len == 1) {
-            addr ^= 3;
-        } else if (len == 2) {
-            addr ^= 2;
-        }
-    }
-
-    return addr;
-}
-
-void arm_debug_excp_handler(CPUState *cs)
-{
-    /*
-     * Called by core code when a watchpoint or breakpoint fires;
-     * need to check which one and raise the appropriate exception.
-     */
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
-
-    if (wp_hit) {
-        if (wp_hit->flags & BP_CPU) {
-            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
-            bool same_el = arm_debug_target_el(env) == arm_current_el(env);
-
-            cs->watchpoint_hit = NULL;
-
-            env->exception.fsr = arm_debug_exception_fsr(env);
-            env->exception.vaddress = wp_hit->hitaddr;
-            raise_exception(env, EXCP_DATA_ABORT,
-                            syn_watchpoint(same_el, 0, wnr),
-                            arm_debug_target_el(env));
-        }
-    } else {
-        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
-        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
-
-        /*
-         * (1) GDB breakpoints should be handled first.
-         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
-         * since singlestep is also done by generating a debug internal
-         * exception.
-         */
-        if (cpu_breakpoint_test(cs, pc, BP_GDB)
-            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
-            return;
-        }
-
-        env->exception.fsr = arm_debug_exception_fsr(env);
-        /*
-         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
-         * values to the guest that it shouldn't be able to see at its
-         * exception/security level.
-         */
-        env->exception.vaddress = 0;
-        raise_exception(env, EXCP_PREFETCH_ABORT,
-                        syn_breakpoint(same_el),
-                        arm_debug_target_el(env));
-    }
-}
-
 /* ??? Flag setting arithmetic is awkward because we need to do comparisons.
    The only way to do that in TCG is a conditional branch, which clobbers
    all our temporaries.  For now implement these as helper functions. */
--
2.20.1

From: Andrey Makarov <ph.makarov@gmail.com>

There is nothing in the specs on DMA engine interrupt lines: it should have
been in the "BCM2835 ARM Peripherals" datasheet but the appropriate
"ARM peripherals interrupt table" (p.113) is nearly empty.

All Raspberry Pi models 1-3 (based on bcm2835) have
Linux device tree (arch/arm/boot/dts/bcm2835-common.dtsi +25):

    /* dma channel 11-14 share one irq */

This information is repeated in the driver code
(drivers/dma/bcm2835-dma.c +1344):

    /*
     * in case of channel >= 11
     * use the 11th interrupt and that is shared
     */

In this patch channels 0--10 and 11--14 are handled separately.

Signed-off-by: Andrey Makarov <andrey.makarov@auriga.com>
Message-id: 20220716113210.349153-1-andrey.makarov@auriga.com
[PMM: fixed checkpatch nits]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/bcm2835_peripherals.h |   2 +
 hw/arm/bcm2835_peripherals.c         |  26 +++++-
 tests/qtest/bcm2835-dma-test.c       | 118 +++++++++++++++++++++++++++
 tests/qtest/meson.build              |   3 +-
 4 files changed, 147 insertions(+), 2 deletions(-)
 create mode 100644 tests/qtest/bcm2835-dma-test.c

diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/char/bcm2835_aux.h"
 #include "hw/display/bcm2835_fb.h"
 #include "hw/dma/bcm2835_dma.h"
+#include "hw/or-irq.h"
 #include "hw/intc/bcm2835_ic.h"
 #include "hw/misc/bcm2835_property.h"
 #include "hw/misc/bcm2835_rng.h"
@@ -XXX,XX +XXX,XX @@ struct BCM2835PeripheralState {
     BCM2835AuxState aux;
     BCM2835FBState fb;
     BCM2835DMAState dma;
+    qemu_or_irq orgated_dma_irq;
     BCM2835ICState ic;
     BCM2835PropertyState property;
     BCM2835RngState rng;
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/bcm2835_peripherals.c
+++ b/hw/arm/bcm2835_peripherals.c
@@ -XXX,XX +XXX,XX @@
 /* Capabilities for SD controller: no DMA, high-speed, default clocks etc. */
 #define BCM2835_SDHC_CAPAREG 0x52134b4
 
+/*
+ * According to Linux driver & DTS, dma channels 0--10 have separate IRQ,
+ * while channels 11--14 share one IRQ:
+ */
+#define SEPARATE_DMA_IRQ_MAX 10
+#define ORGATED_DMA_IRQ_COUNT 4
+
 static void create_unimp(BCM2835PeripheralState *ps,
                          UnimplementedDeviceState *uds,
                          const char *name, hwaddr ofs, hwaddr size)
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)
     /* DMA Channels */
     object_initialize_child(obj, "dma", &s->dma, TYPE_BCM2835_DMA);
 
+    object_initialize_child(obj, "orgated-dma-irq",
+                            &s->orgated_dma_irq, TYPE_OR_IRQ);
+    object_property_set_int(OBJECT(&s->orgated_dma_irq), "num-lines",
+                            ORGATED_DMA_IRQ_COUNT, &error_abort);
+
     object_property_add_const_link(OBJECT(&s->dma), "dma-mr",
                                    OBJECT(&s->gpu_bus_mr));
 
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
     memory_region_add_subregion(&s->peri_mr, DMA15_OFFSET,
                 sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 1));
 
-    for (n = 0; n <= 12; n++) {
+    for (n = 0; n <= SEPARATE_DMA_IRQ_MAX; n++) {
         sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), n,
                            qdev_get_gpio_in_named(DEVICE(&s->ic),
                                                   BCM2835_IC_GPU_IRQ,
                                                   INTERRUPT_DMA0 + n));
     }
+    if (!qdev_realize(DEVICE(&s->orgated_dma_irq), NULL, errp)) {
+        return;
+    }
+    for (n = 0; n < ORGATED_DMA_IRQ_COUNT; n++) {
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma),
+                           SEPARATE_DMA_IRQ_MAX + 1 + n,
+                           qdev_get_gpio_in(DEVICE(&s->orgated_dma_irq), n));
+    }
+    qdev_connect_gpio_out(DEVICE(&s->orgated_dma_irq), 0,
+                          qdev_get_gpio_in_named(DEVICE(&s->ic),
+                                                 BCM2835_IC_GPU_IRQ,
+                                                 INTERRUPT_DMA0 + SEPARATE_DMA_IRQ_MAX + 1));
 
     /* THERMAL */
     if (!sysbus_realize(SYS_BUS_DEVICE(&s->thermal), errp)) {
diff --git a/tests/qtest/bcm2835-dma-test.c b/tests/qtest/bcm2835-dma-test.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/qtest/bcm2835-dma-test.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * QTest testcase for BCM283x DMA engine (on Raspberry Pi 3)
+ * and its interrupts coming to Interrupt Controller.
+ *
+ * Copyright (c) 2022 Auriga LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest-single.h"
+
+/* Offsets in raspi3b platform: */
+#define RASPI3_DMA_BASE 0x3f007000
+#define RASPI3_IC_BASE 0x3f00b200
+
+/* Used register/fields definitions */
+
+/* DMA engine registers: */
+#define BCM2708_DMA_CS 0
+#define BCM2708_DMA_ACTIVE (1 << 0)
+#define BCM2708_DMA_INT (1 << 2)
+
+#define BCM2708_DMA_ADDR 0x04
+
+#define BCM2708_DMA_INT_STATUS 0xfe0
+
+/* DMA Trasfer Info fields: */
+#define BCM2708_DMA_INT_EN (1 << 0)
+#define BCM2708_DMA_D_INC (1 << 4)
+#define BCM2708_DMA_S_INC (1 << 8)
+
+/* Interrupt controller registers: */
+#define IRQ_PENDING_BASIC 0x00
+#define IRQ_GPU_PENDING1_AGGR (1 << 8)
+#define IRQ_PENDING_1 0x04
+#define IRQ_ENABLE_1 0x10
+
+/* Data for the test: */
+#define SCB_ADDR 256
+#define S_ADDR 32
+#define D_ADDR 64
+#define TXFR_LEN 32
+const uint32_t check_data = 0x12345678;
+
+static void bcm2835_dma_test_interrupt(int dma_c, int irq_line)
+{
+    uint64_t dma_base = RASPI3_DMA_BASE + dma_c * 0x100;
+    int gpu_irq_line = 16 + irq_line;
+
+    /* Check that interrupts are silent by default: */
+    writel(RASPI3_IC_BASE + IRQ_ENABLE_1, 1 << gpu_irq_line);
+    int isr = readl(dma_base + BCM2708_DMA_INT_STATUS);
+    g_assert_cmpint(isr, ==, 0);
+    uint32_t reg0 = readl(dma_base + BCM2708_DMA_CS);
+    g_assert_cmpint(reg0, ==, 0);
+    uint32_t ic_pending = readl(RASPI3_IC_BASE + IRQ_PENDING_BASIC);
+    g_assert_cmpint(ic_pending, ==, 0);
+    uint32_t gpu_pending1 = readl(RASPI3_IC_BASE + IRQ_PENDING_1);
+    g_assert_cmpint(gpu_pending1, ==, 0);
+
+    /* Prepare Control Block: */
+    writel(SCB_ADDR + 0, BCM2708_DMA_S_INC | BCM2708_DMA_D_INC |
+           BCM2708_DMA_INT_EN); /* transfer info */
+    writel(SCB_ADDR + 4, S_ADDR); /* source address */
+    writel(SCB_ADDR + 8, D_ADDR); /* destination address */
+    writel(SCB_ADDR + 12, TXFR_LEN); /* transfer length */
+    writel(dma_base + BCM2708_DMA_ADDR, SCB_ADDR);
+
+    writel(S_ADDR, check_data);
+    for (int word = S_ADDR + 4; word < S_ADDR + TXFR_LEN; word += 4) {
+        writel(word, ~check_data);
+    }
+    /* Perform the transfer: */
+    writel(dma_base + BCM2708_DMA_CS, BCM2708_DMA_ACTIVE);
+
+    /* Check that destination == source: */
+    uint32_t data = readl(D_ADDR);
+    g_assert_cmpint(data, ==, check_data);
+    for (int word = D_ADDR + 4; word < D_ADDR + TXFR_LEN; word += 4) {
+        data = readl(word);
+        g_assert_cmpint(data, ==, ~check_data);
+    }
+
+    /* Check that interrupt status is set both in DMA and IC controllers: */
+    isr = readl(RASPI3_DMA_BASE + BCM2708_DMA_INT_STATUS);
+    g_assert_cmpint(isr, ==, 1 << dma_c);
+
+    ic_pending = readl(RASPI3_IC_BASE + IRQ_PENDING_BASIC);
+    g_assert_cmpint(ic_pending, ==, IRQ_GPU_PENDING1_AGGR);
+
+    gpu_pending1 = readl(RASPI3_IC_BASE + IRQ_PENDING_1);
+    g_assert_cmpint(gpu_pending1, ==, 1 << gpu_irq_line);
+
+    /* Clean up, clear interrupt: */
+    writel(dma_base + BCM2708_DMA_CS, BCM2708_DMA_INT);
+}
+
+static void bcm2835_dma_test_interrupts(void)
+{
+    /* DMA engines 0--10 have separate IRQ lines, 11--14 - only one: */
+    bcm2835_dma_test_interrupt(0, 0);
+    bcm2835_dma_test_interrupt(10, 10);
+    bcm2835_dma_test_interrupt(11, 11);
+    bcm2835_dma_test_interrupt(14, 11);
+}
+
+int main(int argc, char **argv)
+{
+    int ret;
+    g_test_init(&argc, &argv, NULL);
+    qtest_add_func("/bcm2835/dma/test_interrupts",
+                   bcm2835_dma_test_interrupts);
+    qtest_start("-machine raspi3b");
+    ret = g_test_run();
+    qtest_end();
+    return ret;
+}
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -XXX,XX +XXX,XX @@ qtests_aarch64 = \
   ['arm-cpu-features',
    'numa-test',
    'boot-serial-test',
-   'migration-test']
+   'migration-test',
+   'bcm2835-dma-test']
 
 qtests_s390x = \
   (slirp.found() ? ['pxe-test', 'test-netfilter'] : []) + \
--
2.25.1
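The IRQ topology the DMA patch wires up is easy to state as a pure
function: channels 0..10 each get a dedicated GPU interrupt line, and
channels 11..14 are OR-gated onto the single line after them. A
standalone sketch of that mapping (the helper name is ours; the constant
matches the patch):

    #include <stdio.h>

    #define SEPARATE_DMA_IRQ_MAX 10

    static int dma_channel_to_gpu_irq(int channel)
    {
        return channel <= SEPARATE_DMA_IRQ_MAX ? channel          /* dedicated */
                                               : SEPARATE_DMA_IRQ_MAX + 1; /* shared */
    }

    int main(void)
    {
        int chans[] = { 0, 10, 11, 14 };
        for (int i = 0; i < 4; i++) {
            printf("channel %2d -> IRQ line %d\n",
                   chans[i], dma_channel_to_gpu_irq(chans[i]));
        }
        return 0;
    }

The expected pairs (0->0, 10->10, 11->11, 14->11) are exactly the four
cases the new qtest exercises.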
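One detail worth calling out from the debug-helper move earlier in this
series: arm_adjust_watchpoint_address() undoes the BE32 subword address
adjustment with an XOR, which is its own inverse. A standalone sketch of
that property (the function name is ours):

    #include <stdint.h>
    #include <stdio.h>

    /* XOR-adjust a subword address the way BE32 mode does; applying the
     * same adjustment twice restores the original address. */
    static uint64_t adjust_subword_addr(uint64_t addr, int len)
    {
        if (len == 1) {
            return addr ^ 3;
        } else if (len == 2) {
            return addr ^ 2;
        }
        return addr;
    }

    int main(void)
    {
        uint64_t addr = 0x1001;
        uint64_t adjusted = adjust_subword_addr(addr, 1);
        printf("adjusted: %#llx, undone: %#llx\n",
               (unsigned long long)adjusted,
               (unsigned long long)adjust_subword_addr(adjusted, 1));
        return 0;
    }

That self-inverse property is why the watchpoint code can simply reapply
the XOR to recover the address the guest actually used.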