A last collection of patches to squeeze in before rc0.

thanks
-- PMM

The following changes since commit f003dd8d81f7d88f4b1f8802309eaa76f6eb223a:

  Merge tag 'pull-tcg-20230305' of https://gitlab.com/rth7680/qemu into staging (2023-03-06 10:20:04 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230306

for you to fetch changes up to 2ddc45954f97cd1d7ee5cbca0def05e980d1da9f:

  hw: arm: allwinner-h3: Fix and complete H3 i2c devices (2023-03-06 15:31:24 +0000)

----------------------------------------------------------------
target-arm queue:
 * allwinner-h3: Fix I2C controller model for Sun6i SoCs
 * allwinner-h3: Add missing i2c controllers
 * Expose M-profile system registers to gdbstub
 * Expose pauth information to gdbstub
 * Support direct boot for Linux/arm64 EFI zboot images
 * Fix incorrect stage 2 MMU setup validation

----------------------------------------------------------------
Ard Biesheuvel (1):
      hw: arm: Support direct boot for Linux/arm64 EFI zboot images

David Reiss (2):
      target/arm: Export arm_v7m_mrs_control
      target/arm: Export arm_v7m_get_sp_ptr

Richard Henderson (16):
      target/arm: Normalize aarch64 gdbstub get/set function names
      target/arm: Unexport arm_gen_dynamic_sysreg_xml
      target/arm: Move arm_gen_dynamic_svereg_xml to gdbstub64.c
      target/arm: Split out output_vector_union_type
      target/arm: Simplify register counting in arm_gen_dynamic_svereg_xml
      target/arm: Hoist pred_width in arm_gen_dynamic_svereg_xml
      target/arm: Fix svep width in arm_gen_dynamic_svereg_xml
      target/arm: Add name argument to output_vector_union_type
      target/arm: Simplify iteration over bit widths
      target/arm: Create pauth_ptr_mask
      target/arm: Implement gdbstub pauth extension
      target/arm: Implement gdbstub m-profile systemreg and secext
      target/arm: Handle m-profile in arm_is_secure
      target/arm: Stub arm_hcr_el2_eff for m-profile
      target/arm: Diagnose incorrect usage of arm_is_secure subroutines
      target/arm: Rewrite check_s2_mmu_setup

qianfan Zhao (2):
      hw: allwinner-i2c: Fix TWI_CNTR_INT_FLAG on SUN6i SoCs
      hw: arm: allwinner-h3: Fix and complete H3 i2c devices

 configs/targets/aarch64-linux-user.mak    |   2 +-
 configs/targets/aarch64-softmmu.mak       |   2 +-
 configs/targets/aarch64_be-linux-user.mak |   2 +-
 include/hw/arm/allwinner-h3.h             |   6 +
 include/hw/i2c/allwinner-i2c.h            |   6 +
 include/hw/loader.h                       |  19 ++
 target/arm/cpu.h                          |  17 +-
 target/arm/internals.h                    |  34 +++-
 hw/arm/allwinner-h3.c                     |  29 +++-
 hw/arm/boot.c                             |   6 +
 hw/core/loader.c                          |  91 ++++++++++
 hw/i2c/allwinner-i2c.c                    |  26 ++-
 target/arm/gdbstub.c                      | 278 ++++++++++++++++++------------
 target/arm/gdbstub64.c                    | 175 ++++++++++++++++++-
 target/arm/helper.c                       |   3 +
 target/arm/ptw.c                          | 173 +++++++++++--------
 target/arm/tcg/m_helper.c                 |  90 +++++-----
 target/arm/tcg/pauth_helper.c             |  26 ++-
 gdb-xml/aarch64-pauth.xml                 |  15 ++
 19 files changed, 742 insertions(+), 258 deletions(-)
 create mode 100644 gdb-xml/aarch64-pauth.xml

From: Richard Henderson <richard.henderson@linaro.org>

Make the form of the function names between fp and sve the same:
 - arm_gdb_*_svereg -> aarch64_gdb_*_sve_reg.
 - aarch64_fpu_gdb_*_reg -> aarch64_gdb_*_fpu_reg.

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h | 8 ++++----
 target/arm/gdbstub.c   | 9 +++++----
 target/arm/gdbstub64.c | 8 ++++----
 3 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t pmu_counter_mask(CPUARMState *env)
 }
 
 #ifdef TARGET_AARCH64
-int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
-int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
-int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
-int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
+int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
+int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
+int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
+int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -XXX,XX +XXX,XX @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
      */
 #ifdef TARGET_AARCH64
     if (isar_feature_aa64_sve(&cpu->isar)) {
-        gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
-                                 arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
+        int nreg = arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs);
+        gdb_register_coprocessor(cs, aarch64_gdb_get_sve_reg,
+                                 aarch64_gdb_set_sve_reg, nreg,
                                  "sve-registers.xml", 0);
     } else {
-        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
-                                 aarch64_fpu_gdb_set_reg,
+        gdb_register_coprocessor(cs, aarch64_gdb_get_fpu_reg,
+                                 aarch64_gdb_set_fpu_reg,
                                  34, "aarch64-fpu.xml", 0);
     }
 #endif
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
     return 0;
 }
 
-int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
+int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg)
 {
     switch (reg) {
     case 0 ... 31:
@@ -XXX,XX +XXX,XX @@ int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
     }
 }
 
-int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg)
 {
     switch (reg) {
     case 0 ... 31:
@@ -XXX,XX +XXX,XX @@ int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
     }
 }
 
-int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
+int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg)
 {
     ARMCPU *cpu = env_archcpu(env);
 
@@ -XXX,XX +XXX,XX @@ int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
     return 0;
 }
 
-int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
+int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
 {
     ARMCPU *cpu = env_archcpu(env);
 
--
2.34.1

From: Richard Henderson <richard.henderson@linaro.org>

This function is not used outside gdbstub.c.

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h     | 1 -
 target/arm/gdbstub.c | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
  * Helpers to dynamically generates XML descriptions of the sysregs
  * and SVE registers. Returns the number of registers in each set.
  */
-int arm_gen_dynamic_sysreg_xml(CPUState *cpu, int base_reg);
 int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
 
 /* Returns the dynamically generated XML for the gdb stub.
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -XXX,XX +XXX,XX @@ static void arm_register_sysreg_for_xml(gpointer key, gpointer value,
     }
 }
 
-int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
+static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     GString *s = g_string_new(NULL);
--
2.34.1

From: Richard Henderson <richard.henderson@linaro.org>

The function is only used for aarch64, so move it to the
file that has the other aarch64 gdbstub stuff. Move the
declaration to internals.h.

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h       |   6 ---
 target/arm/internals.h |   1 +
 target/arm/gdbstub.c   | 120 -----------------------------------------
 target/arm/gdbstub64.c | 118 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 119 insertions(+), 126 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
 int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 
-/*
- * Helpers to dynamically generates XML descriptions of the sysregs
- * and SVE registers. Returns the number of registers in each set.
- */
-int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
-
 /* Returns the dynamically generated XML for the gdb stub.
  * Returns a pointer to the XML contents for the specified XML file or NULL
  * if the XML name doesn't match the predefined one.
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t pmu_counter_mask(CPUARMState *env)
 }
 
 #ifdef TARGET_AARCH64
+int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
 int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
 int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
 int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -XXX,XX +XXX,XX @@ static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
     return cpu->dyn_sysreg_xml.num;
 }
 
-struct TypeSize {
-    const char *gdb_type;
-    int size;
-    const char sz, suffix;
-};
-
-static const struct TypeSize vec_lanes[] = {
-    /* quads */
-    { "uint128", 128, 'q', 'u' },
-    { "int128", 128, 'q', 's' },
-    /* 64 bit */
-    { "ieee_double", 64, 'd', 'f' },
-    { "uint64", 64, 'd', 'u' },
-    { "int64", 64, 'd', 's' },
-    /* 32 bit */
-    { "ieee_single", 32, 's', 'f' },
-    { "uint32", 32, 's', 'u' },
-    { "int32", 32, 's', 's' },
-    /* 16 bit */
-    { "ieee_half", 16, 'h', 'f' },
-    { "uint16", 16, 'h', 'u' },
-    { "int16", 16, 'h', 's' },
-    /* bytes */
-    { "uint8", 8, 'b', 'u' },
-    { "int8", 8, 'b', 's' },
-};
-
-
-int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    GString *s = g_string_new(NULL);
-    DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
-    g_autoptr(GString) ts = g_string_new("");
-    int i, j, bits, reg_width = (cpu->sve_max_vq * 128);
-    info->num = 0;
-    g_string_printf(s, "<?xml version=\"1.0\"?>");
-    g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
-    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
-
-    /* First define types and totals in a whole VL */
-    for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
-        int count = reg_width / vec_lanes[i].size;
-        g_string_printf(ts, "svev%c%c", vec_lanes[i].sz, vec_lanes[i].suffix);
-        g_string_append_printf(s,
-                               "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
-                               ts->str, vec_lanes[i].gdb_type, count);
-    }
-    /*
-     * Now define a union for each size group containing unsigned and
-     * signed and potentially float versions of each size from 128 to
-     * 8 bits.
-     */
-    for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
-        const char suf[] = { 'q', 'd', 's', 'h', 'b' };
-        g_string_append_printf(s, "<union id=\"svevn%c\">", suf[i]);
-        for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
-            if (vec_lanes[j].size == bits) {
-                g_string_append_printf(s, "<field name=\"%c\" type=\"svev%c%c\"/>",
-                                       vec_lanes[j].suffix,
-                                       vec_lanes[j].sz, vec_lanes[j].suffix);
-            }
-        }
-        g_string_append(s, "</union>");
-    }
-    /* And now the final union of unions */
-    g_string_append(s, "<union id=\"svev\">");
-    for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
-        const char suf[] = { 'q', 'd', 's', 'h', 'b' };
-        g_string_append_printf(s, "<field name=\"%c\" type=\"svevn%c\"/>",
-                               suf[i], suf[i]);
-    }
-    g_string_append(s, "</union>");
-
-    /* Finally the sve prefix type */
-    g_string_append_printf(s,
-                           "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
-                           reg_width / 8);
-
-    /* Then define each register in parts for each vq */
-    for (i = 0; i < 32; i++) {
-        g_string_append_printf(s,
-                               "<reg name=\"z%d\" bitsize=\"%d\""
-                               " regnum=\"%d\" type=\"svev\"/>",
-                               i, reg_width, base_reg++);
-        info->num++;
-    }
-    /* fpscr & status registers */
-    g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\""
-                           " regnum=\"%d\" group=\"float\""
-                           " type=\"int\"/>", base_reg++);
-    g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\""
-                           " regnum=\"%d\" group=\"float\""
-                           " type=\"int\"/>", base_reg++);
-    info->num += 2;
-
-    for (i = 0; i < 16; i++) {
-        g_string_append_printf(s,
-                               "<reg name=\"p%d\" bitsize=\"%d\""
-                               " regnum=\"%d\" type=\"svep\"/>",
-                               i, cpu->sve_max_vq * 16, base_reg++);
-        info->num++;
-    }
-    g_string_append_printf(s,
-                           "<reg name=\"ffr\" bitsize=\"%d\""
-                           " regnum=\"%d\" group=\"vector\""
-                           " type=\"svep\"/>",
-                           cpu->sve_max_vq * 16, base_reg++);
-    g_string_append_printf(s,
-                           "<reg name=\"vg\" bitsize=\"64\""
-                           " regnum=\"%d\" type=\"int\"/>",
-                           base_reg++);
-    info->num += 2;
-    g_string_append_printf(s, "</feature>");
-    cpu->dyn_svereg_xml.desc = g_string_free(s, false);
-
-    return cpu->dyn_svereg_xml.num;
-}
-
-
 const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
 {
     ARMCPU *cpu = ARM_CPU(cs);
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
 
     return 0;
 }
+
+struct TypeSize {
+    const char *gdb_type;
+    short size;
+    char sz, suffix;
+};
+
+static const struct TypeSize vec_lanes[] = {
+    /* quads */
+    { "uint128", 128, 'q', 'u' },
+    { "int128", 128, 'q', 's' },
+    /* 64 bit */
+    { "ieee_double", 64, 'd', 'f' },
+    { "uint64", 64, 'd', 'u' },
+    { "int64", 64, 'd', 's' },
+    /* 32 bit */
+    { "ieee_single", 32, 's', 'f' },
+    { "uint32", 32, 's', 'u' },
+    { "int32", 32, 's', 's' },
+    /* 16 bit */
+    { "ieee_half", 16, 'h', 'f' },
+    { "uint16", 16, 'h', 'u' },
+    { "int16", 16, 'h', 's' },
+    /* bytes */
+    { "uint8", 8, 'b', 'u' },
+    { "int8", 8, 'b', 's' },
+};
+
+int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    GString *s = g_string_new(NULL);
+    DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
+    g_autoptr(GString) ts = g_string_new("");
+    int i, j, bits, reg_width = (cpu->sve_max_vq * 128);
+    info->num = 0;
+    g_string_printf(s, "<?xml version=\"1.0\"?>");
+    g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
+
+    /* First define types and totals in a whole VL */
+    for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
+        int count = reg_width / vec_lanes[i].size;
+        g_string_printf(ts, "svev%c%c", vec_lanes[i].sz, vec_lanes[i].suffix);
+        g_string_append_printf(s,
+                               "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
+                               ts->str, vec_lanes[i].gdb_type, count);
+    }
+    /*
+     * Now define a union for each size group containing unsigned and
+     * signed and potentially float versions of each size from 128 to
+     * 8 bits.
+     */
+    for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
+        const char suf[] = { 'q', 'd', 's', 'h', 'b' };
+        g_string_append_printf(s, "<union id=\"svevn%c\">", suf[i]);
+        for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
+            if (vec_lanes[j].size == bits) {
+                g_string_append_printf(s, "<field name=\"%c\" type=\"svev%c%c\"/>",
+                                       vec_lanes[j].suffix,
+                                       vec_lanes[j].sz, vec_lanes[j].suffix);
+            }
+        }
+        g_string_append(s, "</union>");
+    }
+    /* And now the final union of unions */
+    g_string_append(s, "<union id=\"svev\">");
+    for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
+        const char suf[] = { 'q', 'd', 's', 'h', 'b' };
+        g_string_append_printf(s, "<field name=\"%c\" type=\"svevn%c\"/>",
+                               suf[i], suf[i]);
+    }
+    g_string_append(s, "</union>");
+
+    /* Finally the sve prefix type */
+    g_string_append_printf(s,
+                           "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
+                           reg_width / 8);
+
+    /* Then define each register in parts for each vq */
+    for (i = 0; i < 32; i++) {
+        g_string_append_printf(s,
+                               "<reg name=\"z%d\" bitsize=\"%d\""
+                               " regnum=\"%d\" type=\"svev\"/>",
+                               i, reg_width, base_reg++);
+        info->num++;
+    }
+    /* fpscr & status registers */
+    g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\""
+                           " regnum=\"%d\" group=\"float\""
+                           " type=\"int\"/>", base_reg++);
+    g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\""
+                           " regnum=\"%d\" group=\"float\""
+                           " type=\"int\"/>", base_reg++);
+    info->num += 2;
+
+    for (i = 0; i < 16; i++) {
+        g_string_append_printf(s,
+                               "<reg name=\"p%d\" bitsize=\"%d\""
+                               " regnum=\"%d\" type=\"svep\"/>",
+                               i, cpu->sve_max_vq * 16, base_reg++);
+        info->num++;
+    }
+    g_string_append_printf(s,
+                           "<reg name=\"ffr\" bitsize=\"%d\""
+                           " regnum=\"%d\" group=\"vector\""
+                           " type=\"svep\"/>",
+                           cpu->sve_max_vq * 16, base_reg++);
+    g_string_append_printf(s,
+                           "<reg name=\"vg\" bitsize=\"64\""
+                           " regnum=\"%d\" type=\"int\"/>",
+                           base_reg++);
+    info->num += 2;
+    g_string_append_printf(s, "</feature>");
+    info->desc = g_string_free(s, false);
+
+    return info->num;
+}
--
2.34.1

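For orientation, the feature description this function builds can be sketched
for a hypothetical cpu with sve_max_vq = 1, so reg_width = 128 (regnum values
depend on base_reg and are elided):

  <?xml version="1.0"?>
  <!DOCTYPE target SYSTEM "gdb-target.dtd">
  <feature name="org.gnu.gdb.aarch64.sve">
    <!-- one <vector> per vec_lanes[] entry, then the size-group unions -->
    <vector id="svep" type="uint8" count="16"/>
    <reg name="z0" bitsize="128" regnum="..." type="svev"/>
    <!-- ... z1-z31, then fpsr and fpcr, then p0-p15 at bitsize 16 ... -->
    <reg name="ffr" bitsize="16" regnum="..." group="vector" type="svep"/>
    <reg name="vg" bitsize="64" regnum="..." type="int"/>
  </feature>

Note that svep is declared with count 16 (reg_width / 8 bytes) while the
predicate registers are only 16 bits wide; a later patch in this series
("Fix svep width") changes that count to the predicate width.
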
From: Richard Henderson <richard.henderson@linaro.org>

Create a subroutine for creating the union of unions
of the various type sizes that a vector may contain.

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/gdbstub64.c | 83 +++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 38 deletions(-)

diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
     return 0;
 }
 
-struct TypeSize {
-    const char *gdb_type;
-    short size;
-    char sz, suffix;
-};
-
-static const struct TypeSize vec_lanes[] = {
-    /* quads */
-    { "uint128", 128, 'q', 'u' },
-    { "int128", 128, 'q', 's' },
-    /* 64 bit */
-    { "ieee_double", 64, 'd', 'f' },
-    { "uint64", 64, 'd', 'u' },
-    { "int64", 64, 'd', 's' },
-    /* 32 bit */
-    { "ieee_single", 32, 's', 'f' },
-    { "uint32", 32, 's', 'u' },
-    { "int32", 32, 's', 's' },
-    /* 16 bit */
-    { "ieee_half", 16, 'h', 'f' },
-    { "uint16", 16, 'h', 'u' },
-    { "int16", 16, 'h', 's' },
-    /* bytes */
-    { "uint8", 8, 'b', 'u' },
-    { "int8", 8, 'b', 's' },
-};
-
-int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
+static void output_vector_union_type(GString *s, int reg_width)
 {
-    ARMCPU *cpu = ARM_CPU(cs);
-    GString *s = g_string_new(NULL);
-    DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
+    struct TypeSize {
+        const char *gdb_type;
+        short size;
+        char sz, suffix;
+    };
+
+    static const struct TypeSize vec_lanes[] = {
+        /* quads */
+        { "uint128", 128, 'q', 'u' },
+        { "int128", 128, 'q', 's' },
+        /* 64 bit */
+        { "ieee_double", 64, 'd', 'f' },
+        { "uint64", 64, 'd', 'u' },
+        { "int64", 64, 'd', 's' },
+        /* 32 bit */
+        { "ieee_single", 32, 's', 'f' },
+        { "uint32", 32, 's', 'u' },
+        { "int32", 32, 's', 's' },
+        /* 16 bit */
+        { "ieee_half", 16, 'h', 'f' },
+        { "uint16", 16, 'h', 'u' },
+        { "int16", 16, 'h', 's' },
+        /* bytes */
+        { "uint8", 8, 'b', 'u' },
+        { "int8", 8, 'b', 's' },
+    };
+
+    static const char suf[] = { 'q', 'd', 's', 'h', 'b' };
+
     g_autoptr(GString) ts = g_string_new("");
-    int i, j, bits, reg_width = (cpu->sve_max_vq * 128);
-    info->num = 0;
-    g_string_printf(s, "<?xml version=\"1.0\"?>");
-    g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
-    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
+    int i, j, bits;
 
     /* First define types and totals in a whole VL */
     for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
      * 8 bits.
      */
     for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
-        const char suf[] = { 'q', 'd', 's', 'h', 'b' };
         g_string_append_printf(s, "<union id=\"svevn%c\">", suf[i]);
         for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
             if (vec_lanes[j].size == bits) {
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
     /* And now the final union of unions */
     g_string_append(s, "<union id=\"svev\">");
     for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
-        const char suf[] = { 'q', 'd', 's', 'h', 'b' };
         g_string_append_printf(s, "<field name=\"%c\" type=\"svevn%c\"/>",
                                suf[i], suf[i]);
     }
     g_string_append(s, "</union>");
+}
+
+int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    GString *s = g_string_new(NULL);
+    DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
+    int i, reg_width = (cpu->sve_max_vq * 128);
+    info->num = 0;
+    g_string_printf(s, "<?xml version=\"1.0\"?>");
+    g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
+
+    output_vector_union_type(s, reg_width);
 
     /* Finally the sve prefix type */
     g_string_append_printf(s,
--
2.34.1

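What the new subroutine emits, sketched for reg_width = 128 with only the
quad-sized group written out (the d, s, h and b groups follow the same
pattern):

  <vector id="svevqu" type="uint128" count="1"/>
  <vector id="svevqs" type="int128" count="1"/>
  <union id="svevnq">
    <field name="u" type="svevqu"/>
    <field name="s" type="svevqs"/>
  </union>
  <union id="svev">
    <field name="q" type="svevnq"/>
    <field name="d" type="svevnd"/>
    <field name="s" type="svevns"/>
    <field name="h" type="svevnh"/>
    <field name="b" type="svevnb"/>
  </union>

This union-of-unions is what lets gdb display a z register reinterpreted at
any lane size and signedness.
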
From: Richard Henderson <richard.henderson@linaro.org>

Rather than increment base_reg and num, compute num from the change
to base_reg at the end. Clean up some nearby comments.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/gdbstub64.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ static void output_vector_union_type(GString *s, int reg_width)
     g_string_append(s, "</union>");
 }
 
-int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
+int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     GString *s = g_string_new(NULL);
     DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
-    int i, reg_width = (cpu->sve_max_vq * 128);
-    info->num = 0;
+    int reg_width = cpu->sve_max_vq * 128;
+    int base_reg = orig_base_reg;
+    int i;
+
     g_string_printf(s, "<?xml version=\"1.0\"?>");
     g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
     g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
 
+    /* Create the vector union type. */
     output_vector_union_type(s, reg_width);
 
-    /* Finally the sve prefix type */
+    /* Create the predicate vector type. */
     g_string_append_printf(s,
                            "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
                            reg_width / 8);
 
-    /* Then define each register in parts for each vq */
+    /* Define the vector registers. */
     for (i = 0; i < 32; i++) {
         g_string_append_printf(s,
                                "<reg name=\"z%d\" bitsize=\"%d\""
                                " regnum=\"%d\" type=\"svev\"/>",
                                i, reg_width, base_reg++);
-        info->num++;
     }
+
     /* fpscr & status registers */
     g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\""
                            " regnum=\"%d\" group=\"float\""
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
     g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\""
                            " regnum=\"%d\" group=\"float\""
                            " type=\"int\"/>", base_reg++);
-    info->num += 2;
 
+    /* Define the predicate registers. */
     for (i = 0; i < 16; i++) {
         g_string_append_printf(s,
                                "<reg name=\"p%d\" bitsize=\"%d\""
                                " regnum=\"%d\" type=\"svep\"/>",
                                i, cpu->sve_max_vq * 16, base_reg++);
-        info->num++;
     }
     g_string_append_printf(s,
                            "<reg name=\"ffr\" bitsize=\"%d\""
                            " regnum=\"%d\" group=\"vector\""
                            " type=\"svep\"/>",
                            cpu->sve_max_vq * 16, base_reg++);
+
+    /* Define the vector length pseudo-register. */
     g_string_append_printf(s,
                            "<reg name=\"vg\" bitsize=\"64\""
                            " regnum=\"%d\" type=\"int\"/>",
                            base_reg++);
-    info->num += 2;
-    g_string_append_printf(s, "</feature>");
-    info->desc = g_string_free(s, false);
 
+    g_string_append_printf(s, "</feature>");
+
+    info->desc = g_string_free(s, false);
+    info->num = base_reg - orig_base_reg;
     return info->num;
 }
--
2.34.1

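The counting idiom this switches to is easy to see in isolation: hand out
register numbers as you go and recover the count by subtraction at the end.
A stand-alone sketch (plain C, with the XML emission elided):

  #include <stdio.h>

  static int define_regs(int orig_base_reg)
  {
      int base_reg = orig_base_reg;

      for (int i = 0; i < 32; i++) {
          base_reg++;        /* ...emit <reg regnum="%d".../> here... */
      }
      base_reg += 2;         /* fpsr and fpcr */

      /* No parallel info->num counter to keep in sync. */
      return base_reg - orig_base_reg;
  }

  int main(void)
  {
      printf("%d\n", define_regs(100));   /* prints 34 */
      return 0;
  }
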
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/gdbstub64.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg)
     GString *s = g_string_new(NULL);
     DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
     int reg_width = cpu->sve_max_vq * 128;
+    int pred_width = cpu->sve_max_vq * 16;
     int base_reg = orig_base_reg;
     int i;
 
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg)
         g_string_append_printf(s,
                                "<reg name=\"p%d\" bitsize=\"%d\""
                                " regnum=\"%d\" type=\"svep\"/>",
-                               i, cpu->sve_max_vq * 16, base_reg++);
+                               i, pred_width, base_reg++);
     }
     g_string_append_printf(s,
                            "<reg name=\"ffr\" bitsize=\"%d\""
                            " regnum=\"%d\" group=\"vector\""
                            " type=\"svep\"/>",
-                           cpu->sve_max_vq * 16, base_reg++);
+                           pred_width, base_reg++);
 
     /* Define the vector length pseudo-register. */
     g_string_append_printf(s,
--
2.34.1

From: Richard Henderson <richard.henderson@linaro.org>

Define svep based on the size of the predicates,
not the primary vector registers.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/gdbstub64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg)
     /* Create the predicate vector type. */
     g_string_append_printf(s,
                            "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
-                           reg_width / 8);
+                           pred_width / 8);
 
     /* Define the vector registers. */
     for (i = 0; i < 32; i++) {
--
2.34.1

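To make the fix concrete: a predicate register has one bit per vector byte,
so for a hypothetical sve_max_vq = 1 cpu, reg_width = 128 and pred_width = 16,
and the emitted fragment (regnum elided) becomes

  <vector id="svep" type="uint8" count="2"/>
  <reg name="p0" bitsize="16" regnum="..." type="svep"/>

where previously svep was declared with count="16", i.e. sixteen bytes
backing a 16-bit register.
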
From: Richard Henderson <richard.henderson@linaro.org>

This will make the function usable between SVE and SME.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/gdbstub64.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
     return 0;
 }
 
-static void output_vector_union_type(GString *s, int reg_width)
+static void output_vector_union_type(GString *s, int reg_width,
+                                     const char *name)
 {
     struct TypeSize {
         const char *gdb_type;
@@ -XXX,XX +XXX,XX @@ static void output_vector_union_type(GString *s, int reg_width)
     };
 
     static const char suf[] = { 'q', 'd', 's', 'h', 'b' };
-
-    g_autoptr(GString) ts = g_string_new("");
     int i, j, bits;
 
     /* First define types and totals in a whole VL */
     for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
-        int count = reg_width / vec_lanes[i].size;
-        g_string_printf(ts, "svev%c%c", vec_lanes[i].sz, vec_lanes[i].suffix);
         g_string_append_printf(s,
-                               "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
-                               ts->str, vec_lanes[i].gdb_type, count);
+                               "<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>",
+                               name, vec_lanes[i].sz, vec_lanes[i].suffix,
+                               vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size);
     }
+
     /*
      * Now define a union for each size group containing unsigned and
      * signed and potentially float versions of each size from 128 to
      * 8 bits.
      */
     for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
-        g_string_append_printf(s, "<union id=\"svevn%c\">", suf[i]);
+        g_string_append_printf(s, "<union id=\"%sn%c\">", name, suf[i]);
         for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
             if (vec_lanes[j].size == bits) {
-                g_string_append_printf(s, "<field name=\"%c\" type=\"svev%c%c\"/>",
-                                       vec_lanes[j].suffix,
+                g_string_append_printf(s, "<field name=\"%c\" type=\"%s%c%c\"/>",
+                                       vec_lanes[j].suffix, name,
                                        vec_lanes[j].sz, vec_lanes[j].suffix);
             }
         }
         g_string_append(s, "</union>");
     }
+
     /* And now the final union of unions */
-    g_string_append(s, "<union id=\"svev\">");
+    g_string_append_printf(s, "<union id=\"%s\">", name);
     for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
-        g_string_append_printf(s, "<field name=\"%c\" type=\"svevn%c\"/>",
-                               suf[i], suf[i]);
+        g_string_append_printf(s, "<field name=\"%c\" type=\"%sn%c\"/>",
+                               suf[i], name, suf[i]);
     }
     g_string_append(s, "</union>");
 }
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg)
     g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
 
     /* Create the vector union type. */
-    output_vector_union_type(s, reg_width);
+    output_vector_union_type(s, reg_width, "svev");
 
     /* Create the predicate vector type. */
     g_string_append_printf(s,
--
2.34.1

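The intended reuse can be sketched in a line each; the second call and its
size parameter are hypothetical, only the "svev" call exists at this point
in the series:

      /* Today: SVE vector types, sized by the SVE vector length. */
      output_vector_union_type(s, reg_width, "svev");

      /* A later SME user could emit the same type family under another
       * prefix, sized by the streaming vector length (illustrative only): */
      output_vector_union_type(s, svl_reg_width, "smev");
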
From: Richard Henderson <richard.henderson@linaro.org>

Order suf[] by the log8 of the width.
Use ARRAY_SIZE instead of hard-coding 128.

This changes the order of the union definitions,
but retains the order of the union-of-union members.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/gdbstub64.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ static void output_vector_union_type(GString *s, int reg_width,
         { "int8", 8, 'b', 's' },
     };
 
-    static const char suf[] = { 'q', 'd', 's', 'h', 'b' };
-    int i, j, bits;
+    static const char suf[] = { 'b', 'h', 's', 'd', 'q' };
+    int i, j;
 
     /* First define types and totals in a whole VL */
     for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
@@ -XXX,XX +XXX,XX @@ static void output_vector_union_type(GString *s, int reg_width,
      * signed and potentially float versions of each size from 128 to
      * 8 bits.
      */
-    for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
+    for (i = 0; i < ARRAY_SIZE(suf); i++) {
+        int bits = 8 << i;
+
         g_string_append_printf(s, "<union id=\"%sn%c\">", name, suf[i]);
         for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
             if (vec_lanes[j].size == bits) {
@@ -XXX,XX +XXX,XX @@ static void output_vector_union_type(GString *s, int reg_width,
 
     /* And now the final union of unions */
     g_string_append_printf(s, "<union id=\"%s\">", name);
-    for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
+    for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) {
         g_string_append_printf(s, "<field name=\"%c\" type=\"%sn%c\"/>",
                                suf[i], name, suf[i]);
     }
--
2.34.1

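The reordering works because the index now encodes the width directly:
suf[i] names the 8 << i bit width. A stand-alone check of that mapping:

  #include <stdio.h>

  int main(void)
  {
      static const char suf[] = { 'b', 'h', 's', 'd', 'q' };

      for (int i = 0; i < (int)sizeof(suf); i++) {
          printf("suf[%d] = '%c' -> %d-bit lanes\n", i, suf[i], 8 << i);
      }
      return 0;   /* b->8, h->16, s->32, d->64, q->128 */
  }
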
From: Richard Henderson <richard.henderson@linaro.org>

Keep the logic for pauth within pauth_helper.c, and expose
a helper function for use with the gdbstub pac extension.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h        | 10 ++++++++++
 target/arm/tcg/pauth_helper.c | 26 ++++++++++++++++++++++----
 2 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ int exception_target_el(CPUARMState *env);
 bool arm_singlestep_active(CPUARMState *env);
 bool arm_generate_debug_exceptions(CPUARMState *env);
 
+/**
+ * pauth_ptr_mask:
+ * @env: cpu context
+ * @ptr: selects between TTBR0 and TTBR1
+ * @data: selects between TBI and TBID
+ *
+ * Return a mask of the bits of @ptr that contain the authentication code.
+ */
+uint64_t pauth_ptr_mask(CPUARMState *env, uint64_t ptr, bool data);
+
 /* Add the cpreg definitions for debug related system registers */
 void define_debug_regs(ARMCPU *cpu);
 
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/pauth_helper.c
+++ b/target/arm/tcg/pauth_helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
     return pac | ext | ptr;
 }
 
-static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
+static uint64_t pauth_ptr_mask_internal(ARMVAParameters param)
 {
-    /* Note that bit 55 is used whether or not the regime has 2 ranges. */
-    uint64_t extfield = sextract64(ptr, 55, 1);
     int bot_pac_bit = 64 - param.tsz;
     int top_pac_bit = 64 - 8 * param.tbi;
 
-    return deposit64(ptr, bot_pac_bit, top_pac_bit - bot_pac_bit, extfield);
+    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
+}
+
+static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
+{
+    uint64_t mask = pauth_ptr_mask_internal(param);
+
+    /* Note that bit 55 is used whether or not the regime has 2 ranges. */
+    if (extract64(ptr, 55, 1)) {
+        return ptr | mask;
+    } else {
+        return ptr & ~mask;
+    }
+}
+
+uint64_t pauth_ptr_mask(CPUARMState *env, uint64_t ptr, bool data)
+{
+    ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
+    ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data);
+
+    return pauth_ptr_mask_internal(param);
 }
 
 static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
--
2.34.1

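A worked example of the arithmetic, as a stand-alone sketch with made-up
translation parameters rather than the real ARMVAParameters: with tsz = 25
(a 39-bit VA) and tbi = 1, the PAC field spans bits [39,56):

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Same definition as QEMU's include/qemu/bitops.h. */
  #define MAKE_64BIT_MASK(shift, length) \
      (((~0ULL) >> (64 - (length))) << (shift))

  int main(void)
  {
      int tsz = 25, tbi = 1;              /* example values only */
      int bot_pac_bit = 64 - tsz;         /* 39 */
      int top_pac_bit = 64 - 8 * tbi;     /* 56: top byte skipped under TBI */
      uint64_t mask = MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);

      printf("0x%016" PRIx64 "\n", mask); /* 0x00ffff8000000000 */
      return 0;
  }
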
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
The extension is primarily defined by the Linux kernel NT_ARM_PAC_MASK
4
ptrace register set.
5
6
The original gdb feature consists of two masks, data and code, which are
7
used to mask out the authentication code within a pointer. Following
8
discussion with Luis Machado, add two more masks in order to support
9
pointers within the high half of the address space (i.e. TTBR1 vs TTBR0).
10
11
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1105
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20230227213329.793795-12-richard.henderson@linaro.org
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
17
configs/targets/aarch64-linux-user.mak | 2 +-
18
configs/targets/aarch64-softmmu.mak | 2 +-
19
configs/targets/aarch64_be-linux-user.mak | 2 +-
20
target/arm/internals.h | 2 ++
21
target/arm/gdbstub.c | 5 ++++
22
target/arm/gdbstub64.c | 34 +++++++++++++++++++++++
23
gdb-xml/aarch64-pauth.xml | 15 ++++++++++
24
7 files changed, 59 insertions(+), 3 deletions(-)
25
create mode 100644 gdb-xml/aarch64-pauth.xml
26
27
diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak
28
index XXXXXXX..XXXXXXX 100644
29
--- a/configs/targets/aarch64-linux-user.mak
30
+++ b/configs/targets/aarch64-linux-user.mak
31
@@ -XXX,XX +XXX,XX @@
32
TARGET_ARCH=aarch64
33
TARGET_BASE_ARCH=arm
34
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml
35
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml
36
TARGET_HAS_BFLT=y
37
CONFIG_SEMIHOSTING=y
38
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
39
diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak
40
index XXXXXXX..XXXXXXX 100644
41
--- a/configs/targets/aarch64-softmmu.mak
42
+++ b/configs/targets/aarch64-softmmu.mak
43
@@ -XXX,XX +XXX,XX @@
44
TARGET_ARCH=aarch64
45
TARGET_BASE_ARCH=arm
46
TARGET_SUPPORTS_MTTCG=y
47
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml
48
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml gdb-xml/aarch64-pauth.xml
 TARGET_NEED_FDT=y
diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/aarch64_be-linux-user.mak
+++ b/configs/targets/aarch64_be-linux-user.mak
@@ -XXX,XX +XXX,XX @@
 TARGET_ARCH=aarch64
 TARGET_BASE_ARCH=arm
 TARGET_BIG_ENDIAN=y
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml
 TARGET_HAS_BFLT=y
 CONFIG_SEMIHOSTING=y
 CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
 int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
 int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
 int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
+int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
+int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -XXX,XX +XXX,XX @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
                                      aarch64_gdb_set_fpu_reg,
                                      34, "aarch64-fpu.xml", 0);
         }
+        if (isar_feature_aa64_pauth(&cpu->isar)) {
+            gdb_register_coprocessor(cs, aarch64_gdb_get_pauth_reg,
+                                     aarch64_gdb_set_pauth_reg,
+                                     4, "aarch64-pauth.xml", 0);
+        }
 #endif
     } else {
         if (arm_feature(env, ARM_FEATURE_NEON)) {
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -XXX,XX +XXX,XX @@ int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
     return 0;
 }
 
+int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg)
+{
+    switch (reg) {
+    case 0: /* pauth_dmask */
+    case 1: /* pauth_cmask */
+    case 2: /* pauth_dmask_high */
+    case 3: /* pauth_cmask_high */
+        /*
+         * Note that older versions of this feature only contained
+         * pauth_{d,c}mask, for use with Linux user processes, and
+         * thus exclusively in the low half of the address space.
+         *
+         * To support system mode, and to debug kernels, two new regs
+         * were added to cover the high half of the address space.
+         * For the purpose of pauth_ptr_mask, we can use any well-formed
+         * address within the address space half -- here, 0 and -1.
+         */
+        {
+            bool is_data = !(reg & 1);
+            bool is_high = reg & 2;
+            uint64_t mask = pauth_ptr_mask(env, -is_high, is_data);
+            return gdb_get_reg64(buf, mask);
+        }
+    default:
+        return 0;
+    }
+}
+
+int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+    /* All pseudo registers are read-only. */
+    return 0;
+}
+
 static void output_vector_union_type(GString *s, int reg_width,
                                      const char *name)
 {
diff --git a/gdb-xml/aarch64-pauth.xml b/gdb-xml/aarch64-pauth.xml
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/gdb-xml/aarch64-pauth.xml
@@ -XXX,XX +XXX,XX @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2018-2022 Free Software Foundation, Inc.
+
+     Copying and distribution of this file, with or without modification,
+     are permitted in any medium without royalty provided the copyright
+     notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.aarch64.pauth">
+  <reg name="pauth_dmask" bitsize="64"/>
+  <reg name="pauth_cmask" bitsize="64"/>
+  <reg name="pauth_dmask_high" bitsize="64"/>
+  <reg name="pauth_cmask_high" bitsize="64"/>
+</feature>
+
--
2.34.1
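As an aside for reviewers: a debugger front end consumes the four mask
registers above to strip PACs from pointers it reads from the target. The
snippet below is an illustrative sketch only -- strip_pac is a made-up
name, and the bit-55 half-selection is the generic AArch64 PAuth rule,
not code from this series:

#include <stdint.h>
#include <stdbool.h>

/* Sketch: recover a raw pointer from a PAC-signed one using the masks. */
static uint64_t strip_pac(uint64_t ptr, bool is_data,
                          uint64_t dmask, uint64_t cmask,
                          uint64_t dmask_high, uint64_t cmask_high)
{
    /* Bit 55 selects the low (TTBR0) or high (TTBR1) half. */
    bool high = (ptr >> 55) & 1;
    uint64_t mask = is_data ? (high ? dmask_high : dmask)
                            : (high ? cmask_high : cmask);

    /* PAC bits decode to all-ones in the high half, zeros in the low. */
    return high ? (ptr | mask) : (ptr & ~mask);
}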
From: David Reiss <dreiss@meta.com>

Allow the function to be used outside of m_helper.c.
Rename with an "arm_" prefix.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: David Reiss <dreiss@meta.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-13-richard.henderson@linaro.org
[rth: Split out of a larger patch]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h    | 3 +++
 target/arm/tcg/m_helper.c | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
 #endif
 
+/* Read the CONTROL register as the MRS instruction would. */
+uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
+
 #ifdef CONFIG_USER_ONLY
 static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
 #else
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -XXX,XX +XXX,XX @@ static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
     return xpsr_read(env) & mask;
 }
 
-static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
+uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure)
 {
     uint32_t value = env->v7m.control[secure];
 
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
     case 0 ... 7: /* xPSR sub-fields */
         return v7m_mrs_xpsr(env, reg, 0);
     case 20: /* CONTROL */
-        return v7m_mrs_control(env, 0);
+        return arm_v7m_mrs_control(env, 0);
     default:
         /* Unprivileged reads others as zero. */
         return 0;
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
     case 0 ... 7: /* xPSR sub-fields */
         return v7m_mrs_xpsr(env, reg, el);
     case 20: /* CONTROL */
-        return v7m_mrs_control(env, env->v7m.secure);
+        return arm_v7m_mrs_control(env, env->v7m.secure);
     case 0x94: /* CONTROL_NS */
         /*
          * We have to handle this here because unprivileged Secure code
--
2.34.1
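As an illustration of the new external interface (a sketch only: the
wrapper below is invented for this example; later patches in the series
use the real export when exposing M-profile system registers to the
gdbstub):

/* Sketch: read CONTROL for the current security state, as MRS would. */
static uint32_t read_control_for_gdb(CPUARMState *env)
{
    return arm_v7m_mrs_control(env, env->v7m.secure);
}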
From: David Reiss <dreiss@meta.com>
2
2
3
In preparation for supporting TCG disablement on ARM, we move most
3
Allow the function to be used outside of m_helper.c.
4
of TCG related v7m/v8m helpers and APIs into their own file.
4
Move to be outside of ifndef CONFIG_USER_ONLY block.
5
Rename from get_v7m_sp_ptr.
5
6
6
Note: It is easier to review this commit using the 'histogram'
7
diff algorithm:
8
9
$ git diff --diff-algorithm=histogram ...
10
or
11
$ git diff --histogram ...
12
13
Suggested-by: Samuel Ortiz <sameo@linux.intel.com>
14
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
15
Message-id: 20190702144335.10717-2-philmd@redhat.com
16
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: David Reiss <dreiss@meta.com>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20230227213329.793795-14-richard.henderson@linaro.org
12
[rth: Split out of a larger patch]
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
15
---
19
target/arm/Makefile.objs | 1 +
16
target/arm/internals.h | 10 +++++
20
target/arm/helper.c | 2638 +------------------------------------
17
target/arm/tcg/m_helper.c | 84 +++++++++++++++++++--------------------
21
target/arm/m_helper.c | 2676 ++++++++++++++++++++++++++++++++++++++
18
2 files changed, 51 insertions(+), 43 deletions(-)
22
3 files changed, 2681 insertions(+), 2634 deletions(-)
23
create mode 100644 target/arm/m_helper.c
24
19
25
diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
20
diff --git a/target/arm/internals.h b/target/arm/internals.h
26
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/Makefile.objs
22
--- a/target/arm/internals.h
28
+++ b/target/arm/Makefile.objs
23
+++ b/target/arm/internals.h
29
@@ -XXX,XX +XXX,XX @@ obj-y += tlb_helper.o debug_helper.o
24
@@ -XXX,XX +XXX,XX @@ void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
30
obj-y += translate.o op_helper.o
25
/* Read the CONTROL register as the MRS instruction would. */
31
obj-y += crypto_helper.o
26
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
32
obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
27
33
+obj-y += m_helper.o
28
+/*
34
29
+ * Return a pointer to the location where we currently store the
35
obj-$(CONFIG_SOFTMMU) += psci.o
30
+ * stack pointer for the requested security state and thread mode.
36
31
+ * This pointer will become invalid if the CPU state is updated
37
diff --git a/target/arm/helper.c b/target/arm/helper.c
32
+ * such that the stack pointers are switched around (eg changing
33
+ * the SPSEL control bit).
34
+ */
35
+uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
36
+ bool threadmode, bool spsel);
37
+
38
#ifdef CONFIG_USER_ONLY
39
static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
40
#else
41
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
38
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/helper.c
43
--- a/target/arm/tcg/m_helper.c
40
+++ b/target/arm/helper.c
44
+++ b/target/arm/tcg/m_helper.c
41
@@ -XXX,XX +XXX,XX @@
45
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
42
#include "qemu/crc32c.h"
46
arm_rebuild_hflags(env);
43
#include "qemu/qemu-print.h"
44
#include "exec/exec-all.h"
45
-#include "exec/cpu_ldst.h"
46
#include <zlib.h> /* For crc32 */
47
#include "hw/semihosting/semihost.h"
48
#include "sysemu/cpus.h"
49
@@ -XXX,XX +XXX,XX @@
50
#include "qemu/guest-random.h"
51
#ifdef CONFIG_TCG
52
#include "arm_ldst.h"
53
+#include "exec/cpu_ldst.h"
54
#endif
55
56
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
57
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rbit)(uint32_t x)
58
59
#ifdef CONFIG_USER_ONLY
60
61
-/* These should probably raise undefined insn exceptions. */
62
-void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
63
-{
64
- ARMCPU *cpu = env_archcpu(env);
65
-
66
- cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
67
-}
68
-
69
-uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
70
-{
71
- ARMCPU *cpu = env_archcpu(env);
72
-
73
- cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
74
- return 0;
75
-}
76
-
77
-void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
78
-{
79
- /* translate.c should never generate calls here in user-only mode */
80
- g_assert_not_reached();
81
-}
82
-
83
-void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
84
-{
85
- /* translate.c should never generate calls here in user-only mode */
86
- g_assert_not_reached();
87
-}
88
-
89
-void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
90
-{
91
- /* translate.c should never generate calls here in user-only mode */
92
- g_assert_not_reached();
93
-}
94
-
95
-void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
96
-{
97
- /* translate.c should never generate calls here in user-only mode */
98
- g_assert_not_reached();
99
-}
100
-
101
-void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
102
-{
103
- /* translate.c should never generate calls here in user-only mode */
104
- g_assert_not_reached();
105
-}
106
-
107
-uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
108
-{
109
- /*
110
- * The TT instructions can be used by unprivileged code, but in
111
- * user-only emulation we don't have the MPU.
112
- * Luckily since we know we are NonSecure unprivileged (and that in
113
- * turn means that the A flag wasn't specified), all the bits in the
114
- * register must be zero:
115
- * IREGION: 0 because IRVALID is 0
116
- * IRVALID: 0 because NS
117
- * S: 0 because NS
118
- * NSRW: 0 because NS
119
- * NSR: 0 because NS
120
- * RW: 0 because unpriv and A flag not set
121
- * R: 0 because unpriv and A flag not set
122
- * SRVALID: 0 because NS
123
- * MRVALID: 0 because unpriv and A flag not set
124
- * SREGION: 0 becaus SRVALID is 0
125
- * MREGION: 0 because MRVALID is 0
126
- */
127
- return 0;
128
-}
129
-
130
static void switch_mode(CPUARMState *env, int mode)
131
{
132
ARMCPU *cpu = env_archcpu(env);
133
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(int idx)
134
}
135
}
47
}
136
48
137
-/*
138
- * What kind of stack write are we doing? This affects how exceptions
139
- * generated during the stacking are treated.
140
- */
141
-typedef enum StackingMode {
142
- STACK_NORMAL,
143
- STACK_IGNFAULTS,
144
- STACK_LAZYFP,
145
-} StackingMode;
146
-
147
-static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
148
- ARMMMUIdx mmu_idx, StackingMode mode)
149
-{
150
- CPUState *cs = CPU(cpu);
151
- CPUARMState *env = &cpu->env;
152
- MemTxAttrs attrs = {};
153
- MemTxResult txres;
154
- target_ulong page_size;
155
- hwaddr physaddr;
156
- int prot;
157
- ARMMMUFaultInfo fi = {};
158
- bool secure = mmu_idx & ARM_MMU_IDX_M_S;
159
- int exc;
160
- bool exc_secure;
161
-
162
- if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
163
- &attrs, &prot, &page_size, &fi, NULL)) {
164
- /* MPU/SAU lookup failed */
165
- if (fi.type == ARMFault_QEMU_SFault) {
166
- if (mode == STACK_LAZYFP) {
167
- qemu_log_mask(CPU_LOG_INT,
168
- "...SecureFault with SFSR.LSPERR "
169
- "during lazy stacking\n");
170
- env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
171
- } else {
172
- qemu_log_mask(CPU_LOG_INT,
173
- "...SecureFault with SFSR.AUVIOL "
174
- "during stacking\n");
175
- env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
176
- }
177
- env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
178
- env->v7m.sfar = addr;
179
- exc = ARMV7M_EXCP_SECURE;
180
- exc_secure = false;
181
- } else {
182
- if (mode == STACK_LAZYFP) {
183
- qemu_log_mask(CPU_LOG_INT,
184
- "...MemManageFault with CFSR.MLSPERR\n");
185
- env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
186
- } else {
187
- qemu_log_mask(CPU_LOG_INT,
188
- "...MemManageFault with CFSR.MSTKERR\n");
189
- env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
190
- }
191
- exc = ARMV7M_EXCP_MEM;
192
- exc_secure = secure;
193
- }
194
- goto pend_fault;
195
- }
196
- address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
197
- attrs, &txres);
198
- if (txres != MEMTX_OK) {
199
- /* BusFault trying to write the data */
200
- if (mode == STACK_LAZYFP) {
201
- qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
202
- env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
203
- } else {
204
- qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
205
- env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
206
- }
207
- exc = ARMV7M_EXCP_BUS;
208
- exc_secure = false;
209
- goto pend_fault;
210
- }
211
- return true;
212
-
213
-pend_fault:
214
- /*
215
- * By pending the exception at this point we are making
216
- * the IMPDEF choice "overridden exceptions pended" (see the
217
- * MergeExcInfo() pseudocode). The other choice would be to not
218
- * pend them now and then make a choice about which to throw away
219
- * later if we have two derived exceptions.
220
- * The only case when we must not pend the exception but instead
221
- * throw it away is if we are doing the push of the callee registers
222
- * and we've already generated a derived exception (this is indicated
223
- * by the caller passing STACK_IGNFAULTS). Even in this case we will
224
- * still update the fault status registers.
225
- */
226
- switch (mode) {
227
- case STACK_NORMAL:
228
- armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
229
- break;
230
- case STACK_LAZYFP:
231
- armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
232
- break;
233
- case STACK_IGNFAULTS:
234
- break;
235
- }
236
- return false;
237
-}
238
-
239
-static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
240
- ARMMMUIdx mmu_idx)
241
-{
242
- CPUState *cs = CPU(cpu);
243
- CPUARMState *env = &cpu->env;
244
- MemTxAttrs attrs = {};
245
- MemTxResult txres;
246
- target_ulong page_size;
247
- hwaddr physaddr;
248
- int prot;
249
- ARMMMUFaultInfo fi = {};
250
- bool secure = mmu_idx & ARM_MMU_IDX_M_S;
251
- int exc;
252
- bool exc_secure;
253
- uint32_t value;
254
-
255
- if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
256
- &attrs, &prot, &page_size, &fi, NULL)) {
257
- /* MPU/SAU lookup failed */
258
- if (fi.type == ARMFault_QEMU_SFault) {
259
- qemu_log_mask(CPU_LOG_INT,
260
- "...SecureFault with SFSR.AUVIOL during unstack\n");
261
- env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
262
- env->v7m.sfar = addr;
263
- exc = ARMV7M_EXCP_SECURE;
264
- exc_secure = false;
265
- } else {
266
- qemu_log_mask(CPU_LOG_INT,
267
- "...MemManageFault with CFSR.MUNSTKERR\n");
268
- env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
269
- exc = ARMV7M_EXCP_MEM;
270
- exc_secure = secure;
271
- }
272
- goto pend_fault;
273
- }
274
-
275
- value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
276
- attrs, &txres);
277
- if (txres != MEMTX_OK) {
278
- /* BusFault trying to read the data */
279
- qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
280
- env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
281
- exc = ARMV7M_EXCP_BUS;
282
- exc_secure = false;
283
- goto pend_fault;
284
- }
285
-
286
- *dest = value;
287
- return true;
288
-
289
-pend_fault:
290
- /*
291
- * By pending the exception at this point we are making
292
- * the IMPDEF choice "overridden exceptions pended" (see the
293
- * MergeExcInfo() pseudocode). The other choice would be to not
294
- * pend them now and then make a choice about which to throw away
295
- * later if we have two derived exceptions.
296
- */
297
- armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
298
- return false;
299
-}
300
-
301
-void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
302
-{
303
- /*
304
- * Preserve FP state (because LSPACT was set and we are about
305
- * to execute an FP instruction). This corresponds to the
306
- * PreserveFPState() pseudocode.
307
- * We may throw an exception if the stacking fails.
308
- */
309
- ARMCPU *cpu = env_archcpu(env);
310
- bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
311
- bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
312
- bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
313
- bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
314
- uint32_t fpcar = env->v7m.fpcar[is_secure];
315
- bool stacked_ok = true;
316
- bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
317
- bool take_exception;
318
-
319
- /* Take the iothread lock as we are going to touch the NVIC */
320
- qemu_mutex_lock_iothread();
321
-
322
- /* Check the background context had access to the FPU */
323
- if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
324
- armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
325
- env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
326
- stacked_ok = false;
327
- } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
328
- armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
329
- env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
330
- stacked_ok = false;
331
- }
332
-
333
- if (!splimviol && stacked_ok) {
334
- /* We only stack if the stack limit wasn't violated */
335
- int i;
336
- ARMMMUIdx mmu_idx;
337
-
338
- mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
339
- for (i = 0; i < (ts ? 32 : 16); i += 2) {
340
- uint64_t dn = *aa32_vfp_dreg(env, i / 2);
341
- uint32_t faddr = fpcar + 4 * i;
342
- uint32_t slo = extract64(dn, 0, 32);
343
- uint32_t shi = extract64(dn, 32, 32);
344
-
345
- if (i >= 16) {
346
- faddr += 8; /* skip the slot for the FPSCR */
347
- }
348
- stacked_ok = stacked_ok &&
349
- v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
350
- v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
351
- }
352
-
353
- stacked_ok = stacked_ok &&
354
- v7m_stack_write(cpu, fpcar + 0x40,
355
- vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
356
- }
357
-
358
- /*
359
- * We definitely pended an exception, but it's possible that it
360
- * might not be able to be taken now. If its priority permits us
361
- * to take it now, then we must not update the LSPACT or FP regs,
362
- * but instead jump out to take the exception immediately.
363
- * If it's just pending and won't be taken until the current
364
- * handler exits, then we do update LSPACT and the FP regs.
365
- */
366
- take_exception = !stacked_ok &&
367
- armv7m_nvic_can_take_pending_exception(env->nvic);
368
-
369
- qemu_mutex_unlock_iothread();
370
-
371
- if (take_exception) {
372
- raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
373
- }
374
-
375
- env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
376
-
377
- if (ts) {
378
- /* Clear s0 to s31 and the FPSCR */
379
- int i;
380
-
381
- for (i = 0; i < 32; i += 2) {
382
- *aa32_vfp_dreg(env, i / 2) = 0;
383
- }
384
- vfp_set_fpscr(env, 0);
385
- }
386
- /*
387
- * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
388
- * unchanged.
389
- */
390
-}
391
-
392
-/*
393
- * Write to v7M CONTROL.SPSEL bit for the specified security bank.
394
- * This may change the current stack pointer between Main and Process
395
- * stack pointers if it is done for the CONTROL register for the current
396
- * security state.
397
- */
398
-static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
399
- bool new_spsel,
400
- bool secstate)
401
-{
402
- bool old_is_psp = v7m_using_psp(env);
403
-
404
- env->v7m.control[secstate] =
405
- deposit32(env->v7m.control[secstate],
406
- R_V7M_CONTROL_SPSEL_SHIFT,
407
- R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
408
-
409
- if (secstate == env->v7m.secure) {
410
- bool new_is_psp = v7m_using_psp(env);
411
- uint32_t tmp;
412
-
413
- if (old_is_psp != new_is_psp) {
414
- tmp = env->v7m.other_sp;
415
- env->v7m.other_sp = env->regs[13];
416
- env->regs[13] = tmp;
417
- }
418
- }
419
-}
420
-
421
-/*
422
- * Write to v7M CONTROL.SPSEL bit. This may change the current
423
- * stack pointer between Main and Process stack pointers.
424
- */
425
-static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
426
-{
427
- write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
428
-}
429
-
430
-void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
431
-{
432
- /*
433
- * Write a new value to v7m.exception, thus transitioning into or out
434
- * of Handler mode; this may result in a change of active stack pointer.
435
- */
436
- bool new_is_psp, old_is_psp = v7m_using_psp(env);
437
- uint32_t tmp;
438
-
439
- env->v7m.exception = new_exc;
440
-
441
- new_is_psp = v7m_using_psp(env);
442
-
443
- if (old_is_psp != new_is_psp) {
444
- tmp = env->v7m.other_sp;
445
- env->v7m.other_sp = env->regs[13];
446
- env->regs[13] = tmp;
447
- }
448
-}
449
-
450
-/* Switch M profile security state between NS and S */
451
-static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
452
-{
453
- uint32_t new_ss_msp, new_ss_psp;
454
-
455
- if (env->v7m.secure == new_secstate) {
456
- return;
457
- }
458
-
459
- /*
460
- * All the banked state is accessed by looking at env->v7m.secure
461
- * except for the stack pointer; rearrange the SP appropriately.
462
- */
463
- new_ss_msp = env->v7m.other_ss_msp;
464
- new_ss_psp = env->v7m.other_ss_psp;
465
-
466
- if (v7m_using_psp(env)) {
467
- env->v7m.other_ss_psp = env->regs[13];
468
- env->v7m.other_ss_msp = env->v7m.other_sp;
469
- } else {
470
- env->v7m.other_ss_msp = env->regs[13];
471
- env->v7m.other_ss_psp = env->v7m.other_sp;
472
- }
473
-
474
- env->v7m.secure = new_secstate;
475
-
476
- if (v7m_using_psp(env)) {
477
- env->regs[13] = new_ss_psp;
478
- env->v7m.other_sp = new_ss_msp;
479
- } else {
480
- env->regs[13] = new_ss_msp;
481
- env->v7m.other_sp = new_ss_psp;
482
- }
483
-}
484
-
485
-void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
486
-{
487
- /*
488
- * Handle v7M BXNS:
489
- * - if the return value is a magic value, do exception return (like BX)
490
- * - otherwise bit 0 of the return value is the target security state
491
- */
492
- uint32_t min_magic;
493
-
494
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
495
- /* Covers FNC_RETURN and EXC_RETURN magic */
496
- min_magic = FNC_RETURN_MIN_MAGIC;
497
- } else {
498
- /* EXC_RETURN magic only */
499
- min_magic = EXC_RETURN_MIN_MAGIC;
500
- }
501
-
502
- if (dest >= min_magic) {
503
- /*
504
- * This is an exception return magic value; put it where
505
- * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
506
- * Note that if we ever add gen_ss_advance() singlestep support to
507
- * M profile this should count as an "instruction execution complete"
508
- * event (compare gen_bx_excret_final_code()).
509
- */
510
- env->regs[15] = dest & ~1;
511
- env->thumb = dest & 1;
512
- HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
513
- /* notreached */
514
- }
515
-
516
- /* translate.c should have made BXNS UNDEF unless we're secure */
517
- assert(env->v7m.secure);
518
-
519
- if (!(dest & 1)) {
520
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
521
- }
522
- switch_v7m_security_state(env, dest & 1);
523
- env->thumb = 1;
524
- env->regs[15] = dest & ~1;
525
-}
526
-
527
-void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
528
-{
529
- /*
530
- * Handle v7M BLXNS:
531
- * - bit 0 of the destination address is the target security state
532
- */
533
-
534
- /* At this point regs[15] is the address just after the BLXNS */
535
- uint32_t nextinst = env->regs[15] | 1;
536
- uint32_t sp = env->regs[13] - 8;
537
- uint32_t saved_psr;
538
-
539
- /* translate.c will have made BLXNS UNDEF unless we're secure */
540
- assert(env->v7m.secure);
541
-
542
- if (dest & 1) {
543
- /*
544
- * Target is Secure, so this is just a normal BLX,
545
- * except that the low bit doesn't indicate Thumb/not.
546
- */
547
- env->regs[14] = nextinst;
548
- env->thumb = 1;
549
- env->regs[15] = dest & ~1;
550
- return;
551
- }
552
-
553
- /* Target is non-secure: first push a stack frame */
554
- if (!QEMU_IS_ALIGNED(sp, 8)) {
555
- qemu_log_mask(LOG_GUEST_ERROR,
556
- "BLXNS with misaligned SP is UNPREDICTABLE\n");
557
- }
558
-
559
- if (sp < v7m_sp_limit(env)) {
560
- raise_exception(env, EXCP_STKOF, 0, 1);
561
- }
562
-
563
- saved_psr = env->v7m.exception;
564
- if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
565
- saved_psr |= XPSR_SFPA;
566
- }
567
-
568
- /* Note that these stores can throw exceptions on MPU faults */
569
- cpu_stl_data(env, sp, nextinst);
570
- cpu_stl_data(env, sp + 4, saved_psr);
571
-
572
- env->regs[13] = sp;
573
- env->regs[14] = 0xfeffffff;
574
- if (arm_v7m_is_handler_mode(env)) {
575
- /*
576
- * Write a dummy value to IPSR, to avoid leaking the current secure
577
- * exception number to non-secure code. This is guaranteed not
578
- * to cause write_v7m_exception() to actually change stacks.
579
- */
580
- write_v7m_exception(env, 1);
581
- }
582
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
583
- switch_v7m_security_state(env, 0);
584
- env->thumb = 1;
585
- env->regs[15] = dest;
586
-}
587
-
588
-static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
49
-static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
589
- bool spsel)
50
- bool spsel)
590
-{
51
-{
591
- /*
52
- /*
592
- * Return a pointer to the location where we currently store the
53
- * Return a pointer to the location where we currently store the
...
...
619
- return &env->v7m.other_ss_msp;
80
- return &env->v7m.other_ss_msp;
620
- }
81
- }
621
- }
82
- }
622
-}
83
-}
623
-
84
-
624
-static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
85
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
625
- uint32_t *pvec)
86
uint32_t *pvec)
626
-{
87
{
627
- CPUState *cs = CPU(cpu);
88
@@ -XXX,XX +XXX,XX @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
628
- CPUARMState *env = &cpu->env;
89
!mode;
629
- MemTxResult result;
90
630
- uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
91
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
631
- uint32_t vector_entry;
632
- MemTxAttrs attrs = {};
633
- ARMMMUIdx mmu_idx;
634
- bool exc_secure;
635
-
636
- mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
637
-
638
- /*
639
- * We don't do a get_phys_addr() here because the rules for vector
640
- * loads are special: they always use the default memory map, and
641
- * the default memory map permits reads from all addresses.
642
- * Since there's no easy way to pass through to pmsav8_mpu_lookup()
643
- * that we want this special case which would always say "yes",
644
- * we just do the SAU lookup here followed by a direct physical load.
645
- */
646
- attrs.secure = targets_secure;
647
- attrs.user = false;
648
-
649
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
650
- V8M_SAttributes sattrs = {};
651
-
652
- v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
653
- if (sattrs.ns) {
654
- attrs.secure = false;
655
- } else if (!targets_secure) {
656
- /* NS access to S memory */
657
- goto load_fail;
658
- }
659
- }
660
-
661
- vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
662
- attrs, &result);
663
- if (result != MEMTX_OK) {
664
- goto load_fail;
665
- }
666
- *pvec = vector_entry;
667
- return true;
668
-
669
-load_fail:
670
- /*
671
- * All vector table fetch fails are reported as HardFault, with
672
- * HFSR.VECTTBL and .FORCED set. (FORCED is set because
673
- * technically the underlying exception is a MemManage or BusFault
674
- * that is escalated to HardFault.) This is a terminal exception,
675
- * so we will either take the HardFault immediately or else enter
676
- * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
677
- */
678
- exc_secure = targets_secure ||
679
- !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
680
- env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
681
- armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
682
- return false;
683
-}
684
-
685
-static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
686
-{
687
- /*
688
- * Return the integrity signature value for the callee-saves
689
- * stack frame section. @lr is the exception return payload/LR value
690
- * whose FType bit forms bit 0 of the signature if FP is present.
691
- */
692
- uint32_t sig = 0xfefa125a;
693
-
694
- if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
695
- sig |= 1;
696
- }
697
- return sig;
698
-}
699
-
700
-static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
701
- bool ignore_faults)
702
-{
703
- /*
704
- * For v8M, push the callee-saves register part of the stack frame.
705
- * Compare the v8M pseudocode PushCalleeStack().
706
- * In the tailchaining case this may not be the current stack.
707
- */
708
- CPUARMState *env = &cpu->env;
709
- uint32_t *frame_sp_p;
710
- uint32_t frameptr;
711
- ARMMMUIdx mmu_idx;
712
- bool stacked_ok;
713
- uint32_t limit;
714
- bool want_psp;
715
- uint32_t sig;
716
- StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
717
-
718
- if (dotailchain) {
719
- bool mode = lr & R_V7M_EXCRET_MODE_MASK;
720
- bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
721
- !mode;
722
-
723
- mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
724
- frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
92
- frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
725
- lr & R_V7M_EXCRET_SPSEL_MASK);
93
- lr & R_V7M_EXCRET_SPSEL_MASK);
726
- want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
94
+ frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode,
727
- if (want_psp) {
95
+ lr & R_V7M_EXCRET_SPSEL_MASK);
728
- limit = env->v7m.psplim[M_REG_S];
96
want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
729
- } else {
97
if (want_psp) {
730
- limit = env->v7m.msplim[M_REG_S];
98
limit = env->v7m.psplim[M_REG_S];
731
- }
99
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
732
- } else {
100
* use 'frame_sp_p' after we do something that makes it invalid.
733
- mmu_idx = arm_mmu_idx(env);
101
*/
734
- frame_sp_p = &env->regs[13];
102
bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
735
- limit = v7m_sp_limit(env);
736
- }
737
-
738
- frameptr = *frame_sp_p - 0x28;
739
- if (frameptr < limit) {
740
- /*
741
- * Stack limit failure: set SP to the limit value, and generate
742
- * STKOF UsageFault. Stack pushes below the limit must not be
743
- * performed. It is IMPDEF whether pushes above the limit are
744
- * performed; we choose not to.
745
- */
746
- qemu_log_mask(CPU_LOG_INT,
747
- "...STKOF during callee-saves register stacking\n");
748
- env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
749
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
750
- env->v7m.secure);
751
- *frame_sp_p = limit;
752
- return true;
753
- }
754
-
755
- /*
756
- * Write as much of the stack frame as we can. A write failure may
757
- * cause us to pend a derived exception.
758
- */
759
- sig = v7m_integrity_sig(env, lr);
760
- stacked_ok =
761
- v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
762
- v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
763
- v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
764
- v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
765
- v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
766
- v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
767
- v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
768
- v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
769
- v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
770
-
771
- /* Update SP regardless of whether any of the stack accesses failed. */
772
- *frame_sp_p = frameptr;
773
-
774
- return !stacked_ok;
775
-}
776
-
777
-static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
778
- bool ignore_stackfaults)
779
-{
780
- /*
781
- * Do the "take the exception" parts of exception entry,
782
- * but not the pushing of state to the stack. This is
783
- * similar to the pseudocode ExceptionTaken() function.
784
- */
785
- CPUARMState *env = &cpu->env;
786
- uint32_t addr;
787
- bool targets_secure;
788
- int exc;
789
- bool push_failed = false;
790
-
791
- armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
792
- qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
793
- targets_secure ? "secure" : "nonsecure", exc);
794
-
795
- if (dotailchain) {
796
- /* Sanitize LR FType and PREFIX bits */
797
- if (!arm_feature(env, ARM_FEATURE_VFP)) {
798
- lr |= R_V7M_EXCRET_FTYPE_MASK;
799
- }
800
- lr = deposit32(lr, 24, 8, 0xff);
801
- }
802
-
803
- if (arm_feature(env, ARM_FEATURE_V8)) {
804
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
805
- (lr & R_V7M_EXCRET_S_MASK)) {
806
- /*
807
- * The background code (the owner of the registers in the
808
- * exception frame) is Secure. This means it may either already
809
- * have or now needs to push callee-saves registers.
810
- */
811
- if (targets_secure) {
812
- if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
813
- /*
814
- * We took an exception from Secure to NonSecure
815
- * (which means the callee-saved registers got stacked)
816
- * and are now tailchaining to a Secure exception.
817
- * Clear DCRS so eventual return from this Secure
818
- * exception unstacks the callee-saved registers.
819
- */
820
- lr &= ~R_V7M_EXCRET_DCRS_MASK;
821
- }
822
- } else {
823
- /*
824
- * We're going to a non-secure exception; push the
825
- * callee-saves registers to the stack now, if they're
826
- * not already saved.
827
- */
828
- if (lr & R_V7M_EXCRET_DCRS_MASK &&
829
- !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
830
- push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
831
- ignore_stackfaults);
832
- }
833
- lr |= R_V7M_EXCRET_DCRS_MASK;
834
- }
835
- }
836
-
837
- lr &= ~R_V7M_EXCRET_ES_MASK;
838
- if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
839
- lr |= R_V7M_EXCRET_ES_MASK;
840
- }
841
- lr &= ~R_V7M_EXCRET_SPSEL_MASK;
842
- if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
843
- lr |= R_V7M_EXCRET_SPSEL_MASK;
844
- }
845
-
846
- /*
847
- * Clear registers if necessary to prevent non-secure exception
848
- * code being able to see register values from secure code.
849
- * Where register values become architecturally UNKNOWN we leave
850
- * them with their previous values.
851
- */
852
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
853
- if (!targets_secure) {
854
- /*
855
- * Always clear the caller-saved registers (they have been
856
- * pushed to the stack earlier in v7m_push_stack()).
857
- * Clear callee-saved registers if the background code is
858
- * Secure (in which case these regs were saved in
859
- * v7m_push_callee_stack()).
860
- */
861
- int i;
862
-
863
- for (i = 0; i < 13; i++) {
864
- /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
865
- if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
866
- env->regs[i] = 0;
867
- }
868
- }
869
- /* Clear EAPSR */
870
- xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
871
- }
872
- }
873
- }
874
-
875
- if (push_failed && !ignore_stackfaults) {
876
- /*
877
- * Derived exception on callee-saves register stacking:
878
- * we might now want to take a different exception which
879
- * targets a different security state, so try again from the top.
880
- */
881
- qemu_log_mask(CPU_LOG_INT,
882
- "...derived exception on callee-saves register stacking");
883
- v7m_exception_taken(cpu, lr, true, true);
884
- return;
885
- }
886
-
887
- if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
888
- /* Vector load failed: derived exception */
889
- qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
890
- v7m_exception_taken(cpu, lr, true, true);
891
- return;
892
- }
893
-
894
- /*
895
- * Now we've done everything that might cause a derived exception
896
- * we can go ahead and activate whichever exception we're going to
897
- * take (which might now be the derived exception).
898
- */
899
- armv7m_nvic_acknowledge_irq(env->nvic);
900
-
901
- /* Switch to target security state -- must do this before writing SPSEL */
902
- switch_v7m_security_state(env, targets_secure);
903
- write_v7m_control_spsel(env, 0);
904
- arm_clear_exclusive(env);
905
- /* Clear SFPA and FPCA (has no effect if no FPU) */
906
- env->v7m.control[M_REG_S] &=
907
- ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
908
- /* Clear IT bits */
909
- env->condexec_bits = 0;
910
- env->regs[14] = lr;
911
- env->regs[15] = addr & 0xfffffffe;
912
- env->thumb = addr & 1;
913
-}
914
-
915
-static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
916
- bool apply_splim)
917
-{
918
- /*
919
- * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
920
- * that we will need later in order to do lazy FP reg stacking.
921
- */
922
- bool is_secure = env->v7m.secure;
923
- void *nvic = env->nvic;
924
- /*
925
- * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
926
- * are banked and we want to update the bit in the bank for the
927
- * current security state; and in one case we want to specifically
928
- * update the NS banked version of a bit even if we are secure.
929
- */
930
- uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
931
- uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
932
- uint32_t *fpccr = &env->v7m.fpccr[is_secure];
933
- bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
934
-
935
- env->v7m.fpcar[is_secure] = frameptr & ~0x7;
936
-
937
- if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
938
- bool splimviol;
939
- uint32_t splim = v7m_sp_limit(env);
940
- bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
941
- (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
942
-
943
- splimviol = !ign && frameptr < splim;
944
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
945
- }
946
-
947
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
948
-
949
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
950
-
951
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
952
-
953
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
954
- !arm_v7m_is_handler_mode(env));
955
-
956
- hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
957
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
958
-
959
- bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
960
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
961
-
962
- mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
963
- *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
964
-
965
- ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
966
- *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
967
-
968
- monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
969
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
970
-
971
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
972
- s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
973
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
974
-
975
- sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
976
- *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
977
- }
978
-}
979
-
980
-void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
981
-{
982
- /* fptr is the value of Rn, the frame pointer we store the FP regs to */
983
- bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
984
- bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
985
-
986
- assert(env->v7m.secure);
987
-
988
- if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
989
- return;
990
- }
991
-
992
- /* Check access to the coprocessor is permitted */
993
- if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
994
- raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
995
- }
996
-
997
- if (lspact) {
998
- /* LSPACT should not be active when there is active FP state */
999
- raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
1000
- }
1001
-
1002
- if (fptr & 7) {
1003
- raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
1004
- }
1005
-
1006
- /*
1007
- * Note that we do not use v7m_stack_write() here, because the
1008
- * accesses should not set the FSR bits for stacking errors if they
1009
- * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
1010
- * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
1011
- * and longjmp out.
1012
- */
1013
- if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1014
- bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1015
- int i;
1016
-
1017
- for (i = 0; i < (ts ? 32 : 16); i += 2) {
1018
- uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1019
- uint32_t faddr = fptr + 4 * i;
1020
- uint32_t slo = extract64(dn, 0, 32);
1021
- uint32_t shi = extract64(dn, 32, 32);
1022
-
1023
- if (i >= 16) {
1024
- faddr += 8; /* skip the slot for the FPSCR */
1025
- }
1026
- cpu_stl_data(env, faddr, slo);
1027
- cpu_stl_data(env, faddr + 4, shi);
1028
- }
1029
- cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
1030
-
1031
- /*
1032
- * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
1033
- * leave them unchanged, matching our choice in v7m_preserve_fp_state.
1034
- */
1035
- if (ts) {
1036
- for (i = 0; i < 32; i += 2) {
1037
- *aa32_vfp_dreg(env, i / 2) = 0;
1038
- }
1039
- vfp_set_fpscr(env, 0);
1040
- }
1041
- } else {
1042
- v7m_update_fpccr(env, fptr, false);
1043
- }
1044
-
1045
- env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
1046
-}
1047
-
1048
-void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
1049
-{
1050
- /* fptr is the value of Rn, the frame pointer we load the FP regs from */
1051
- assert(env->v7m.secure);
1052
-
1053
- if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1054
- return;
1055
- }
1056
-
1057
- /* Check access to the coprocessor is permitted */
1058
- if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
1059
- raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
1060
- }
1061
-
1062
- if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1063
- /* State in FP is still valid */
1064
- env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
1065
- } else {
1066
- bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1067
- int i;
1068
- uint32_t fpscr;
1069
-
1070
- if (fptr & 7) {
1071
- raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
1072
- }
1073
-
1074
- for (i = 0; i < (ts ? 32 : 16); i += 2) {
1075
- uint32_t slo, shi;
1076
- uint64_t dn;
1077
- uint32_t faddr = fptr + 4 * i;
1078
-
1079
- if (i >= 16) {
1080
- faddr += 8; /* skip the slot for the FPSCR */
1081
- }
1082
-
1083
- slo = cpu_ldl_data(env, faddr);
1084
- shi = cpu_ldl_data(env, faddr + 4);
1085
-
1086
- dn = (uint64_t) shi << 32 | slo;
1087
- *aa32_vfp_dreg(env, i / 2) = dn;
1088
- }
1089
- fpscr = cpu_ldl_data(env, fptr + 0x40);
1090
- vfp_set_fpscr(env, fpscr);
1091
- }
1092
-
1093
- env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
1094
-}
1095
-
1096
-static bool v7m_push_stack(ARMCPU *cpu)
1097
-{
1098
- /*
1099
- * Do the "set up stack frame" part of exception entry,
1100
- * similar to pseudocode PushStack().
1101
- * Return true if we generate a derived exception (and so
1102
- * should ignore further stack faults trying to process
1103
- * that derived exception.)
1104
- */
1105
- bool stacked_ok = true, limitviol = false;
1106
- CPUARMState *env = &cpu->env;
1107
- uint32_t xpsr = xpsr_read(env);
1108
- uint32_t frameptr = env->regs[13];
1109
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
1110
- uint32_t framesize;
1111
- bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
1112
-
1113
- if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
1114
- (env->v7m.secure || nsacr_cp10)) {
1115
- if (env->v7m.secure &&
1116
- env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
1117
- framesize = 0xa8;
1118
- } else {
1119
- framesize = 0x68;
1120
- }
1121
- } else {
1122
- framesize = 0x20;
1123
- }
1124
-
1125
- /* Align stack pointer if the guest wants that */
1126
- if ((frameptr & 4) &&
1127
- (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
1128
- frameptr -= 4;
1129
- xpsr |= XPSR_SPREALIGN;
1130
- }
1131
-
1132
- xpsr &= ~XPSR_SFPA;
1133
- if (env->v7m.secure &&
1134
- (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1135
- xpsr |= XPSR_SFPA;
1136
- }
1137
-
1138
- frameptr -= framesize;
1139
-
1140
- if (arm_feature(env, ARM_FEATURE_V8)) {
1141
- uint32_t limit = v7m_sp_limit(env);
1142
-
1143
- if (frameptr < limit) {
1144
- /*
1145
- * Stack limit failure: set SP to the limit value, and generate
1146
- * STKOF UsageFault. Stack pushes below the limit must not be
1147
- * performed. It is IMPDEF whether pushes above the limit are
1148
- * performed; we choose not to.
1149
- */
1150
- qemu_log_mask(CPU_LOG_INT,
1151
- "...STKOF during stacking\n");
1152
- env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
1153
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1154
- env->v7m.secure);
1155
- env->regs[13] = limit;
1156
- /*
1157
- * We won't try to perform any further memory accesses but
1158
- * we must continue through the following code to check for
1159
- * permission faults during FPU state preservation, and we
1160
- * must update FPCCR if lazy stacking is enabled.
1161
- */
1162
- limitviol = true;
1163
- stacked_ok = false;
1164
- }
1165
- }
1166
-
1167
- /*
1168
- * Write as much of the stack frame as we can. If we fail a stack
1169
- * write this will result in a derived exception being pended
1170
- * (which may be taken in preference to the one we started with
1171
- * if it has higher priority).
1172
- */
1173
- stacked_ok = stacked_ok &&
1174
- v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
1175
- v7m_stack_write(cpu, frameptr + 4, env->regs[1],
1176
- mmu_idx, STACK_NORMAL) &&
1177
- v7m_stack_write(cpu, frameptr + 8, env->regs[2],
1178
- mmu_idx, STACK_NORMAL) &&
1179
- v7m_stack_write(cpu, frameptr + 12, env->regs[3],
1180
- mmu_idx, STACK_NORMAL) &&
1181
- v7m_stack_write(cpu, frameptr + 16, env->regs[12],
1182
- mmu_idx, STACK_NORMAL) &&
1183
- v7m_stack_write(cpu, frameptr + 20, env->regs[14],
1184
- mmu_idx, STACK_NORMAL) &&
1185
- v7m_stack_write(cpu, frameptr + 24, env->regs[15],
1186
- mmu_idx, STACK_NORMAL) &&
1187
- v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
1188
-
1189
- if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
1190
- /* FPU is active, try to save its registers */
1191
- bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
1192
- bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
1193
-
1194
- if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1195
- qemu_log_mask(CPU_LOG_INT,
1196
- "...SecureFault because LSPACT and FPCA both set\n");
1197
- env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1198
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1199
- } else if (!env->v7m.secure && !nsacr_cp10) {
1200
- qemu_log_mask(CPU_LOG_INT,
1201
- "...Secure UsageFault with CFSR.NOCP because "
1202
- "NSACR.CP10 prevents stacking FP regs\n");
1203
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
1204
- env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1205
- } else {
1206
- if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1207
- /* Lazy stacking disabled, save registers now */
1208
- int i;
1209
- bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
1210
- arm_current_el(env) != 0);
1211
-
1212
- if (stacked_ok && !cpacr_pass) {
1213
- /*
1214
- * Take UsageFault if CPACR forbids access. The pseudocode
1215
- * here does a full CheckCPEnabled() but we know the NSACR
1216
- * check can never fail as we have already handled that.
1217
- */
1218
- qemu_log_mask(CPU_LOG_INT,
1219
- "...UsageFault with CFSR.NOCP because "
1220
- "CPACR.CP10 prevents stacking FP regs\n");
1221
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1222
- env->v7m.secure);
1223
- env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
1224
- stacked_ok = false;
1225
- }
1226
-
1227
- for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1228
- uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1229
- uint32_t faddr = frameptr + 0x20 + 4 * i;
1230
- uint32_t slo = extract64(dn, 0, 32);
1231
- uint32_t shi = extract64(dn, 32, 32);
1232
-
1233
- if (i >= 16) {
1234
- faddr += 8; /* skip the slot for the FPSCR */
1235
- }
1236
- stacked_ok = stacked_ok &&
1237
- v7m_stack_write(cpu, faddr, slo,
1238
- mmu_idx, STACK_NORMAL) &&
1239
- v7m_stack_write(cpu, faddr + 4, shi,
1240
- mmu_idx, STACK_NORMAL);
1241
- }
1242
- stacked_ok = stacked_ok &&
1243
- v7m_stack_write(cpu, frameptr + 0x60,
1244
- vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
1245
- if (cpacr_pass) {
1246
- for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1247
- *aa32_vfp_dreg(env, i / 2) = 0;
1248
- }
1249
- vfp_set_fpscr(env, 0);
1250
- }
1251
- } else {
1252
- /* Lazy stacking enabled, save necessary info to stack later */
1253
- v7m_update_fpccr(env, frameptr + 0x20, true);
1254
- }
1255
- }
1256
- }
1257
-
1258
- /*
1259
- * If we broke a stack limit then SP was already updated earlier;
1260
- * otherwise we update SP regardless of whether any of the stack
1261
- * accesses failed or we took some other kind of fault.
1262
- */
1263
- if (!limitviol) {
1264
- env->regs[13] = frameptr;
1265
- }
1266
-
1267
- return !stacked_ok;
1268
-}
1269
-
1270
-static void do_v7m_exception_exit(ARMCPU *cpu)
1271
-{
1272
- CPUARMState *env = &cpu->env;
1273
- uint32_t excret;
1274
- uint32_t xpsr, xpsr_mask;
1275
- bool ufault = false;
1276
- bool sfault = false;
1277
- bool return_to_sp_process;
1278
- bool return_to_handler;
1279
- bool rettobase = false;
1280
- bool exc_secure = false;
1281
- bool return_to_secure;
1282
- bool ftype;
1283
- bool restore_s16_s31;
1284
-
1285
- /*
1286
- * If we're not in Handler mode then jumps to magic exception-exit
1287
- * addresses don't have magic behaviour. However for the v8M
1288
- * security extensions the magic secure-function-return has to
1289
- * work in thread mode too, so to avoid doing an extra check in
1290
- * the generated code we allow exception-exit magic to also cause the
1291
- * internal exception and bring us here in thread mode. Correct code
1292
- * will never try to do this (the following insn fetch will always
1293
- * fault) so we the overhead of having taken an unnecessary exception
1294
- * doesn't matter.
1295
- */
1296
- if (!arm_v7m_is_handler_mode(env)) {
1297
- return;
1298
- }
1299
-
1300
- /*
1301
- * In the spec pseudocode ExceptionReturn() is called directly
1302
- * from BXWritePC() and gets the full target PC value including
1303
- * bit zero. In QEMU's implementation we treat it as a normal
1304
- * jump-to-register (which is then caught later on), and so split
1305
- * the target value up between env->regs[15] and env->thumb in
1306
- * gen_bx(). Reconstitute it.
1307
- */
1308
- excret = env->regs[15];
1309
- if (env->thumb) {
1310
- excret |= 1;
1311
- }
1312
-
1313
- qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
1314
- " previous exception %d\n",
1315
- excret, env->v7m.exception);
1316
-
1317
- if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
1318
- qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
1319
- "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
1320
- excret);
1321
- }
1322
-
1323
- ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1324
-
1325
-    if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
-        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
-                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
-                      "if FPU not present\n",
-                      excret);
-        ftype = true;
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        /*
-         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
-         * we pick which FAULTMASK to clear.
-         */
-        if (!env->v7m.secure &&
-            ((excret & R_V7M_EXCRET_ES_MASK) ||
-             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
-            sfault = 1;
-            /* For all other purposes, treat ES as 0 (R_HXSR) */
-            excret &= ~R_V7M_EXCRET_ES_MASK;
-        }
-        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
-    }
-
-    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
-        /*
-         * Auto-clear FAULTMASK on return from other than NMI.
-         * If the security extension is implemented then this only
-         * happens if the raw execution priority is >= 0; the
-         * value of the ES bit in the exception return value indicates
-         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
-         */
-        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
-                env->v7m.faultmask[exc_secure] = 0;
-            }
-        } else {
-            env->v7m.faultmask[M_REG_NS] = 0;
-        }
-    }
-
-    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
-                                     exc_secure)) {
-    case -1:
-        /* attempt to exit an exception that isn't active */
-        ufault = true;
-        break;
-    case 0:
-        /* still an irq active now */
-        break;
-    case 1:
-        /*
-         * We returned to base exception level, no nesting.
-         * (In the pseudocode this is written using "NestedActivation != 1"
-         * where we have 'rettobase == false'.)
-         */
-        rettobase = true;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
-    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
-    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
-        (excret & R_V7M_EXCRET_S_MASK);
-
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            /*
-             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
-             * we choose to take the UsageFault.
-             */
-            if ((excret & R_V7M_EXCRET_S_MASK) ||
-                (excret & R_V7M_EXCRET_ES_MASK) ||
-                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
-                ufault = true;
-            }
-        }
-        if (excret & R_V7M_EXCRET_RES0_MASK) {
-            ufault = true;
-        }
-    } else {
-        /* For v7M we only recognize certain combinations of the low bits */
-        switch (excret & 0xf) {
-        case 1: /* Return to Handler */
-            break;
-        case 13: /* Return to Thread using Process stack */
-        case 9: /* Return to Thread using Main stack */
-            /*
-             * We only need to check NONBASETHRDENA for v7M, because in
-             * v8M this bit does not exist (it is RES1).
-             */
-            if (!rettobase &&
-                !(env->v7m.ccr[env->v7m.secure] &
-                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
-                ufault = true;
-            }
-            break;
-        default:
-            ufault = true;
-        }
-    }
-
-    /*
-     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
-     * Handler mode (and will be until we write the new XPSR.Interrupt
-     * field) this does not switch around the current stack pointer.
-     * We must do this before we do any kind of tailchaining, including
-     * for the derived exceptions on integrity check failures, or we will
-     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
-     */
-    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
-
-    /*
-     * Clear scratch FP values left in caller saved registers; this
-     * must happen before any kind of tail chaining.
-     */
-    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
-        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
-        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
-            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                          "stackframe: error during lazy state deactivation\n");
-            v7m_exception_taken(cpu, excret, true, false);
-            return;
-        } else {
-            /* Clear s0..s15 and FPSCR */
-            int i;
-
-            for (i = 0; i < 16; i += 2) {
-                *aa32_vfp_dreg(env, i / 2) = 0;
-            }
-            vfp_set_fpscr(env, 0);
-        }
-    }
-
-    if (sfault) {
-        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                      "stackframe: failed EXC_RETURN.ES validity check\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    if (ufault) {
-        /*
-         * Bad exception return: instead of popping the exception
-         * stack, directly take a usage fault on the current stack.
-         */
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
-                      "stackframe: failed exception return integrity check\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    /*
-     * Tailchaining: if there is currently a pending exception that
-     * is high enough priority to preempt execution at the level we're
-     * about to return to, then just directly take that exception now,
-     * avoiding an unstack-and-then-stack. Note that now we have
-     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
-     * our current execution priority is already the execution priority we are
-     * returning to -- none of the state we would unstack or set based on
-     * the EXCRET value affects it.
-     */
-    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
-        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
-        v7m_exception_taken(cpu, excret, true, false);
-        return;
-    }
-
-    switch_v7m_security_state(env, return_to_secure);
-
-    {
-        /*
-         * The stack pointer we should be reading the exception frame from
-         * depends on bits in the magic exception return type value (and
-         * for v8M isn't necessarily the stack pointer we will eventually
-         * end up resuming execution with). Get a pointer to the location
-         * in the CPU state struct where the SP we need is currently being
-         * stored; we will use and modify it in place.
-         * We use this limited C variable scope so we don't accidentally
-         * use 'frame_sp_p' after we do something that makes it invalid.
-         */
-        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
-                                              return_to_secure,
-                                              !return_to_handler,
-                                              return_to_sp_process);
-        uint32_t frameptr = *frame_sp_p;
-        bool pop_ok = true;
-        ARMMMUIdx mmu_idx;
-        bool return_to_priv = return_to_handler ||
-            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
-
-        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
-                                                        return_to_priv);
-
-        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
-            arm_feature(env, ARM_FEATURE_V8)) {
-            qemu_log_mask(LOG_GUEST_ERROR,
-                          "M profile exception return with non-8-aligned SP "
-                          "for destination state is UNPREDICTABLE\n");
-        }
-
-        /* Do we need to pop callee-saved registers? */
-        if (return_to_secure &&
-            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
-             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
-            uint32_t actual_sig;
-
-            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
-
-            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
-                /* Take a SecureFault on the current stack */
-                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
-                              "stackframe: failed exception return integrity "
-                              "signature check\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-
-            pop_ok = pop_ok &&
-                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
-                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
-
-            frameptr += 0x28;
-        }
-
-        /* Pop registers */
-        pop_ok = pop_ok &&
-            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
-            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
-            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
-
-        if (!pop_ok) {
-            /*
-             * v7m_stack_read() pended a fault, so take it (as a tail
-             * chained exception on the same stack frame)
-             */
-            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
-            v7m_exception_taken(cpu, excret, true, false);
-            return;
-        }
-
-        /*
-         * Returning from an exception with a PC with bit 0 set is defined
-         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
-         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
-         * the lsbit, and there are several RTOSes out there which incorrectly
-         * assume the r15 in the stack frame should be a Thumb-style "lsbit
-         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
-         * complain about the badly behaved guest.
-         */
-        if (env->regs[15] & 1) {
-            env->regs[15] &= ~1U;
-            if (!arm_feature(env, ARM_FEATURE_V8)) {
-                qemu_log_mask(LOG_GUEST_ERROR,
-                              "M profile return from interrupt with misaligned "
-                              "PC is UNPREDICTABLE on v7M\n");
-            }
-        }
-
-        if (arm_feature(env, ARM_FEATURE_V8)) {
-            /*
-             * For v8M we have to check whether the xPSR exception field
-             * matches the EXCRET value for return to handler/thread
-             * before we commit to changing the SP and xPSR.
-             */
-            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
-            if (return_to_handler != will_be_handler) {
-                /*
-                 * Take an INVPC UsageFault on the current stack.
-                 * By this point we will have switched to the security state
-                 * for the background state, so this UsageFault will target
-                 * that state.
-                 */
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                        env->v7m.secure);
-                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
-                              "stackframe: failed exception return integrity "
-                              "check\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-        }
-
-        if (!ftype) {
-            /* FP present and we need to handle it */
-            if (!return_to_secure &&
-                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
-                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...taking SecureFault on existing stackframe: "
-                              "Secure LSPACT set but exception return is "
-                              "not to secure state\n");
-                v7m_exception_taken(cpu, excret, true, false);
-                return;
-            }
-
-            restore_s16_s31 = return_to_secure &&
-                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
-
-            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
-                /* State in FPU is still valid, just clear LSPACT */
-                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
-            } else {
-                int i;
-                uint32_t fpscr;
-                bool cpacr_pass, nsacr_pass;
-
-                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
-                                            return_to_priv);
-                nsacr_pass = return_to_secure ||
-                    extract32(env->v7m.nsacr, 10, 1);
-
-                if (!cpacr_pass) {
-                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                            return_to_secure);
-                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...taking UsageFault on existing "
-                                  "stackframe: CPACR.CP10 prevents unstacking "
-                                  "FP regs\n");
-                    v7m_exception_taken(cpu, excret, true, false);
-                    return;
-                } else if (!nsacr_pass) {
-                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
-                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...taking Secure UsageFault on existing "
-                                  "stackframe: NSACR.CP10 prevents unstacking "
-                                  "FP regs\n");
-                    v7m_exception_taken(cpu, excret, true, false);
-                    return;
-                }
-
-                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
-                    uint32_t slo, shi;
-                    uint64_t dn;
-                    uint32_t faddr = frameptr + 0x20 + 4 * i;
-
-                    if (i >= 16) {
-                        faddr += 8; /* Skip the slot for the FPSCR */
-                    }
-
-                    pop_ok = pop_ok &&
-                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
-                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
-
-                    if (!pop_ok) {
-                        break;
-                    }
-
-                    dn = (uint64_t)shi << 32 | slo;
-                    *aa32_vfp_dreg(env, i / 2) = dn;
-                }
-                pop_ok = pop_ok &&
-                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
-                if (pop_ok) {
-                    vfp_set_fpscr(env, fpscr);
-                }
-                if (!pop_ok) {
-                    /*
-                     * These regs are 0 if security extension present;
-                     * otherwise merely UNKNOWN. We zero always.
-                     */
-                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
-                        *aa32_vfp_dreg(env, i / 2) = 0;
-                    }
-                    vfp_set_fpscr(env, 0);
-                }
-            }
-        }
-        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
-                                               V7M_CONTROL, FPCA, !ftype);
-
-        /* Commit to consuming the stack frame */
-        frameptr += 0x20;
-        if (!ftype) {
-            frameptr += 0x48;
-            if (restore_s16_s31) {
-                frameptr += 0x40;
-            }
-        }
-        /*
-         * Undo stack alignment (the SPREALIGN bit indicates that the original
-         * pre-exception SP was not 8-aligned and we added a padding word to
-         * align it, so we undo this by ORing in the bit that increases it
-         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
-         * would work too but a logical OR is how the pseudocode specifies it.)
-         */
-        if (xpsr & XPSR_SPREALIGN) {
-            frameptr |= 4;
-        }
-        *frame_sp_p = frameptr;
-    }
-
-    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
-    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
-        xpsr_mask &= ~XPSR_GE;
-    }
-    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
-    xpsr_write(env, xpsr, xpsr_mask);
-
-    if (env->v7m.secure) {
-        bool sfpa = xpsr & XPSR_SFPA;
-
-        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
-                                               V7M_CONTROL, SFPA, sfpa);
-    }
-
-    /*
-     * The restored xPSR exception field will be zero if we're
-     * resuming in Thread mode. If that doesn't match what the
-     * exception return excret specified then this is a UsageFault.
-     * v7M requires we make this check here; v8M did it earlier.
-     */
-    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
-        /*
-         * Take an INVPC UsageFault by pushing the stack again;
-         * we know we're v7M so this is never a Secure UsageFault.
-         */
-        bool ignore_stackfaults;
-
-        assert(!arm_feature(env, ARM_FEATURE_V8));
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-        ignore_stackfaults = v7m_push_stack(cpu);
-        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
-                      "failed exception return integrity check\n");
-        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
-        return;
-    }
-
-    /* Otherwise, we have a successful exception exit. */
-    arm_clear_exclusive(env);
-    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
-}
-
-static bool do_v7m_function_return(ARMCPU *cpu)
-{
-    /*
-     * v8M security extensions magic function return.
-     * We may either:
-     *  (1) throw an exception (longjump)
-     *  (2) return true if we successfully handled the function return
-     *  (3) return false if we failed a consistency check and have
-     *      pended a UsageFault that needs to be taken now
-     *
-     * At this point the magic return value is split between env->regs[15]
-     * and env->thumb. We don't bother to reconstitute it because we don't
-     * need it (all values are handled the same way).
-     */
-    CPUARMState *env = &cpu->env;
-    uint32_t newpc, newpsr, newpsr_exc;
-
-    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
-
-    {
-        bool threadmode, spsel;
-        TCGMemOpIdx oi;
-        ARMMMUIdx mmu_idx;
-        uint32_t *frame_sp_p;
-        uint32_t frameptr;
-
-        /* Pull the return address and IPSR from the Secure stack */
-        threadmode = !arm_v7m_is_handler_mode(env);
-        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
-
-        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
-        frameptr = *frame_sp_p;
-
-        /*
-         * These loads may throw an exception (for MPU faults). We want to
-         * do them as secure, so work out what MMU index that is.
-         */
-        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
-        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
-        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
-
-        /* Consistency checks on new IPSR */
-        newpsr_exc = newpsr & XPSR_EXCP;
-        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
-              (env->v7m.exception == 1 && newpsr_exc != 0))) {
-            /* Pend the fault and tell our caller to take it */
-            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
-                                    env->v7m.secure);
-            qemu_log_mask(CPU_LOG_INT,
-                          "...taking INVPC UsageFault: "
-                          "IPSR consistency check failed\n");
-            return false;
-        }
-
-        *frame_sp_p = frameptr + 8;
-    }
-
-    /* This invalidates frame_sp_p */
-    switch_v7m_security_state(env, true);
-    env->v7m.exception = newpsr_exc;
-    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-    if (newpsr & XPSR_SFPA) {
-        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
-    }
-    xpsr_write(env, 0, XPSR_IT);
-    env->thumb = newpc & 1;
-    env->regs[15] = newpc & ~1;
-
-    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
-    return true;
-}
-
-static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
-                               uint32_t addr, uint16_t *insn)
-{
-    /*
-     * Load a 16-bit portion of a v7M instruction, returning true on success,
-     * or false on failure (in which case we will have pended the appropriate
-     * exception).
-     * We need to do the instruction fetch's MPU and SAU checks
-     * like this because there is no MMU index that would allow
-     * doing the load with a single function call. Instead we must
-     * first check that the security attributes permit the load
-     * and that they don't mismatch on the two halves of the instruction,
-     * and then we do the load as a secure load (ie using the security
-     * attributes of the address, not the CPU, as architecturally required).
-     */
-    CPUState *cs = CPU(cpu);
-    CPUARMState *env = &cpu->env;
-    V8M_SAttributes sattrs = {};
-    MemTxAttrs attrs = {};
-    ARMMMUFaultInfo fi = {};
-    MemTxResult txres;
-    target_ulong page_size;
-    hwaddr physaddr;
-    int prot;
-
-    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
-    if (!sattrs.nsc || sattrs.ns) {
-        /*
-         * This must be the second half of the insn, and it straddles a
-         * region boundary with the second half not being S&NSC.
-         */
-        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        qemu_log_mask(CPU_LOG_INT,
-                      "...really SecureFault with SFSR.INVEP\n");
-        return false;
-    }
-    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
-                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
-        /* the MPU lookup failed */
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
-        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
-        return false;
-    }
-    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
-                                  attrs, &txres);
-    if (txres != MEMTX_OK) {
-        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
-        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
-        return false;
-    }
-    return true;
-}
-
-static bool v7m_handle_execute_nsc(ARMCPU *cpu)
-{
-    /*
-     * Check whether this attempt to execute code in a Secure & NS-Callable
-     * memory region is for an SG instruction; if so, then emulate the
-     * effect of the SG instruction and return true. Otherwise pend
-     * the correct kind of exception and return false.
-     */
-    CPUARMState *env = &cpu->env;
-    ARMMMUIdx mmu_idx;
-    uint16_t insn;
-
-    /*
-     * We should never get here unless get_phys_addr_pmsav8() caused
-     * an exception for NS executing in S&NSC memory.
-     */
-    assert(!env->v7m.secure);
-    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
-
-    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
-    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-
-    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
-        return false;
-    }
-
-    if (!env->thumb) {
-        goto gen_invep;
-    }
-
-    if (insn != 0xe97f) {
-        /*
-         * Not an SG instruction first half (we choose the IMPDEF
-         * early-SG-check option).
-         */
-        goto gen_invep;
-    }
-
-    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
-        return false;
-    }
-
-    if (insn != 0xe97f) {
-        /*
-         * Not an SG instruction second half (yes, both halves of the SG
-         * insn have the same hex value)
-         */
-        goto gen_invep;
-    }
-
-    /*
-     * OK, we have confirmed that we really have an SG instruction.
-     * We know we're NS in S memory so don't need to repeat those checks.
-     */
-    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
-                  ", executing it\n", env->regs[15]);
-    env->regs[14] &= ~1;
-    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-    switch_v7m_security_state(env, true);
-    xpsr_write(env, 0, XPSR_IT);
-    env->regs[15] += 4;
-    return true;
-
-gen_invep:
-    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-    qemu_log_mask(CPU_LOG_INT,
-                  "...really SecureFault with SFSR.INVEP\n");
-    return false;
-}
-
-void arm_v7m_cpu_do_interrupt(CPUState *cs)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    uint32_t lr;
-    bool ignore_stackfaults;
-
-    arm_log_exception(cs->exception_index);
-
-    /*
-     * For exceptions we just mark as pending on the NVIC, and let that
-     * handle it.
-     */
-    switch (cs->exception_index) {
-    case EXCP_UDEF:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
-        break;
-    case EXCP_NOCP:
-    {
-        /*
-         * NOCP might be directed to something other than the current
-         * security state if this fault is because of NSACR; we indicate
-         * the target security state using exception.target_el.
-         */
-        int target_secstate;
-
-        if (env->exception.target_el == 3) {
-            target_secstate = M_REG_S;
-        } else {
-            target_secstate = env->v7m.secure;
-        }
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
-        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
-        break;
-    }
-    case EXCP_INVSTATE:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
-        break;
-    case EXCP_STKOF:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
-        break;
-    case EXCP_LSERR:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
-        break;
-    case EXCP_UNALIGNED:
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
-        break;
-    case EXCP_SWI:
-        /* The PC already points to the next instruction. */
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
-        break;
-    case EXCP_PREFETCH_ABORT:
-    case EXCP_DATA_ABORT:
-        /*
-         * Note that for M profile we don't have a guest facing FSR, but
-         * the env->exception.fsr will be populated by the code that
-         * raises the fault, in the A profile short-descriptor format.
-         */
-        switch (env->exception.fsr & 0xf) {
-        case M_FAKE_FSR_NSC_EXEC:
-            /*
-             * Exception generated when we try to execute code at an address
-             * which is marked as Secure & Non-Secure Callable and the CPU
-             * is in the Non-Secure state. The only instruction which can
-             * be executed like this is SG (and that only if both halves of
-             * the SG instruction have the same security attributes.)
-             * Everything else must generate an INVEP SecureFault, so we
-             * emulate the SG instruction here.
-             */
-            if (v7m_handle_execute_nsc(cpu)) {
-                return;
-            }
-            break;
-        case M_FAKE_FSR_SFAULT:
-            /*
-             * Various flavours of SecureFault for attempts to execute or
-             * access data in the wrong security state.
-             */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                if (env->v7m.secure) {
-                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...really SecureFault with SFSR.INVTRAN\n");
-                } else {
-                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-                    qemu_log_mask(CPU_LOG_INT,
-                                  "...really SecureFault with SFSR.INVEP\n");
-                }
-                break;
-            case EXCP_DATA_ABORT:
-                /* This must be an NS access to S memory */
-                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...really SecureFault with SFSR.AUVIOL\n");
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            break;
-        case 0x8: /* External Abort */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
-                break;
-            case EXCP_DATA_ABORT:
-                env->v7m.cfsr[M_REG_NS] |=
-                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
-                env->v7m.bfar = env->exception.vaddress;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
-                              env->v7m.bfar);
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
-            break;
-        default:
-            /*
-             * All other FSR values are either MPU faults or "can't happen
-             * for M profile" cases.
-             */
-            switch (cs->exception_index) {
-            case EXCP_PREFETCH_ABORT:
-                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
-                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
-                break;
-            case EXCP_DATA_ABORT:
-                env->v7m.cfsr[env->v7m.secure] |=
-                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
-                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
-                              env->v7m.mmfar[env->v7m.secure]);
-                break;
-            }
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
-                                    env->v7m.secure);
-            break;
-        }
-        break;
-    case EXCP_BKPT:
-        if (semihosting_enabled()) {
-            int nr;
-            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
-            if (nr == 0xab) {
-                env->regs[15] += 2;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...handling as semihosting call 0x%x\n",
-                              env->regs[0]);
-                env->regs[0] = do_arm_semihosting(env);
-                return;
-            }
-        }
-        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
-        break;
-    case EXCP_IRQ:
-        break;
-    case EXCP_EXCEPTION_EXIT:
-        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
-            /* Must be v8M security extension function return */
-            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
-            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
-            if (do_v7m_function_return(cpu)) {
-                return;
-            }
-        } else {
-            do_v7m_exception_exit(cpu);
-            return;
-        }
-        break;
-    case EXCP_LAZYFP:
-        /*
-         * We already pended the specific exception in the NVIC in the
-         * v7m_preserve_fp_state() helper function.
-         */
-        break;
-    default:
-        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
-        return; /* Never happens. Keep compiler happy. */
-    }
-
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        lr = R_V7M_EXCRET_RES1_MASK |
-            R_V7M_EXCRET_DCRS_MASK;
-        /*
-         * The S bit indicates whether we should return to Secure
-         * or NonSecure (ie our current state).
-         * The ES bit indicates whether we're taking this exception
-         * to Secure or NonSecure (ie our target state). We set it
-         * later, in v7m_exception_taken().
-         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
-         * This corresponds to the ARM ARM pseudocode for v8M setting
-         * some LR bits in PushStack() and some in ExceptionTaken();
-         * the distinction matters for the tailchain cases where we
-         * can take an exception without pushing the stack.
-         */
-        if (env->v7m.secure) {
-            lr |= R_V7M_EXCRET_S_MASK;
-        }
-        if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
-            lr |= R_V7M_EXCRET_FTYPE_MASK;
-        }
-    } else {
-        lr = R_V7M_EXCRET_RES1_MASK |
-            R_V7M_EXCRET_S_MASK |
-            R_V7M_EXCRET_DCRS_MASK |
-            R_V7M_EXCRET_FTYPE_MASK |
-            R_V7M_EXCRET_ES_MASK;
-        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
-            lr |= R_V7M_EXCRET_SPSEL_MASK;
-        }
-    }
-    if (!arm_v7m_is_handler_mode(env)) {
-        lr |= R_V7M_EXCRET_MODE_MASK;
-    }
-
-    ignore_stackfaults = v7m_push_stack(cpu);
-    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
-}
-
 /*
  * Function used to synchronize QEMU's AArch64 register set with AArch32
  * register set. This is necessary when switching between AArch32 and AArch64
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     return phys_addr;
 }
 
-uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
-{
-    uint32_t mask;
-    unsigned el = arm_current_el(env);
-
-    /* First handle registers which unprivileged can read */
-
-    switch (reg) {
-    case 0 ... 7: /* xPSR sub-fields */
-        mask = 0;
-        if ((reg & 1) && el) {
-            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
-        }
-        if (!(reg & 4)) {
-            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
-            if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
-                mask |= XPSR_GE;
-            }
-        }
-        /* EPSR reads as zero */
-        return xpsr_read(env) & mask;
-        break;
-    case 20: /* CONTROL */
-    {
-        uint32_t value = env->v7m.control[env->v7m.secure];
-        if (!env->v7m.secure) {
-            /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
-            value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
-        }
-        return value;
-    }
-    case 0x94: /* CONTROL_NS */
-        /*
-         * We have to handle this here because unprivileged Secure code
-         * can read the NS CONTROL register.
-         */
-        if (!env->v7m.secure) {
-            return 0;
-        }
-        return env->v7m.control[M_REG_NS] |
-            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
-    }
-
-    if (el == 0) {
-        return 0; /* unprivileged reads others as zero */
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        switch (reg) {
-        case 0x88: /* MSP_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.other_ss_msp;
-        case 0x89: /* PSP_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.other_ss_psp;
-        case 0x8a: /* MSPLIM_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.msplim[M_REG_NS];
-        case 0x8b: /* PSPLIM_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.psplim[M_REG_NS];
-        case 0x90: /* PRIMASK_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.primask[M_REG_NS];
-        case 0x91: /* BASEPRI_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.basepri[M_REG_NS];
-        case 0x93: /* FAULTMASK_NS */
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            return env->v7m.faultmask[M_REG_NS];
-        case 0x98: /* SP_NS */
-        {
-            /*
-             * This gives the non-secure SP selected based on whether we're
-             * currently in handler mode or not, using the NS CONTROL.SPSEL.
-             */
-            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
-
-            if (!env->v7m.secure) {
-                return 0;
-            }
-            if (!arm_v7m_is_handler_mode(env) && spsel) {
-                return env->v7m.other_ss_psp;
-            } else {
-                return env->v7m.other_ss_msp;
-            }
-        }
-        default:
-            break;
-        }
-    }
-
-    switch (reg) {
-    case 8: /* MSP */
-        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
-    case 9: /* PSP */
-        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
-    case 10: /* MSPLIM */
-        if (!arm_feature(env, ARM_FEATURE_V8)) {
-            goto bad_reg;
-        }
-        return env->v7m.msplim[env->v7m.secure];
-    case 11: /* PSPLIM */
-        if (!arm_feature(env, ARM_FEATURE_V8)) {
-            goto bad_reg;
-        }
-        return env->v7m.psplim[env->v7m.secure];
-    case 16: /* PRIMASK */
-        return env->v7m.primask[env->v7m.secure];
-    case 17: /* BASEPRI */
-    case 18: /* BASEPRI_MAX */
-        return env->v7m.basepri[env->v7m.secure];
-    case 19: /* FAULTMASK */
-        return env->v7m.faultmask[env->v7m.secure];
-    default:
-    bad_reg:
-        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
-                      " register %d\n", reg);
-        return 0;
-    }
-}
-
-void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
-{
-    /*
-     * We're passed bits [11..0] of the instruction; extract
-     * SYSm and the mask bits.
-     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
-     * we choose to treat them as if the mask bits were valid.
-     * NB that the pseudocode 'mask' variable is bits [11..10],
-     * whereas ours is [11..8].
-     */
-    uint32_t mask = extract32(maskreg, 8, 4);
-    uint32_t reg = extract32(maskreg, 0, 8);
-    int cur_el = arm_current_el(env);
-
-    if (cur_el == 0 && reg > 7 && reg != 20) {
-        /*
-         * only xPSR sub-fields and CONTROL.SFPA may be written by
-         * unprivileged code
-         */
-        return;
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        switch (reg) {
-        case 0x88: /* MSP_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.other_ss_msp = val;
-            return;
-        case 0x89: /* PSP_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.other_ss_psp = val;
-            return;
-        case 0x8a: /* MSPLIM_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.msplim[M_REG_NS] = val & ~7;
-            return;
-        case 0x8b: /* PSPLIM_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.psplim[M_REG_NS] = val & ~7;
-            return;
-        case 0x90: /* PRIMASK_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            env->v7m.primask[M_REG_NS] = val & 1;
-            return;
-        case 0x91: /* BASEPRI_NS */
-            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
-                return;
-            }
-            env->v7m.basepri[M_REG_NS] = val & 0xff;
-            return;
-        case 0x93: /* FAULTMASK_NS */
-            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
-                return;
-            }
-            env->v7m.faultmask[M_REG_NS] = val & 1;
-            return;
-        case 0x94: /* CONTROL_NS */
-            if (!env->v7m.secure) {
-                return;
-            }
-            write_v7m_control_spsel_for_secstate(env,
-                                                 val & R_V7M_CONTROL_SPSEL_MASK,
-                                                 M_REG_NS);
-            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
-                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
-                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
-            }
-            /*
-             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
-             * RES0 if the FPU is not present, and is stored in the S bank
-             */
-            if (arm_feature(env, ARM_FEATURE_VFP) &&
-                extract32(env->v7m.nsacr, 10, 1)) {
-                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
-                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
-            }
-            return;
-        case 0x98: /* SP_NS */
-        {
-            /*
-             * This gives the non-secure SP selected based on whether we're
-             * currently in handler mode or not, using the NS CONTROL.SPSEL.
-             */
-            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
-            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
-            uint32_t limit;
-
-            if (!env->v7m.secure) {
-                return;
-            }
-
-            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
-
-            if (val < limit) {
-                CPUState *cs = env_cpu(env);
-
-                cpu_restore_state(cs, GETPC(), true);
-                raise_exception(env, EXCP_STKOF, 0, 1);
-            }
-
-            if (is_psp) {
-                env->v7m.other_ss_psp = val;
-            } else {
-                env->v7m.other_ss_msp = val;
-            }
-            return;
-        }
-        default:
-            break;
-        }
-    }
-
-    switch (reg) {
-    case 0 ... 7: /* xPSR sub-fields */
-        /* only APSR is actually writable */
-        if (!(reg & 4)) {
-            uint32_t apsrmask = 0;
-
-            if (mask & 8) {
-                apsrmask |= XPSR_NZCV | XPSR_Q;
-            }
-            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
-                apsrmask |= XPSR_GE;
-            }
-            xpsr_write(env, val, apsrmask);
-        }
-        break;
-    case 8: /* MSP */
-        if (v7m_using_psp(env)) {
-            env->v7m.other_sp = val;
-        } else {
-            env->regs[13] = val;
-        }
-        break;
-    case 9: /* PSP */
-        if (v7m_using_psp(env)) {
-            env->regs[13] = val;
-        } else {
-            env->v7m.other_sp = val;
-        }
-        break;
-    case 10: /* MSPLIM */
-        if (!arm_feature(env, ARM_FEATURE_V8)) {
-            goto bad_reg;
-        }
-        env->v7m.msplim[env->v7m.secure] = val & ~7;
-        break;
-    case 11: /* PSPLIM */
-        if (!arm_feature(env, ARM_FEATURE_V8)) {
-            goto bad_reg;
-        }
-        env->v7m.psplim[env->v7m.secure] = val & ~7;
-        break;
-    case 16: /* PRIMASK */
-        env->v7m.primask[env->v7m.secure] = val & 1;
-        break;
-    case 17: /* BASEPRI */
-        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
-            goto bad_reg;
-        }
-        env->v7m.basepri[env->v7m.secure] = val & 0xff;
-        break;
-    case 18: /* BASEPRI_MAX */
-        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
-            goto bad_reg;
-        }
-        val &= 0xff;
-        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
-                         || env->v7m.basepri[env->v7m.secure] == 0)) {
-            env->v7m.basepri[env->v7m.secure] = val;
-        }
-        break;
-    case 19: /* FAULTMASK */
-        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
-            goto bad_reg;
-        }
-        env->v7m.faultmask[env->v7m.secure] = val & 1;
-        break;
-    case 20: /* CONTROL */
-        /*
-         * Writing to the SPSEL bit only has an effect if we are in
-         * thread mode; other bits can be updated by any privileged code.
-         * write_v7m_control_spsel() deals with updating the SPSEL bit in
-         * env->v7m.control, so we only need update the others.
-         * For v7M, we must just ignore explicit writes to SPSEL in handler
-         * mode; for v8M the write is permitted but will have no effect.
-         * All these bits are writes-ignored from non-privileged code,
-         * except for SFPA.
-         */
-        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
-                           !arm_v7m_is_handler_mode(env))) {
-            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
-        }
-        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
-            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
-            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
-        }
-        if (arm_feature(env, ARM_FEATURE_VFP)) {
-            /*
-             * SFPA is RAZ/WI from NS or if no FPU.
-             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
-             * Both are stored in the S bank.
-             */
-            if (env->v7m.secure) {
-                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
-                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
-            }
-            if (cur_el > 0 &&
-                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
-                 extract32(env->v7m.nsacr, 10, 1))) {
-                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
-                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
-            }
-        }
-        break;
-    default:
-    bad_reg:
-        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
-                      " register %d\n", reg);
-        return;
-    }
-}
-
-uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
-{
-    /* Implement the TT instruction. op is bits [7:6] of the insn. */
-    bool forceunpriv = op & 1;
-    bool alt = op & 2;
-    V8M_SAttributes sattrs = {};
-    uint32_t tt_resp;
-    bool r, rw, nsr, nsrw, mrvalid;
-    int prot;
-    ARMMMUFaultInfo fi = {};
-    MemTxAttrs attrs = {};
-    hwaddr phys_addr;
-    ARMMMUIdx mmu_idx;
-    uint32_t mregion;
-    bool targetpriv;
-    bool targetsec = env->v7m.secure;
-    bool is_subpage;
-
-    /*
-     * Work out what the security state and privilege level we're
-     * interested in is...
-     */
-    if (alt) {
-        targetsec = !targetsec;
-    }
-
-    if (forceunpriv) {
-        targetpriv = false;
-    } else {
-        targetpriv = arm_v7m_is_handler_mode(env) ||
-            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
-    }
-
-    /* ...and then figure out which MMU index this is */
-    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
-
-    /*
-     * We know that the MPU and SAU don't care about the access type
-     * for our purposes beyond that we don't want to claim to be
-     * an insn fetch, so we arbitrarily call this a read.
-     */
-
-    /*
-     * MPU region info only available for privileged or if
-     * inspecting the other MPU state.
-     */
-    if (arm_current_el(env) != 0 || alt) {
-        /* We can ignore the return value as prot is always set */
-        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
-                          &phys_addr, &attrs, &prot, &is_subpage,
-                          &fi, &mregion);
-        if (mregion == -1) {
-            mrvalid = false;
-            mregion = 0;
-        } else {
-            mrvalid = true;
-        }
-        r = prot & PAGE_READ;
-        rw = prot & PAGE_WRITE;
-    } else {
-        r = false;
-        rw = false;
-        mrvalid = false;
-        mregion = 0;
-    }
-
-    if (env->v7m.secure) {
-        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
-        nsr = sattrs.ns && r;
-        nsrw = sattrs.ns && rw;
-    } else {
-        sattrs.ns = true;
-        nsr = false;
-        nsrw = false;
-    }
-
-    tt_resp = (sattrs.iregion << 24) |
-        (sattrs.irvalid << 23) |
-        ((!sattrs.ns) << 22) |
-        (nsrw << 21) |
-        (nsr << 20) |
-        (rw << 19) |
-        (r << 18) |
-        (sattrs.srvalid << 17) |
-        (mrvalid << 16) |
-        (sattrs.sregion << 8) |
-        mregion;
-
-    return tt_resp;
-}
-
 #endif
 
 /* Note that signed overflow is undefined in C. The following routines are
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
     return 0;
 }
 
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
-                              bool secstate, bool priv, bool negpri)
-{
-    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
-
-    if (priv) {
-        mmu_idx |= ARM_MMU_IDX_M_PRIV;
-    }
-
-    if (negpri) {
-        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
-    }
-
-    if (secstate) {
-        mmu_idx |= ARM_MMU_IDX_M_S;
-    }
-
-    return mmu_idx;
-}
-
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
-                                                bool secstate, bool priv)
-{
-    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
-
-    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
-}
-
-/* Return the MMU index for a v7M CPU in the specified security state */
+#ifndef CONFIG_TCG
 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
 {
-    bool priv = arm_current_el(env) != 0;
-
-    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
+    g_assert_not_reached();
 }
+#endif
 
 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
 {
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * ARM generic helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "target/arm/idau.h"
+#include "trace.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "sysemu/sysemu.h"
+#include "qemu/bitops.h"
+#include "qemu/crc32c.h"
+#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
+#include <zlib.h> /* For crc32 */
+#include "hw/semihosting/semihost.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "qemu/range.h"
+#include "qapi/qapi-commands-target.h"
+#include "qapi/error.h"
+#include "qemu/guest-random.h"
+#ifdef CONFIG_TCG
+#include "arm_ldst.h"
+#include "exec/cpu_ldst.h"
+#endif
+
+#ifdef CONFIG_USER_ONLY
+
+/* These should probably raise undefined insn exceptions. */
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
+{
+    ARMCPU *cpu = env_archcpu(env);
+
+    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
+}
+
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
+{
+    ARMCPU *cpu = env_archcpu(env);
+
+    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
+    return 0;
+}
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+    /*
+     * The TT instructions can be used by unprivileged code, but in
+     * user-only emulation we don't have the MPU.
+     * Luckily since we know we are NonSecure unprivileged (and that in
+     * turn means that the A flag wasn't specified), all the bits in the
+     * register must be zero:
+     *  IREGION: 0 because IRVALID is 0
+     *  IRVALID: 0 because NS
+     *  S: 0 because NS
+     *  NSRW: 0 because NS
+     *  NSR: 0 because NS
+     *  RW: 0 because unpriv and A flag not set
+     *  R: 0 because unpriv and A flag not set
+     *  SRVALID: 0 because NS
+     *  MRVALID: 0 because unpriv and A flag not set
+     *  SREGION: 0 because SRVALID is 0
2830
+ * MREGION: 0 because MRVALID is 0
2831
+ */
2832
+ return 0;
2833
+}
2834
+
2835
+#else
2836
+
2837
+/*
2838
+ * What kind of stack write are we doing? This affects how exceptions
2839
+ * generated during the stacking are treated.
2840
+ */
2841
+typedef enum StackingMode {
2842
+ STACK_NORMAL,
2843
+ STACK_IGNFAULTS,
2844
+ STACK_LAZYFP,
2845
+} StackingMode;
2846
+
2847
+static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
2848
+ ARMMMUIdx mmu_idx, StackingMode mode)
2849
+{
2850
+ CPUState *cs = CPU(cpu);
2851
+ CPUARMState *env = &cpu->env;
2852
+ MemTxAttrs attrs = {};
2853
+ MemTxResult txres;
2854
+ target_ulong page_size;
2855
+ hwaddr physaddr;
2856
+ int prot;
2857
+ ARMMMUFaultInfo fi = {};
2858
+ bool secure = mmu_idx & ARM_MMU_IDX_M_S;
2859
+ int exc;
2860
+ bool exc_secure;
2861
+
2862
+ if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
2863
+ &attrs, &prot, &page_size, &fi, NULL)) {
2864
+ /* MPU/SAU lookup failed */
2865
+ if (fi.type == ARMFault_QEMU_SFault) {
2866
+ if (mode == STACK_LAZYFP) {
2867
+ qemu_log_mask(CPU_LOG_INT,
2868
+ "...SecureFault with SFSR.LSPERR "
2869
+ "during lazy stacking\n");
2870
+ env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
2871
+ } else {
2872
+ qemu_log_mask(CPU_LOG_INT,
2873
+ "...SecureFault with SFSR.AUVIOL "
2874
+ "during stacking\n");
2875
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2876
+ }
2877
+ env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
2878
+ env->v7m.sfar = addr;
2879
+ exc = ARMV7M_EXCP_SECURE;
2880
+ exc_secure = false;
2881
+ } else {
2882
+ if (mode == STACK_LAZYFP) {
2883
+ qemu_log_mask(CPU_LOG_INT,
2884
+ "...MemManageFault with CFSR.MLSPERR\n");
2885
+ env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
2886
+ } else {
2887
+ qemu_log_mask(CPU_LOG_INT,
2888
+ "...MemManageFault with CFSR.MSTKERR\n");
2889
+ env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
2890
+ }
2891
+ exc = ARMV7M_EXCP_MEM;
2892
+ exc_secure = secure;
2893
+ }
2894
+ goto pend_fault;
2895
+ }
2896
+ address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
2897
+ attrs, &txres);
2898
+ if (txres != MEMTX_OK) {
2899
+ /* BusFault trying to write the data */
2900
+ if (mode == STACK_LAZYFP) {
2901
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
2902
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
2903
+ } else {
2904
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
2905
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
2906
+ }
2907
+ exc = ARMV7M_EXCP_BUS;
2908
+ exc_secure = false;
2909
+ goto pend_fault;
2910
+ }
2911
+ return true;
2912
+
2913
+pend_fault:
2914
+ /*
2915
+ * By pending the exception at this point we are making
2916
+ * the IMPDEF choice "overridden exceptions pended" (see the
2917
+ * MergeExcInfo() pseudocode). The other choice would be to not
2918
+ * pend them now and then make a choice about which to throw away
2919
+ * later if we have two derived exceptions.
2920
+ * The only case when we must not pend the exception but instead
2921
+ * throw it away is if we are doing the push of the callee registers
2922
+ * and we've already generated a derived exception (this is indicated
2923
+ * by the caller passing STACK_IGNFAULTS). Even in this case we will
2924
+ * still update the fault status registers.
2925
+ */
2926
+ switch (mode) {
2927
+ case STACK_NORMAL:
2928
+ armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
2929
+ break;
2930
+ case STACK_LAZYFP:
2931
+ armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
2932
+ break;
2933
+ case STACK_IGNFAULTS:
2934
+ break;
2935
+ }
2936
+ return false;
2937
+}
2938
+
2939
+static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
2940
+ ARMMMUIdx mmu_idx)
2941
+{
2942
+ CPUState *cs = CPU(cpu);
2943
+ CPUARMState *env = &cpu->env;
2944
+ MemTxAttrs attrs = {};
2945
+ MemTxResult txres;
2946
+ target_ulong page_size;
2947
+ hwaddr physaddr;
2948
+ int prot;
2949
+ ARMMMUFaultInfo fi = {};
2950
+ bool secure = mmu_idx & ARM_MMU_IDX_M_S;
2951
+ int exc;
2952
+ bool exc_secure;
2953
+ uint32_t value;
2954
+
2955
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
2956
+ &attrs, &prot, &page_size, &fi, NULL)) {
2957
+ /* MPU/SAU lookup failed */
2958
+ if (fi.type == ARMFault_QEMU_SFault) {
2959
+ qemu_log_mask(CPU_LOG_INT,
2960
+ "...SecureFault with SFSR.AUVIOL during unstack\n");
2961
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2962
+ env->v7m.sfar = addr;
2963
+ exc = ARMV7M_EXCP_SECURE;
2964
+ exc_secure = false;
2965
+ } else {
2966
+ qemu_log_mask(CPU_LOG_INT,
2967
+ "...MemManageFault with CFSR.MUNSTKERR\n");
2968
+ env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
2969
+ exc = ARMV7M_EXCP_MEM;
2970
+ exc_secure = secure;
2971
+ }
2972
+ goto pend_fault;
2973
+ }
2974
+
2975
+ value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
2976
+ attrs, &txres);
2977
+ if (txres != MEMTX_OK) {
2978
+ /* BusFault trying to read the data */
2979
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
2980
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
2981
+ exc = ARMV7M_EXCP_BUS;
2982
+ exc_secure = false;
2983
+ goto pend_fault;
2984
+ }
2985
+
2986
+ *dest = value;
2987
+ return true;
2988
+
2989
+pend_fault:
2990
+ /*
2991
+ * By pending the exception at this point we are making
2992
+ * the IMPDEF choice "overridden exceptions pended" (see the
2993
+ * MergeExcInfo() pseudocode). The other choice would be to not
2994
+ * pend them now and then make a choice about which to throw away
2995
+ * later if we have two derived exceptions.
2996
+ */
2997
+ armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
2998
+ return false;
2999
+}
3000
+
3001
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
3002
+{
3003
+ /*
3004
+ * Preserve FP state (because LSPACT was set and we are about
3005
+ * to execute an FP instruction). This corresponds to the
3006
+ * PreserveFPState() pseudocode.
3007
+ * We may throw an exception if the stacking fails.
3008
+ */
3009
+ ARMCPU *cpu = env_archcpu(env);
3010
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
3011
+ bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
3012
+ bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
3013
+ bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
3014
+ uint32_t fpcar = env->v7m.fpcar[is_secure];
3015
+ bool stacked_ok = true;
3016
+ bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
3017
+ bool take_exception;
3018
+
3019
+ /* Take the iothread lock as we are going to touch the NVIC */
3020
+ qemu_mutex_lock_iothread();
3021
+
3022
+ /* Check the background context had access to the FPU */
3023
+ if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
3024
+ armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
3025
+ env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
3026
+ stacked_ok = false;
3027
+ } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
3028
+ armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
3029
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
3030
+ stacked_ok = false;
3031
+ }
3032
+
3033
+ if (!splimviol && stacked_ok) {
3034
+ /* We only stack if the stack limit wasn't violated */
3035
+ int i;
3036
+ ARMMMUIdx mmu_idx;
3037
+
3038
+ mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
3039
+ for (i = 0; i < (ts ? 32 : 16); i += 2) {
3040
+ uint64_t dn = *aa32_vfp_dreg(env, i / 2);
3041
+ uint32_t faddr = fpcar + 4 * i;
3042
+ uint32_t slo = extract64(dn, 0, 32);
3043
+ uint32_t shi = extract64(dn, 32, 32);
3044
+
3045
+ if (i >= 16) {
3046
+ faddr += 8; /* skip the slot for the FPSCR */
3047
+ }
3048
+ stacked_ok = stacked_ok &&
3049
+ v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
3050
+ v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
3051
+ }
3052
+
3053
+ stacked_ok = stacked_ok &&
3054
+ v7m_stack_write(cpu, fpcar + 0x40,
3055
+ vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
3056
+ }
3057
+
3058
+ /*
3059
+ * We definitely pended an exception, but it's possible that it
3060
+ * might not be able to be taken now. If its priority permits us
3061
+ * to take it now, then we must not update the LSPACT or FP regs,
3062
+ * but instead jump out to take the exception immediately.
3063
+ * If it's just pending and won't be taken until the current
3064
+ * handler exits, then we do update LSPACT and the FP regs.
3065
+ */
3066
+ take_exception = !stacked_ok &&
3067
+ armv7m_nvic_can_take_pending_exception(env->nvic);
3068
+
3069
+ qemu_mutex_unlock_iothread();
3070
+
3071
+ if (take_exception) {
3072
+ raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
3073
+ }
3074
+
3075
+ env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
3076
+
3077
+ if (ts) {
3078
+ /* Clear s0 to s31 and the FPSCR */
3079
+ int i;
3080
+
3081
+ for (i = 0; i < 32; i += 2) {
3082
+ *aa32_vfp_dreg(env, i / 2) = 0;
3083
+ }
3084
+ vfp_set_fpscr(env, 0);
3085
+ }
3086
+ /*
3087
+ * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
3088
+ * unchanged.
3089
+ */
3090
+}
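
The stacking loop above fixes the lazy-FP frame layout: s0-s15 at FPCAR+0x00..0x3c,
the FPSCR at +0x40, and (when TS is set) s16-s31 at +0x48..0x84, because the loop
skips 8 bytes once i reaches 16. A minimal standalone sketch of the offset
calculation (illustration only, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Offset of the stacked word for single-precision register s<i>,
     * mirroring the faddr computation in the loop above. */
    static uint32_t lazyfp_offset(int i)
    {
        uint32_t off = 4 * i;
        if (i >= 16) {
            off += 8;   /* skip the FPSCR slot and its padding word */
        }
        return off;
    }

    int main(void)
    {
        printf("s0  at +0x%02x\n", lazyfp_offset(0));   /* +0x00 */
        printf("s15 at +0x%02x\n", lazyfp_offset(15));  /* +0x3c */
        printf("FPSCR at +0x40\n");                     /* stored explicitly */
        printf("s16 at +0x%02x\n", lazyfp_offset(16));  /* +0x48 */
        printf("s31 at +0x%02x\n", lazyfp_offset(31));  /* +0x84 */
        return 0;
    }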
+
+/*
+ * Write to v7M CONTROL.SPSEL bit for the specified security bank.
+ * This may change the current stack pointer between Main and Process
+ * stack pointers if it is done for the CONTROL register for the current
+ * security state.
+ */
+static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
+                                                 bool new_spsel,
+                                                 bool secstate)
+{
+    bool old_is_psp = v7m_using_psp(env);
+
+    env->v7m.control[secstate] =
+        deposit32(env->v7m.control[secstate],
+                  R_V7M_CONTROL_SPSEL_SHIFT,
+                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
+
+    if (secstate == env->v7m.secure) {
+        bool new_is_psp = v7m_using_psp(env);
+        uint32_t tmp;
+
+        if (old_is_psp != new_is_psp) {
+            tmp = env->v7m.other_sp;
+            env->v7m.other_sp = env->regs[13];
+            env->regs[13] = tmp;
+        }
+    }
+}
+
+/*
+ * Write to v7M CONTROL.SPSEL bit. This may change the current
+ * stack pointer between Main and Process stack pointers.
+ */
+static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
+{
+    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
+}
+
+void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
+{
+    /*
+     * Write a new value to v7m.exception, thus transitioning into or out
+     * of Handler mode; this may result in a change of active stack pointer.
+     */
+    bool new_is_psp, old_is_psp = v7m_using_psp(env);
+    uint32_t tmp;
+
+    env->v7m.exception = new_exc;
+
+    new_is_psp = v7m_using_psp(env);
+
+    if (old_is_psp != new_is_psp) {
+        tmp = env->v7m.other_sp;
+        env->v7m.other_sp = env->regs[13];
+        env->regs[13] = tmp;
+    }
+}
+
+/* Switch M profile security state between NS and S */
+static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
+{
+    uint32_t new_ss_msp, new_ss_psp;
+
+    if (env->v7m.secure == new_secstate) {
+        return;
+    }
+
+    /*
+     * All the banked state is accessed by looking at env->v7m.secure
+     * except for the stack pointer; rearrange the SP appropriately.
+     */
+    new_ss_msp = env->v7m.other_ss_msp;
+    new_ss_psp = env->v7m.other_ss_psp;
+
+    if (v7m_using_psp(env)) {
+        env->v7m.other_ss_psp = env->regs[13];
+        env->v7m.other_ss_msp = env->v7m.other_sp;
+    } else {
+        env->v7m.other_ss_msp = env->regs[13];
+        env->v7m.other_ss_psp = env->v7m.other_sp;
+    }
+
+    env->v7m.secure = new_secstate;
+
+    if (v7m_using_psp(env)) {
+        env->regs[13] = new_ss_psp;
+        env->v7m.other_sp = new_ss_msp;
+    } else {
+        env->regs[13] = new_ss_msp;
+        env->v7m.other_sp = new_ss_psp;
+    }
+}
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+    /*
+     * Handle v7M BXNS:
+     *  - if the return value is a magic value, do exception return (like BX)
+     *  - otherwise bit 0 of the return value is the target security state
+     */
+    uint32_t min_magic;
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        /* Covers FNC_RETURN and EXC_RETURN magic */
+        min_magic = FNC_RETURN_MIN_MAGIC;
+    } else {
+        /* EXC_RETURN magic only */
+        min_magic = EXC_RETURN_MIN_MAGIC;
+    }
+
+    if (dest >= min_magic) {
+        /*
+         * This is an exception return magic value; put it where
+         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
+         * Note that if we ever add gen_ss_advance() singlestep support to
+         * M profile this should count as an "instruction execution complete"
+         * event (compare gen_bx_excret_final_code()).
+         */
+        env->regs[15] = dest & ~1;
+        env->thumb = dest & 1;
+        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
+        /* notreached */
+    }
+
+    /* translate.c should have made BXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    if (!(dest & 1)) {
+        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    }
+    switch_v7m_security_state(env, dest & 1);
+    env->thumb = 1;
+    env->regs[15] = dest & ~1;
+}
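
The BXNS helper distinguishes its cases purely by the value of Rm. A minimal
standalone classifier, assuming the magic-value boundaries QEMU defines in
target/arm/internals.h (FNC_RETURN_MIN_MAGIC 0xfefffffe, EXC_RETURN_MIN_MAGIC
0xff000000); a sketch, not the helper itself:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Assumed values, following target/arm/internals.h */
    #define FNC_RETURN_MIN_MAGIC 0xfefffffeu
    #define EXC_RETURN_MIN_MAGIC 0xff000000u

    static const char *classify_bxns(uint32_t dest, bool have_security_ext)
    {
        uint32_t min_magic = have_security_ext ? FNC_RETURN_MIN_MAGIC
                                               : EXC_RETURN_MIN_MAGIC;

        if (dest >= min_magic) {
            return "magic: exception or function return";
        }
        /* otherwise bit 0 selects the target security state */
        return (dest & 1) ? "branch, stay Secure" : "branch to NonSecure";
    }

    int main(void)
    {
        printf("%s\n", classify_bxns(0xfffffffd, true)); /* EXC_RETURN */
        printf("%s\n", classify_bxns(0xfefffffe, true)); /* FNC_RETURN */
        printf("%s\n", classify_bxns(0x08000400, true)); /* NS entry point */
        return 0;
    }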
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /*
+     * Handle v7M BLXNS:
+     *  - bit 0 of the destination address is the target security state
+     */
+
+    /* At this point regs[15] is the address just after the BLXNS */
+    uint32_t nextinst = env->regs[15] | 1;
+    uint32_t sp = env->regs[13] - 8;
+    uint32_t saved_psr;
+
+    /* translate.c will have made BLXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    if (dest & 1) {
+        /*
+         * Target is Secure, so this is just a normal BLX,
+         * except that the low bit doesn't indicate Thumb/not.
+         */
+        env->regs[14] = nextinst;
+        env->thumb = 1;
+        env->regs[15] = dest & ~1;
+        return;
+    }
+
+    /* Target is non-secure: first push a stack frame */
+    if (!QEMU_IS_ALIGNED(sp, 8)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
+    }
+
+    if (sp < v7m_sp_limit(env)) {
+        raise_exception(env, EXCP_STKOF, 0, 1);
+    }
+
+    saved_psr = env->v7m.exception;
+    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+        saved_psr |= XPSR_SFPA;
+    }
+
+    /* Note that these stores can throw exceptions on MPU faults */
+    cpu_stl_data(env, sp, nextinst);
+    cpu_stl_data(env, sp + 4, saved_psr);
+
+    env->regs[13] = sp;
+    env->regs[14] = 0xfeffffff;
+    if (arm_v7m_is_handler_mode(env)) {
+        /*
+         * Write a dummy value to IPSR, to avoid leaking the current secure
+         * exception number to non-secure code. This is guaranteed not
+         * to cause write_v7m_exception() to actually change stacks.
+         */
+        write_v7m_exception(env, 1);
+    }
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    switch_v7m_security_state(env, 0);
+    env->thumb = 1;
+    env->regs[15] = dest;
+}
+
+static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
+                                bool spsel)
+{
+    /*
+     * Return a pointer to the location where we currently store the
+     * stack pointer for the requested security state and thread mode.
+     * This pointer will become invalid if the CPU state is updated
...
+        } else {
+            return &env->v7m.other_ss_msp;
+        }
+    }
+}
+
+static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
+                                uint32_t *pvec)
+{
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxResult result;
+    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
+    uint32_t vector_entry;
+    MemTxAttrs attrs = {};
+    ARMMMUIdx mmu_idx;
+    bool exc_secure;
+
+    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
+
+    /*
+     * We don't do a get_phys_addr() here because the rules for vector
+     * loads are special: they always use the default memory map, and
+     * the default memory map permits reads from all addresses.
+     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
+     * that we want this special case which would always say "yes",
+     * we just do the SAU lookup here followed by a direct physical load.
+     */
+    attrs.secure = targets_secure;
+    attrs.user = false;
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        V8M_SAttributes sattrs = {};
+
+        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+        if (sattrs.ns) {
+            attrs.secure = false;
+        } else if (!targets_secure) {
+            /* NS access to S memory */
+            goto load_fail;
+        }
+    }
+
+    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
+                                     attrs, &result);
+    if (result != MEMTX_OK) {
+        goto load_fail;
+    }
+    *pvec = vector_entry;
+    return true;
+
+load_fail:
+    /*
+     * All vector table fetch fails are reported as HardFault, with
+     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
+     * technically the underlying exception is a MemManage or BusFault
+     * that is escalated to HardFault.) This is a terminal exception,
+     * so we will either take the HardFault immediately or else enter
+     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
+     */
+    exc_secure = targets_secure ||
+        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
+    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
+    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
+    return false;
+}
+
+static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
+{
+    /*
+     * Return the integrity signature value for the callee-saves
+     * stack frame section. @lr is the exception return payload/LR value
+     * whose FType bit forms bit 0 of the signature if FP is present.
+     */
+    uint32_t sig = 0xfefa125a;
+
+    if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
+        sig |= 1;
+    }
+    return sig;
+}
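
So the callee-saves frame signature is one of exactly two words: 0xfefa125a when
the frame includes FP state (FType 0 with an FPU present), 0xfefa125b otherwise.
A quick standalone check of the two cases (illustrative only):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Same derivation as v7m_integrity_sig() above: bit 0 is set when
     * there is no FPU or when EXC_RETURN.FType says "no FP frame". */
    static uint32_t integrity_sig(bool have_fpu, bool ftype)
    {
        uint32_t sig = 0xfefa125a;

        if (!have_fpu || ftype) {
            sig |= 1;
        }
        return sig;
    }

    int main(void)
    {
        printf("FP frame:    0x%08x\n", integrity_sig(true, false));  /* ...5a */
        printf("no FP frame: 0x%08x\n", integrity_sig(true, true));   /* ...5b */
        printf("no FPU:      0x%08x\n", integrity_sig(false, true));  /* ...5b */
        return 0;
    }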
+
+static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+                                  bool ignore_faults)
+{
+    /*
+     * For v8M, push the callee-saves register part of the stack frame.
+     * Compare the v8M pseudocode PushCalleeStack().
+     * In the tailchaining case this may not be the current stack.
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t *frame_sp_p;
+    uint32_t frameptr;
+    ARMMMUIdx mmu_idx;
+    bool stacked_ok;
+    uint32_t limit;
+    bool want_psp;
+    uint32_t sig;
+    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
+
+    if (dotailchain) {
+        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
+        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
+                    !mode;
+
+        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
+        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
+                                    lr & R_V7M_EXCRET_SPSEL_MASK);
+        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
+        if (want_psp) {
+            limit = env->v7m.psplim[M_REG_S];
+        } else {
+            limit = env->v7m.msplim[M_REG_S];
+        }
+    } else {
+        mmu_idx = arm_mmu_idx(env);
+        frame_sp_p = &env->regs[13];
+        limit = v7m_sp_limit(env);
+    }
+
+    frameptr = *frame_sp_p - 0x28;
+    if (frameptr < limit) {
+        /*
+         * Stack limit failure: set SP to the limit value, and generate
+         * STKOF UsageFault. Stack pushes below the limit must not be
+         * performed. It is IMPDEF whether pushes above the limit are
+         * performed; we choose not to.
+         */
+        qemu_log_mask(CPU_LOG_INT,
+                      "...STKOF during callee-saves register stacking\n");
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                env->v7m.secure);
+        *frame_sp_p = limit;
+        return true;
+    }
+
+    /*
+     * Write as much of the stack frame as we can. A write failure may
+     * cause us to pend a derived exception.
+     */
+    sig = v7m_integrity_sig(env, lr);
+    stacked_ok =
+        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
+        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
+
+    /* Update SP regardless of whether any of the stack accesses failed. */
+    *frame_sp_p = frameptr;
+
+    return !stacked_ok;
+}
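
Note the 0x28-byte callee-saves frame this builds: the integrity signature at
offset 0, a reserved word at +0x4, then r4-r11 at +0x8..+0x24. A tiny sketch of
the layout, with the offsets read off the v7m_stack_write() calls above:

    #include <stdio.h>

    int main(void)
    {
        /* Offsets within the 0x28-byte callee-saves frame, per the
         * v7m_stack_write() calls in v7m_push_callee_stack(). */
        printf("+0x00  integrity signature\n");
        printf("+0x04  (reserved)\n");
        for (int r = 4; r <= 11; r++) {
            printf("+0x%02x  r%d\n", 0x8 + 4 * (r - 4), r);
        }
        printf("frame size: 0x28 bytes\n");
        return 0;
    }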
+
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+                                bool ignore_stackfaults)
+{
+    /*
+     * Do the "take the exception" parts of exception entry,
+     * but not the pushing of state to the stack. This is
+     * similar to the pseudocode ExceptionTaken() function.
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t addr;
+    bool targets_secure;
+    int exc;
+    bool push_failed = false;
+
+    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
+    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
+                  targets_secure ? "secure" : "nonsecure", exc);
+
+    if (dotailchain) {
+        /* Sanitize LR FType and PREFIX bits */
+        if (!arm_feature(env, ARM_FEATURE_VFP)) {
+            lr |= R_V7M_EXCRET_FTYPE_MASK;
+        }
+        lr = deposit32(lr, 24, 8, 0xff);
+    }
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+            (lr & R_V7M_EXCRET_S_MASK)) {
+            /*
+             * The background code (the owner of the registers in the
+             * exception frame) is Secure. This means it may either already
+             * have or now needs to push callee-saves registers.
+             */
+            if (targets_secure) {
+                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
+                    /*
+                     * We took an exception from Secure to NonSecure
+                     * (which means the callee-saved registers got stacked)
+                     * and are now tailchaining to a Secure exception.
+                     * Clear DCRS so eventual return from this Secure
+                     * exception unstacks the callee-saved registers.
+                     */
+                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
+                }
+            } else {
+                /*
+                 * We're going to a non-secure exception; push the
+                 * callee-saves registers to the stack now, if they're
+                 * not already saved.
+                 */
+                if (lr & R_V7M_EXCRET_DCRS_MASK &&
+                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
+                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
+                                                        ignore_stackfaults);
+                }
+                lr |= R_V7M_EXCRET_DCRS_MASK;
+            }
+        }
+
+        lr &= ~R_V7M_EXCRET_ES_MASK;
+        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            lr |= R_V7M_EXCRET_ES_MASK;
+        }
+        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
+        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
+            lr |= R_V7M_EXCRET_SPSEL_MASK;
+        }
+
+        /*
+         * Clear registers if necessary to prevent non-secure exception
+         * code being able to see register values from secure code.
+         * Where register values become architecturally UNKNOWN we leave
+         * them with their previous values.
+         */
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            if (!targets_secure) {
+                /*
+                 * Always clear the caller-saved registers (they have been
+                 * pushed to the stack earlier in v7m_push_stack()).
+                 * Clear callee-saved registers if the background code is
+                 * Secure (in which case these regs were saved in
+                 * v7m_push_callee_stack()).
+                 */
+                int i;
+
+                for (i = 0; i < 13; i++) {
+                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
+                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
+                        env->regs[i] = 0;
+                    }
+                }
+                /* Clear EAPSR */
+                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
+            }
+        }
+    }
+
+    if (push_failed && !ignore_stackfaults) {
+        /*
+         * Derived exception on callee-saves register stacking:
+         * we might now want to take a different exception which
+         * targets a different security state, so try again from the top.
+         */
+        qemu_log_mask(CPU_LOG_INT,
+                      "...derived exception on callee-saves register stacking");
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
+    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
+        /* Vector load failed: derived exception */
+        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
+    /*
+     * Now we've done everything that might cause a derived exception
+     * we can go ahead and activate whichever exception we're going to
+     * take (which might now be the derived exception).
+     */
+    armv7m_nvic_acknowledge_irq(env->nvic);
+
+    /* Switch to target security state -- must do this before writing SPSEL */
+    switch_v7m_security_state(env, targets_secure);
+    write_v7m_control_spsel(env, 0);
+    arm_clear_exclusive(env);
+    /* Clear SFPA and FPCA (has no effect if no FPU) */
+    env->v7m.control[M_REG_S] &=
+        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
+    /* Clear IT bits */
+    env->condexec_bits = 0;
+    env->regs[14] = lr;
+    env->regs[15] = addr & 0xfffffffe;
+    env->thumb = addr & 1;
+}
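
On the tailchain path the code above rewrites several EXC_RETURN bits before
using it as the new LR. A standalone sketch of those fixups, assuming the usual
v8M EXC_RETURN bit layout (ES bit 0, SPSEL bit 2, FType bit 4) and mimicking
deposit32() with plain masking; not the helper itself:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define EXCRET_ES    (1u << 0)   /* assumed bit positions (v8M) */
    #define EXCRET_SPSEL (1u << 2)
    #define EXCRET_FTYPE (1u << 4)

    static uint32_t sanitize_excret(uint32_t lr, bool have_fpu,
                                    bool targets_secure, bool spsel)
    {
        if (!have_fpu) {
            lr |= EXCRET_FTYPE;              /* FType is RES1 without an FPU */
        }
        lr = (lr & 0x00ffffff) | 0xff000000; /* force the 0xFF prefix, as
                                                deposit32(lr, 24, 8, 0xff) does */
        lr &= ~EXCRET_ES;
        if (targets_secure) {
            lr |= EXCRET_ES;                 /* ES tracks the target state */
        }
        lr &= ~EXCRET_SPSEL;
        if (spsel) {
            lr |= EXCRET_SPSEL;              /* SPSEL copied from CONTROL */
        }
        return lr;
    }

    int main(void)
    {
        printf("0x%08x\n", sanitize_excret(0xfefffffd, false, true, false));
        return 0;
    }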
+
+static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
+                             bool apply_splim)
+{
+    /*
+     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
+     * that we will need later in order to do lazy FP reg stacking.
+     */
+    bool is_secure = env->v7m.secure;
+    void *nvic = env->nvic;
+    /*
+     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
+     * are banked and we want to update the bit in the bank for the
+     * current security state; and in one case we want to specifically
+     * update the NS banked version of a bit even if we are secure.
+     */
+    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
+    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
+    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
+    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
+
+    env->v7m.fpcar[is_secure] = frameptr & ~0x7;
+
+    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
+        bool splimviol;
+        uint32_t splim = v7m_sp_limit(env);
+        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
+            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
+
+        splimviol = !ign && frameptr < splim;
+        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
+    }
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
+
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
+
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
+                        !arm_v7m_is_handler_mode(env));
+
+    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
+
+    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
+
+    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
+    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
+
+    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
+    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
+
+    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
+    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
+        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
+
+        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
+        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
+    }
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
+    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
+
+    assert(env->v7m.secure);
+
+    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        return;
+    }
+
+    /* Check access to the coprocessor is permitted */
+    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+    }
+
+    if (lspact) {
+        /* LSPACT should not be active when there is active FP state */
+        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
+    }
+
+    if (fptr & 7) {
+        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+    }
+
+    /*
+     * Note that we do not use v7m_stack_write() here, because the
+     * accesses should not set the FSR bits for stacking errors if they
+     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
+     * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
+     * and longjmp out.
+     */
+    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+        int i;
+
+        for (i = 0; i < (ts ? 32 : 16); i += 2) {
+            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+            uint32_t faddr = fptr + 4 * i;
+            uint32_t slo = extract64(dn, 0, 32);
+            uint32_t shi = extract64(dn, 32, 32);
+
+            if (i >= 16) {
+                faddr += 8; /* skip the slot for the FPSCR */
+            }
+            cpu_stl_data(env, faddr, slo);
+            cpu_stl_data(env, faddr + 4, shi);
+        }
+        cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
+
+        /*
+         * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
+         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
+         */
+        if (ts) {
+            for (i = 0; i < 32; i += 2) {
+                *aa32_vfp_dreg(env, i / 2) = 0;
+            }
+            vfp_set_fpscr(env, 0);
+        }
+    } else {
+        v7m_update_fpccr(env, fptr, false);
+    }
+
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+}
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
+    assert(env->v7m.secure);
+
+    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        return;
+    }
+
+    /* Check access to the coprocessor is permitted */
+    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+    }
+
+    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
+        /* State in FP is still valid */
+        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
+    } else {
+        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+        int i;
+        uint32_t fpscr;
+
+        if (fptr & 7) {
+            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+        }
+
+        for (i = 0; i < (ts ? 32 : 16); i += 2) {
+            uint32_t slo, shi;
+            uint64_t dn;
+            uint32_t faddr = fptr + 4 * i;
+
+            if (i >= 16) {
+                faddr += 8; /* skip the slot for the FPSCR */
+            }
+
+            slo = cpu_ldl_data(env, faddr);
+            shi = cpu_ldl_data(env, faddr + 4);
+
+            dn = (uint64_t) shi << 32 | slo;
+            *aa32_vfp_dreg(env, i / 2) = dn;
+        }
+        fpscr = cpu_ldl_data(env, fptr + 0x40);
+        vfp_set_fpscr(env, fpscr);
+    }
+
+    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
+}
+
+static bool v7m_push_stack(ARMCPU *cpu)
+{
+    /*
+     * Do the "set up stack frame" part of exception entry,
+     * similar to pseudocode PushStack().
+     * Return true if we generate a derived exception (and so
+     * should ignore further stack faults trying to process
+     * that derived exception.)
+     */
+    bool stacked_ok = true, limitviol = false;
+    CPUARMState *env = &cpu->env;
+    uint32_t xpsr = xpsr_read(env);
+    uint32_t frameptr = env->regs[13];
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+    uint32_t framesize;
+    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
+
+    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
+        (env->v7m.secure || nsacr_cp10)) {
+        if (env->v7m.secure &&
+            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
+            framesize = 0xa8;
+        } else {
+            framesize = 0x68;
+        }
+    } else {
+        framesize = 0x20;
+    }
+
+    /* Align stack pointer if the guest wants that */
+    if ((frameptr & 4) &&
+        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
+        frameptr -= 4;
+        xpsr |= XPSR_SPREALIGN;
+    }
+
+    xpsr &= ~XPSR_SFPA;
+    if (env->v7m.secure &&
+        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+        xpsr |= XPSR_SFPA;
+    }
+
+    frameptr -= framesize;
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        uint32_t limit = v7m_sp_limit(env);
+
+        if (frameptr < limit) {
+            /*
+             * Stack limit failure: set SP to the limit value, and generate
+             * STKOF UsageFault. Stack pushes below the limit must not be
+             * performed. It is IMPDEF whether pushes above the limit are
+             * performed; we choose not to.
+             */
+            qemu_log_mask(CPU_LOG_INT,
+                          "...STKOF during stacking\n");
+            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                    env->v7m.secure);
+            env->regs[13] = limit;
+            /*
+             * We won't try to perform any further memory accesses but
+             * we must continue through the following code to check for
+             * permission faults during FPU state preservation, and we
+             * must update FPCCR if lazy stacking is enabled.
+             */
+            limitviol = true;
+            stacked_ok = false;
+        }
+    }
+
+    /*
+     * Write as much of the stack frame as we can. If we fail a stack
+     * write this will result in a derived exception being pended
+     * (which may be taken in preference to the one we started with
+     * if it has higher priority).
+     */
+    stacked_ok = stacked_ok &&
+        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
+                        mmu_idx, STACK_NORMAL) &&
+        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
+
+    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
+        /* FPU is active, try to save its registers */
+        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
+
+        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...SecureFault because LSPACT and FPCA both set\n");
+            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        } else if (!env->v7m.secure && !nsacr_cp10) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...Secure UsageFault with CFSR.NOCP because "
+                          "NSACR.CP10 prevents stacking FP regs\n");
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+        } else {
+            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+                /* Lazy stacking disabled, save registers now */
+                int i;
+                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
+                                                 arm_current_el(env) != 0);
+
+                if (stacked_ok && !cpacr_pass) {
+                    /*
+                     * Take UsageFault if CPACR forbids access. The pseudocode
+                     * here does a full CheckCPEnabled() but we know the NSACR
+                     * check can never fail as we have already handled that.
+                     */
+                    qemu_log_mask(CPU_LOG_INT,
+                                  "...UsageFault with CFSR.NOCP because "
+                                  "CPACR.CP10 prevents stacking FP regs\n");
+                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                            env->v7m.secure);
+                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
+                    stacked_ok = false;
+                }
+
+                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+                    uint32_t faddr = frameptr + 0x20 + 4 * i;
+                    uint32_t slo = extract64(dn, 0, 32);
+                    uint32_t shi = extract64(dn, 32, 32);
+
+                    if (i >= 16) {
+                        faddr += 8; /* skip the slot for the FPSCR */
+                    }
+                    stacked_ok = stacked_ok &&
+                        v7m_stack_write(cpu, faddr, slo,
+                                        mmu_idx, STACK_NORMAL) &&
+                        v7m_stack_write(cpu, faddr + 4, shi,
+                                        mmu_idx, STACK_NORMAL);
+                }
+                stacked_ok = stacked_ok &&
+                    v7m_stack_write(cpu, frameptr + 0x60,
+                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
+                if (cpacr_pass) {
+                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+                        *aa32_vfp_dreg(env, i / 2) = 0;
+                    }
+                    vfp_set_fpscr(env, 0);
+                }
+            } else {
+                /* Lazy stacking enabled, save necessary info to stack later */
+                v7m_update_fpccr(env, frameptr + 0x20, true);
+            }
+        }
+    }
+
+    /*
+     * If we broke a stack limit then SP was already updated earlier;
+     * otherwise we update SP regardless of whether any of the stack
+     * accesses failed or we took some other kind of fault.
+     */
+    if (!limitviol) {
+        env->regs[13] = frameptr;
+    }
+
+    return !stacked_ok;
+}
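
The three possible frame sizes chosen at the top of v7m_push_stack() are worth
spelling out: 0x20 bytes for the basic r0-r3/r12/lr/pc/xPSR frame, 0x68 when
s0-s15 plus FPSCR (and a padding word) are added at +0x20, and 0xa8 when TS
extends that with s16-s31. A minimal sketch of the same selection (illustration
only):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Mirror of the framesize selection in v7m_push_stack(). */
    static uint32_t framesize(bool fpca, bool fp_accessible,
                              bool secure, bool ts)
    {
        if (fpca && fp_accessible) {
            return (secure && ts) ? 0xa8 : 0x68;
        }
        return 0x20;
    }

    int main(void)
    {
        printf("integer only:      0x%02x\n", framesize(false, true, false, false));
        printf("FP, s0-s15:        0x%02x\n", framesize(true, true, false, false));
        printf("FP, s0-s31 (TS=1): 0x%02x\n", framesize(true, true, true, true));
        return 0;
    }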
+
+static void do_v7m_exception_exit(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    uint32_t excret;
+    uint32_t xpsr, xpsr_mask;
+    bool ufault = false;
+    bool sfault = false;
+    bool return_to_sp_process;
+    bool return_to_handler;
+    bool rettobase = false;
+    bool exc_secure = false;
+    bool return_to_secure;
+    bool ftype;
+    bool restore_s16_s31;
+
+    /*
+     * If we're not in Handler mode then jumps to magic exception-exit
+     * addresses don't have magic behaviour. However for the v8M
+     * security extensions the magic secure-function-return has to
+     * work in thread mode too, so to avoid doing an extra check in
+     * the generated code we allow exception-exit magic to also cause the
+     * internal exception and bring us here in thread mode. Correct code
+     * will never try to do this (the following insn fetch will always
+     * fault) so the overhead of having taken an unnecessary exception
+     * doesn't matter.
+     */
+    if (!arm_v7m_is_handler_mode(env)) {
+        return;
+    }
+
+    /*
+     * In the spec pseudocode ExceptionReturn() is called directly
+     * from BXWritePC() and gets the full target PC value including
+     * bit zero. In QEMU's implementation we treat it as a normal
+     * jump-to-register (which is then caught later on), and so split
+     * the target value up between env->regs[15] and env->thumb in
+     * gen_bx(). Reconstitute it.
+     */
+    excret = env->regs[15];
+    if (env->thumb) {
+        excret |= 1;
+    }
+
+    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
+                  " previous exception %d\n",
+                  excret, env->v7m.exception);
+
+    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
+        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
+                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
+                      excret);
+    }
+
+    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
+
+    if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
+        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
+                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
+                      "if FPU not present\n",
+                      excret);
+        ftype = true;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        /*
+         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
+         * we pick which FAULTMASK to clear.
+         */
+        if (!env->v7m.secure &&
+            ((excret & R_V7M_EXCRET_ES_MASK) ||
+             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
+            sfault = 1;
+            /* For all other purposes, treat ES as 0 (R_HXSR) */
+            excret &= ~R_V7M_EXCRET_ES_MASK;
+        }
+        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
+    }
+
+    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
+        /*
+         * Auto-clear FAULTMASK on return from other than NMI.
+         * If the security extension is implemented then this only
+         * happens if the raw execution priority is >= 0; the
+         * value of the ES bit in the exception return value indicates
+         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
+         */
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
+                env->v7m.faultmask[exc_secure] = 0;
+            }
+        } else {
+            env->v7m.faultmask[M_REG_NS] = 0;
+        }
+    }
+
+    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
+                                     exc_secure)) {
+    case -1:
+        /* attempt to exit an exception that isn't active */
+        ufault = true;
+        break;
+    case 0:
+        /* still an irq active now */
+        break;
+    case 1:
+        /*
+         * We returned to base exception level, no nesting.
+         * (In the pseudocode this is written using "NestedActivation != 1"
+         * where we have 'rettobase == false'.)
+         */
+        rettobase = true;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
+    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
+    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+        (excret & R_V7M_EXCRET_S_MASK);
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            /*
+             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
+             * we choose to take the UsageFault.
+             */
+            if ((excret & R_V7M_EXCRET_S_MASK) ||
+                (excret & R_V7M_EXCRET_ES_MASK) ||
+                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
+                ufault = true;
+            }
+        }
+        if (excret & R_V7M_EXCRET_RES0_MASK) {
+            ufault = true;
+        }
+    } else {
+        /* For v7M we only recognize certain combinations of the low bits */
+        switch (excret & 0xf) {
+        case 1: /* Return to Handler */
+            break;
+        case 13: /* Return to Thread using Process stack */
+        case 9: /* Return to Thread using Main stack */
+            /*
+             * We only need to check NONBASETHRDENA for v7M, because in
+             * v8M this bit does not exist (it is RES1).
+             */
+            if (!rettobase &&
+                !(env->v7m.ccr[env->v7m.secure] &
+                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
+                ufault = true;
+            }
+            break;
+        default:
+            ufault = true;
+        }
+    }
+
+    /*
+     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
+     * Handler mode (and will be until we write the new XPSR.Interrupt
+     * field) this does not switch around the current stack pointer.
+     * We must do this before we do any kind of tailchaining, including
+     * for the derived exceptions on integrity check failures, or we will
+     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
+     */
+    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
+
+    /*
+     * Clear scratch FP values left in caller saved registers; this
+     * must happen before any kind of tail chaining.
+     */
+    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
+        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
+        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
+            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+                          "stackframe: error during lazy state deactivation\n");
+            v7m_exception_taken(cpu, excret, true, false);
+            return;
+        } else {
+            /* Clear s0..s15 and FPSCR */
+            int i;
+
+            for (i = 0; i < 16; i += 2) {
+                *aa32_vfp_dreg(env, i / 2) = 0;
+            }
+            vfp_set_fpscr(env, 0);
+        }
+    }
+
+    if (sfault) {
+        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+                      "stackframe: failed EXC_RETURN.ES validity check\n");
+        v7m_exception_taken(cpu, excret, true, false);
+        return;
+    }
+
+    if (ufault) {
+        /*
+         * Bad exception return: instead of popping the exception
+         * stack, directly take a usage fault on the current stack.
+         */
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+                      "stackframe: failed exception return integrity check\n");
+        v7m_exception_taken(cpu, excret, true, false);
+        return;
+    }
+
+    /*
+     * Tailchaining: if there is currently a pending exception that
+     * is high enough priority to preempt execution at the level we're
+     * about to return to, then just directly take that exception now,
+     * avoiding an unstack-and-then-stack. Note that now we have
+     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
+     * our current execution priority is already the execution priority we are
+     * returning to -- none of the state we would unstack or set based on
+     * the EXCRET value affects it.
+     */
+    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
+        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
+        v7m_exception_taken(cpu, excret, true, false);
+        return;
+    }
+
+    switch_v7m_security_state(env, return_to_secure);
+
+    {
+        /*
+         * The stack pointer we should be reading the exception frame from
+         * depends on bits in the magic exception return type value (and
+         * for v8M isn't necessarily the stack pointer we will eventually
+         * end up resuming execution with). Get a pointer to the location
+         * in the CPU state struct where the SP we need is currently being
+         * stored; we will use and modify it in place.
+         * We use this limited C variable scope so we don't accidentally
+         * use 'frame_sp_p' after we do something that makes it invalid.
+         */
+        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
+                                              return_to_secure,
+                                              !return_to_handler,
+                                              return_to_sp_process);
+        uint32_t frameptr = *frame_sp_p;
+        bool pop_ok = true;
+        ARMMMUIdx mmu_idx;
+        bool return_to_priv = return_to_handler ||
+            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
+
+        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
+                                                        return_to_priv);
+
+        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
+            arm_feature(env, ARM_FEATURE_V8)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "M profile exception return with non-8-aligned SP "
+                          "for destination state is UNPREDICTABLE\n");
+        }
+
+        /* Do we need to pop callee-saved registers? */
+        if (return_to_secure &&
+            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
+             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
+            uint32_t actual_sig;
+
+            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
+
+            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
+                /* Take a SecureFault on the current stack */
+                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
+                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+                              "stackframe: failed exception return integrity "
+                              "signature check\n");
+                v7m_exception_taken(cpu, excret, true, false);
+                return;
+            }
+
+            pop_ok = pop_ok &&
+                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
+                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
+                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
+                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
+                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
+                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
+                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
+                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
+
+            frameptr += 0x28;
+        }
+
+        /* Pop registers */
+        pop_ok = pop_ok &&
+            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
+            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
+
+        if (!pop_ok) {
+            /*
+             * v7m_stack_read() pended a fault, so take it (as a tail
+             * chained exception on the same stack frame)
+             */
+            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
+            v7m_exception_taken(cpu, excret, true, false);
+            return;
+        }
+
+        /*
+         * Returning from an exception with a PC with bit 0 set is defined
+         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
+         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
+         * the lsbit, and there are several RTOSes out there which incorrectly
+         * assume the r15 in the stack frame should be a Thumb-style "lsbit
+         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
+         * complain about the badly behaved guest.
+         */
+        if (env->regs[15] & 1) {
+            env->regs[15] &= ~1U;
+            if (!arm_feature(env, ARM_FEATURE_V8)) {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "M profile return from interrupt with misaligned "
+                              "PC is UNPREDICTABLE on v7M\n");
+            }
+        }
+
+        if (arm_feature(env, ARM_FEATURE_V8)) {
+            /*
+             * For v8M we have to check whether the xPSR exception field
+             * matches the EXCRET value for return to handler/thread
+             * before we commit to changing the SP and xPSR.
+             */
+            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
+            if (return_to_handler != will_be_handler) {
+                /*
+                 * Take an INVPC UsageFault on the current stack.
+                 * By this point we will have switched to the security state
+                 * for the background state, so this UsageFault will target
+                 * that state.
+                 */
+                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                        env->v7m.secure);
+                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+                              "stackframe: failed exception return integrity "
+                              "check\n");
+                v7m_exception_taken(cpu, excret, true, false);
+                return;
+            }
+        }
+
+        if (!ftype) {
+            /* FP present and we need to handle it */
+            if (!return_to_secure &&
+                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
+                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+                qemu_log_mask(CPU_LOG_INT,
+                              "...taking SecureFault on existing stackframe: "
+                              "Secure LSPACT set but exception return is "
+                              "not to secure state\n");
+                v7m_exception_taken(cpu, excret, true, false);
+                return;
+            }
+
+            restore_s16_s31 = return_to_secure &&
+                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
+
+            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+                /* State in FPU is still valid, just clear LSPACT */
+                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
+            } else {
+                int i;
+                uint32_t fpscr;
+                bool cpacr_pass, nsacr_pass;
+
+                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
+                                            return_to_priv);
+                nsacr_pass = return_to_secure ||
+                    extract32(env->v7m.nsacr, 10, 1);
+
+                if (!cpacr_pass) {
+                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                            return_to_secure);
+                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
+                    qemu_log_mask(CPU_LOG_INT,
+                                  "...taking UsageFault on existing "
+                                  "stackframe: CPACR.CP10 prevents unstacking "
+                                  "FP regs\n");
+                    v7m_exception_taken(cpu, excret, true, false);
+                    return;
+                } else if (!nsacr_pass) {
+                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
+                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
+                    qemu_log_mask(CPU_LOG_INT,
+                                  "...taking Secure UsageFault on existing "
+                                  "stackframe: NSACR.CP10 prevents unstacking "
+                                  "FP regs\n");
+                    v7m_exception_taken(cpu, excret, true, false);
+                    return;
+                }
+
+                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
+                    uint32_t slo, shi;
+                    uint64_t dn;
+                    uint32_t faddr = frameptr + 0x20 + 4 * i;
+
+                    if (i >= 16) {
+                        faddr += 8; /* Skip the slot for the FPSCR */
+                    }
+
+                    pop_ok = pop_ok &&
+                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
+                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
+
+                    if (!pop_ok) {
+                        break;
+                    }
+
+                    dn = (uint64_t)shi << 32 | slo;
+                    *aa32_vfp_dreg(env, i / 2) = dn;
+                }
+                pop_ok = pop_ok &&
+                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
+                if (pop_ok) {
+                    vfp_set_fpscr(env, fpscr);
+                }
+                if (!pop_ok) {
+                    /*
+                     * These regs are 0 if security extension present;
+                     * otherwise merely UNKNOWN. We zero always.
+                     */
+                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
+                        *aa32_vfp_dreg(env, i / 2) = 0;
+                    }
+                    vfp_set_fpscr(env, 0);
+                }
+            }
+        }
+        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
+                                               V7M_CONTROL, FPCA, !ftype);
+
+        /* Commit to consuming the stack frame */
+        frameptr += 0x20;
+        if (!ftype) {
+            frameptr += 0x48;
+            if (restore_s16_s31) {
+                frameptr += 0x40;
+            }
+        }
+        /*
+         * Undo stack alignment (the SPREALIGN bit indicates that the original
+         * pre-exception SP was not 8-aligned and we added a padding word to
+         * align it, so we undo this by ORing in the bit that increases it
+         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
+         * would work too but a logical OR is how the pseudocode specifies it.)
+         */
+        if (xpsr & XPSR_SPREALIGN) {
+            frameptr |= 4;
+        }
+        *frame_sp_p = frameptr;
+    }
+
+    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
+    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
+        xpsr_mask &= ~XPSR_GE;
+    }
+    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
+    xpsr_write(env, xpsr, xpsr_mask);
+
+    if (env->v7m.secure) {
+        bool sfpa = xpsr & XPSR_SFPA;
+
+        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
+                                               V7M_CONTROL, SFPA, sfpa);
+    }
+
+    /*
+     * The restored xPSR exception field will be zero if we're
+     * resuming in Thread mode. If that doesn't match what the
+     * exception return excret specified then this is a UsageFault.
+     * v7M requires we make this check here; v8M did it earlier.
+     */
+    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
+        /*
+         * Take an INVPC UsageFault by pushing the stack again;
+         * we know we're v7M so this is never a Secure UsageFault.
+         */
+        bool ignore_stackfaults;
+
+        assert(!arm_feature(env, ARM_FEATURE_V8));
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+        ignore_stackfaults = v7m_push_stack(cpu);
+        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
+                      "failed exception return integrity check\n");
+        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
+        return;
+    }
+
+    /* Otherwise, we have a successful exception exit. */
+    arm_clear_exclusive(env);
+    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
+}
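
The SP adjustment at the end mirrors the entry-side frame sizes: 0x20 for the
integer frame, plus 0x48 for s0-s15/FPSCR when FType is 0, plus a further 0x40
for s16-s31, and finally the SPREALIGN padding word is undone by ORing in bit 2.
A sketch of the final SP computation (illustration only):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t frame_end(uint32_t frameptr, bool ftype,
                              bool restore_s16_s31, bool sprealign)
    {
        frameptr += 0x20;                 /* integer frame */
        if (!ftype) {
            frameptr += 0x48;             /* s0-s15, FPSCR, padding */
            if (restore_s16_s31) {
                frameptr += 0x40;         /* s16-s31 */
            }
        }
        if (sprealign) {
            frameptr |= 4;                /* undo the alignment padding word */
        }
        return frameptr;
    }

    int main(void)
    {
        /* 0x20000000 + full FP frame with realignment -> 0x200000ac */
        printf("0x%08x\n", frame_end(0x20000000, false, true, true));
        return 0;
    }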
+
+static bool do_v7m_function_return(ARMCPU *cpu)
+{
+    /*
+     * v8M security extensions magic function return.
+     * We may either:
+     *  (1) throw an exception (longjump)
+     *  (2) return true if we successfully handled the function return
+     *  (3) return false if we failed a consistency check and have
+     *      pended a UsageFault that needs to be taken now
+     *
+     * At this point the magic return value is split between env->regs[15]
+     * and env->thumb. We don't bother to reconstitute it because we don't
+     * need it (all values are handled the same way).
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t newpc, newpsr, newpsr_exc;
+
+    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
+
+    {
+        bool threadmode, spsel;
+        TCGMemOpIdx oi;
+        ARMMMUIdx mmu_idx;
+        uint32_t *frame_sp_p;
+        uint32_t frameptr;
+
+        /* Pull the return address and IPSR from the Secure stack */
+        threadmode = !arm_v7m_is_handler_mode(env);
+        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
+
+        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
+        frameptr = *frame_sp_p;
+
+        /*
+         * These loads may throw an exception (for MPU faults). We want to
+         * do them as secure, so work out what MMU index that is.
+         */
+        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
+        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
+        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
+
+        /* Consistency checks on new IPSR */
+        newpsr_exc = newpsr & XPSR_EXCP;
+        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
+              (env->v7m.exception == 1 && newpsr_exc != 0))) {
+            /* Pend the fault and tell our caller to take it */
+            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                    env->v7m.secure);
+            qemu_log_mask(CPU_LOG_INT,
+                          "...taking INVPC UsageFault: "
+                          "IPSR consistency check failed\n");
+            return false;
+        }
+
+        *frame_sp_p = frameptr + 8;
+    }
+
+    /* This invalidates frame_sp_p */
+    switch_v7m_security_state(env, true);
+    env->v7m.exception = newpsr_exc;
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    if (newpsr & XPSR_SFPA) {
+        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
+    }
+    xpsr_write(env, 0, XPSR_IT);
+    env->thumb = newpc & 1;
+    env->regs[15] = newpc & ~1;
+
+    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
+    return true;
+}
+
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+                               uint32_t addr, uint16_t *insn)
+{
+    /*
+     * Load a 16-bit portion of a v7M instruction, returning true on success,
+     * or false on failure (in which case we will have pended the appropriate
+     * exception).
+     * We need to do the instruction fetch's MPU and SAU checks
+     * like this because there is no MMU index that would allow
+     * doing the load with a single function call. Instead we must
+     * first check that the security attributes permit the load
+     * and that they don't mismatch on the two halves of the instruction,
+     * and then we do the load as a secure load (ie using the security
+     * attributes of the address, not the CPU, as architecturally required).
+     */
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    V8M_SAttributes sattrs = {};
+    MemTxAttrs attrs = {};
+    ARMMMUFaultInfo fi = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+
+    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
+    if (!sattrs.nsc || sattrs.ns) {
+        /*
+         * This must be the second half of the insn, and it straddles a
+         * region boundary with the second half not being S&NSC.
+         */
+        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        qemu_log_mask(CPU_LOG_INT,
+                      "...really SecureFault with SFSR.INVEP\n");
+        return false;
+    }
+    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
+                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
+        /* the MPU lookup failed */
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
+        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
+        return false;
+    }
+    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
+                                  attrs, &txres);
+    if (txres != MEMTX_OK) {
+        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
+        return false;
+    }
+    return true;
+}
+
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
+{
+    /*
+     * Check whether this attempt to execute code in a Secure & NS-Callable
+     * memory region is for an SG instruction; if so, then emulate the
+     * effect of the SG instruction and return true. Otherwise pend
+     * the correct kind of exception and return false.
+     */
+    CPUARMState *env = &cpu->env;
+    ARMMMUIdx mmu_idx;
+    uint16_t insn;
+
+    /*
+     * We should never get here unless get_phys_addr_pmsav8() caused
+     * an exception for NS executing in S&NSC memory.
+     */
+    assert(!env->v7m.secure);
+    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+
+    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
+    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+
+    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
+        return false;
+    }
+
+    if (!env->thumb) {
+        goto gen_invep;
+    }
+
+    if (insn != 0xe97f) {
+        /*
+         * Not an SG instruction first half (we choose the IMPDEF
+         * early-SG-check option).
+         */
+        goto gen_invep;
+    }
+
+    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
+        return false;
+    }
+
+    if (insn != 0xe97f) {
+        /*
+         * Not an SG instruction second half (yes, both halves of the SG
+         * insn have the same hex value)
+         */
+        goto gen_invep;
+    }
+
+    /*
+     * OK, we have confirmed that we really have an SG instruction.
+     * We know we're NS in S memory so don't need to repeat those checks.
+     */
+    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
+                  ", executing it\n", env->regs[15]);
+    env->regs[14] &= ~1;
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    switch_v7m_security_state(env, true);
+    xpsr_write(env, 0, XPSR_IT);
+    env->regs[15] += 4;
+    return true;
+
+gen_invep:
+    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+    qemu_log_mask(CPU_LOG_INT,
+                  "...really SecureFault with SFSR.INVEP\n");
+    return false;
+}
4683
+
4684
+void arm_v7m_cpu_do_interrupt(CPUState *cs)
4685
+{
4686
+ ARMCPU *cpu = ARM_CPU(cs);
4687
+ CPUARMState *env = &cpu->env;
4688
+ uint32_t lr;
4689
+ bool ignore_stackfaults;
4690
+
4691
+ arm_log_exception(cs->exception_index);
4692
+
4693
+ /*
4694
+ * For exceptions we just mark as pending on the NVIC, and let that
4695
+ * handle it.
4696
+ */
4697
+ switch (cs->exception_index) {
4698
+ case EXCP_UDEF:
4699
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4700
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
4701
+ break;
4702
+ case EXCP_NOCP:
4703
+ {
4704
+ /*
4705
+ * NOCP might be directed to something other than the current
4706
+ * security state if this fault is because of NSACR; we indicate
4707
+ * the target security state using exception.target_el.
4708
+ */
4709
+ int target_secstate;
4710
+
4711
+ if (env->exception.target_el == 3) {
4712
+ target_secstate = M_REG_S;
4713
+ } else {
4714
+ target_secstate = env->v7m.secure;
4715
+ }
4716
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
4717
+ env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
4718
+ break;
4719
+ }
4720
+ case EXCP_INVSTATE:
4721
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4722
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
4723
+ break;
4724
+ case EXCP_STKOF:
4725
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4726
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
4727
+ break;
4728
+ case EXCP_LSERR:
4729
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4730
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
4731
+ break;
4732
+ case EXCP_UNALIGNED:
4733
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
4734
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
4735
+ break;
4736
+ case EXCP_SWI:
4737
+ /* The PC already points to the next instruction. */
4738
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
4739
+ break;
4740
+ case EXCP_PREFETCH_ABORT:
4741
+ case EXCP_DATA_ABORT:
4742
+ /*
4743
+ * Note that for M profile we don't have a guest facing FSR, but
4744
+ * the env->exception.fsr will be populated by the code that
4745
+ * raises the fault, in the A profile short-descriptor format.
4746
+ */
4747
+ switch (env->exception.fsr & 0xf) {
4748
+ case M_FAKE_FSR_NSC_EXEC:
4749
+ /*
4750
+ * Exception generated when we try to execute code at an address
4751
+ * which is marked as Secure & Non-Secure Callable and the CPU
4752
+ * is in the Non-Secure state. The only instruction which can
4753
+ * be executed like this is SG (and that only if both halves of
4754
+ * the SG instruction have the same security attributes.)
4755
+ * Everything else must generate an INVEP SecureFault, so we
4756
+ * emulate the SG instruction here.
4757
+ */
4758
+ if (v7m_handle_execute_nsc(cpu)) {
4759
+ return;
4760
+ }
4761
+ break;
4762
+ case M_FAKE_FSR_SFAULT:
4763
+ /*
4764
+ * Various flavours of SecureFault for attempts to execute or
4765
+ * access data in the wrong security state.
4766
+ */
4767
+ switch (cs->exception_index) {
4768
+ case EXCP_PREFETCH_ABORT:
4769
+ if (env->v7m.secure) {
4770
+ env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
4771
+ qemu_log_mask(CPU_LOG_INT,
4772
+ "...really SecureFault with SFSR.INVTRAN\n");
4773
+ } else {
4774
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
4775
+ qemu_log_mask(CPU_LOG_INT,
4776
+ "...really SecureFault with SFSR.INVEP\n");
4777
+ }
4778
+ break;
4779
+ case EXCP_DATA_ABORT:
4780
+ /* This must be an NS access to S memory */
4781
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
4782
+ qemu_log_mask(CPU_LOG_INT,
4783
+ "...really SecureFault with SFSR.AUVIOL\n");
4784
+ break;
4785
+ }
4786
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
4787
+ break;
4788
+ case 0x8: /* External Abort */
4789
+ switch (cs->exception_index) {
4790
+ case EXCP_PREFETCH_ABORT:
4791
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
4792
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
4793
+ break;
4794
+ case EXCP_DATA_ABORT:
4795
+ env->v7m.cfsr[M_REG_NS] |=
4796
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
4797
+ env->v7m.bfar = env->exception.vaddress;
4798
+ qemu_log_mask(CPU_LOG_INT,
4799
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
4800
+ env->v7m.bfar);
4801
+ break;
4802
+ }
4803
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
4804
+ break;
4805
+ default:
4806
+ /*
4807
+ * All other FSR values are either MPU faults or "can't happen
4808
+ * for M profile" cases.
4809
+ */
4810
+ switch (cs->exception_index) {
4811
+ case EXCP_PREFETCH_ABORT:
4812
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
4813
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
4814
+ break;
4815
+ case EXCP_DATA_ABORT:
4816
+ env->v7m.cfsr[env->v7m.secure] |=
4817
+ (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
4818
+ env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
4819
+ qemu_log_mask(CPU_LOG_INT,
4820
+ "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
4821
+ env->v7m.mmfar[env->v7m.secure]);
4822
+ break;
4823
+ }
4824
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
4825
+ env->v7m.secure);
4826
+ break;
4827
+ }
4828
+ break;
4829
+ case EXCP_BKPT:
4830
+ if (semihosting_enabled()) {
4831
+ int nr;
4832
+ nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
4833
+ if (nr == 0xab) {
4834
+ env->regs[15] += 2;
4835
+ qemu_log_mask(CPU_LOG_INT,
4836
+ "...handling as semihosting call 0x%x\n",
4837
+ env->regs[0]);
4838
+ env->regs[0] = do_arm_semihosting(env);
4839
+ return;
4840
+ }
4841
+ }
4842
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
4843
+ break;
4844
+ case EXCP_IRQ:
4845
+ break;
4846
+ case EXCP_EXCEPTION_EXIT:
4847
+ if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
4848
+ /* Must be v8M security extension function return */
4849
+ assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
4850
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
4851
+ if (do_v7m_function_return(cpu)) {
4852
+ return;
4853
+ }
4854
+ } else {
4855
+ do_v7m_exception_exit(cpu);
4856
+ return;
4857
+ }
4858
+ break;
4859
+ case EXCP_LAZYFP:
4860
+ /*
4861
+ * We already pended the specific exception in the NVIC in the
4862
+ * v7m_preserve_fp_state() helper function.
4863
+ */
4864
+ break;
4865
+ default:
4866
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
4867
+ return; /* Never happens. Keep compiler happy. */
4868
+ }
4869
+
4870
+ if (arm_feature(env, ARM_FEATURE_V8)) {
4871
+ lr = R_V7M_EXCRET_RES1_MASK |
4872
+ R_V7M_EXCRET_DCRS_MASK;
4873
+ /*
4874
+ * The S bit indicates whether we should return to Secure
4875
+ * or NonSecure (ie our current state).
4876
+ * The ES bit indicates whether we're taking this exception
4877
+ * to Secure or NonSecure (ie our target state). We set it
4878
+ * later, in v7m_exception_taken().
4879
+ * The SPSEL bit is also set in v7m_exception_taken() for v8M.
4880
+ * This corresponds to the ARM ARM pseudocode for v8M setting
4881
+ * some LR bits in PushStack() and some in ExceptionTaken();
4882
+ * the distinction matters for the tailchain cases where we
4883
+ * can take an exception without pushing the stack.
4884
+ */
4885
+ if (env->v7m.secure) {
4886
+ lr |= R_V7M_EXCRET_S_MASK;
4887
+ }
4888
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
4889
+ lr |= R_V7M_EXCRET_FTYPE_MASK;
4890
+ }
4891
+ } else {
4892
+ lr = R_V7M_EXCRET_RES1_MASK |
4893
+ R_V7M_EXCRET_S_MASK |
4894
+ R_V7M_EXCRET_DCRS_MASK |
4895
+ R_V7M_EXCRET_FTYPE_MASK |
4896
+ R_V7M_EXCRET_ES_MASK;
4897
+ if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
4898
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
4899
+ }
4900
+ }
4901
+ if (!arm_v7m_is_handler_mode(env)) {
4902
+ lr |= R_V7M_EXCRET_MODE_MASK;
4903
+ }
4904
+
4905
+ ignore_stackfaults = v7m_push_stack(cpu);
4906
+ v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
4907
+}
4908
+
4909
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
4910
+{
4911
+ uint32_t mask;
4912
+ unsigned el = arm_current_el(env);
4913
+
4914
+ /* First handle registers which unprivileged can read */
4915
+
4916
+ switch (reg) {
4917
+ case 0 ... 7: /* xPSR sub-fields */
4918
+ mask = 0;
4919
+ if ((reg & 1) && el) {
4920
+ mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
4921
+ }
4922
+ if (!(reg & 4)) {
4923
+ mask |= XPSR_NZCV | XPSR_Q; /* APSR */
4924
+ if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
4925
+ mask |= XPSR_GE;
4926
+ }
4927
+ }
4928
+ /* EPSR reads as zero */
4929
+ return xpsr_read(env) & mask;
4930
+ break;
4931
+ case 20: /* CONTROL */
4932
+ {
4933
+ uint32_t value = env->v7m.control[env->v7m.secure];
4934
+ if (!env->v7m.secure) {
4935
+ /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
4936
+ value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
4937
+ }
4938
+ return value;
4939
+ }
4940
+ case 0x94: /* CONTROL_NS */
4941
+ /*
4942
+ * We have to handle this here because unprivileged Secure code
4943
+ * can read the NS CONTROL register.
4944
+ */
4945
+ if (!env->v7m.secure) {
4946
+ return 0;
4947
+ }
4948
+ return env->v7m.control[M_REG_NS] |
4949
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
4950
+ }
4951
+
4952
+ if (el == 0) {
4953
+ return 0; /* unprivileged reads others as zero */
4954
+ }
4955
+
4956
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
4957
+ switch (reg) {
4958
+ case 0x88: /* MSP_NS */
4959
+ if (!env->v7m.secure) {
4960
+ return 0;
4961
+ }
4962
+ return env->v7m.other_ss_msp;
4963
+ case 0x89: /* PSP_NS */
4964
+ if (!env->v7m.secure) {
4965
+ return 0;
4966
+ }
4967
+ return env->v7m.other_ss_psp;
4968
+ case 0x8a: /* MSPLIM_NS */
4969
+ if (!env->v7m.secure) {
4970
+ return 0;
4971
+ }
4972
+ return env->v7m.msplim[M_REG_NS];
4973
+ case 0x8b: /* PSPLIM_NS */
4974
+ if (!env->v7m.secure) {
4975
+ return 0;
4976
+ }
4977
+ return env->v7m.psplim[M_REG_NS];
4978
+ case 0x90: /* PRIMASK_NS */
4979
+ if (!env->v7m.secure) {
4980
+ return 0;
4981
+ }
4982
+ return env->v7m.primask[M_REG_NS];
4983
+ case 0x91: /* BASEPRI_NS */
4984
+ if (!env->v7m.secure) {
4985
+ return 0;
4986
+ }
4987
+ return env->v7m.basepri[M_REG_NS];
4988
+ case 0x93: /* FAULTMASK_NS */
4989
+ if (!env->v7m.secure) {
4990
+ return 0;
4991
+ }
4992
+ return env->v7m.faultmask[M_REG_NS];
4993
+ case 0x98: /* SP_NS */
4994
+ {
4995
+ /*
4996
+ * This gives the non-secure SP selected based on whether we're
4997
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
4998
+ */
4999
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
5000
+
5001
+ if (!env->v7m.secure) {
5002
+ return 0;
5003
+ }
5004
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
5005
+ return env->v7m.other_ss_psp;
5006
+ } else {
5007
+ return env->v7m.other_ss_msp;
5008
+ }
5009
+ }
5010
+ default:
5011
+ break;
5012
+ }
5013
+ }
5014
+
5015
+ switch (reg) {
5016
+ case 8: /* MSP */
5017
+ return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
5018
+ case 9: /* PSP */
5019
+ return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
5020
+ case 10: /* MSPLIM */
5021
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5022
+ goto bad_reg;
5023
+ }
5024
+ return env->v7m.msplim[env->v7m.secure];
5025
+ case 11: /* PSPLIM */
5026
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5027
+ goto bad_reg;
5028
+ }
5029
+ return env->v7m.psplim[env->v7m.secure];
5030
+ case 16: /* PRIMASK */
5031
+ return env->v7m.primask[env->v7m.secure];
5032
+ case 17: /* BASEPRI */
5033
+ case 18: /* BASEPRI_MAX */
5034
+ return env->v7m.basepri[env->v7m.secure];
5035
+ case 19: /* FAULTMASK */
5036
+ return env->v7m.faultmask[env->v7m.secure];
5037
+ default:
5038
+ bad_reg:
5039
+ qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
5040
+ " register %d\n", reg);
5041
+ return 0;
5042
+ }
5043
+}
5044
+
5045
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
5046
+{
5047
+ /*
5048
+ * We're passed bits [11..0] of the instruction; extract
5049
+ * SYSm and the mask bits.
5050
+ * Invalid combinations of SYSm and mask are UNPREDICTABLE;
5051
+ * we choose to treat them as if the mask bits were valid.
5052
+ * NB that the pseudocode 'mask' variable is bits [11..10],
5053
+ * whereas ours is [11..8].
5054
+ */
5055
+ uint32_t mask = extract32(maskreg, 8, 4);
5056
+ uint32_t reg = extract32(maskreg, 0, 8);
5057
+ int cur_el = arm_current_el(env);
5058
+
5059
+ if (cur_el == 0 && reg > 7 && reg != 20) {
5060
+ /*
5061
+ * only xPSR sub-fields and CONTROL.SFPA may be written by
5062
+ * unprivileged code
5063
+ */
5064
+ return;
5065
+ }
5066
+
5067
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
5068
+ switch (reg) {
5069
+ case 0x88: /* MSP_NS */
5070
+ if (!env->v7m.secure) {
5071
+ return;
5072
+ }
5073
+ env->v7m.other_ss_msp = val;
5074
+ return;
5075
+ case 0x89: /* PSP_NS */
5076
+ if (!env->v7m.secure) {
5077
+ return;
5078
+ }
5079
+ env->v7m.other_ss_psp = val;
5080
+ return;
5081
+ case 0x8a: /* MSPLIM_NS */
5082
+ if (!env->v7m.secure) {
5083
+ return;
5084
+ }
5085
+ env->v7m.msplim[M_REG_NS] = val & ~7;
5086
+ return;
5087
+ case 0x8b: /* PSPLIM_NS */
5088
+ if (!env->v7m.secure) {
5089
+ return;
5090
+ }
5091
+ env->v7m.psplim[M_REG_NS] = val & ~7;
5092
+ return;
5093
+ case 0x90: /* PRIMASK_NS */
5094
+ if (!env->v7m.secure) {
5095
+ return;
5096
+ }
5097
+ env->v7m.primask[M_REG_NS] = val & 1;
5098
+ return;
5099
+ case 0x91: /* BASEPRI_NS */
5100
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
5101
+ return;
5102
+ }
5103
+ env->v7m.basepri[M_REG_NS] = val & 0xff;
5104
+ return;
5105
+ case 0x93: /* FAULTMASK_NS */
5106
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
5107
+ return;
5108
+ }
5109
+ env->v7m.faultmask[M_REG_NS] = val & 1;
5110
+ return;
5111
+ case 0x94: /* CONTROL_NS */
5112
+ if (!env->v7m.secure) {
5113
+ return;
5114
+ }
5115
+ write_v7m_control_spsel_for_secstate(env,
5116
+ val & R_V7M_CONTROL_SPSEL_MASK,
5117
+ M_REG_NS);
5118
+ if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
5119
+ env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
5120
+ env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
5121
+ }
5122
+ /*
5123
+ * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
5124
+ * RES0 if the FPU is not present, and is stored in the S bank
5125
+ */
5126
+ if (arm_feature(env, ARM_FEATURE_VFP) &&
5127
+ extract32(env->v7m.nsacr, 10, 1)) {
5128
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
5129
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
5130
+ }
5131
+ return;
5132
+ case 0x98: /* SP_NS */
5133
+ {
5134
+ /*
5135
+ * This gives the non-secure SP selected based on whether we're
5136
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
5137
+ */
5138
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
5139
+ bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
5140
+ uint32_t limit;
5141
+
5142
+ if (!env->v7m.secure) {
5143
+ return;
5144
+ }
5145
+
5146
+ limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
5147
+
5148
+ if (val < limit) {
5149
+ CPUState *cs = env_cpu(env);
5150
+
5151
+ cpu_restore_state(cs, GETPC(), true);
5152
+ raise_exception(env, EXCP_STKOF, 0, 1);
5153
+ }
5154
+
5155
+ if (is_psp) {
5156
+ env->v7m.other_ss_psp = val;
5157
+ } else {
5158
+ env->v7m.other_ss_msp = val;
5159
+ }
5160
+ return;
5161
+ }
5162
+ default:
5163
+ break;
5164
+ }
5165
+ }
5166
+
5167
+ switch (reg) {
5168
+ case 0 ... 7: /* xPSR sub-fields */
5169
+ /* only APSR is actually writable */
5170
+ if (!(reg & 4)) {
5171
+ uint32_t apsrmask = 0;
5172
+
5173
+ if (mask & 8) {
5174
+ apsrmask |= XPSR_NZCV | XPSR_Q;
5175
+ }
5176
+ if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
5177
+ apsrmask |= XPSR_GE;
5178
+ }
5179
+ xpsr_write(env, val, apsrmask);
5180
+ }
5181
+ break;
5182
+ case 8: /* MSP */
5183
+ if (v7m_using_psp(env)) {
5184
+ env->v7m.other_sp = val;
5185
+ } else {
5186
+ env->regs[13] = val;
5187
+ }
5188
+ break;
5189
+ case 9: /* PSP */
5190
+ if (v7m_using_psp(env)) {
5191
+ env->regs[13] = val;
5192
+ } else {
5193
+ env->v7m.other_sp = val;
5194
+ }
5195
+ break;
5196
+ case 10: /* MSPLIM */
5197
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5198
+ goto bad_reg;
5199
+ }
5200
+ env->v7m.msplim[env->v7m.secure] = val & ~7;
5201
+ break;
5202
+ case 11: /* PSPLIM */
5203
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
5204
+ goto bad_reg;
5205
+ }
5206
+ env->v7m.psplim[env->v7m.secure] = val & ~7;
5207
+ break;
5208
+ case 16: /* PRIMASK */
5209
+ env->v7m.primask[env->v7m.secure] = val & 1;
5210
+ break;
5211
+ case 17: /* BASEPRI */
5212
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5213
+ goto bad_reg;
5214
+ }
5215
+ env->v7m.basepri[env->v7m.secure] = val & 0xff;
5216
+ break;
5217
+ case 18: /* BASEPRI_MAX */
5218
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5219
+ goto bad_reg;
5220
+ }
5221
+ val &= 0xff;
5222
+ if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
5223
+ || env->v7m.basepri[env->v7m.secure] == 0)) {
5224
+ env->v7m.basepri[env->v7m.secure] = val;
5225
+ }
5226
+ break;
5227
+ case 19: /* FAULTMASK */
5228
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
5229
+ goto bad_reg;
5230
+ }
5231
+ env->v7m.faultmask[env->v7m.secure] = val & 1;
5232
+ break;
5233
+ case 20: /* CONTROL */
5234
+ /*
5235
+ * Writing to the SPSEL bit only has an effect if we are in
5236
+ * thread mode; other bits can be updated by any privileged code.
5237
+ * write_v7m_control_spsel() deals with updating the SPSEL bit in
5238
+ * env->v7m.control, so we only need update the others.
5239
+ * For v7M, we must just ignore explicit writes to SPSEL in handler
5240
+ * mode; for v8M the write is permitted but will have no effect.
5241
+ * All these bits are writes-ignored from non-privileged code,
5242
+ * except for SFPA.
5243
+ */
5244
+ if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
5245
+ !arm_v7m_is_handler_mode(env))) {
5246
+ write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
5247
+ }
5248
+ if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
5249
+ env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
5250
+ env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
5251
+ }
5252
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
5253
+ /*
5254
+ * SFPA is RAZ/WI from NS or if no FPU.
5255
+ * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
5256
+ * Both are stored in the S bank.
5257
+ */
5258
+ if (env->v7m.secure) {
5259
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
5260
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
5261
+ }
5262
+ if (cur_el > 0 &&
5263
+ (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
5264
+ extract32(env->v7m.nsacr, 10, 1))) {
5265
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
5266
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
5267
+ }
5268
+ }
5269
+ break;
5270
+ default:
5271
+ bad_reg:
5272
+ qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
5273
+ " register %d\n", reg);
5274
+ return;
5275
+ }
5276
+}
5277
+
5278
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
5279
+{
5280
+ /* Implement the TT instruction. op is bits [7:6] of the insn. */
5281
+ bool forceunpriv = op & 1;
5282
+ bool alt = op & 2;
5283
+ V8M_SAttributes sattrs = {};
5284
+ uint32_t tt_resp;
5285
+ bool r, rw, nsr, nsrw, mrvalid;
5286
+ int prot;
5287
+ ARMMMUFaultInfo fi = {};
5288
+ MemTxAttrs attrs = {};
5289
+ hwaddr phys_addr;
5290
+ ARMMMUIdx mmu_idx;
5291
+ uint32_t mregion;
5292
+ bool targetpriv;
5293
+ bool targetsec = env->v7m.secure;
5294
+ bool is_subpage;
5295
+
5296
+ /*
5297
+ * Work out what the security state and privilege level we're
5298
+ * interested in is...
5299
+ */
5300
+ if (alt) {
5301
+ targetsec = !targetsec;
5302
+ }
5303
+
5304
+ if (forceunpriv) {
5305
+ targetpriv = false;
5306
+ } else {
5307
+ targetpriv = arm_v7m_is_handler_mode(env) ||
5308
+ !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
5309
+ }
5310
+
5311
+ /* ...and then figure out which MMU index this is */
5312
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
5313
+
5314
+ /*
5315
+ * We know that the MPU and SAU don't care about the access type
5316
+ * for our purposes beyond that we don't want to claim to be
5317
+ * an insn fetch, so we arbitrarily call this a read.
5318
+ */
5319
+
5320
+ /*
5321
+ * MPU region info only available for privileged or if
5322
+ * inspecting the other MPU state.
5323
+ */
5324
+ if (arm_current_el(env) != 0 || alt) {
5325
+ /* We can ignore the return value as prot is always set */
5326
+ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
5327
+ &phys_addr, &attrs, &prot, &is_subpage,
5328
+ &fi, &mregion);
5329
+ if (mregion == -1) {
5330
+ mrvalid = false;
5331
+ mregion = 0;
5332
+ } else {
5333
+ mrvalid = true;
5334
+ }
5335
+ r = prot & PAGE_READ;
5336
+ rw = prot & PAGE_WRITE;
5337
+ } else {
5338
+ r = false;
5339
+ rw = false;
5340
+ mrvalid = false;
5341
+ mregion = 0;
5342
+ }
5343
+
5344
+ if (env->v7m.secure) {
5345
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
5346
+ nsr = sattrs.ns && r;
5347
+ nsrw = sattrs.ns && rw;
5348
+ } else {
5349
+ sattrs.ns = true;
5350
+ nsr = false;
5351
+ nsrw = false;
5352
+ }
5353
+
5354
+ tt_resp = (sattrs.iregion << 24) |
5355
+ (sattrs.irvalid << 23) |
5356
+ ((!sattrs.ns) << 22) |
5357
+ (nsrw << 21) |
5358
+ (nsr << 20) |
5359
+ (rw << 19) |
5360
+ (r << 18) |
5361
+ (sattrs.srvalid << 17) |
5362
+ (mrvalid << 16) |
5363
+ (sattrs.sregion << 8) |
5364
+ mregion;
5365
+
5366
+ return tt_resp;
5367
+}
5368
+
5369
+#endif /* !CONFIG_USER_ONLY */
5370
+
5371
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
5372
+ bool secstate, bool priv, bool negpri)
5373
+{
5374
+ ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
5375
+
5376
+ if (priv) {
5377
+ mmu_idx |= ARM_MMU_IDX_M_PRIV;
5378
+ }
5379
+
5380
+ if (negpri) {
5381
+ mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
5382
+ }
5383
+
5384
+ if (secstate) {
5385
+ mmu_idx |= ARM_MMU_IDX_M_S;
5386
+ }
5387
+
5388
+ return mmu_idx;
5389
+}
5390
+
5391
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
5392
+ bool secstate, bool priv)
5393
+{
5394
+ bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
5395
+
5396
+ return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
5397
+}
5398
+
5399
+/* Return the MMU index for a v7M CPU in the specified security state */
5400
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
5401
+{
5402
+ bool priv = arm_current_el(env) != 0;
5403
+
5404
+ return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
5405
+}
5406
--
161
--
5407
2.20.1
162
2.34.1
5408
163
5409
164
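[Note: not part of the patch -- for readers unfamiliar with the TT encoding, the response word assembled at the end of HELPER(v7m_tt) above can be unpacked as in this illustrative sketch; the struct and function names are hypothetical, but the bit positions follow the tt_resp assembly in the helper:]

typedef struct {
    unsigned mregion, sregion, iregion;
    bool mrvalid, srvalid, irvalid;
    bool r, rw, nsr, nsrw, s;
} TTResp;

static TTResp decode_tt_resp(uint32_t v)
{
    TTResp t = {
        .mregion = extract32(v, 0, 8),   /* MPU region number */
        .sregion = extract32(v, 8, 8),   /* SAU region number */
        .mrvalid = extract32(v, 16, 1),  /* MREGION field is valid */
        .srvalid = extract32(v, 17, 1),  /* SREGION field is valid */
        .r       = extract32(v, 18, 1),  /* readable */
        .rw      = extract32(v, 19, 1),  /* read/write */
        .nsr     = extract32(v, 20, 1),  /* readable from Non-secure */
        .nsrw    = extract32(v, 21, 1),  /* read/write from Non-secure */
        .s       = extract32(v, 22, 1),  /* address is Secure */
        .irvalid = extract32(v, 23, 1),  /* IREGION field is valid */
        .iregion = extract32(v, 24, 8),  /* IDAU region number */
    };
    return t;
}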
New patch
From: Richard Henderson <richard.henderson@linaro.org>

The upstream gdb xml only implements {MSP,PSP}{,_NS,S}, but
go ahead and implement the other system registers as well.

Since there is significant overlap between the two, implement
them with common code. The only exception is the systemreg
view of CONTROL, which merges the banked bits as per MRS.

Signed-off-by: David Reiss <dreiss@meta.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227213329.793795-15-richard.henderson@linaro.org
[rth: Substantial rewrite using enumerator and shared code.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 2 +
target/arm/gdbstub.c | 178 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 180 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {

DynamicGDBXMLInfo dyn_sysreg_xml;
DynamicGDBXMLInfo dyn_svereg_xml;
+ DynamicGDBXMLInfo dyn_m_systemreg_xml;
+ DynamicGDBXMLInfo dyn_m_secextreg_xml;

/* Timers used by the generic (architected) timer */
QEMUTimer *gt_timer[NUM_GTIMERS];
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -XXX,XX +XXX,XX @@ static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
return cpu->dyn_sysreg_xml.num;
}

+typedef enum {
+ M_SYSREG_MSP,
+ M_SYSREG_PSP,
+ M_SYSREG_PRIMASK,
+ M_SYSREG_CONTROL,
+ M_SYSREG_BASEPRI,
+ M_SYSREG_FAULTMASK,
+ M_SYSREG_MSPLIM,
+ M_SYSREG_PSPLIM,
+} MProfileSysreg;
+
+static const struct {
+ const char *name;
+ int feature;
+} m_sysreg_def[] = {
+ [M_SYSREG_MSP] = { "msp", ARM_FEATURE_M },
+ [M_SYSREG_PSP] = { "psp", ARM_FEATURE_M },
+ [M_SYSREG_PRIMASK] = { "primask", ARM_FEATURE_M },
+ [M_SYSREG_CONTROL] = { "control", ARM_FEATURE_M },
+ [M_SYSREG_BASEPRI] = { "basepri", ARM_FEATURE_M_MAIN },
+ [M_SYSREG_FAULTMASK] = { "faultmask", ARM_FEATURE_M_MAIN },
+ [M_SYSREG_MSPLIM] = { "msplim", ARM_FEATURE_V8 },
+ [M_SYSREG_PSPLIM] = { "psplim", ARM_FEATURE_V8 },
+};
+
+static uint32_t *m_sysreg_ptr(CPUARMState *env, MProfileSysreg reg, bool sec)
+{
+ uint32_t *ptr;
+
+ switch (reg) {
+ case M_SYSREG_MSP:
+ ptr = arm_v7m_get_sp_ptr(env, sec, false, true);
+ break;
+ case M_SYSREG_PSP:
+ ptr = arm_v7m_get_sp_ptr(env, sec, true, true);
+ break;
+ case M_SYSREG_MSPLIM:
+ ptr = &env->v7m.msplim[sec];
+ break;
+ case M_SYSREG_PSPLIM:
+ ptr = &env->v7m.psplim[sec];
+ break;
+ case M_SYSREG_PRIMASK:
+ ptr = &env->v7m.primask[sec];
+ break;
+ case M_SYSREG_BASEPRI:
+ ptr = &env->v7m.basepri[sec];
+ break;
+ case M_SYSREG_FAULTMASK:
+ ptr = &env->v7m.faultmask[sec];
+ break;
+ case M_SYSREG_CONTROL:
+ ptr = &env->v7m.control[sec];
+ break;
+ default:
+ return NULL;
+ }
+ return arm_feature(env, m_sysreg_def[reg].feature) ? ptr : NULL;
+}
+
+static int m_sysreg_get(CPUARMState *env, GByteArray *buf,
+ MProfileSysreg reg, bool secure)
+{
+ uint32_t *ptr = m_sysreg_ptr(env, reg, secure);
+
+ if (ptr == NULL) {
+ return 0;
+ }
+ return gdb_get_reg32(buf, *ptr);
+}
+
+static int arm_gdb_get_m_systemreg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ /*
+ * Here, we emulate MRS instruction, where CONTROL has a mix of
+ * banked and non-banked bits.
+ */
+ if (reg == M_SYSREG_CONTROL) {
+ return gdb_get_reg32(buf, arm_v7m_mrs_control(env, env->v7m.secure));
+ }
+ return m_sysreg_get(env, buf, reg, env->v7m.secure);
+}
+
+static int arm_gdb_set_m_systemreg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ return 0; /* TODO */
+}
+
+static int arm_gen_dynamic_m_systemreg_xml(CPUState *cs, int orig_base_reg)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ GString *s = g_string_new(NULL);
+ int base_reg = orig_base_reg;
+ int i;
+
+ g_string_printf(s, "<?xml version=\"1.0\"?>");
+ g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+ g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.m-system\">\n");
+
+ for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) {
+ if (arm_feature(env, m_sysreg_def[i].feature)) {
+ g_string_append_printf(s,
+ "<reg name=\"%s\" bitsize=\"32\" regnum=\"%d\"/>\n",
+ m_sysreg_def[i].name, base_reg++);
+ }
+ }
+
+ g_string_append_printf(s, "</feature>");
+ cpu->dyn_m_systemreg_xml.desc = g_string_free(s, false);
+ cpu->dyn_m_systemreg_xml.num = base_reg - orig_base_reg;
+
+ return cpu->dyn_m_systemreg_xml.num;
+}
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * For user-only, we see the non-secure registers via m_systemreg above.
+ * For secext, encode the non-secure view as even and secure view as odd.
+ */
+static int arm_gdb_get_m_secextreg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ return m_sysreg_get(env, buf, reg >> 1, reg & 1);
+}
+
+static int arm_gdb_set_m_secextreg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ return 0; /* TODO */
+}
+
+static int arm_gen_dynamic_m_secextreg_xml(CPUState *cs, int orig_base_reg)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ GString *s = g_string_new(NULL);
+ int base_reg = orig_base_reg;
+ int i;
+
+ g_string_printf(s, "<?xml version=\"1.0\"?>");
+ g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+ g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.secext\">\n");
+
+ for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) {
+ g_string_append_printf(s,
+ "<reg name=\"%s_ns\" bitsize=\"32\" regnum=\"%d\"/>\n",
+ m_sysreg_def[i].name, base_reg++);
+ g_string_append_printf(s,
+ "<reg name=\"%s_s\" bitsize=\"32\" regnum=\"%d\"/>\n",
+ m_sysreg_def[i].name, base_reg++);
+ }
+
+ g_string_append_printf(s, "</feature>");
+ cpu->dyn_m_secextreg_xml.desc = g_string_free(s, false);
+ cpu->dyn_m_secextreg_xml.num = base_reg - orig_base_reg;
+
+ return cpu->dyn_m_secextreg_xml.num;
+}
+#endif
+
const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -XXX,XX +XXX,XX @@ const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
return cpu->dyn_sysreg_xml.desc;
} else if (strcmp(xmlname, "sve-registers.xml") == 0) {
return cpu->dyn_svereg_xml.desc;
+ } else if (strcmp(xmlname, "arm-m-system.xml") == 0) {
+ return cpu->dyn_m_systemreg_xml.desc;
+#ifndef CONFIG_USER_ONLY
+ } else if (strcmp(xmlname, "arm-m-secext.xml") == 0) {
+ return cpu->dyn_m_secextreg_xml.desc;
+#endif
}
return NULL;
}
@@ -XXX,XX +XXX,XX @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
"system-registers.xml", 0);

+ if (arm_feature(env, ARM_FEATURE_M)) {
+ gdb_register_coprocessor(cs,
+ arm_gdb_get_m_systemreg, arm_gdb_set_m_systemreg,
+ arm_gen_dynamic_m_systemreg_xml(cs, cs->gdb_num_regs),
+ "arm-m-system.xml", 0);
+#ifndef CONFIG_USER_ONLY
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ gdb_register_coprocessor(cs,
+ arm_gdb_get_m_secextreg, arm_gdb_set_m_secextreg,
+ arm_gen_dynamic_m_secextreg_xml(cs, cs->gdb_num_regs),
+ "arm-m-secext.xml", 0);
+ }
+#endif
+ }
}
--
2.34.1
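[Note: not part of the patch -- as a concrete illustration, the XML that arm_gen_dynamic_m_systemreg_xml() above would emit for a v8M Mainline core (all three feature bits set) looks roughly like the sample below; "N" is a placeholder, since the actual regnum values depend on cs->gdb_num_regs at registration time, and basepri/faultmask and msplim/psplim are omitted on cores without M_MAIN or V8 respectively:]

<?xml version="1.0"?><!DOCTYPE target SYSTEM "gdb-target.dtd"><feature name="org.gnu.gdb.arm.m-system">
<reg name="msp" bitsize="32" regnum="N"/>
<reg name="psp" bitsize="32" regnum="N+1"/>
<reg name="primask" bitsize="32" regnum="N+2"/>
<reg name="control" bitsize="32" regnum="N+3"/>
<reg name="basepri" bitsize="32" regnum="N+4"/>
<reg name="faultmask" bitsize="32" regnum="N+5"/>
<reg name="msplim" bitsize="32" regnum="N+6"/>
<reg name="psplim" bitsize="32" regnum="N+7"/>
</feature>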
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1421
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227225832.816605-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 3 +++
1 file changed, 3 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool arm_is_el3_or_mon(CPUARMState *env)
/* Return true if the processor is in secure state */
static inline bool arm_is_secure(CPUARMState *env)
{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return env->v7m.secure;
+ }
if (arm_is_el3_or_mon(env)) {
return true;
}
--
2.34.1
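[Note: not part of the patch -- for example, a hypothetical caller doing `if (arm_is_secure(env)) { ... }` on a v8M core with the Security Extension now sees the v8M Secure state directly, instead of the function consulting SCR_EL3 state that M-profile does not have.]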
New patch
From: Richard Henderson <richard.henderson@linaro.org>

M-profile doesn't have HCR_EL2. While we could test features
before each call, zero is a generally safe return value to
disable the code in the caller. This test is required to
avoid an assert in arm_is_secure_below_el3.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227225832.816605-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 3 +++
1 file changed, 3 insertions(+)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)

uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return 0;
+ }
return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
}

--
2.34.1
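[Note: not part of the patch -- the effect on callers can be seen with a hypothetical guard such as `if (arm_hcr_el2_eff(env) & HCR_TGE) { ... }`: with the early return above, M-profile cores always take the zero path, so EL2-specific code is skipped without any feature test at the call site.]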
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Per Peter Maydell:

Semihosting hooks either SVC or HLT instructions, and inside KVM
both of those go to EL1, ie to the guest, and can't be trapped to
KVM.

Let check_for_semihosting() return False when not running on TCG.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190701194942.10092-3-philmd@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/Makefile.objs | 2 +-
target/arm/cpu.h | 7 +++++++
target/arm/helper.c | 8 +++++++-
3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -XXX,XX +XXX,XX @@
-obj-y += arm-semi.o
+obj-$(CONFIG_TCG) += arm-semi.o
obj-y += helper.o vfp_helper.o
obj-y += cpu.o gdbstub.o
obj-$(TARGET_AARCH64) += cpu64.o gdbstub64.o
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void aarch64_sve_change_el(CPUARMState *env, int o,
{ }
#endif

+#if !defined(CONFIG_TCG)
+static inline target_ulong do_arm_semihosting(CPUARMState *env)
+{
+ g_assert_not_reached();
+}
+#else
target_ulong do_arm_semihosting(CPUARMState *env);
+#endif
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
-#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
@@ -XXX,XX +XXX,XX @@
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
+#ifdef CONFIG_TCG
+#include "arm_ldst.h"
+#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)

static inline bool check_for_semihosting(CPUState *cs)
{
+#ifdef CONFIG_TCG
/* Check whether this exception is a semihosting call; if so
* then handle it and return true; otherwise return false.
*/
@@ -XXX,XX +XXX,XX @@ static inline bool check_for_semihosting(CPUState *cs)
env->regs[0] = do_arm_semihosting(env);
return true;
}
+#else
+ return false;
+#endif
}

/* Handle a CPU exception for A and R profile CPUs.
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

In several places we use arm_is_secure_below_el3 and
arm_is_el3_or_mon separately from arm_is_secure.
These functions make no sense for m-profile, and
would indicate prior incorrect feature testing.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227225832.816605-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int arm_feature(CPUARMState *env, int feature)
void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);

#if !defined(CONFIG_USER_ONLY)
-/* Return true if exception levels below EL3 are in secure state,
+/*
+ * Return true if exception levels below EL3 are in secure state,
* or would be following an exception return to that level.
* Unlike arm_is_secure() (which is always a question about the
* _current_ state of the CPU) this doesn't care about the current
@@ -XXX,XX +XXX,XX @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);
*/
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
+ assert(!arm_feature(env, ARM_FEATURE_M));
if (arm_feature(env, ARM_FEATURE_EL3)) {
return !(env->cp15.scr_el3 & SCR_NS);
} else {
@@ -XXX,XX +XXX,XX @@ static inline bool arm_is_secure_below_el3(CPUARMState *env)
/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
static inline bool arm_is_el3_or_mon(CPUARMState *env)
{
+ assert(!arm_feature(env, ARM_FEATURE_M));
if (arm_feature(env, ARM_FEATURE_EL3)) {
if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
/* CPU currently in AArch64 state and EL3 */
--
2.34.1
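[Note: not part of either patch -- the cpu.h hunk in the semihosting patch above uses the usual QEMU idiom for compiling a facility out: declare the real function when the feature is built in, and provide an unreachable static inline stub otherwise, so call sites need no #ifdefs. A generic sketch of the pattern, with hypothetical names:]

#if !defined(CONFIG_FOO)
/* Stub: any call must be unreachable when the facility is compiled out. */
static inline int do_foo(void)
{
    g_assert_not_reached();
}
#else
int do_foo(void);   /* real implementation lives in foo.c */
#endif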
From: Philippe Mathieu-Daudé <philmd@redhat.com>

These routines are TCG specific.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190701194942.10092-2-philmd@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/Makefile.objs | 2 +-
target/arm/cpu.c | 9 +-
target/arm/debug_helper.c | 311 ++++++++++++++++++++++++++++++++++++++
target/arm/op_helper.c | 295 ------------------------------------
4 files changed, 315 insertions(+), 302 deletions(-)
create mode 100644 target/arm/debug_helper.c

diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -XXX,XX +XXX,XX @@ target/arm/translate-sve.o: target/arm/decode-sve.inc.c
target/arm/translate.o: target/arm/decode-vfp.inc.c
target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c

-obj-y += tlb_helper.o
+obj-y += tlb_helper.o debug_helper.o
obj-y += translate.o op_helper.o
obj-y += crypto_helper.o
obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_arch_name = arm_gdb_arch_name;
cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
cc->gdb_stop_before_watchpoint = true;
- cc->debug_excp_handler = arm_debug_excp_handler;
- cc->debug_check_watchpoint = arm_debug_check_watchpoint;
-#if !defined(CONFIG_USER_ONLY)
- cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
-#endif
-
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
cc->tlb_fill = arm_cpu_tlb_fill;
+ cc->debug_excp_handler = arm_debug_excp_handler;
+ cc->debug_check_watchpoint = arm_debug_check_watchpoint;
#if !defined(CONFIG_USER_ONLY)
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
cc->do_transaction_failed = arm_cpu_do_transaction_failed;
+ cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
#endif
}
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/debug_helper.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * ARM debug helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/* Return true if the linked breakpoint entry lbn passes its checks */
+static bool linked_bp_matches(ARMCPU *cpu, int lbn)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bcr = env->cp15.dbgbcr[lbn];
+ int brps = extract32(cpu->dbgdidr, 24, 4);
+ int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
+ int bt;
+ uint32_t contextidr;
+
+ /*
+ * Links to unimplemented or non-context aware breakpoints are
+ * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
+ * as if linked to an UNKNOWN context-aware breakpoint (in which
+ * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
+ * We choose the former.
+ */
+ if (lbn > brps || lbn < (brps - ctx_cmps)) {
+ return false;
+ }
+
+ bcr = env->cp15.dbgbcr[lbn];
+
+ if (extract64(bcr, 0, 1) == 0) {
+ /* Linked breakpoint disabled : generate no events */
+ return false;
+ }
+
+ bt = extract64(bcr, 20, 4);
+
+ /*
+ * We match the whole register even if this is AArch32 using the
+ * short descriptor format (in which case it holds both PROCID and ASID),
+ * since we don't implement the optional v7 context ID masking.
+ */
+ contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
+
+ switch (bt) {
+ case 3: /* linked context ID match */
+ if (arm_current_el(env) > 1) {
+ /* Context matches never fire in EL2 or (AArch64) EL3 */
+ return false;
+ }
+ return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
+ case 5: /* linked address mismatch (reserved in AArch64) */
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ default:
+ /*
+ * Links to Unlinked context breakpoints must generate no
+ * events; we choose to do the same for reserved values too.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t cr;
+ int pac, hmc, ssc, wt, lbn;
+ /*
+ * Note that for watchpoints the check is against the CPU security
+ * state, not the S/NS attribute on the offending data access.
+ */
+ bool is_secure = arm_is_secure(env);
+ int access_el = arm_current_el(env);
+
+ if (is_wp) {
+ CPUWatchpoint *wp = env->cpu_watchpoint[n];
+
+ if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
+ return false;
+ }
+ cr = env->cp15.dbgwcr[n];
+ if (wp->hitattrs.user) {
+ /*
+ * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
+ * match watchpoints as if they were accesses done at EL0, even if
+ * the CPU is at EL1 or higher.
+ */
+ access_el = 0;
+ }
+ } else {
+ uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+
+ if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
+ return false;
+ }
+ cr = env->cp15.dbgbcr[n];
+ }
+ /*
+ * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+ * enabled and that the address and access type match; for breakpoints
+ * we know the address matched; check the remaining fields, including
+ * linked breakpoints. We rely on WCR and BCR having the same layout
+ * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
+ * Note that some combinations of {PAC, HMC, SSC} are reserved and
+ * must act either like some valid combination or as if the watchpoint
+ * were disabled. We choose the former, and use this together with
+ * the fact that EL3 must always be Secure and EL2 must always be
+ * Non-Secure to simplify the code slightly compared to the full
+ * table in the ARM ARM.
+ */
+ pac = extract64(cr, 1, 2);
+ hmc = extract64(cr, 13, 1);
+ ssc = extract64(cr, 14, 2);
+
+ switch (ssc) {
+ case 0:
+ break;
+ case 1:
+ case 3:
+ if (is_secure) {
+ return false;
+ }
+ break;
+ case 2:
+ if (!is_secure) {
+ return false;
+ }
+ break;
+ }
+
+ switch (access_el) {
+ case 3:
+ case 2:
+ if (!hmc) {
+ return false;
+ }
+ break;
+ case 1:
+ if (extract32(pac, 0, 1) == 0) {
+ return false;
+ }
+ break;
+ case 0:
+ if (extract32(pac, 1, 1) == 0) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ wt = extract64(cr, 20, 1);
+ lbn = extract64(cr, 16, 4);
+
+ if (wt && !linked_bp_matches(cpu, lbn)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_watchpoints(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /*
+ * If watchpoints are disabled globally or we can't take debug
+ * exceptions here then watchpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
+ if (bp_wp_matches(cpu, n, true)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool check_breakpoints(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /*
+ * If breakpoints are disabled globally or we can't take debug
+ * exceptions here then breakpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
+ if (bp_wp_matches(cpu, n, false)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void HELPER(check_breakpoints)(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (check_breakpoints(cpu)) {
+ HELPER(exception_internal(env, EXCP_DEBUG));
+ }
+}
+
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+ /*
+ * Called by core code when a CPU watchpoint fires; need to check if this
+ * is also an architectural watchpoint match.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ return check_watchpoints(cpu);
+}
+
+void arm_debug_excp_handler(CPUState *cs)
+{
+ /*
+ * Called by core code when a watchpoint or breakpoint fires;
+ * need to check which one and raise the appropriate exception.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+ if (wp_hit) {
+ if (wp_hit->flags & BP_CPU) {
+ bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+ bool same_el = arm_debug_target_el(env) == arm_current_el(env);
+
+ cs->watchpoint_hit = NULL;
+
+ env->exception.fsr = arm_debug_exception_fsr(env);
+ env->exception.vaddress = wp_hit->hitaddr;
+ raise_exception(env, EXCP_DATA_ABORT,
+ syn_watchpoint(same_el, 0, wnr),
+ arm_debug_target_el(env));
+ }
+ } else {
+ uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+ bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
+
+ /*
+ * (1) GDB breakpoints should be handled first.
+ * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
+ * since singlestep is also done by generating a debug internal
+ * exception.
+ */
+ if (cpu_breakpoint_test(cs, pc, BP_GDB)
+ || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
+ return;
+ }
+
+ env->exception.fsr = arm_debug_exception_fsr(env);
+ /*
+ * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+ * values to the guest that it shouldn't be able to see at its
+ * exception/security level.
+ */
+ env->exception.vaddress = 0;
+ raise_exception(env, EXCP_PREFETCH_ABORT,
+ syn_breakpoint(same_el),
+ arm_debug_target_el(env));
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /*
+ * In BE32 system mode, target memory is stored byteswapped (on a
+ * little-endian host system), and by the time we reach here (via an
+ * opcode helper) the addresses of subword accesses have been adjusted
+ * to account for that, which means that watchpoints will not match.
+ * Undo the adjustment here.
+ */
+ if (arm_sctlr_b(env)) {
+ if (len == 1) {
+ addr ^= 3;
+ } else if (len == 2) {
+ addr ^= 2;
+ }
+ }
+
+ return addr;
+}
+
+#endif
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
}
}

-/* Return true if the linked breakpoint entry lbn passes its checks */
-static bool linked_bp_matches(ARMCPU *cpu, int lbn)
-{
- CPUARMState *env = &cpu->env;
- uint64_t bcr = env->cp15.dbgbcr[lbn];
- int brps = extract32(cpu->dbgdidr, 24, 4);
- int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
- int bt;
- uint32_t contextidr;
-
- /*
- * Links to unimplemented or non-context aware breakpoints are
- * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
- * as if linked to an UNKNOWN context-aware breakpoint (in which
- * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
- * We choose the former.
- */
- if (lbn > brps || lbn < (brps - ctx_cmps)) {
- return false;
- }
-
- bcr = env->cp15.dbgbcr[lbn];
-
- if (extract64(bcr, 0, 1) == 0) {
- /* Linked breakpoint disabled : generate no events */
- return false;
- }
-
- bt = extract64(bcr, 20, 4);
-
- /*
- * We match the whole register even if this is AArch32 using the
- * short descriptor format (in which case it holds both PROCID and ASID),
- * since we don't implement the optional v7 context ID masking.
- */
- contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
-
- switch (bt) {
- case 3: /* linked context ID match */
- if (arm_current_el(env) > 1) {
- /* Context matches never fire in EL2 or (AArch64) EL3 */
- return false;
- }
- return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
- case 5: /* linked address mismatch (reserved in AArch64) */
- case 9: /* linked VMID match (reserved if no EL2) */
- case 11: /* linked context ID and VMID match (reserved if no EL2) */
- default:
- /*
- * Links to Unlinked context breakpoints must generate no
- * events; we choose to do the same for reserved values too.
- */
- return false;
- }
-
- return false;
-}
-
-static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
-{
- CPUARMState *env = &cpu->env;
- uint64_t cr;
- int pac, hmc, ssc, wt, lbn;
- /*

From: Richard Henderson <richard.henderson@linaro.org>

Integrate neighboring code from get_phys_addr_lpae which computed
starting level, as it is easier to validate when doing both at the
same time. Mirror the checks at the start of AArch{64,32}.S2Walk,
especially S2InvalidSL and S2InconsistentSL.

This reverts 49ba115bb74, which was incorrect -- there is nothing
in the ARM pseudocode that depends on TxSZ, i.e. outputsize; the
pseudocode is consistent in referencing PAMax.

Fixes: 49ba115bb74 ("target/arm: Pass outputsize down to check_s2_mmu_setup")
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230227225832.816605-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/ptw.c | 173 ++++++++++++++++++++++++++---------------------
1 file changed, 97 insertions(+), 76 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
* check_s2_mmu_setup
* @cpu: ARMCPU
* @is_aa64: True if the translation regime is in AArch64 state
- * @startlevel: Suggested starting level
- * @inputsize: Bitsize of IPAs
+ * @tcr: VTCR_EL2 or VSTCR_EL2
+ * @ds: Effective value of TCR.DS.
+ * @iasize: Bitsize of IPAs
* @stride: Page-table stride (See the ARM ARM)
*
- * Returns true if the suggested S2 translation parameters are OK and
- * false otherwise.
+ * Decode the starting level of the S2 lookup, returning INT_MIN if
+ * the configuration is invalid.
*/
-static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
- int inputsize, int stride, int outputsize)
+static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
+ bool ds, int iasize, int stride)
{
- const int grainsize = stride + 3;
- int startsizecheck;
-
- /*
- * Negative levels are usually not allowed...
- * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
- * begins with level -1. Note that previous feature tests will have
- * eliminated this combination if it is not enabled.
- */
- if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
- return false;
- }
-
- startsizecheck = inputsize - ((3 - level) * stride + grainsize);
- if (startsizecheck < 1 || startsizecheck > stride + 4) {
- return false;
- }
-
+ int sl0, sl2, startlevel, granulebits, levels;
+ int s1_min_iasize, s1_max_iasize;

+ sl0 = extract32(tcr, 6, 2);
if (is_aa64) {
+ /*
+ * AArch64.S2InvalidTxSZ: While we checked tsz_oob near the top of
+ * get_phys_addr_lpae, that used aa64_va_parameters which apply
+ * to aarch64. If Stage1 is aarch32, the min_txsz is larger.
+ * See AArch64.S2MinTxSZ, where min_tsz is 24, translated to
+ * inputsize is 64 - 24 = 40.
+ */
+ if (iasize < 40 && !arm_el_is_aa64(&cpu->env, 1)) {
+ goto fail;
+ }
+
+ /*
+ * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
+ * so interleave AArch64.S2StartLevel.
+ */
switch (stride) {
- case 13: /* 64KB Pages. */
- if (level == 0 || (level == 1 && outputsize <= 42)) {
- return false;
+ case 9: /* 4KB */
+ /* SL2 is RES0 unless DS=1 & 4KB granule. */
+ sl2 = extract64(tcr, 33, 1);
+ if (ds && sl2) {
+ if (sl0 != 0) {
+ goto fail;
+ }
+ startlevel = -1;
+ } else {
+ startlevel = 2 - sl0;
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 44) {
+ goto fail;
+ }
+ break;
+ case 3:
+ if (!cpu_isar_feature(aa64_st, cpu)) {
+ goto fail;
+ }
+ startlevel = 3;
+ break;
+ }
}
break;
- case 11: /* 16KB Pages. */
- if (level == 0 || (level == 1 && outputsize <= 40)) {
- return false;
+ case 11: /* 16KB */
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 42) {
+ goto fail;
+ }
+ break;
+ case 3:
+ if (!ds) {
+ goto fail;
+ }
+ break;
}
+ startlevel = 3 - sl0;
break;
- case 9: /* 4KB Pages. */
- if (level == 0 && outputsize <= 42) {
- return false;
+ case 13: /* 64KB */
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 44) {
+ goto fail;
+ }
+ break;
+ case 3:
+ goto fail;
}
+ startlevel = 3 - sl0;
break;
default:
g_assert_not_reached();
}
-
- /* Inputsize checks. */
- if (inputsize > outputsize &&
- (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
- /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
- return false;
- }
} else {
- /* AArch32 only supports 4KB pages. Assert on that. */
+ /*
+ * Things are simpler for AArch32 EL2, with only 4k pages.
+ * There is no separate S2InvalidSL function, but AArch32.S2Walk
+ * begins with walkparms.sl0 in {'1x'}.
+ */
assert(stride == 9);
-
- if (level == 0) {
- return false;
+ if (sl0 >= 2) {
+ goto fail;
}
+ startlevel = 2 - sl0;
}
- return true;
+
+ /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
+ levels = 3 - startlevel;
+ granulebits = stride + 3;
+
+ s1_min_iasize = levels * stride + granulebits + 1;
+ s1_max_iasize = s1_min_iasize + (stride - 1) + 4;
+
+ if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
+ return startlevel;
+ }
+
+ fail:
+ return INT_MIN;
}

/**
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
*/
level = 4 - (inputsize - 4) / stride;
} else {
- /*
- * For stage 2 translations the starting level is specified by the
- * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
- */
- uint32_t sl0 = extract32(tcr, 6, 2);
- uint32_t sl2 = extract64(tcr, 33, 1);
- int32_t startlevel;
- bool ok;
-
- /* SL2 is RES0 unless DS=1 & 4kb granule. */
- if (param.ds && stride == 9 && sl2) {
- if (sl0 != 0) {
- level = 0;
- goto do_translation_fault;
- }
- startlevel = -1;
442
- CPUARMState *env = &cpu->env;
205
- level = 0;
443
- uint64_t cr;
206
- goto do_translation_fault;
444
- int pac, hmc, ssc, wt, lbn;
207
- }
445
- /*
208
- startlevel = -1;
446
- * Note that for watchpoints the check is against the CPU security
209
- } else if (!aarch64 || stride == 9) {
447
- * state, not the S/NS attribute on the offending data access.
210
- /* AArch32 or 4KB pages */
448
- */
211
- startlevel = 2 - sl0;
449
- bool is_secure = arm_is_secure(env);
212
-
450
- int access_el = arm_current_el(env);
213
- if (cpu_isar_feature(aa64_st, cpu)) {
451
-
214
- startlevel &= 3;
452
- if (is_wp) {
215
- }
453
- CPUWatchpoint *wp = env->cpu_watchpoint[n];
216
- } else {
454
-
217
- /* 16KB or 64KB pages */
455
- if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
218
- startlevel = 3 - sl0;
456
- return false;
457
- }
219
- }
458
- cr = env->cp15.dbgwcr[n];
220
-
459
- if (wp->hitattrs.user) {
221
- /* Check that the starting level is valid. */
460
- /*
222
- ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
461
- * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
223
- inputsize, stride, outputsize);
462
- * match watchpoints as if they were accesses done at EL0, even if
224
- if (!ok) {
463
- * the CPU is at EL1 or higher.
225
+ int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
464
- */
226
+ inputsize, stride);
465
- access_el = 0;
227
+ if (startlevel == INT_MIN) {
466
- }
228
+ level = 0;
467
- } else {
229
goto do_translation_fault;
468
- uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
230
}
469
-
231
level = startlevel;
470
- if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
471
- return false;
472
- }
473
- cr = env->cp15.dbgbcr[n];
474
- }
475
- /*
476
- * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
477
- * enabled and that the address and access type match; for breakpoints
478
- * we know the address matched; check the remaining fields, including
479
- * linked breakpoints. We rely on WCR and BCR having the same layout
480
- * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
481
- * Note that some combinations of {PAC, HMC, SSC} are reserved and
482
- * must act either like some valid combination or as if the watchpoint
483
- * were disabled. We choose the former, and use this together with
484
- * the fact that EL3 must always be Secure and EL2 must always be
485
- * Non-Secure to simplify the code slightly compared to the full
486
- * table in the ARM ARM.
487
- */
488
- pac = extract64(cr, 1, 2);
489
- hmc = extract64(cr, 13, 1);
490
- ssc = extract64(cr, 14, 2);
491
-
492
- switch (ssc) {
493
- case 0:
494
- break;
495
- case 1:
496
- case 3:
497
- if (is_secure) {
498
- return false;
499
- }
500
- break;
501
- case 2:
502
- if (!is_secure) {
503
- return false;
504
- }
505
- break;
506
- }
507
-
508
- switch (access_el) {
509
- case 3:
510
- case 2:
511
- if (!hmc) {
512
- return false;
513
- }
514
- break;
515
- case 1:
516
- if (extract32(pac, 0, 1) == 0) {
517
- return false;
518
- }
519
- break;
520
- case 0:
521
- if (extract32(pac, 1, 1) == 0) {
522
- return false;
523
- }
524
- break;
525
- default:
526
- g_assert_not_reached();
527
- }
528
-
529
- wt = extract64(cr, 20, 1);
530
- lbn = extract64(cr, 16, 4);
531
-
532
- if (wt && !linked_bp_matches(cpu, lbn)) {
533
- return false;
534
- }
535
-
536
- return true;
537
-}
538
-
539
-static bool check_watchpoints(ARMCPU *cpu)
540
-{
541
- CPUARMState *env = &cpu->env;
542
- int n;
543
-
544
- /*
545
- * If watchpoints are disabled globally or we can't take debug
546
- * exceptions here then watchpoint firings are ignored.
547
- */
548
- if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
549
- || !arm_generate_debug_exceptions(env)) {
550
- return false;
551
- }
552
-
553
- for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
554
- if (bp_wp_matches(cpu, n, true)) {
555
- return true;
556
- }
557
- }
558
- return false;
559
-}
560
-
561
-static bool check_breakpoints(ARMCPU *cpu)
562
-{
563
- CPUARMState *env = &cpu->env;
564
- int n;
565
-
566
- /*
567
- * If breakpoints are disabled globally or we can't take debug
568
- * exceptions here then breakpoint firings are ignored.
569
- */
570
- if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
571
- || !arm_generate_debug_exceptions(env)) {
572
- return false;
573
- }
574
-
575
- for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
576
- if (bp_wp_matches(cpu, n, false)) {
577
- return true;
578
- }
579
- }
580
- return false;
581
-}
582
-
583
-void HELPER(check_breakpoints)(CPUARMState *env)
584
-{
585
- ARMCPU *cpu = env_archcpu(env);
586
-
587
- if (check_breakpoints(cpu)) {
588
- HELPER(exception_internal(env, EXCP_DEBUG));
589
- }
590
-}
591
-
592
-bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
593
-{
594
- /*
595
- * Called by core code when a CPU watchpoint fires; need to check if this
596
- * is also an architectural watchpoint match.
597
- */
598
- ARMCPU *cpu = ARM_CPU(cs);
599
-
600
- return check_watchpoints(cpu);
601
-}
602
-
603
-vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
604
-{
605
- ARMCPU *cpu = ARM_CPU(cs);
606
- CPUARMState *env = &cpu->env;
607
-
608
- /*
609
- * In BE32 system mode, target memory is stored byteswapped (on a
610
- * little-endian host system), and by the time we reach here (via an
611
- * opcode helper) the addresses of subword accesses have been adjusted
612
- * to account for that, which means that watchpoints will not match.
613
- * Undo the adjustment here.
614
- */
615
- if (arm_sctlr_b(env)) {
616
- if (len == 1) {
617
- addr ^= 3;
618
- } else if (len == 2) {
619
- addr ^= 2;
620
- }
621
- }
622
-
623
- return addr;
624
-}
625
-
626
-void arm_debug_excp_handler(CPUState *cs)
627
-{
628
- /*
629
- * Called by core code when a watchpoint or breakpoint fires;
630
- * need to check which one and raise the appropriate exception.
631
- */
632
- ARMCPU *cpu = ARM_CPU(cs);
633
- CPUARMState *env = &cpu->env;
634
- CPUWatchpoint *wp_hit = cs->watchpoint_hit;
635
-
636
- if (wp_hit) {
637
- if (wp_hit->flags & BP_CPU) {
638
- bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
639
- bool same_el = arm_debug_target_el(env) == arm_current_el(env);
640
-
641
- cs->watchpoint_hit = NULL;
642
-
643
- env->exception.fsr = arm_debug_exception_fsr(env);
644
- env->exception.vaddress = wp_hit->hitaddr;
645
- raise_exception(env, EXCP_DATA_ABORT,
646
- syn_watchpoint(same_el, 0, wnr),
647
- arm_debug_target_el(env));
648
- }
649
- } else {
650
- uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
651
- bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
652
-
653
- /*
654
- * (1) GDB breakpoints should be handled first.
655
- * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
656
- * since singlestep is also done by generating a debug internal
657
- * exception.
658
- */
659
- if (cpu_breakpoint_test(cs, pc, BP_GDB)
660
- || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
661
- return;
662
- }
663
-
664
- env->exception.fsr = arm_debug_exception_fsr(env);
665
- /*
666
- * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
667
- * values to the guest that it shouldn't be able to see at its
668
- * exception/security level.
669
- */
670
- env->exception.vaddress = 0;
671
- raise_exception(env, EXCP_PREFETCH_ABORT,
672
- syn_breakpoint(same_el),
673
- arm_debug_target_el(env));
674
- }
675
-}
676
-
677
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
678
The only way to do that in TCG is a conditional branch, which clobbers
679
all our temporaries. For now implement these as helper functions. */
680
--
232
--
681
2.20.1
233
2.34.1
682
683
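
For readers following the pseudocode references: the new check folds
AArch64.S2StartLevel and AArch64.S2InconsistentSL into one routine. Below is
a self-contained sketch of just that arithmetic (the DS/SL2, PAMax and
FEAT_ST special cases are omitted; this is illustrative, not QEMU code):

    #include <limits.h>
    #include <stdio.h>

    /*
     * Model of the start-level and input-size bounds from the patch.
     * stride is 9/11/13 for the 4KB/16KB/64KB granules respectively.
     */
    static int s2_start_level(int sl0, int stride, int iasize)
    {
        int startlevel = (stride == 9) ? 2 - sl0 : 3 - sl0;
        int levels = 3 - startlevel;
        int granulebits = stride + 3;      /* log2 of the page size */
        int s1_min_iasize = levels * stride + granulebits + 1;
        int s1_max_iasize = s1_min_iasize + (stride - 1) + 4;

        if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
            return startlevel;
        }
        return INT_MIN;  /* invalid setup -> stage 2 translation fault */
    }

    int main(void)
    {
        /* 4KB granule, SL0=1, 40-bit IPA space: walk starts at level 1 */
        printf("start level = %d\n", s2_start_level(1, 9, 40));
        return 0;
    }

For the 4KB granule with SL0=1 this gives start level 1 and a legal input
size window of 31..43 bits, which is why a 40-bit IPA space passes.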
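And a note on the BE32 watchpoint address munging in the op_helper.c code
moved above, since the XOR looks cryptic at first sight. A standalone
illustration (not QEMU code) of why a byte at guest address A lives at host
offset A ^ 3 when SCTLR.B is set:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /*
         * BE32 storage on a little-endian host: the guest's big-endian
         * word 0x11223344 at guest address 0 is kept byte-reversed, so
         * guest byte address A is found at host offset A ^ 3 (and a
         * halfword at A ^ 2). The watchpoint code must undo this XOR.
         */
        uint8_t host_mem[4] = { 0x44, 0x33, 0x22, 0x11 };

        for (int a = 0; a < 4; a++) {
            printf("guest byte %d = 0x%02x\n", a, host_mem[a ^ 3]);
        }
        return 0;
    }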
To prevent execution priority remaining negative if the guest
returns from an NMI or HardFault with a corrupted IPSR, the
v8M interrupt deactivation process forces the HardFault and NMI
to inactive based on the current raw execution priority,
even if the interrupt the guest is trying to deactivate
is something else. In the pseudocode this is done in the
Deactivate() function.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190617175317.27557-3-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 40 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 5 deletions(-)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_get_pending_irq_info(void *opaque,
 int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
 {
     NVICState *s = (NVICState *)opaque;
-    VecInfo *vec;
+    VecInfo *vec = NULL;
     int ret;
 
     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
 
-    if (secure && exc_is_banked(irq)) {
-        vec = &s->sec_vectors[irq];
-    } else {
-        vec = &s->vectors[irq];
+    /*
+     * For negative priorities, v8M will forcibly deactivate the appropriate
+     * NMI or HardFault regardless of what interrupt we're being asked to
+     * deactivate (compare the DeActivate() pseudocode). This is a guard
+     * against software returning from NMI or HardFault with a corrupted
+     * IPSR and leaving the CPU in a negative-priority state.
+     * v7M does not do this, but simply deactivates the requested interrupt.
+     */
+    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
+        switch (armv7m_nvic_raw_execution_priority(s)) {
+        case -1:
+            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+                vec = &s->vectors[ARMV7M_EXCP_HARD];
+            } else {
+                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            }
+            break;
+        case -2:
+            vec = &s->vectors[ARMV7M_EXCP_NMI];
+            break;
+        case -3:
+            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            break;
+        default:
+            break;
+        }
+    }
+
+    if (!vec) {
+        if (secure && exc_is_banked(irq)) {
+            vec = &s->sec_vectors[irq];
+        } else {
+            vec = &s->vectors[irq];
+        }
     }
 
     trace_nvic_complete_irq(irq, secure);
-- 
2.20.1


From: Ard Biesheuvel <ardb@kernel.org>

Fedora 39 will ship its arm64 kernels in the new generic EFI zboot
format, using gzip compression for the payload.

For doing EFI boot in QEMU, this is completely transparent, as the
firmware or bootloader will take care of this. However, for direct
kernel boot without firmware, we will lose the ability to boot such
distro kernels unless we deal with the new format directly.

EFI zboot images contain metadata in the header regarding the placement
of the compressed payload inside the image, and the type of compression
used. This means we can wire up the existing gzip support without too
much hassle, by parsing the header and grabbing the payload from inside
the loaded zboot image.

Cc: Peter Maydell <peter.maydell@linaro.org>
Cc: Alex Bennée <alex.bennee@linaro.org>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Message-id: 20230303160109.3626966-1-ardb@kernel.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: tweaked comment formatting, fixed checkpatch nits]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/loader.h | 19 ++++++++++
 hw/arm/boot.c       |  6 +++
 hw/core/loader.c    | 91 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 116 insertions(+)

diff --git a/include/hw/loader.h b/include/hw/loader.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/loader.h
+++ b/include/hw/loader.h
@@ -XXX,XX +XXX,XX @@ ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz,
                                   uint8_t **buffer);
 ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz);
 
+/**
+ * unpack_efi_zboot_image:
+ * @buffer: pointer to a variable holding the address of a buffer containing the
+ *          image
+ * @size: pointer to a variable holding the size of the buffer
+ *
+ * Check whether the buffer contains a EFI zboot image, and if it does, extract
+ * the compressed payload and decompress it into a new buffer. If successful,
+ * the old buffer is freed, and the *buffer and size variables pointed to by the
+ * function arguments are updated to refer to the newly populated buffer.
+ *
+ * Returns 0 if the image could not be identified as a EFI zboot image.
+ * Returns -1 if the buffer contents were identified as a EFI zboot image, but
+ * unpacking failed for any reason.
+ * Returns the size of the decompressed payload if decompression was performed
+ * successfully.
+ */
+ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size);
+
 #define ELF_LOAD_FAILED       -1
 #define ELF_LOAD_NOT_ELF      -2
 #define ELF_LOAD_WRONG_ARCH   -3
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
             return -1;
         }
         size = len;
+
+        /* Unpack the image if it is a EFI zboot image */
+        if (unpack_efi_zboot_image(&buffer, &size) < 0) {
+            g_free(buffer);
+            return -1;
+        }
     }
 
     /* check the arm64 magic header value -- very old kernels may not have it */
diff --git a/hw/core/loader.c b/hw/core/loader.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -XXX,XX +XXX,XX @@ ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz)
     return bytes;
 }
 
+/* The PE/COFF MS-DOS stub magic number */
+#define EFI_PE_MSDOS_MAGIC "MZ"
+
+/*
+ * The Linux header magic number for a EFI PE/COFF
+ * image targetting an unspecified architecture.
+ */
+#define EFI_PE_LINUX_MAGIC "\xcd\x23\x82\x81"
+
+/*
+ * Bootable Linux kernel images may be packaged as EFI zboot images, which are
+ * self-decompressing executables when loaded via EFI. The compressed payload
+ * can also be extracted from the image and decompressed by a non-EFI loader.
+ *
+ * The de facto specification for this format is at the following URL:
+ *
+ * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/firmware/efi/libstub/zboot-header.S
+ *
+ * This definition is based on Linux upstream commit 29636a5ce87beba.
+ */
+struct linux_efi_zboot_header {
+    uint8_t     msdos_magic[2];         /* PE/COFF 'MZ' magic number */
+    uint8_t     reserved0[2];
+    uint8_t     zimg[4];                /* "zimg" for Linux EFI zboot images */
+    uint32_t    payload_offset;         /* LE offset to compressed payload */
+    uint32_t    payload_size;           /* LE size of the compressed payload */
+    uint8_t     reserved1[8];
+    char        compression_type[32];   /* Compression type, NUL terminated */
+    uint8_t     linux_magic[4];         /* Linux header magic */
+    uint32_t    pe_header_offset;       /* LE offset to the PE header */
+};
+
+/*
+ * Check whether *buffer points to a Linux EFI zboot image in memory.
+ *
+ * If it does, attempt to decompress it to a new buffer, and free the old one.
+ * If any of this fails, return an error to the caller.
+ *
+ * If the image is not a Linux EFI zboot image, do nothing and return success.
+ */
+ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size)
+{
+    const struct linux_efi_zboot_header *header;
+    uint8_t *data = NULL;
+    int ploff, plsize;
+    ssize_t bytes;
+
+    /* ignore if this is too small to be a EFI zboot image */
+    if (*size < sizeof(*header)) {
+        return 0;
+    }
+
+    header = (struct linux_efi_zboot_header *)*buffer;
+
+    /* ignore if this is not a Linux EFI zboot image */
+    if (memcmp(&header->msdos_magic, EFI_PE_MSDOS_MAGIC, 2) != 0 ||
+        memcmp(&header->zimg, "zimg", 4) != 0 ||
+        memcmp(&header->linux_magic, EFI_PE_LINUX_MAGIC, 4) != 0) {
+        return 0;
+    }
+
+    if (strcmp(header->compression_type, "gzip") != 0) {
+        fprintf(stderr,
+                "unable to handle EFI zboot image with \"%.*s\" compression\n",
+                (int)sizeof(header->compression_type) - 1,
+                header->compression_type);
+        return -1;
+    }
+
+    ploff = ldl_le_p(&header->payload_offset);
+    plsize = ldl_le_p(&header->payload_size);
+
+    if (ploff < 0 || plsize < 0 || ploff + plsize > *size) {
+        fprintf(stderr, "unable to handle corrupt EFI zboot image\n");
+        return -1;
+    }
+
+    data = g_malloc(LOAD_IMAGE_MAX_GUNZIP_BYTES);
+    bytes = gunzip(data, LOAD_IMAGE_MAX_GUNZIP_BYTES, *buffer + ploff, plsize);
+    if (bytes < 0) {
+        fprintf(stderr, "failed to decompress EFI zboot image\n");
+        g_free(data);
+        return -1;
+    }
+
+    g_free(*buffer);
+    *buffer = g_realloc(data, bytes);
+    *size = bytes;
+    return bytes;
+}
+
 /*
  * Functions for reboot-persistent memory regions.
  *  - used for vga bios and option roms.
-- 
2.34.1
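
To make the header layout concrete, here is a stand-alone reader for the
fields the loader above consumes. It is illustrative only: it assumes a
little-endian host for the two 32-bit fields, where the QEMU code properly
uses ldl_le_p():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        uint8_t hdr[64];
        FILE *f;

        if (argc < 2 || !(f = fopen(argv[1], "rb"))) {
            return 1;
        }
        if (fread(hdr, sizeof(hdr), 1, f) != 1) {
            fclose(f);
            return 1;
        }
        /* "MZ" at offset 0, "zimg" at offset 4, Linux magic at offset 56 */
        if (memcmp(hdr, "MZ", 2) != 0 || memcmp(hdr + 4, "zimg", 4) != 0 ||
            memcmp(hdr + 56, "\xcd\x23\x82\x81", 4) != 0) {
            fprintf(stderr, "not an EFI zboot image\n");
            fclose(f);
            return 1;
        }
        uint32_t ploff, plsize;
        memcpy(&ploff, hdr + 8, 4);    /* payload_offset, little-endian */
        memcpy(&plsize, hdr + 12, 4);  /* payload_size, little-endian */
        printf("payload at offset %u, %u bytes, compression \"%.31s\"\n",
               ploff, plsize, (char *)hdr + 24);
        fclose(f);
        return 0;
    }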
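Going back to the NVIC change above: the effect of the new switch is easiest
to see as a table from raw execution priority to the exception that gets
forced inactive. A sketch, not QEMU code; the bfhfnmins flag stands in for
AIRCR.BFHFNMINS:

    #include <stdio.h>

    /* Which exception v8M Deactivate() forces inactive for each
     * negative raw execution priority, per the patch above. */
    static const char *forced_deactivation(int raw_prio, int bfhfnmins)
    {
        switch (raw_prio) {
        case -1: return bfhfnmins ? "NS HardFault" : "Secure HardFault";
        case -2: return "NMI";
        case -3: return "Secure HardFault";
        default: return "the requested interrupt only";
        }
    }

    int main(void)
    {
        for (int p = -3; p <= 0; p++) {
            printf("raw priority %d -> %s\n", p, forced_deactivation(p, 0));
        }
        return 0;
    }

Only -1/-2/-3 are special; anything else falls through to deactivating the
interrupt the guest actually named.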
From: qianfan Zhao <qianfanguijin@163.com>

TWI_CNTR_INT_FLAG is a W1C (write 1 to clear; writing 0 has no effect)
register bit on Sun6i based SoCs, so we should lower the interrupt when
the guest sets this bit.

The Linux kernel will hang in its irq handler (mv64xxx_i2c_intr) if no
device is connected on the i2c bus; this is the trace log:

allwinner_i2c_write write CNTR(0x0c): 0xc4 A_ACK BUS_EN INT_EN
allwinner_i2c_write write CNTR(0x0c): 0xcc A_ACK INT_FLAG BUS_EN INT_EN
allwinner_i2c_read read CNTR(0x0c): 0xcc A_ACK INT_FLAG BUS_EN INT_EN
allwinner_i2c_read read STAT(0x10): 0x20 STAT_M_ADDR_WR_NACK
allwinner_i2c_write write CNTR(0x0c): 0x54 A_ACK M_STP BUS_EN
allwinner_i2c_write write CNTR(0x0c): 0x4c A_ACK INT_FLAG BUS_EN
allwinner_i2c_read read CNTR(0x0c): 0x4c A_ACK INT_FLAG BUS_EN
allwinner_i2c_read read STAT(0x10): 0xf8 STAT_IDLE
allwinner_i2c_write write CNTR(0x0c): 0x54 A_ACK M_STP BUS_EN
allwinner_i2c_write write CNTR(0x0c): 0x4c A_ACK INT_FLAG BUS_EN
allwinner_i2c_read read CNTR(0x0c): 0x4c A_ACK INT_FLAG BUS_EN
allwinner_i2c_read read STAT(0x10): 0xf8 STAT_IDLE
...

Fix it.

Signed-off-by: qianfan Zhao <qianfanguijin@163.com>
Reviewed-by: Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
Tested-by: Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/i2c/allwinner-i2c.h |  6 ++++++
 hw/i2c/allwinner-i2c.c         | 26 ++++++++++++++++++++++++--
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/include/hw/i2c/allwinner-i2c.h b/include/hw/i2c/allwinner-i2c.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/i2c/allwinner-i2c.h
+++ b/include/hw/i2c/allwinner-i2c.h
@@ -XXX,XX +XXX,XX @@
 #include "qom/object.h"
 
 #define TYPE_AW_I2C "allwinner.i2c"
+
+/** Allwinner I2C sun6i family and newer (A31, H2+, H3, etc) */
+#define TYPE_AW_I2C_SUN6I    TYPE_AW_I2C "-sun6i"
+
 OBJECT_DECLARE_SIMPLE_TYPE(AWI2CState, AW_I2C)
 
 #define AW_I2C_MEM_SIZE         0x24
@@ -XXX,XX +XXX,XX @@ struct AWI2CState {
     uint8_t srst;
     uint8_t efr;
     uint8_t lcr;
+
+    bool irq_clear_inverted;
 };
 
 #endif /* ALLWINNER_I2C_H */
diff --git a/hw/i2c/allwinner-i2c.c b/hw/i2c/allwinner-i2c.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i2c/allwinner-i2c.c
+++ b/hw/i2c/allwinner-i2c.c
@@ -XXX,XX +XXX,XX @@ static void allwinner_i2c_write(void *opaque, hwaddr offset,
             s->stat = STAT_FROM_STA(STAT_IDLE);
             s->cntr &= ~TWI_CNTR_M_STP;
         }
-        if ((s->cntr & TWI_CNTR_INT_FLAG) == 0) {
-            /* Interrupt flag cleared */
+
+        if (!s->irq_clear_inverted && !(s->cntr & TWI_CNTR_INT_FLAG)) {
+            /* Write 0 to clear this flag */
+            qemu_irq_lower(s->irq);
+        } else if (s->irq_clear_inverted && (s->cntr & TWI_CNTR_INT_FLAG)) {
+            /* Write 1 to clear this flag */
+            s->cntr &= ~TWI_CNTR_INT_FLAG;
             qemu_irq_lower(s->irq);
         }
+
         if ((s->cntr & TWI_CNTR_A_ACK) == 0) {
             if (STAT_TO_STA(s->stat) == STAT_M_DATA_RX_ACK) {
                 s->stat = STAT_FROM_STA(STAT_M_DATA_RX_NACK);
@@ -XXX,XX +XXX,XX @@ static const TypeInfo allwinner_i2c_type_info = {
     .class_init = allwinner_i2c_class_init,
 };
 
+static void allwinner_i2c_sun6i_init(Object *obj)
+{
+    AWI2CState *s = AW_I2C(obj);
+
+    s->irq_clear_inverted = true;
+}
+
+static const TypeInfo allwinner_i2c_sun6i_type_info = {
+    .name = TYPE_AW_I2C_SUN6I,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(AWI2CState),
+    .instance_init = allwinner_i2c_sun6i_init,
+    .class_init = allwinner_i2c_class_init,
+};
+
 static void allwinner_i2c_register_types(void)
 {
     type_register_static(&allwinner_i2c_type_info);
+    type_register_static(&allwinner_i2c_sun6i_type_info);
 }
 
 type_init(allwinner_i2c_register_types)
-- 
2.34.1
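
An aside, not part of the patch: a self-contained sketch contrasting the two
clearing conventions modelled by irq_clear_inverted. The INT_FLAG bit
position follows the Allwinner TWI documentation; everything else here is
illustrative only:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TWI_CNTR_INT_FLAG (1u << 3)

    /* Model only the INT_FLAG handling of a guest store to CNTR and
     * report whether the interrupt line should drop. */
    static bool cntr_write_lowers_irq(uint8_t *cntr, uint8_t val, bool w1c)
    {
        if (w1c) {
            /* sun6i: writing 1 clears INT_FLAG; writing 0 leaves it set */
            if (val & TWI_CNTR_INT_FLAG) {
                *cntr &= ~TWI_CNTR_INT_FLAG;
                return true;
            }
            return false;
        }
        /* sun4i/sun5i: the stored value replaces the flag directly */
        *cntr = val;
        return (val & TWI_CNTR_INT_FLAG) == 0;
    }

    int main(void)
    {
        uint8_t cntr = TWI_CNTR_INT_FLAG;   /* interrupt pending */

        /* A sun6i guest acknowledges by writing the flag bit back as 1 */
        printf("sun6i ack lowers irq: %d\n",
               cntr_write_lowers_irq(&cntr, TWI_CNTR_INT_FLAG, true));
        return 0;
    }

This is exactly why the old "write 0 to clear" behaviour left the guest's
acknowledgement (a write with INT_FLAG set) ignored, so the IRQ never
dropped and mv64xxx_i2c_intr spun forever.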
From: qianfan Zhao <qianfanguijin@163.com>

Allwinner H3 has four TWI (I2C) devices, named twi0, twi1, twi2 and r_twi.
The registers are compatible with TYPE_AW_I2C_SUN6I: write 1 to clear
the control register's INT_FLAG bit.

Signed-off-by: qianfan Zhao <qianfanguijin@163.com>
Reviewed-by: Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/allwinner-h3.h |  6 ++++++
 hw/arm/allwinner-h3.c         | 29 +++++++++++++++++++++++++----
 2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/allwinner-h3.h
+++ b/include/hw/arm/allwinner-h3.h
@@ -XXX,XX +XXX,XX @@ enum {
     AW_H3_DEV_UART3,
     AW_H3_DEV_EMAC,
     AW_H3_DEV_TWI0,
+    AW_H3_DEV_TWI1,
+    AW_H3_DEV_TWI2,
     AW_H3_DEV_DRAMCOM,
     AW_H3_DEV_DRAMCTL,
     AW_H3_DEV_DRAMPHY,
@@ -XXX,XX +XXX,XX @@ enum {
     AW_H3_DEV_GIC_VCPU,
     AW_H3_DEV_RTC,
     AW_H3_DEV_CPUCFG,
+    AW_H3_DEV_R_TWI,
     AW_H3_DEV_SDRAM
 };
 
@@ -XXX,XX +XXX,XX @@ struct AwH3State {
     AwSidState sid;
     AwSdHostState mmc0;
     AWI2CState i2c0;
+    AWI2CState i2c1;
+    AWI2CState i2c2;
+    AWI2CState r_twi;
     AwSun8iEmacState emac;
     AwRtcState rtc;
     GICState gic;
diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/allwinner-h3.c
+++ b/hw/arm/allwinner-h3.c
@@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_h3_memmap[] = {
     [AW_H3_DEV_UART2]      = 0x01c28800,
     [AW_H3_DEV_UART3]      = 0x01c28c00,
     [AW_H3_DEV_TWI0]       = 0x01c2ac00,
+    [AW_H3_DEV_TWI1]       = 0x01c2b000,
+    [AW_H3_DEV_TWI2]       = 0x01c2b400,
     [AW_H3_DEV_EMAC]       = 0x01c30000,
     [AW_H3_DEV_DRAMCOM]    = 0x01c62000,
     [AW_H3_DEV_DRAMCTL]    = 0x01c63000,
@@ -XXX,XX +XXX,XX @@ const hwaddr allwinner_h3_memmap[] = {
     [AW_H3_DEV_GIC_VCPU]   = 0x01c86000,
     [AW_H3_DEV_RTC]        = 0x01f00000,
     [AW_H3_DEV_CPUCFG]     = 0x01f01c00,
+    [AW_H3_DEV_R_TWI]      = 0x01f02400,
     [AW_H3_DEV_SDRAM]      = 0x40000000
 };
 
@@ -XXX,XX +XXX,XX @@ struct AwH3Unimplemented {
     { "uart1",     0x01c28400, 1 * KiB },
     { "uart2",     0x01c28800, 1 * KiB },
     { "uart3",     0x01c28c00, 1 * KiB },
-    { "twi1",      0x01c2b000, 1 * KiB },
-    { "twi2",      0x01c2b400, 1 * KiB },
     { "scr",       0x01c2c400, 1 * KiB },
     { "gpu",       0x01c40000, 64 * KiB },
     { "hstmr",     0x01c60000, 4 * KiB },
@@ -XXX,XX +XXX,XX @@ struct AwH3Unimplemented {
     { "r_prcm",    0x01f01400, 1 * KiB },
     { "r_twd",     0x01f01800, 1 * KiB },
     { "r_cir-rx",  0x01f02000, 1 * KiB },
-    { "r_twi",     0x01f02400, 1 * KiB },
     { "r_uart",    0x01f02800, 1 * KiB },
     { "r_pio",     0x01f02c00, 1 * KiB },
     { "r_pwm",     0x01f03800, 1 * KiB },
@@ -XXX,XX +XXX,XX @@ enum {
     AW_H3_GIC_SPI_UART2     = 2,
     AW_H3_GIC_SPI_UART3     = 3,
     AW_H3_GIC_SPI_TWI0      = 6,
+    AW_H3_GIC_SPI_TWI1      = 7,
+    AW_H3_GIC_SPI_TWI2      = 8,
     AW_H3_GIC_SPI_TIMER0    = 18,
     AW_H3_GIC_SPI_TIMER1    = 19,
+    AW_H3_GIC_SPI_R_TWI     = 44,
     AW_H3_GIC_SPI_MMC0      = 60,
     AW_H3_GIC_SPI_EHCI0     = 72,
     AW_H3_GIC_SPI_OHCI0     = 73,
@@ -XXX,XX +XXX,XX @@ static void allwinner_h3_init(Object *obj)
 
     object_initialize_child(obj, "rtc", &s->rtc, TYPE_AW_RTC_SUN6I);
 
-    object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C);
+    object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I);
+    object_initialize_child(obj, "twi1", &s->i2c1, TYPE_AW_I2C_SUN6I);
+    object_initialize_child(obj, "twi2", &s->i2c2, TYPE_AW_I2C_SUN6I);
+    object_initialize_child(obj, "r_twi", &s->r_twi, TYPE_AW_I2C_SUN6I);
 }
 
 static void allwinner_h3_realize(DeviceState *dev, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp)
     sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0,
                        qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI0));
 
+    sysbus_realize(SYS_BUS_DEVICE(&s->i2c1), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c1), 0, s->memmap[AW_H3_DEV_TWI1]);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c1), 0,
+                       qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI1));
+
+    sysbus_realize(SYS_BUS_DEVICE(&s->i2c2), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c2), 0, s->memmap[AW_H3_DEV_TWI2]);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c2), 0,
+                       qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI2));
+
+    sysbus_realize(SYS_BUS_DEVICE(&s->r_twi), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->r_twi), 0, s->memmap[AW_H3_DEV_R_TWI]);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->r_twi), 0,
+                       qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_R_TWI));
+
     /* Unimplemented devices */
     for (i = 0; i < ARRAY_SIZE(unimplemented); i++) {
         create_unimplemented_device(unimplemented[i].device_name,
-- 
2.34.1
2.34.1
diff view generated by jsdifflib