First arm pullreq for 6.1 cycle. The big stuff here is RTH's alignment series.

thanks
-- PMM

The following changes since commit ccdf06c1db192152ac70a1dd974c624f566cb7d4:

  Open 6.1 development tree (2021-04-30 11:15:40 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210430

for you to fetch changes up to a6091108aa44e9017af4ca13c43f55a629e3744c:

  hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows (2021-04-30 11:16:52 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows
 * hw: add compat machines for 6.1
 * Fault misaligned accesses where the architecture requires it
 * Fix some corner cases of MTE faults (notably with misaligned accesses)
 * Make Thumb store insns UNDEF for Rn==1111
 * hw/arm/smmuv3: Support 16K translation granule

----------------------------------------------------------------
Cornelia Huck (1):
      hw: add compat machines for 6.1

Kunkun Jiang (1):
      hw/arm/smmuv3: Support 16K translation granule

Peter Maydell (2):
      target/arm: Make Thumb store insns UNDEF for Rn==1111
      hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows

Richard Henderson (39):
      target/arm: Fix mte_checkN
      target/arm: Split out mte_probe_int
      target/arm: Fix unaligned checks for mte_check1, mte_probe1
      test/tcg/aarch64: Add mte-5
      target/arm: Replace MTEDESC ESIZE+TSIZE with SIZEM1
      target/arm: Merge mte_check1, mte_checkN
      target/arm: Rename mte_probe1 to mte_probe
      target/arm: Simplify sve mte checking
      target/arm: Remove log2_esize parameter to gen_mte_checkN
      target/arm: Fix decode of align in VLDST_single
      target/arm: Rename TBFLAG_A32, SCTLR_B
      target/arm: Rename TBFLAG_ANY, PSTATE_SS
      target/arm: Add wrapper macros for accessing tbflags
      target/arm: Introduce CPUARMTBFlags
      target/arm: Move mode specific TB flags to tb->cs_base
      target/arm: Move TBFLAG_AM32 bits to the top
      target/arm: Move TBFLAG_ANY bits to the bottom
      target/arm: Add ALIGN_MEM to TBFLAG_ANY
      target/arm: Adjust gen_aa32_{ld, st}_i32 for align+endianness
      target/arm: Merge gen_aa32_frob64 into gen_aa32_ld_i64
      target/arm: Fix SCTLR_B test for TCGv_i64 load/store
      target/arm: Adjust gen_aa32_{ld, st}_i64 for align+endianness
      target/arm: Enforce word alignment for LDRD/STRD
      target/arm: Enforce alignment for LDA/LDAH/STL/STLH
      target/arm: Enforce alignment for LDM/STM
      target/arm: Enforce alignment for RFE
      target/arm: Enforce alignment for SRS
      target/arm: Enforce alignment for VLDM/VSTM
      target/arm: Enforce alignment for VLDR/VSTR
      target/arm: Enforce alignment for VLDn (all lanes)
      target/arm: Enforce alignment for VLDn/VSTn (multiple)
      target/arm: Enforce alignment for VLDn/VSTn (single)
      target/arm: Use finalize_memop for aa64 gpr load/store
      target/arm: Use finalize_memop for aa64 fpr load/store
      target/arm: Enforce alignment for aa64 load-acq/store-rel
      target/arm: Use MemOp for size + endian in aa64 vector ld/st
      target/arm: Enforce alignment for aa64 vector LDn/STn (multiple)
      target/arm: Enforce alignment for aa64 vector LDn/STn (single)
      target/arm: Enforce alignment for sve LD1R

 include/hw/boards.h               |   3 +
 include/hw/i386/pc.h              |   3 +
 include/hw/pci-host/gpex.h        |   4 +
 target/arm/cpu.h                  | 105 ++++++++++-----
 target/arm/helper-a64.h           |   3 +-
 target/arm/internals.h            |  11 +-
 target/arm/translate-a64.h        |   2 +-
 target/arm/translate.h            |  38 ++++++
 target/arm/neon-ls.decode         |   4 +-
 hw/arm/smmuv3.c                   |   6 +-
 hw/arm/virt.c                     |   7 +-
 hw/core/machine.c                 |   5 +
 hw/i386/pc.c                      |   3 +
 hw/i386/pc_piix.c                 |  14 +-
 hw/i386/pc_q35.c                  |  13 +-
 hw/pci-host/gpex.c                |  56 +++++++-
 hw/ppc/spapr.c                    |  17 ++-
 hw/s390x/s390-virtio-ccw.c        |  14 +-
 target/arm/helper-a64.c           |   2 +-
 target/arm/helper.c               | 162 ++++++++++++----------
 target/arm/mte_helper.c           | 185 ++++++++++---------------
 target/arm/sve_helper.c           | 100 +++++---------
 target/arm/translate-a64.c        | 236 ++++++++++++++++----------------
 target/arm/translate-sve.c        |  11 +-
 target/arm/translate.c            | 274 ++++++++++++++++++++++----------------
 tests/tcg/aarch64/mte-5.c         |  44 ++++++
 target/arm/translate-neon.c.inc   | 117 ++++++++++++----
 target/arm/translate-vfp.c.inc    |  20 +--
 tests/tcg/aarch64/Makefile.target |   2 +-
 29 files changed, 878 insertions(+), 583 deletions(-)
 create mode 100644 tests/tcg/aarch64/mte-5.c

From: Kunkun Jiang <jiangkunkun@huawei.com>

The driver can query some bits in SMMUv3 IDR5 to learn which
translation granules are supported. Arm recommends that SMMUv3
implementations support at least 4K and 64K granules. But in
the vSMMUv3, there seems to be no reason not to support the 16K
translation granule. In addition, if 16K is not supported,
vSVA will fail to be enabled in the future for 16K guest
kernels. So it'd be better to support it.

Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/smmuv3.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -XXX,XX +XXX,XX @@ static void smmuv3_init_regs(SMMUv3State *s)
     s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
     s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);

-    /* 4K and 64K granule support */
+    /* 4K, 16K and 64K granule support */
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
+    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

@@ -XXX,XX +XXX,XX @@ static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)

         tg = CD_TG(cd, i);
         tt->granule_sz = tg2granule(tg, i);
-        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
+        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
+             tt->granule_sz != 16) || CD_ENDI(cd)) {
             goto bad_cd;
         }

--
2.20.1

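A quick aside on the granule encoding that decode_cd() is checking (sketch
mine, not part of the patch): tt->granule_sz is a page shift, and for the
CD's TG0 field the architectural encoding is 0b00 -> 4KB, 0b01 -> 64KB,
0b10 -> 16KB, which is how the accepted shifts come out as 12, 16 and now 14:

    /* Sketch of the TG0 -> page-shift mapping; the real helper is
     * tg2granule(), which also takes the table index because TG1
     * uses a different encoding than TG0. */
    static int tg0_to_shift(int tg0)
    {
        switch (tg0) {
        case 0:  return 12;   /* 4KB  */
        case 2:  return 14;   /* 16KB: newly accepted above */
        case 1:  return 16;   /* 64KB */
        default: return -1;   /* reserved */
        }
    }
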
The Arm ARM specifies that for Thumb encodings of the various plain
store insns, if the Rn field is 1111 then we must UNDEF. This is
different from the Arm encodings, where this case is either
UNPREDICTABLE or has well-defined behaviour. The exclusive stores,
store-release and STRD do not have this UNDEF case for any encoding.

Enforce the UNDEF for this case in the Thumb plain store insns.

Fixes: https://bugs.launchpad.net/qemu/+bug/1922887
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210408162402.5822-1-peter.maydell@linaro.org
---
 target/arm/translate.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
     TCGv_i32 addr, tmp;

+    /*
+     * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
+     * is either UNPREDICTABLE or has defined behaviour
+     */
+    if (s->thumb && a->rn == 15) {
+        return false;
+    }
+
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
@@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
     TCGv_i32 addr, tmp;

+    /*
+     * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
+     * is either UNPREDICTABLE or has defined behaviour
+     */
+    if (s->thumb && a->rn == 15) {
+        return false;
+    }
+
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
--
2.20.1

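To make the rejected case concrete (example mine, not from the patch): STR
(immediate) in its T3 encoding is 1111 1000 1100 nnnn tttt iiiiiiiiiiii, so
a word such as 0xf8cf0004 is a plain store with Rn == 0b1111. The
architecture makes that UNDEFINED in Thumb state, and with this change the
trans function returns false, so QEMU raises an UNDEF exception instead of
performing the store:

    /* Illustration only: extracting Rn from the example insn word */
    uint32_t insn = 0xf8cf0004;       /* Thumb-2 plain store, Rn = 0b1111 */
    int rn = extract32(insn, 16, 4);  /* == 15: now rejected by the
                                       * op_store_rr/op_store_ri checks   */
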
From: Richard Henderson <richard.henderson@linaro.org>

We were incorrectly assuming that only the first byte of an MTE access
is checked against the tags. But per the ARM, unaligned accesses are
pre-decomposed into single-byte accesses. So by the time we reach the
actual MTE check in the ARM pseudocode, all accesses are aligned.

Therefore, the first failure is always either the first byte of the
access, or the first byte of the granule.

In addition, some of the arithmetic is off for last-first -> count.
This does not become directly visible until a later patch that passes
single bytes into this function, so ptr == ptr_last.

Buglink: https://bugs.launchpad.net/bugs/1921948
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: tweaked a comment]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 40 ++++++++++++++++++----------------------
 1 file changed, 18 insertions(+), 22 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                     uint64_t ptr, uintptr_t ra)
 {
     int mmu_idx, ptr_tag, bit55;
-    uint64_t ptr_last, ptr_end, prev_page, next_page;
-    uint64_t tag_first, tag_end;
-    uint64_t tag_byte_first, tag_byte_end;
-    uint32_t esize, total, tag_count, tag_size, n, c;
+    uint64_t ptr_last, prev_page, next_page;
+    uint64_t tag_first, tag_last;
+    uint64_t tag_byte_first, tag_byte_last;
+    uint32_t total, tag_count, tag_size, n, c;
     uint8_t *mem1, *mem2;
     MMUAccessType type;

@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,

     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    esize = FIELD_EX32(desc, MTEDESC, ESIZE);
     total = FIELD_EX32(desc, MTEDESC, TSIZE);

-    /* Find the addr of the end of the access, and of the last element. */
-    ptr_end = ptr + total;
-    ptr_last = ptr_end - esize;
+    /* Find the addr of the end of the access */
+    ptr_last = ptr + total - 1;

     /* Round the bounds to the tag granule, and compute the number of tags. */
     tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
-    tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
-    tag_count = (tag_end - tag_first) / TAG_GRANULE;
+    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
+    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

     /* Round the bounds to twice the tag granule, and compute the bytes. */
     tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
-    tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);
+    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

     /* Locate the page boundaries. */
     prev_page = ptr & TARGET_PAGE_MASK;
     next_page = prev_page + TARGET_PAGE_SIZE;

-    if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
+    if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
         /* Memory access stays on one page. */
-        tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
+        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                   MMU_DATA_LOAD, tag_size, ra);
         if (!mem1) {
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                   MMU_DATA_LOAD, tag_size, ra);

-        tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
+        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
         mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
-                                  ptr_end - next_page,
+                                  ptr_last - next_page + 1,
                                   MMU_DATA_LOAD, tag_size, ra);

         /*
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
     }

     /*
-     * If we failed, we know which granule.  Compute the element that
-     * is first in that granule, and signal failure on that element.
+     * If we failed, we know which granule.  For the first granule, the
+     * failure address is @ptr, the first byte accessed.  Otherwise the
+     * failure address is the first byte of the nth granule.
      */
     if (unlikely(n < tag_count)) {
-        uint64_t fail_ofs;
-
-        fail_ofs = tag_first + n * TAG_GRANULE - ptr;
-        fail_ofs = ROUND_UP(fail_ofs, esize);
-        mte_check_fail(env, desc, ptr + fail_ofs, ra);
+        uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
+        mte_check_fail(env, desc, fault, ra);
     }

 done:
--
2.20.1

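A worked example of the count fix (values mine, not from the patch): with
TAG_GRANULE == 16 and a single-byte access at a granule-aligned ptr, we have
ptr_last == ptr, and:

    /*
     * old: tag_end   = QEMU_ALIGN_UP(ptr_last, 16)     == ptr
     *      tag_count = (tag_end - tag_first) / 16      == 0  (no tag checked)
     * new: tag_last  = QEMU_ALIGN_DOWN(ptr_last, 16)   == ptr
     *      tag_count = (tag_last - tag_first) / 16 + 1 == 1
     */

which is exactly the "last-first -> count" off-by-one the commit message
refers to.
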
From: Richard Henderson <richard.henderson@linaro.org>

Split out a helper function from mte_checkN to perform
all of the checking and address manipulation.  So far,
just use this in mte_checkN itself.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 52 +++++++++++++++++++++++++++++++----------
 1 file changed, 40 insertions(+), 12 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
     return n;
 }

-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra)
+/**
+ * mte_probe_int() - helper for mte_probe and mte_check
+ * @env: CPU environment
+ * @desc: MTEDESC descriptor
+ * @ptr: virtual address of the base of the access
+ * @fault: return virtual address of the first check failure
+ *
+ * Internal routine for both mte_probe and mte_check.
+ * Return zero on failure, filling in *fault.
+ * Return negative on trivial success for tbi disabled.
+ * Return positive on success with tbi enabled.
+ */
+static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
+                         uintptr_t ra, uint32_t total, uint64_t *fault)
 {
     int mmu_idx, ptr_tag, bit55;
     uint64_t ptr_last, prev_page, next_page;
     uint64_t tag_first, tag_last;
     uint64_t tag_byte_first, tag_byte_last;
-    uint32_t total, tag_count, tag_size, n, c;
+    uint32_t tag_count, tag_size, n, c;
     uint8_t *mem1, *mem2;
     MMUAccessType type;

     bit55 = extract64(ptr, 55, 1);
+    *fault = ptr;

     /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
     if (unlikely(!tbi_check(desc, bit55))) {
-        return ptr;
+        return -1;
     }

     ptr_tag = allocation_tag_from_addr(ptr);

     if (tcma_check(desc, bit55, ptr_tag)) {
-        goto done;
+        return 1;
     }

     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    total = FIELD_EX32(desc, MTEDESC, TSIZE);

     /* Find the addr of the end of the access */
     ptr_last = ptr + total - 1;
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                   MMU_DATA_LOAD, tag_size, ra);
         if (!mem1) {
-            goto done;
+            return 1;
         }
         /* Perform all of the comparisons. */
         n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         }
         if (n == c) {
             if (!mem2) {
-                goto done;
+                return 1;
             }
             n += checkN(mem2, 0, ptr_tag, tag_count - c);
         }
     }

+    if (likely(n == tag_count)) {
+        return 1;
+    }
+
     /*
      * If we failed, we know which granule.  For the first granule, the
      * failure address is @ptr, the first byte accessed.  Otherwise the
      * failure address is the first byte of the nth granule.
      */
-    if (unlikely(n < tag_count)) {
-        uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
-        mte_check_fail(env, desc, fault, ra);
+    if (n > 0) {
+        *fault = tag_first + n * TAG_GRANULE;
     }
+    return 0;
+}

- done:
+uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
+                    uint64_t ptr, uintptr_t ra)
+{
+    uint64_t fault;
+    uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
+    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+
+    if (unlikely(ret == 0)) {
+        mte_check_fail(env, desc, fault, ra);
+    } else if (ret < 0) {
+        return ptr;
+    }
     return useronly_clean_ptr(ptr);
 }

--
2.20.1

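For readers following the new return convention, this is the caller pattern
the patch establishes (restated from the mte_checkN body above, comments
mine):

    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);

    if (ret == 0) {           /* checked, and the tag check failed */
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {     /* TBI disabled: ptr has no tag to clean */
        return ptr;
    }
    return useronly_clean_ptr(ptr);   /* success with TBI enabled */
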
From: Richard Henderson <richard.henderson@linaro.org>

We were incorrectly assuming that only the first byte of an MTE access
is checked against the tags. But per the ARM, unaligned accesses are
pre-decomposed into single-byte accesses. So by the time we reach the
actual MTE check in the ARM pseudocode, all accesses are aligned.

We cannot tell a priori whether or not a given scalar access is aligned,
therefore we must at least check. Use mte_probe_int, which is already
set up for checking multiple granules.

Buglink: https://bugs.launchpad.net/bugs/1921948
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 109 +++++++++++++---------------------------
 1 file changed, 35 insertions(+), 74 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
     }
 }

-/*
- * Perform an MTE checked access for a single logical or atomic access.
- */
-static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
-                           uintptr_t ra, int bit55)
-{
-    int mem_tag, mmu_idx, ptr_tag, size;
-    MMUAccessType type;
-    uint8_t *mem;
-
-    ptr_tag = allocation_tag_from_addr(ptr);
-
-    if (tcma_check(desc, bit55, ptr_tag)) {
-        return true;
-    }
-
-    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
-    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    size = FIELD_EX32(desc, MTEDESC, ESIZE);
-
-    mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
-                             MMU_DATA_LOAD, 1, ra);
-    if (!mem) {
-        return true;
-    }
-
-    mem_tag = load_tag1(ptr, mem);
-    return ptr_tag == mem_tag;
-}
-
-/*
- * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
- * Returns false if the access is Checked and the check failed.  This
- * is only intended to probe the tag -- the validity of the page must
- * be checked beforehand.
- */
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
-{
-    int bit55 = extract64(ptr, 55, 1);
-
-    /* If TBI is disabled, the access is unchecked. */
-    if (unlikely(!tbi_check(desc, bit55))) {
-        return true;
-    }
-
-    return mte_probe1_int(env, desc, ptr, 0, bit55);
-}
-
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra)
-{
-    int bit55 = extract64(ptr, 55, 1);
-
-    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
-    if (unlikely(!tbi_check(desc, bit55))) {
-        return ptr;
-    }
-
-    if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
-        mte_check_fail(env, desc, ptr, ra);
-    }
-
-    return useronly_clean_ptr(ptr);
-}
-
-uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
-{
-    return mte_check1(env, desc, ptr, GETPC());
-}
-
-/*
- * Perform an MTE checked access for multiple logical accesses.
- */
-
 /**
  * checkN:
  * @tag: tag memory to test
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
     return mte_checkN(env, desc, ptr, GETPC());
 }

+uint64_t mte_check1(CPUARMState *env, uint32_t desc,
+                    uint64_t ptr, uintptr_t ra)
+{
+    uint64_t fault;
+    uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
+    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+
+    if (unlikely(ret == 0)) {
+        mte_check_fail(env, desc, fault, ra);
+    } else if (ret < 0) {
+        return ptr;
+    }
+    return useronly_clean_ptr(ptr);
+}
+
+uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    return mte_check1(env, desc, ptr, GETPC());
+}
+
+/*
+ * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
+ * Returns false if the access is Checked and the check failed.  This
+ * is only intended to probe the tag -- the validity of the page must
+ * be checked beforehand.
+ */
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    uint64_t fault;
+    uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
+    int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);
+
+    return ret != 0;
+}
+
 /*
  * Perform an MTE checked access for DC_ZVA.
  */
--
2.20.1

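Why a single scalar access now needs the multi-granule path (worked example
mine): with TAG_GRANULE == 16, an unaligned 8-byte load at 0x100c touches
bytes 0x100c..0x1013, which span two granules:

    /*
     * tag_first = QEMU_ALIGN_DOWN(0x100c, 16)   == 0x1000
     * tag_last  = QEMU_ALIGN_DOWN(0x1013, 16)   == 0x1010
     * tag_count = (0x1010 - 0x1000) / 16 + 1    == 2
     */

The old mte_probe1_int() compared only the tag of the first granule, so a
mismatched second tag went undetected; mte_probe_int() checks both.
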
From: Richard Henderson <richard.henderson@linaro.org>

Buglink: https://bugs.launchpad.net/bugs/1921948
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/tcg/aarch64/mte-5.c         | 44 +++++++++++++++++++++++++++++++
 tests/tcg/aarch64/Makefile.target |  2 +-
 2 files changed, 45 insertions(+), 1 deletion(-)
 create mode 100644 tests/tcg/aarch64/mte-5.c

diff --git a/tests/tcg/aarch64/mte-5.c b/tests/tcg/aarch64/mte-5.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/aarch64/mte-5.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Memory tagging, faulting unaligned access.
+ *
+ * Copyright (c) 2021 Linaro Ltd
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "mte.h"
+
+void pass(int sig, siginfo_t *info, void *uc)
+{
+    assert(info->si_code == SEGV_MTESERR);
+    exit(0);
+}
+
+int main(int ac, char **av)
+{
+    struct sigaction sa;
+    void *p0, *p1, *p2;
+    long excl = 1;
+
+    enable_mte(PR_MTE_TCF_SYNC);
+    p0 = alloc_mte_mem(sizeof(*p0));
+
+    /* Create two differently tagged pointers.  */
+    asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
+    asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1));
+    assert(excl != 1);
+    asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));
+    assert(p1 != p2);
+
+    memset(&sa, 0, sizeof(sa));
+    sa.sa_sigaction = pass;
+    sa.sa_flags = SA_SIGINFO;
+    sigaction(SIGSEGV, &sa, NULL);
+
+    /* Store two different tags in sequential granules. */
+    asm("stg %0, [%0]" : : "r"(p1));
+    asm("stg %0, [%0]" : : "r"(p2 + 16));
+
+    /* Perform an unaligned load crossing the granules. */
+    asm volatile("ldr %0, [%1]" : "=r"(p0) : "r"(p1 + 12));
+    abort();
+}
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -XXX,XX +XXX,XX @@ AARCH64_TESTS += bti-2

 # MTE Tests
 ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_MTE),)
-AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-6
+AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6
 mte-%: CFLAGS += -march=armv8.5-a+memtag
 endif

--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

After recent changes, mte_checkN does not use ESIZE,
and mte_check1 never used TSIZE. We can combine the
two into a single field: SIZEM1.

Choose to pass size - 1 because size == 0 is never used,
our immediate need in mte_probe_int is for the address
of the last byte (ptr + size - 1), and since almost all
operations are powers of 2, this makes the immediate
constant one bit smaller.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h     |  4 ++--
 target/arm/mte_helper.c    | 18 ++++++++----------
 target/arm/translate-a64.c |  5 ++---
 target/arm/translate-sve.c |  5 ++---
 4 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@
 #define TARGET_ARM_INTERNALS_H

 #include "hw/registerfields.h"
+#include "tcg/tcg-gvec-desc.h"
 #include "syndrome.h"

 /* register banks for CPU modes */
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, MIDX, 0, 4)
 FIELD(MTEDESC, TBI, 4, 2)
 FIELD(MTEDESC, TCMA, 6, 2)
 FIELD(MTEDESC, WRITE, 8, 1)
-FIELD(MTEDESC, ESIZE, 9, 5)
-FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */
+FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */

 bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
 uint64_t mte_check1(CPUARMState *env, uint32_t desc,
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
  * Return positive on success with tbi enabled.
  */
 static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
-                         uintptr_t ra, uint32_t total, uint64_t *fault)
+                         uintptr_t ra, uint64_t *fault)
 {
     int mmu_idx, ptr_tag, bit55;
     uint64_t ptr_last, prev_page, next_page;
     uint64_t tag_first, tag_last;
     uint64_t tag_byte_first, tag_byte_last;
-    uint32_t tag_count, tag_size, n, c;
+    uint32_t sizem1, tag_count, tag_size, n, c;
     uint8_t *mem1, *mem2;
     MMUAccessType type;

@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,

     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
+    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

     /* Find the addr of the end of the access */
-    ptr_last = ptr + total - 1;
+    ptr_last = ptr + sizem1;

     /* Round the bounds to the tag granule, and compute the number of tags. */
     tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
     if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
         /* Memory access stays on one page. */
         tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
-        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
+        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                   MMU_DATA_LOAD, tag_size, ra);
         if (!mem1) {
             return 1;
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                     uint64_t ptr, uintptr_t ra)
 {
     uint64_t fault;
-    uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
-    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

     if (unlikely(ret == 0)) {
         mte_check_fail(env, desc, fault, ra);
@@ -XXX,XX +XXX,XX @@ uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                     uint64_t ptr, uintptr_t ra)
 {
     uint64_t fault;
-    uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
-    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

     if (unlikely(ret == 0)) {
         mte_check_fail(env, desc, fault, ra);
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
     uint64_t fault;
-    uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
-    int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);
+    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

     return ret != 0;
 }
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
     desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
     desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-    desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size);
+    desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
     tcg_desc = tcg_const_i32(desc);

     ret = new_tmp_a64(s);
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
     desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
     desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-    desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize);
-    desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size);
+    desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
     tcg_desc = tcg_const_i32(desc);

     ret = new_tmp_a64(s);
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
-        desc = FIELD_DP32(desc, MTEDESC, TSIZE, mte_n << msz);
+        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
         desc <<= SVE_MTEDESC_SHIFT;
     } else {
         addr = clean_data_tbi(s, addr);
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
+        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
         desc <<= SVE_MTEDESC_SHIFT;
     }
     desc = simd_desc(vsz, vsz, desc | scale);
--
2.20.1

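A small illustration of the size - 1 encoding (sketch mine, not from the
patch): because size == 0 never occurs, SIZEM1 loses nothing, and
power-of-two sizes need one bit less, e.g. 32 needs six bits but 31 only
five:

    desc = FIELD_DP32(desc, MTEDESC, SIZEM1, 32 - 1);       /* pack   */
    uint32_t size = FIELD_EX32(desc, MTEDESC, SIZEM1) + 1;  /* unpack */

and mte_probe_int() can use the field directly, as ptr_last = ptr + sizem1.
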
From: Richard Henderson <richard.henderson@linaro.org>

The mte_check1 and mte_checkN functions are now identical.
Drop mte_check1 and rename mte_checkN to mte_check.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-a64.h    |  3 +--
 target/arm/internals.h     |  5 +----
 target/arm/mte_helper.c    | 26 +++-----------------------
 target/arm/sve_helper.c    | 14 +++++++-------
 target/arm/translate-a64.c |  4 ++--
 5 files changed, 14 insertions(+), 38 deletions(-)

diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
 DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)

-DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
-DEF_HELPER_FLAGS_3(mte_checkN, TCG_CALL_NO_WG, i64, env, i32, i64)
+DEF_HELPER_FLAGS_3(mte_check, TCG_CALL_NO_WG, i64, env, i32, i64)
 DEF_HELPER_FLAGS_3(mte_check_zva, TCG_CALL_NO_WG, i64, env, i32, i64)
 DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, WRITE, 8, 1)
 FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */

 bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra);
-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra);
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

 static inline int allocation_tag_from_addr(uint64_t ptr)
 {
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
     return 0;
 }

-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra)
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
 {
     uint64_t fault;
     int ret = mte_probe_int(env, desc, ptr, ra, &fault);
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
     return useronly_clean_ptr(ptr);
 }

-uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
-    return mte_checkN(env, desc, ptr, GETPC());
-}
-
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra)
-{
-    uint64_t fault;
-    int ret = mte_probe_int(env, desc, ptr, ra, &fault);
-
-    if (unlikely(ret == 0)) {
-        mte_check_fail(env, desc, fault, ra);
-    } else if (ret < 0) {
-        return ptr;
-    }
-    return useronly_clean_ptr(ptr);
-}
-
-uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
-{
-    return mte_check1(env, desc, ptr, GETPC());
+    return mte_check(env, desc, ptr, GETPC());
 }

 /*
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env,
                                      uintptr_t ra)
 {
     sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
-                                mtedesc, ra, mte_check1);
+                                mtedesc, ra, mte_check);
 }

 static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
                                      uintptr_t ra)
 {
     sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
-                                mtedesc, ra, mte_checkN);
+                                mtedesc, ra, mte_check);
 }


@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
     if (fault == FAULT_FIRST) {
         /* Trapping mte check for the first-fault element.  */
         if (mtedesc) {
-            mte_check1(env, mtedesc, addr + mem_off, retaddr);
+            mte_check(env, mtedesc, addr + mem_off, retaddr);
         }

         /*
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                info.attrs, BP_MEM_READ, retaddr);
             }
             if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
-                mte_check1(env, mtedesc, addr, retaddr);
+                mte_check(env, mtedesc, addr, retaddr);
             }
             host_fn(&scratch, reg_off, info.host);
         } else {
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                BP_MEM_READ, retaddr);
             }
             if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
-                mte_check1(env, mtedesc, addr, retaddr);
+                mte_check(env, mtedesc, addr, retaddr);
             }
             tlb_fn(env, &scratch, reg_off, addr, retaddr);
         }
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
          */
         addr = base + (off_fn(vm, reg_off) << scale);
         if (mtedesc) {
-            mte_check1(env, mtedesc, addr, retaddr);
+            mte_check(env, mtedesc, addr, retaddr);
         }
         tlb_fn(env, vd, reg_off, addr, retaddr);

@@ -XXX,XX +XXX,XX @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
             }

             if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
-                mte_check1(env, mtedesc, addr, retaddr);
+                mte_check(env, mtedesc, addr, retaddr);
             }
         }
         i += 1;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
     tcg_desc = tcg_const_i32(desc);

     ret = new_tmp_a64(s);
-    gen_helper_mte_check1(ret, cpu_env, tcg_desc, addr);
+    gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
     tcg_temp_free_i32(tcg_desc);

     return ret;
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
     tcg_desc = tcg_const_i32(desc);

     ret = new_tmp_a64(s);
-    gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr);
+    gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
     tcg_temp_free_i32(tcg_desc);

     return ret;
--
2.20.1

Implement the security attribute lookups for memory accesses
in the get_phys_addr() functions, causing these to generate
various kinds of SecureFault for bad accesses.

The major subtlety in this code relates to handling of the
case when the security attributes the SAU assigns to the
address don't match the current security state of the CPU.

In the ARM ARM pseudocode for validating instruction
accesses, the security attributes of the address determine
whether the Secure or NonSecure MPU state is used. At face
value, handling this would require us to encode the relevant
bits of state into mmu_idx for both S and NS at once, which
would result in our needing 16 mmu indexes. Fortunately we
don't actually need to do this because a mismatch between
address attributes and CPU state means either:
 * some kind of fault (usually a SecureFault, but in theory
   perhaps a UserFault for unaligned access to Device memory)
 * execution of the SG instruction in NS state from a
   Secure & NonSecure code region

The purpose of SG is simply to flip the CPU into Secure
state, so we can handle it by emulating execution of that
instruction directly in arm_v7m_cpu_do_interrupt(), which
means we can treat all the mismatch cases as "throw an
exception" and we don't need to encode the state of the
other MPU bank into our mmu_idx values.

This commit doesn't include the actual emulation of SG;
it also doesn't include implementation of the IDAU, which
is a per-board way to specify hard-coded memory attributes
for addresses, which override the CPU-internal SAU if they
specify a more secure setting than the SAU is programmed to.
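
As a worked illustration of the SAU region-matching rule implemented
below (a standalone sketch with invented values; sau_region_matches is
not a real QEMU function):

    /* SAU regions have 32-byte granularity: RBAR supplies the base
     * (low 5 bits ignored) and RLAR the limit (low 5 bits forced to 1).
     */
    static bool sau_region_matches(uint32_t rbar, uint32_t rlar,
                                   uint32_t address)
    {
        uint32_t base = rbar & ~0x1f;
        uint32_t limit = rlar | 0x1f;

        /* RLAR bit 0 is the region enable bit */
        return (rlar & 1) && base <= address && address <= limit;
    }

    /* e.g. rbar = 0x20000000, rlar = 0x2000ffe1 gives an enabled
     * region covering 0x20000000..0x2000ffff; RLAR bit 1 set would
     * mark it Secure & NS-Callable rather than Non-Secure.
     */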

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1506092407-26985-15-git-send-email-peter.maydell@linaro.org
---
 target/arm/internals.h |  15 ++++
 target/arm/helper.c    | 182 ++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 195 insertions(+), 2 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_EXCRET, DCRS, 5, 1)
 FIELD(V7M_EXCRET, S, 6, 1)
 FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
 
+/* We use a few fake FSR values for internal purposes in M profile.
+ * M profile cores don't have A/R format FSRs, but currently our
+ * get_phys_addr() code assumes A/R profile and reports failures via
+ * an A/R format FSR value. We then translate that into the proper
+ * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
+ * Mostly the FSR values we use for this are those defined for v7PMSA,
+ * since we share some of that codepath. A few kinds of fault are
+ * only for M profile and have no A/R equivalent, though, so we have
+ * to pick a value from the reserved range (which we never otherwise
+ * generate) to use for these.
+ * These values will never be visible to the guest.
+ */
+#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
+#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
+
 /*
  * For AArch64, map a given EL to an index in the banked_spsr array.
  * Note that this mapping and the AArch32 mapping defined in bank_number()
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                target_ulong *page_size_ptr, uint32_t *fsr,
                                ARMMMUFaultInfo *fi);
 
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+    bool ns;
+    bool nsc;
+    uint8_t sregion;
+    bool srvalid;
+    uint8_t iregion;
+    bool irvalid;
+} V8M_SAttributes;
+
 /* Definitions for the PMCCNTR and PMCR registers */
 #define PMCRD 0x8
 #define PMCRC 0x4
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
      * raises the fault, in the A profile short-descriptor format.
      */
     switch (env->exception.fsr & 0xf) {
+    case M_FAKE_FSR_NSC_EXEC:
+        /* Exception generated when we try to execute code at an address
+         * which is marked as Secure & Non-Secure Callable and the CPU
+         * is in the Non-Secure state. The only instruction which can
+         * be executed like this is SG (and that only if both halves of
+         * the SG instruction have the same security attributes.)
+         * Everything else must generate an INVEP SecureFault, so we
+         * emulate the SG instruction here.
+         * TODO: actually emulate SG.
+         */
+        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        qemu_log_mask(CPU_LOG_INT,
+                      "...really SecureFault with SFSR.INVEP\n");
+        break;
+    case M_FAKE_FSR_SFAULT:
+        /* Various flavours of SecureFault for attempts to execute or
+         * access data in the wrong security state.
+         */
+        switch (cs->exception_index) {
+        case EXCP_PREFETCH_ABORT:
+            if (env->v7m.secure) {
+                env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
+                qemu_log_mask(CPU_LOG_INT,
+                              "...really SecureFault with SFSR.INVTRAN\n");
+            } else {
+                env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+                qemu_log_mask(CPU_LOG_INT,
+                              "...really SecureFault with SFSR.INVEP\n");
+            }
+            break;
+        case EXCP_DATA_ABORT:
+            /* This must be an NS access to S memory */
+            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+            qemu_log_mask(CPU_LOG_INT,
+                          "...really SecureFault with SFSR.AUVIOL\n");
+            break;
+        }
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        break;
     case 0x8: /* External Abort */
         switch (cs->exception_index) {
         case EXCP_PREFETCH_ABORT:
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
     return !(*prot & (1 << access_type));
 }
 
+static bool v8m_is_sau_exempt(CPUARMState *env,
+                              uint32_t address, MMUAccessType access_type)
+{
+    /* The architecture specifies that certain address ranges are
+     * exempt from v8M SAU/IDAU checks.
+     */
+    return
+        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
+        (address >= 0xe0000000 && address <= 0xe0002fff) ||
+        (address >= 0xe000e000 && address <= 0xe000efff) ||
+        (address >= 0xe002e000 && address <= 0xe002efff) ||
+        (address >= 0xe0040000 && address <= 0xe0041fff) ||
+        (address >= 0xe00ff000 && address <= 0xe00fffff);
+}
+
+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                                V8M_SAttributes *sattrs)
+{
+    /* Look up the security attributes for this address. Compare the
+     * pseudocode SecurityCheck() function.
+     * We assume the caller has zero-initialized *sattrs.
+     */
+    ARMCPU *cpu = arm_env_get_cpu(env);
+    int r;
+
+    /* TODO: implement IDAU */
+
+    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
+        /* 0xf0000000..0xffffffff is always S for insn fetches */
+        return;
+    }
+
+    if (v8m_is_sau_exempt(env, address, access_type)) {
+        sattrs->ns = !regime_is_secure(env, mmu_idx);
+        return;
+    }
+
+    switch (env->sau.ctrl & 3) {
+    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
+        break;
+    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
+        sattrs->ns = true;
+        break;
+    default: /* SAU.ENABLE == 1 */
+        for (r = 0; r < cpu->sau_sregion; r++) {
+            if (env->sau.rlar[r] & 1) {
+                uint32_t base = env->sau.rbar[r] & ~0x1f;
+                uint32_t limit = env->sau.rlar[r] | 0x1f;
+
+                if (base <= address && limit >= address) {
+                    if (sattrs->srvalid) {
+                        /* If we hit in more than one region then we must report
+                         * as Secure, not NS-Callable, with no valid region
+                         * number info.
+                         */
+                        sattrs->ns = false;
+                        sattrs->nsc = false;
+                        sattrs->sregion = 0;
+                        sattrs->srvalid = false;
+                        break;
+                    } else {
+                        if (env->sau.rlar[r] & 2) {
+                            sattrs->nsc = true;
+                        } else {
+                            sattrs->ns = true;
+                        }
+                        sattrs->srvalid = true;
+                        sattrs->sregion = r;
+                    }
+                }
+            }
+        }
+
+        /* TODO when we support the IDAU then it may override the result here */
+        break;
+    }
+}
+
 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                                 int *prot, uint32_t *fsr)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
     bool is_user = regime_is_user(env, mmu_idx);
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
     int n;
     int matchregion = -1;
     bool hit = false;
+    V8M_SAttributes sattrs = {};
 
     *phys_ptr = address;
     *prot = 0;
 
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+        if (access_type == MMU_INST_FETCH) {
+            /* Instruction fetches always use the MMU bank and the
+             * transaction attribute determined by the fetch address,
+             * regardless of CPU state. This is painful for QEMU
+             * to handle, because it would mean we need to encode
+             * into the mmu_idx not just the (user, negpri) information
+             * for the current security state but also that for the
+             * other security state, which would balloon the number
+             * of mmu_idx values needed alarmingly.
+             * Fortunately we can avoid this because it's not actually
+             * possible to arbitrarily execute code from memory with
+             * the wrong security attribute: it will always generate
+             * an exception of some kind or another, apart from the
+             * special case of an NS CPU executing an SG instruction
+             * in S&NSC memory. So we always just fail the translation
+             * here and sort things out in the exception handler
+             * (including possibly emulating an SG instruction).
+             */
+            if (sattrs.ns != !secure) {
+                *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT;
+                return true;
+            }
+        } else {
+            /* For data accesses we always use the MMU bank indicated
+             * by the current CPU state, but the security attributes
+             * might downgrade a secure access to nonsecure.
+             */
+            if (sattrs.ns) {
+                txattrs->secure = false;
+            } else if (!secure) {
+                /* NS access to S memory must fault.
+                 * Architecturally we should first check whether the
+                 * MPU information for this address indicates that we
+                 * are doing an unaligned access to Device memory, which
+                 * should generate a UsageFault instead. QEMU does not
+                 * currently check for that kind of unaligned access though.
+                 * If we added it we would need to do so as a special case
+                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
+                 */
+                *fsr = M_FAKE_FSR_SFAULT;
+                return true;
+            }
+        }
+    }
+
     /* Unlike the ARM ARM pseudocode, we don't need to check whether this
      * was an exception vector read from the vector table (which is always
      * done using the default system address map), because those accesses
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
         if (arm_feature(env, ARM_FEATURE_V8)) {
             /* PMSAv8 */
             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
-                                       phys_ptr, prot, fsr);
+                                       phys_ptr, attrs, prot, fsr);
         } else if (arm_feature(env, ARM_FEATURE_V7)) {
             /* PMSAv7 */
             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
--
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

For consistency with the mte_check1 + mte_checkN merge
to mte_check, rename the probe function as well.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h  | 2 +-
 target/arm/mte_helper.c | 6 +++---
 target/arm/sve_helper.c | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, TCMA, 6, 2)
 FIELD(MTEDESC, WRITE, 8, 1)
 FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */
 
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
 uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
 
 static inline int allocation_tag_from_addr(uint64_t ptr)
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
  * exception for inaccessible pages, and resolves the virtual address
  * into the softmmu tlb.
  *
- * When RA == 0, this is for mte_probe1. The page is expected to be
+ * When RA == 0, this is for mte_probe. The page is expected to be
  * valid. Indicate to probe_access_flags no-fault, then assert that
  * we received a valid page.
  */
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 }
 
 /*
- * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
+ * No-fault version of mte_check, to be used by SVE for MemSingleNF.
  * Returns false if the access is Checked and the check failed. This
  * is only intended to probe the tag -- the validity of the page must
  * be checked beforehand.
 */
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
     uint64_t fault;
     int ret = mte_probe_int(env, desc, ptr, 0, &fault);
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
             /* Watchpoint hit, see below. */
             goto do_fault;
         }
-        if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) {
+        if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
             goto do_fault;
         }
         /*
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
                                        & BP_MEM_READ)) {
                 goto do_fault;
             }
-            if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) {
+            if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
                 goto do_fault;
             }
             host_fn(vd, reg_off, host + mem_off);
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                 }
                 if (mtedesc &&
                     arm_tlb_mte_tagged(&info.attrs) &&
-                    !mte_probe1(env, mtedesc, addr)) {
+                    !mte_probe(env, mtedesc, addr)) {
                     goto fault;
                 }
 
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

Now that mte_check1 and mte_checkN have been merged, we can
merge sve_cont_ldst_mte_check1 and sve_cont_ldst_mte_checkN.

Which means that we can eliminate the function pointer into
sve_ldN_r and sve_stN_r, calling sve_cont_ldst_mte_check directly.
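
The shape of the cleanup, as a standalone sketch (types and names are
invented for illustration; this is not the QEMU code):

    typedef void check_fn(int desc);

    static void check_impl(int desc) { /* ... the merged check ... */ }

    /* Before: every caller passes one of two identical callbacks. */
    static void ldst_before(int desc, check_fn *check)
    {
        if (check && desc) {
            check(desc);
        }
    }

    /* After: the callback parameter is gone and the merged
     * implementation is called directly.
     */
    static void ldst_after(int desc)
    {
        if (desc) {
            check_impl(desc);
        }
    }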

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sve_helper.c | 84 +++++++++++++----------------------------
 1 file changed, 26 insertions(+), 58 deletions(-)

diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
 #endif
 }
 
-typedef uint64_t mte_check_fn(CPUARMState *, uint32_t, uint64_t, uintptr_t);
-
-static inline QEMU_ALWAYS_INLINE
-void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
-                                 uint64_t *vg, target_ulong addr, int esize,
-                                 int msize, uint32_t mtedesc, uintptr_t ra,
-                                 mte_check_fn *check)
+static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
+                                    uint64_t *vg, target_ulong addr, int esize,
+                                    int msize, uint32_t mtedesc, uintptr_t ra)
 {
     intptr_t mem_off, reg_off, reg_last;
 
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
         uint64_t pg = vg[reg_off >> 6];
         do {
             if ((pg >> (reg_off & 63)) & 1) {
-                check(env, mtedesc, addr, ra);
+                mte_check(env, mtedesc, addr, ra);
             }
             reg_off += esize;
             mem_off += msize;
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
         uint64_t pg = vg[reg_off >> 6];
         do {
             if ((pg >> (reg_off & 63)) & 1) {
-                check(env, mtedesc, addr, ra);
+                mte_check(env, mtedesc, addr, ra);
             }
             reg_off += esize;
             mem_off += msize;
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
     }
 }
 
-typedef void sve_cont_ldst_mte_check_fn(SVEContLdSt *info, CPUARMState *env,
-                                        uint64_t *vg, target_ulong addr,
-                                        int esize, int msize, uint32_t mtedesc,
-                                        uintptr_t ra);
-
-static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env,
-                                     uint64_t *vg, target_ulong addr,
-                                     int esize, int msize, uint32_t mtedesc,
-                                     uintptr_t ra)
-{
-    sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
-                                mtedesc, ra, mte_check);
-}
-
-static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
-                                     uint64_t *vg, target_ulong addr,
-                                     int esize, int msize, uint32_t mtedesc,
-                                     uintptr_t ra)
-{
-    sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
-                                mtedesc, ra, mte_check);
-}
-
-
 /*
  * Common helper for all contiguous 1,2,3,4-register predicated stores.
  */
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
                uint32_t desc, const uintptr_t retaddr,
                const int esz, const int msz, const int N, uint32_t mtedesc,
                sve_ldst1_host_fn *host_fn,
-               sve_ldst1_tlb_fn *tlb_fn,
-               sve_cont_ldst_mte_check_fn *mte_check_fn)
+               sve_ldst1_tlb_fn *tlb_fn)
 {
     const unsigned rd = simd_data(desc);
     const intptr_t reg_max = simd_oprsz(desc);
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
      * Handle mte checks for all active elements.
      * Since TBI must be set for MTE, !mtedesc => !mte_active.
      */
-    if (mte_check_fn && mtedesc) {
-        mte_check_fn(&info, env, vg, addr, 1 << esz, N << msz,
-                     mtedesc, retaddr);
+    if (mtedesc) {
+        sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
+                                mtedesc, retaddr);
     }
 
     flags = info.page[0].flags | info.page[1].flags;
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
         mtedesc = 0;
     }
 
-    sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn,
-              N == 1 ? sve_cont_ldst_mte_check1 : sve_cont_ldst_mte_checkN);
+    sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
 }
 
 #define DO_LD1_1(NAME, ESZ) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
                             target_ulong addr, uint32_t desc) \
 { \
     sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, 0, \
-              sve_##NAME##_host, sve_##NAME##_tlb, NULL); \
+              sve_##NAME##_host, sve_##NAME##_tlb); \
 } \
 void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \
                                 target_ulong addr, uint32_t desc) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \
                                target_ulong addr, uint32_t desc) \
 { \
     sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
-              sve_##NAME##_le_host, sve_##NAME##_le_tlb, NULL); \
+              sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
 } \
 void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \
                                target_ulong addr, uint32_t desc) \
 { \
     sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
-              sve_##NAME##_be_host, sve_##NAME##_be_tlb, NULL); \
+              sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
 } \
 void HELPER(sve_##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
-                                 target_ulong addr, uint32_t desc) \
+                                   target_ulong addr, uint32_t desc) \
 { \
     sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
                   sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
 } \
 void HELPER(sve_##NAME##_be_r_mte)(CPUARMState *env, void *vg, \
-                                 target_ulong addr, uint32_t desc) \
+                                   target_ulong addr, uint32_t desc) \
 { \
     sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
                   sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \
                              target_ulong addr, uint32_t desc) \
 { \
     sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, 0, \
-              sve_ld1bb_host, sve_ld1bb_tlb, NULL); \
+              sve_ld1bb_host, sve_ld1bb_tlb); \
 } \
 void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \
                                  target_ulong addr, uint32_t desc) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \
                                     target_ulong addr, uint32_t desc) \
 { \
     sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
-              sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb, NULL); \
+              sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
 } \
 void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \
                                     target_ulong addr, uint32_t desc) \
 { \
     sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
-              sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb, NULL); \
+              sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
 } \
 void HELPER(sve_ld##N##SUFF##_le_r_mte)(CPUARMState *env, void *vg, \
                                         target_ulong addr, uint32_t desc) \
@@ -XXX,XX +XXX,XX @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
                uint32_t desc, const uintptr_t retaddr,
                const int esz, const int msz, const int N, uint32_t mtedesc,
                sve_ldst1_host_fn *host_fn,
-               sve_ldst1_tlb_fn *tlb_fn,
-               sve_cont_ldst_mte_check_fn *mte_check_fn)
+               sve_ldst1_tlb_fn *tlb_fn)
 {
     const unsigned rd = simd_data(desc);
     const intptr_t reg_max = simd_oprsz(desc);
@@ -XXX,XX +XXX,XX @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
      * Handle mte checks for all active elements.
      * Since TBI must be set for MTE, !mtedesc => !mte_active.
      */
-    if (mte_check_fn && mtedesc) {
-        mte_check_fn(&info, env, vg, addr, 1 << esz, N << msz,
-                     mtedesc, retaddr);
+    if (mtedesc) {
+        sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
+                                mtedesc, retaddr);
     }
 
     flags = info.page[0].flags | info.page[1].flags;
@@ -XXX,XX +XXX,XX @@ void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
         mtedesc = 0;
     }
 
-    sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn,
-              N == 1 ? sve_cont_ldst_mte_check1 : sve_cont_ldst_mte_checkN);
+    sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
 }
 
 #define DO_STN_1(N, NAME, ESZ) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_st##N##NAME##_r)(CPUARMState *env, void *vg, \
                                  target_ulong addr, uint32_t desc) \
 { \
     sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, 0, \
-              sve_st1##NAME##_host, sve_st1##NAME##_tlb, NULL); \
+              sve_st1##NAME##_host, sve_st1##NAME##_tlb); \
 } \
 void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \
                                      target_ulong addr, uint32_t desc) \
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_st##N##NAME##_le_r)(CPUARMState *env, void *vg, \
                                     target_ulong addr, uint32_t desc) \
 { \
     sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
-              sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb, NULL); \
+              sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \
 } \
 void HELPER(sve_st##N##NAME##_be_r)(CPUARMState *env, void *vg, \
                                     target_ulong addr, uint32_t desc) \
 { \
     sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
-              sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb, NULL); \
+              sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \
 } \
 void HELPER(sve_st##N##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
                                         target_ulong addr, uint32_t desc) \
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

The log2_esize parameter is not used except trivially.
Drop the parameter and the deferral to gen_mte_check1.

This fixes a bug in that the parameters as documented
in the header file were the reverse from those in the
implementation. Which meant that translate-sve.c was
passing the parameters in the wrong order.
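
A standalone sketch of the failure mode (span_bytes is invented for
illustration): parameter names are not part of a C function's type, so
a declaration and a definition that list two same-typed parameters in
opposite meaning-order still compile cleanly, and every caller written
against the header passes its arguments swapped:

    /* header: documented as (log2_esize, total_size) */
    int span_bytes(int log2_esize, int total_size);

    /* implementation: actually treats them as (total_size, log2_esize) */
    int span_bytes(int total_size, int log2_esize)
    {
        return total_size - (1 << log2_esize);
    }

    /* A caller following the header writes span_bytes(3, 16) expecting
     * 16 - 8 = 8, but the implementation computes 3 - (1 << 16).
     */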

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h |  2 +-
 target/arm/translate-a64.c | 15 +++++++--------
 target/arm/translate-sve.c |  4 ++--
 3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
 TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                         bool tag_checked, int log2_size);
 TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
-                        bool tag_checked, int count, int log2_esize);
+                        bool tag_checked, int size);
 
 /* We should have at some point before trying to access an FP register
  * done the necessary access check, so assert that
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
  * For MTE, check multiple logical sequential accesses.
  */
 TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
-                        bool tag_checked, int log2_esize, int total_size)
+                        bool tag_checked, int size)
 {
-    if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) {
+    if (tag_checked && s->mte_active[0]) {
         TCGv_i32 tcg_desc;
         TCGv_i64 ret;
         int desc = 0;
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
+        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
         tcg_desc = tcg_const_i32(desc);
 
         ret = new_tmp_a64(s);
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
 
         return ret;
     }
-    return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize);
+    return clean_data_tbi(s, addr);
 }
 
 typedef struct DisasCompare64 {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
     }
 
     clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
-                                (wback || rn != 31) && !set_tag,
-                                size, 2 << size);
+                                (wback || rn != 31) && !set_tag, 2 << size);
 
     if (is_vector) {
         if (is_load) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
      * promote consecutive little-endian elements below.
      */
     clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
-                                size, total);
+                                total);
 
     /*
      * Consecutive little-endian elements from a single register
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     tcg_rn = cpu_reg_sp(s, rn);
 
     clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
-                                scale, total);
+                                total);
 
     tcg_ebytes = tcg_const_i64(1 << scale);
     for (xs = 0; xs < selem; xs++) {
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 
     dirty_addr = tcg_temp_new_i64();
     tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
-    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
+    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
     tcg_temp_free_i64(dirty_addr);
 
     /*
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 
     dirty_addr = tcg_temp_new_i64();
     tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
-    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
+    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
     tcg_temp_free_i64(dirty_addr);
 
     /* Note that unpredicated load/store of vector/predicate registers
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

The encoding of size = 2 and size = 3 had the incorrect decode
for align, overlapping the stride field. This error was hidden
by what should have been unnecessary masking in translate.
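
As a worked example (reading the decode lines below; treat the exact
bit positions as illustrative): for the size=2 pattern the four bits
between n:2 and rm:4 are reg_idx (bit 7), the stride-select bit picked
up by %imm1_6_p1 (bit 6), and a 2-bit align field (bits 5:4). With the
old "align:3" decode, an insn with the stride bit set and a true
alignment of 0 decoded as align = 0b100 = 4; translate's "align & 3"
then masked that back to 0, which is why the overlap went unnoticed.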

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/neon-ls.decode       | 4 ++--
 target/arm/translate-neon.c.inc | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-ls.decode
+++ b/target/arm/neon-ls.decode
@@ -XXX,XX +XXX,XX @@ VLD_all_lanes   1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
 
 VLDST_single    1111 0100 1 . l:1 0 rn:4 .... 00 n:2 reg_idx:3 align:1 rm:4 \
                 vd=%vd_dp size=0 stride=1
-VLDST_single    1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 align:2 rm:4 \
+VLDST_single    1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 . align:1 rm:4 \
                 vd=%vd_dp size=1 stride=%imm1_5_p1
-VLDST_single    1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 align:3 rm:4 \
+VLDST_single    1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 . align:2 rm:4 \
                 vd=%vd_dp size=2 stride=%imm1_6_p1
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
     switch (nregs) {
     case 1:
         if (((a->align & (1 << a->size)) != 0) ||
-            (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
+            (a->size == 2 && (a->align == 1 || a->align == 2))) {
             return false;
         }
         break;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
         }
         break;
     case 4:
-        if ((a->size == 2) && ((a->align & 3) == 3)) {
+        if (a->size == 2 && a->align == 3) {
             return false;
         }
         break;
--
2.20.1
In the v8M architecture, return from an exception to a PC which
has bit 0 set is not UNPREDICTABLE; it is defined that bit 0
is discarded [R_HRJH]. Restrict our complaint about this to v7M.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1506092407-26985-9-git-send-email-peter.maydell@linaro.org
---
 target/arm/helper.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
     env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
     env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
+
+    /* Returning from an exception with a PC with bit 0 set is defined
+     * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
+     * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
+     * the lsbit, and there are several RTOSes out there which incorrectly
+     * assume the r15 in the stack frame should be a Thumb-style "lsbit
+     * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
+     * complain about the badly behaved guest.
+     */
     if (env->regs[15] & 1) {
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "M profile return from interrupt with misaligned "
-                      "PC is UNPREDICTABLE\n");
-        /* Actual hardware seems to ignore the lsbit, and there are several
-         * RTOSes out there which incorrectly assume the r15 in the stack
-         * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
-         */
         env->regs[15] &= ~1U;
+        if (!arm_feature(env, ARM_FEATURE_V8)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "M profile return from interrupt with misaligned "
+                          "PC is UNPREDICTABLE on v7M\n");
+        }
     }
+
     xpsr = ldl_phys(cs->as, frameptr + 0x1c);
 
     if (arm_feature(env, ARM_FEATURE_V8)) {
--
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

We're about to rearrange the macro expansion surrounding tbflags,
and this field name will be expanded using the bit definition of
the same name, resulting in a token pasting error.

So SCTLR_B -> SCTLR__B in the 3 uses, and document it.
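
The hazard can be reproduced standalone (simplified, invented macro
names -- not the actual cpu.h definitions):

    #include <stdio.h>

    #define SCTLR_B (1u << 7)   /* pre-existing bit definition */

    #define R_TBFLAG_A32_SCTLR__B_SHIFT 15
    /* FIELD()-style lookup that pastes register and field names: */
    #define FIELD_SHIFT(reg, field) R_ ## reg ## _ ## field ## _SHIFT
    /* One level of indirection, as in the DP_TBFLAG_* helpers to come;
     * 'field' is not adjacent to ## here, so it is macro-expanded first:
     */
    #define DP_SHIFT(reg, field) FIELD_SHIFT(reg, field)

    int main(void)
    {
        /* DP_SHIFT(TBFLAG_A32, SCTLR_B) would expand SCTLR_B to
         * (1u << 7) before pasting and fail to compile; the renamed
         * field has no clashing definition:
         */
        printf("%d\n", DP_SHIFT(TBFLAG_A32, SCTLR__B));
        return 0;
    }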

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h       | 2 +-
 target/arm/helper.c    | 2 +-
 target/arm/translate.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */
  */
 FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
 FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */
-FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
+FIELD(TBFLAG_A32, SCTLR__B, 15, 1) /* Cannot overlap with SCTLR_B */
 FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
 /*
  * Indicates whether cp register reads and writes by guest code should access
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
     bool sctlr_b = arm_sctlr_b(env);
 
     if (sctlr_b) {
-        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
+        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR__B, 1);
     }
     if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
         flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
         FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
     dc->debug_target_el =
         FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
-    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
+    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR__B);
     dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
     dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
     dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
--
2.20.1
Attempting to do an exception return with an exception frame that
is not 8-aligned is UNPREDICTABLE in v8M; warn about this.
(It is not UNPREDICTABLE in v7M, and our implementation can
handle the merely-4-aligned case fine, so we don't need to
do anything except warn.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1506092407-26985-8-git-send-email-peter.maydell@linaro.org
---
 target/arm/helper.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
                                                 return_to_sp_process);
     uint32_t frameptr = *frame_sp_p;
 
+    if (!QEMU_IS_ALIGNED(frameptr, 8) &&
+        arm_feature(env, ARM_FEATURE_V8)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "M profile exception return with non-8-aligned SP "
+                      "for destination state is UNPREDICTABLE\n");
+    }
+
     /* Pop registers. TODO: make these accesses use the correct
      * attributes and address space (S/NS, priv/unpriv) and handle
      * memory transaction failures.
--
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

We're about to rearrange the macro expansion surrounding tbflags,
and this field name will be expanded using the bit definition of
the same name, resulting in a token pasting error.

So PSTATE_SS -> PSTATE__SS in the uses, and document it.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h           | 2 +-
 target/arm/helper.c        | 4 ++--
 target/arm/translate-a64.c | 2 +-
 target/arm/translate.c     | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
  */
 FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
 FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
-FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1)     /* Not cached. */
+FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1)    /* Not cached. */
 FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
 FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
 /* Target EL if we take a floating-point-disabled exception */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
      *     0 x Inactive (the TB flag for SS is always 0)
      *     1 0 Active-pending
      *     1 1 Active-not-pending
-     * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
+     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
      */
     if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
         (env->pstate & PSTATE_SS)) {
-        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
+        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1);
     }
 
     *pflags = flags;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
      *   end the TB
      */
     dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
-    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
+    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
     dc->is_ldex = false;
     dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
 
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
      *   end the TB
      */
     dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
-    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
+    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
     dc->is_ldex = false;
 
     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
--
2.20.1
Now that we can handle the CONTROL.SPSEL bit not necessarily being
in sync with the current stack pointer, we can restore the correct
security state on exception return. This happens before we start
to read registers off the stack frame, but after we have taken
possible usage faults for bad exception return magic values and
updated CONTROL.SPSEL.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1506092407-26985-5-git-send-email-peter.maydell@linaro.org
---
 target/arm/helper.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
      */
     write_v7m_control_spsel(env, return_to_sp_process);
 
+    switch_v7m_security_state(env, return_to_secure);
+
     {
         /* The stack pointer we should be reading the exception frame from
          * depends on bits in the magic exception return type value (and
--
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

We're about to split tbflags into two parts. These macros
will ensure that the correct part is used with the correct
set of bits.
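
As a usage sketch of the helpers added below (the values are
placeholders; this is not code from the patch):

    uint32_t flags = 0;
    int core_mmu_idx = 0;                        /* stand-in value */

    DP_TBFLAG_A32(flags, VFPEN, 1);              /* deposit an A32 flag */
    DP_TBFLAG_ANY(flags, MMUIDX, core_mmu_idx);  /* deposit a shared flag */

    bool vfpen = EX_TBFLAG_A32(flags, VFPEN);    /* extract it again */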
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210419202257.161730-5-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 1506092407-26985-5-git-send-email-peter.maydell@linaro.org
11
---
11
---
12
target/arm/helper.c | 2 ++
12
target/arm/cpu.h | 22 +++++++++-
13
1 file changed, 2 insertions(+)
13
target/arm/helper-a64.c | 2 +-
14
target/arm/helper.c | 85 +++++++++++++++++---------------------
15
target/arm/translate-a64.c | 36 ++++++++--------
16
target/arm/translate.c | 48 ++++++++++-----------
17
5 files changed, 101 insertions(+), 92 deletions(-)
14
18
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
23
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, TCMA, 16, 2)
24
FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
25
FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
26
27
+/*
28
+ * Helpers for using the above.
29
+ */
30
+#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
31
+ (DST = FIELD_DP32(DST, TBFLAG_ANY, WHICH, VAL))
32
+#define DP_TBFLAG_A64(DST, WHICH, VAL) \
33
+ (DST = FIELD_DP32(DST, TBFLAG_A64, WHICH, VAL))
34
+#define DP_TBFLAG_A32(DST, WHICH, VAL) \
35
+ (DST = FIELD_DP32(DST, TBFLAG_A32, WHICH, VAL))
36
+#define DP_TBFLAG_M32(DST, WHICH, VAL) \
37
+ (DST = FIELD_DP32(DST, TBFLAG_M32, WHICH, VAL))
38
+#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
39
+ (DST = FIELD_DP32(DST, TBFLAG_AM32, WHICH, VAL))
40
+
41
+#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN, TBFLAG_ANY, WHICH)
42
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN, TBFLAG_A64, WHICH)
43
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN, TBFLAG_A32, WHICH)
44
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN, TBFLAG_M32, WHICH)
45
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN, TBFLAG_AM32, WHICH)
46
+
47
/**
48
* cpu_mmu_index:
49
* @env: The cpu environment
50
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
51
*/
52
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
53
{
54
- return FIELD_EX32(env->hflags, TBFLAG_ANY, MMUIDX);
55
+ return EX_TBFLAG_ANY(env->hflags, MMUIDX);
56
}
57
58
static inline bool bswap_code(bool sctlr_b)
59
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/arm/helper-a64.c
62
+++ b/target/arm/helper-a64.c
63
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
64
* the hflags rebuild, since we can pull the composite TBII field
65
* from there.
66
*/
67
- tbii = FIELD_EX32(env->hflags, TBFLAG_A64, TBII);
68
+ tbii = EX_TBFLAG_A64(env->hflags, TBII);
69
if ((tbii >> extract64(new_pc, 55, 1)) & 1) {
70
/* TBI is enabled. */
71
int core_mmu_idx = cpu_mmu_index(env, false);
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
72
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
74
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
75
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
76
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
77
static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
78
ARMMMUIdx mmu_idx, uint32_t flags)
79
{
80
- flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
81
- flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
82
- arm_to_core_mmu_idx(mmu_idx));
83
+ DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
84
+ DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
85
86
if (arm_singlestep_active(env)) {
87
- flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
88
+ DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
89
}
90
return flags;
91
}
92
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
93
bool sctlr_b = arm_sctlr_b(env);
94
95
if (sctlr_b) {
96
- flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR__B, 1);
97
+ DP_TBFLAG_A32(flags, SCTLR__B, 1);
98
}
99
if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
100
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
101
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
102
}
103
- flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
104
+ DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
105
106
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
107
}
108
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
109
uint32_t flags = 0;
110
111
if (arm_v7m_is_handler_mode(env)) {
112
- flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
113
+ DP_TBFLAG_M32(flags, HANDLER, 1);
114
}
115
116
/*
117
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
118
if (arm_feature(env, ARM_FEATURE_V8) &&
119
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
120
(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
121
- flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
122
+ DP_TBFLAG_M32(flags, STACKCHECK, 1);
123
}
124
125
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
126
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
127
{
128
int flags = 0;
129
130
- flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
131
- arm_debug_target_el(env));
132
+ DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
133
return flags;
134
}
135
136
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
137
uint32_t flags = rebuild_hflags_aprofile(env);
138
139
if (arm_el_is_aa64(env, 1)) {
140
- flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
141
+ DP_TBFLAG_A32(flags, VFPEN, 1);
142
}
143
144
if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
145
(arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
146
- flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
147
+ DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
148
}
149
150
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
151
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
152
uint64_t sctlr;
153
int tbii, tbid;
154
155
- flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
156
+ DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
157
158
/* Get control bits for tagged addresses. */
159
tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
160
tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
161
162
- flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
163
- flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
164
+ DP_TBFLAG_A64(flags, TBII, tbii);
165
+ DP_TBFLAG_A64(flags, TBID, tbid);
166
167
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
168
int sve_el = sve_exception_el(env, el);
169
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
170
} else {
171
zcr_len = sve_zcr_len_for_el(env, el);
172
}
173
- flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
174
- flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
175
+ DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
176
+ DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
177
}
178
179
sctlr = regime_sctlr(env, stage1);
180
181
if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
182
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
183
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
184
}
185
186
if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
187
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
188
* The decision of which action to take is left to a helper.
189
*/
190
if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
191
- flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
192
+ DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
193
}
194
}
195
196
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
197
/* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
198
if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
199
- flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
200
+ DP_TBFLAG_A64(flags, BT, 1);
201
}
202
}
203
204
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
205
case ARMMMUIdx_SE10_1:
206
case ARMMMUIdx_SE10_1_PAN:
207
/* TODO: ARMv8.3-NV */
208
- flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
209
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
210
break;
211
case ARMMMUIdx_E20_2:
212
case ARMMMUIdx_E20_2_PAN:
213
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
214
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
215
*/
216
if (env->cp15.hcr_el2 & HCR_TGE) {
217
- flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
218
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
219
}
220
break;
221
default:
222
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
223
* 4) If no Allocation Tag Access, then all accesses are Unchecked.
224
*/
225
if (allocation_tag_access_enabled(env, el, sctlr)) {
226
- flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1);
227
+ DP_TBFLAG_A64(flags, ATA, 1);
228
if (tbid
229
&& !(env->pstate & PSTATE_TCO)
230
&& (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
231
- flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1);
232
+ DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
233
}
234
}
235
/* And again for unprivileged accesses, if required. */
236
- if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV)
237
+ if (EX_TBFLAG_A64(flags, UNPRIV)
238
&& tbid
239
&& !(env->pstate & PSTATE_TCO)
240
&& (sctlr & SCTLR_TCF0)
241
&& allocation_tag_access_enabled(env, 0, sctlr)) {
242
- flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1);
243
+ DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
244
}
245
/* Cache TCMA as well as TBI. */
246
- flags = FIELD_DP32(flags, TBFLAG_A64, TCMA,
247
- aa64_va_parameter_tcma(tcr, mmu_idx));
248
+ DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
249
}
250
251
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
252
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
253
*cs_base = 0;
254
assert_hflags_rebuild_correctly(env);
255
256
- if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
257
+ if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
258
*pc = env->pc;
259
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
260
- flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
261
+ DP_TBFLAG_A64(flags, BTYPE, env->btype);
262
}
263
} else {
264
*pc = env->regs[15];
265
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
266
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
267
FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
268
!= env->v7m.secure) {
269
- flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
270
+ DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
271
}
272
273
if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
274
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
275
* active FP context; we must create a new FP context before
276
* executing any FP insn.
277
*/
278
- flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
279
+ DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
280
}
281
282
bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
283
if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
284
- flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
285
+ DP_TBFLAG_M32(flags, LSPACT, 1);
286
}
287
} else {
288
/*
289
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
290
* Note that VECLEN+VECSTRIDE are RES0 for M-profile.
291
*/
292
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
293
- flags = FIELD_DP32(flags, TBFLAG_A32,
294
- XSCALE_CPAR, env->cp15.c15_cpar);
295
+ DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
296
} else {
297
- flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
298
- env->vfp.vec_len);
299
- flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
300
- env->vfp.vec_stride);
301
+ DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
302
+ DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
303
}
304
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
305
- flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
306
+ DP_TBFLAG_A32(flags, VFPEN, 1);
307
}
308
}
309
310
- flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
311
- flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
312
+ DP_TBFLAG_AM32(flags, THUMB, env->thumb);
313
+ DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
314
}
315
316
/*
317
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
318
* 1 1 Active-not-pending
319
* SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
20
*/
320
*/
21
write_v7m_control_spsel(env, return_to_sp_process);
321
- if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
22
322
- (env->pstate & PSTATE_SS)) {
23
+ switch_v7m_security_state(env, return_to_secure);
323
- flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1);
24
+
324
+ if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
25
{
325
+ DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
26
/* The stack pointer we should be reading the exception frame from
326
}
27
* depends on bits in the magic exception return type value (and
327
328
*pflags = flags;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
!arm_el_is_aa64(env, 3);
dc->thumb = 0;
dc->sctlr_b = 0;
- dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
+ dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
dc->condexec_mask = 0;
dc->condexec_cond = 0;
- core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
+ core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
- dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII);
- dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID);
- dc->tcma = FIELD_EX32(tb_flags, TBFLAG_A64, TCMA);
+ dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
+ dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
+ dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
#endif
- dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
- dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
- dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
- dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
- dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
- dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
- dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV);
- dc->ata = FIELD_EX32(tb_flags, TBFLAG_A64, ATA);
- dc->mte_active[0] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE_ACTIVE);
- dc->mte_active[1] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE0_ACTIVE);
+ dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
+ dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
+ dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
+ dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
+ dc->bt = EX_TBFLAG_A64(tb_flags, BT);
+ dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
+ dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
+ dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
+ dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
+ dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = arm_cpu->cp_regs;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
* emit code to generate a software step exception
* end the TB
*/
- dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
+ dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
+ dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
dc->is_ldex = false;
- dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+ dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);

/* Bound the number of insns to execute to those left on the page. */
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
*/
dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
!arm_el_is_aa64(env, 3);
- dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
- dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
- condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
+ dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
+ dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
+ condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC);
dc->condexec_mask = (condexec & 0xf) << 1;
dc->condexec_cond = condexec >> 4;

- core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
+ core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
#endif
- dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
+ dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);

if (arm_feature(env, ARM_FEATURE_M)) {
dc->vfp_enabled = 1;
dc->be_data = MO_TE;
- dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
+ dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
regime_is_secure(env, dc->mmu_idx);
- dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
- dc->v8m_fpccr_s_wrong =
- FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
+ dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
+ dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
dc->v7m_new_fp_ctxt_needed =
- FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
- dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
+ EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
+ dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
} else {
- dc->be_data =
- FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
- dc->debug_target_el =
- FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
- dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR__B);
- dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
- dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
- dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
+ dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
+ dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
+ dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE);
+ dc->ns = EX_TBFLAG_A32(tb_flags, NS);
+ dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN);
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
+ dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR);
} else {
- dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
- dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
+ dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
+ dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
}
}
dc->cp_regs = cpu->cp_regs;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
* emit code to generate a software step exception
* end the TB
*/
- dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
+ dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
+ dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
dc->is_ldex = false;

dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;

- if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
+ if (EX_TBFLAG_AM32(tb->flags, THUMB)) {
ops = &thumb_translator_ops;
}
#ifdef TARGET_AARCH64
- if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
+ if (EX_TBFLAG_ANY(tb->flags, AARCH64_STATE)) {
ops = &aarch64_translator_ops;
}
#endif
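
The conversion above is mechanical, but worth seeing side by side. A minimal
sketch of the idiom (illustrative, not a quote from the tree), using the real
LSPACT field:

    uint32_t flags = 0;
    /* old style: the call site names the field group and reassigns the word */
    flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
    /* new style: the wrapper macro hides both */
    DP_TBFLAG_M32(flags, LSPACT, 1);
    /* extraction mirrors it */
    int lspact = EX_TBFLAG_M32(flags, LSPACT);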
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

In preparation for splitting tb->flags across multiple
fields, introduce a structure to hold the value(s).
So far this only migrates the one uint32_t and fixes
all of the places that require adjustment to match.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 26 ++++++++++++---------
target/arm/translate.h | 11 +++++++++
target/arm/helper.c | 48 +++++++++++++++++++++-----------------
target/arm/translate-a64.c | 2 +-
target/arm/translate.c | 7 +++---
5 files changed, 57 insertions(+), 37 deletions(-)

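Before the diff, a sketch of what the wrapper struct buys (illustrative only;
the values here are made up). The flags word now travels inside a struct, so
it can no longer be confused with an arbitrary uint32_t, while the DP_/EX_
macros keep every call site unchanged:

    CPUARMTBFlags flags = {};                /* zero-initialised, as the patch does */
    DP_TBFLAG_ANY(flags, MMUIDX, 3);         /* deposit a field */
    int idx = EX_TBFLAG_ANY(flags, MMUIDX);  /* read it back: 3 */
    uint32_t raw = flags.flags;              /* only the TB boundary unwraps it */
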
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct ARMPACKey {
} ARMPACKey;
#endif

+/* See the commentary above the TBFLAG field definitions. */
+typedef struct CPUARMTBFlags {
+ uint32_t flags;
+} CPUARMTBFlags;

typedef struct CPUARMState {
/* Regs for current mode. */
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

/* Cached TBFLAGS state. See below for which bits are included. */
- uint32_t hflags;
+ CPUARMTBFlags hflags;

/* Frequently accessed CPSR bits are stored separately for efficiency.
This contains all the other bits. Use cpsr_{read,write} to access
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
* Helpers for using the above.
*/
#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
- (DST = FIELD_DP32(DST, TBFLAG_ANY, WHICH, VAL))
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
#define DP_TBFLAG_A64(DST, WHICH, VAL) \
- (DST = FIELD_DP32(DST, TBFLAG_A64, WHICH, VAL))
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A64, WHICH, VAL))
#define DP_TBFLAG_A32(DST, WHICH, VAL) \
- (DST = FIELD_DP32(DST, TBFLAG_A32, WHICH, VAL))
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A32, WHICH, VAL))
#define DP_TBFLAG_M32(DST, WHICH, VAL) \
- (DST = FIELD_DP32(DST, TBFLAG_M32, WHICH, VAL))
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_M32, WHICH, VAL))
#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
- (DST = FIELD_DP32(DST, TBFLAG_AM32, WHICH, VAL))
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_AM32, WHICH, VAL))

-#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN, TBFLAG_ANY, WHICH)
-#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN, TBFLAG_A64, WHICH)
-#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN, TBFLAG_A32, WHICH)
-#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN, TBFLAG_M32, WHICH)
-#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN, TBFLAG_AM32, WHICH)
+#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A64, WHICH)
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A32, WHICH)
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_M32, WHICH)
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_AM32, WHICH)

/**
* cpu_mmu_index:
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);

+/**
+ * arm_tbflags_from_tb:
+ * @tb: the TranslationBlock
+ *
+ * Extract the flag values from @tb.
+ */
+static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
+{
+ return (CPUARMTBFlags){ tb->flags };
+}
+
/*
* Enum for argument to fpstatus_ptr().
*/
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
}
#endif

-static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
- ARMMMUIdx mmu_idx, uint32_t flags)
+static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx,
+ CPUARMTBFlags flags)
{
DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
return flags;
}

-static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
- ARMMMUIdx mmu_idx, uint32_t flags)
+static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx,
+ CPUARMTBFlags flags)
{
bool sctlr_b = arm_sctlr_b(env);

@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

-static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
- ARMMMUIdx mmu_idx)
+static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
{
- uint32_t flags = 0;
+ CPUARMTBFlags flags = {};

if (arm_v7m_is_handler_mode(env)) {
DP_TBFLAG_M32(flags, HANDLER, 1);
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

-static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
+static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
{
- int flags = 0;
+ CPUARMTBFlags flags = {};

DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
return flags;
}

-static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
- ARMMMUIdx mmu_idx)
+static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
{
- uint32_t flags = rebuild_hflags_aprofile(env);
+ CPUARMTBFlags flags = rebuild_hflags_aprofile(env);

if (arm_el_is_aa64(env, 1)) {
DP_TBFLAG_A32(flags, VFPEN, 1);
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

-static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
- ARMMMUIdx mmu_idx)
+static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
+ ARMMMUIdx mmu_idx)
{
- uint32_t flags = rebuild_hflags_aprofile(env);
+ CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
uint64_t sctlr;
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

-static uint32_t rebuild_hflags_internal(CPUARMState *env)
+static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
int el = arm_current_el(env);
int fp_el = fp_exception_el(env, el);
@@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
int el = arm_current_el(env);
int fp_el = fp_exception_el(env, el);
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

@@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
- uint32_t env_flags_current = env->hflags;
- uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);
+ CPUARMTBFlags c = env->hflags;
+ CPUARMTBFlags r = rebuild_hflags_internal(env);

- if (unlikely(env_flags_current != env_flags_rebuilt)) {
+ if (unlikely(c.flags != r.flags)) {
fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
- env_flags_current, env_flags_rebuilt);
+ c.flags, r.flags);
abort();
}
#endif
@@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *pflags)
{
- uint32_t flags = env->hflags;
+ CPUARMTBFlags flags;

*cs_base = 0;
assert_hflags_rebuild_correctly(env);
+ flags = env->hflags;

if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
*pc = env->pc;
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
}

- *pflags = flags;
+ *pflags = flags.flags;
}

#ifdef TARGET_AARCH64
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUARMState *env = cpu->env_ptr;
ARMCPU *arm_cpu = env_archcpu(env);
- uint32_t tb_flags = dc->base.tb->flags;
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
int bound, core_mmu_idx;

dc->isar = &arm_cpu->isar;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUARMState *env = cs->env_ptr;
ARMCPU *cpu = env_archcpu(env);
- uint32_t tb_flags = dc->base.tb->flags;
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
uint32_t condexec, core_mmu_idx;

dc->isar = &cpu->isar;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb);

- if (EX_TBFLAG_AM32(tb->flags, THUMB)) {
+ if (EX_TBFLAG_AM32(tb_flags, THUMB)) {
ops = &thumb_translator_ops;
}
#ifdef TARGET_AARCH64
- if (EX_TBFLAG_ANY(tb->flags, AARCH64_STATE)) {
+ if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) {
ops = &aarch64_translator_ops;
}
#endif
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Now that we have all of the proper macros defined, expanding
the CPUARMTBFlags structure and populating the two TB fields
is relatively simple.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 49 ++++++++++++++++++++++------------------
target/arm/translate.h | 2 +-
target/arm/helper.c | 10 +++++----
3 files changed, 35 insertions(+), 26 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct ARMPACKey {
/* See the commentary above the TBFLAG field definitions. */
typedef struct CPUARMTBFlags {
uint32_t flags;
+ target_ulong flags2;
} CPUARMTBFlags;

typedef struct CPUARMState {
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
#include "exec/cpu-all.h"

/*
- * Bit usage in the TB flags field: bit 31 indicates whether we are
- * in 32 or 64 bit mode. The meaning of the other bits depends on that.
- * We put flags which are shared between 32 and 64 bit mode at the top
- * of the word, and flags which apply to only one mode at the bottom.
+ * We have more than 32-bits worth of state per TB, so we split the data
+ * between tb->flags and tb->cs_base, which is otherwise unused for ARM.
+ * We collect these two parts in CPUARMTBFlags where they are named
+ * flags and flags2 respectively.
 *
- * 31 20 18 14 9 0
- * +--------------+-----+-----+----------+--------------+
- * | | | TBFLAG_A32 | |
- * | | +-----+----------+ TBFLAG_AM32 |
- * | TBFLAG_ANY | |TBFLAG_M32| |
- * | +-----------+----------+--------------|
- * | | TBFLAG_A64 |
- * +--------------+-------------------------------------+
- * 31 20 0
+ * The flags that are shared between all execution modes, TBFLAG_ANY,
+ * are stored in flags. The flags that are specific to a given mode
+ * are stores in flags2. Since cs_base is sized on the configured
+ * address size, flags2 always has 64-bits for A64, and a minimum of
+ * 32-bits for A32 and M32.
+ *
+ * The bits for 32-bit A-profile and M-profile partially overlap:
+ *
+ * 18 9 0
+ * +----------------+--------------+
+ * | TBFLAG_A32 | |
+ * +-----+----------+ TBFLAG_AM32 |
+ * | |TBFLAG_M32| |
+ * +-----+----------+--------------+
+ * 14 9 0
 *
 * Unless otherwise noted, these bits are cached in env->hflags.
 */
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
(DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
#define DP_TBFLAG_A64(DST, WHICH, VAL) \
- (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A64, WHICH, VAL))
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A64, WHICH, VAL))
#define DP_TBFLAG_A32(DST, WHICH, VAL) \
- (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A32, WHICH, VAL))
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL))
#define DP_TBFLAG_M32(DST, WHICH, VAL) \
- (DST.flags = FIELD_DP32(DST.flags, TBFLAG_M32, WHICH, VAL))
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_M32, WHICH, VAL))
#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
- (DST.flags = FIELD_DP32(DST.flags, TBFLAG_AM32, WHICH, VAL))
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL))

#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
-#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A64, WHICH)
-#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A32, WHICH)
-#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_M32, WHICH)
-#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_AM32, WHICH)
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A64, WHICH)
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH)
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH)
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH)

/**
* cpu_mmu_index:
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
 */
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
{
- return (CPUARMTBFlags){ tb->flags };
+ return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}

/*
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
CPUARMTBFlags c = env->hflags;
CPUARMTBFlags r = rebuild_hflags_internal(env);

- if (unlikely(c.flags != r.flags)) {
- fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
- c.flags, r.flags);
+ if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
+ fprintf(stderr, "TCG hflags mismatch "
+ "(current:(0x%08x,0x" TARGET_FMT_lx ")"
+ " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
+ c.flags, c.flags2, r.flags, r.flags2);
abort();
}
#endif
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
{
CPUARMTBFlags flags;

- *cs_base = 0;
assert_hflags_rebuild_correctly(env);
flags = env->hflags;

@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}

*pflags = flags.flags;
+ *cs_base = flags.flags2;
}

#ifdef TARGET_AARCH64
--
2.20.1

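The net effect of the split above, sketched with the patch's own helpers
(hypothetical usage, not code lifted from the tree): the two words are packed
at TB-build time and reassembled at translation time:

    CPUARMTBFlags f = env->hflags;
    *pflags = f.flags;     /* shared TBFLAG_ANY bits -> tb->flags */
    *cs_base = f.flags2;   /* mode-specific bits -> tb->cs_base */
    /* ... later, in the translator ... */
    CPUARMTBFlags g = arm_tbflags_from_tb(tb);  /* { tb->flags, tb->cs_base } */
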
From: Richard Henderson <richard.henderson@linaro.org>

Now that these bits have been moved out of tb->flags,
where TBFLAG_ANY was filling from the top, move AM32
to fill from the top, and A32 and M32 to fill from the
bottom. This means fewer changes when adding new bits.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
 *
 * The bits for 32-bit A-profile and M-profile partially overlap:
 *
- * 18 9 0
- * +----------------+--------------+
- * | TBFLAG_A32 | |
- * +-----+----------+ TBFLAG_AM32 |
- * | |TBFLAG_M32| |
- * +-----+----------+--------------+
- * 14 9 0
+ * 31 23 11 10 0
+ * +-------------+----------+----------------+
+ * | | | TBFLAG_A32 |
+ * | TBFLAG_AM32 | +-----+----------+
+ * | | |TBFLAG_M32|
+ * +-------------+----------------+----------+
+ * 31 23 5 4 0
 *
 * Unless otherwise noted, these bits are cached in env->hflags.
 */
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
/*
* Bit usage when in AArch32 state, both A- and M-profile.
*/
-FIELD(TBFLAG_AM32, CONDEXEC, 0, 8) /* Not cached. */
-FIELD(TBFLAG_AM32, THUMB, 8, 1) /* Not cached. */
+FIELD(TBFLAG_AM32, CONDEXEC, 24, 8) /* Not cached. */
+FIELD(TBFLAG_AM32, THUMB, 23, 1) /* Not cached. */

/*
* Bit usage when in AArch32 state, for A-profile only.
*/
-FIELD(TBFLAG_A32, VECLEN, 9, 3) /* Not cached. */
-FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */
+FIELD(TBFLAG_A32, VECLEN, 0, 3) /* Not cached. */
+FIELD(TBFLAG_A32, VECSTRIDE, 3, 2) /* Not cached. */
/*
* We store the bottom two bits of the CPAR as TB flags and handle
* checks on the other bits at runtime. This shares the same bits as
* VECSTRIDE, which is OK as no XScale CPU has VFP.
* Not cached, because VECLEN+VECSTRIDE are not cached.
*/
-FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
-FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */
-FIELD(TBFLAG_A32, SCTLR__B, 15, 1) /* Cannot overlap with SCTLR_B */
-FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
+FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
+FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Partially cached, minus FPEXC. */
+FIELD(TBFLAG_A32, SCTLR__B, 8, 1) /* Cannot overlap with SCTLR_B */
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
/*
* Indicates whether cp register reads and writes by guest code should access
* the secure or nonsecure bank of banked registers; note that this is not
* the same thing as the current security state of the processor!
*/
-FIELD(TBFLAG_A32, NS, 17, 1)
+FIELD(TBFLAG_A32, NS, 10, 1)

/*
* Bit usage when in AArch32 state, for M-profile only.
*/
/* Handler (ie not Thread) mode */
-FIELD(TBFLAG_M32, HANDLER, 9, 1)
+FIELD(TBFLAG_M32, HANDLER, 0, 1)
/* Whether we should generate stack-limit checks */
-FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
+FIELD(TBFLAG_M32, STACKCHECK, 1, 1)
/* Set if FPCCR.LSPACT is set */
-FIELD(TBFLAG_M32, LSPACT, 11, 1) /* Not cached. */
+FIELD(TBFLAG_M32, LSPACT, 2, 1) /* Not cached. */
/* Set if we must create a new FP context */
-FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1) /* Not cached. */
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */
/* Set if FPCCR.S does not match current security state */
-FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */

/*
* Bit usage when in AArch64 state
--
2.20.1

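The renumbering above is safe because FIELD() only records a shift/length
pair for FIELD_DP32()/FIELD_EX32() to consume; no migrated state depends on
the layout, since these bits are rebuilt from env. Roughly, a FIELD() line
expands to (simplified sketch of QEMU's registerfields.h):

    /* FIELD(TBFLAG_AM32, THUMB, 23, 1) roughly yields: */
    enum { R_TBFLAG_AM32_THUMB_SHIFT = 23 };
    enum { R_TBFLAG_AM32_THUMB_LENGTH = 1 };
    enum { R_TBFLAG_AM32_THUMB_MASK = MAKE_64BIT_MASK(23, 1) };
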
From: Richard Henderson <richard.henderson@linaro.org>

Now that other bits have been moved out of tb->flags,
there's no point in filling from the top.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
 *
 * Unless otherwise noted, these bits are cached in env->hflags.
 */
-FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
-FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
-FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1) /* Not cached. */
-FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
-FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
+FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1)
+FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1)
+FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1) /* Not cached. */
+FIELD(TBFLAG_ANY, BE_DATA, 3, 1)
+FIELD(TBFLAG_ANY, MMUIDX, 4, 4)

/* Target EL if we take a floating-point-disabled exception */
-FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2)
+FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
/* For A-profile only, target EL for debug exceptions. */
-FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)

/*
* Bit usage when in AArch32 state, both A- and M-profile.
--
2.20.1

44
diff view generated by jsdifflib
1
Implement the register interface for the SAU: SAU_CTRL,
1
From: Richard Henderson <richard.henderson@linaro.org>
2
SAU_TYPE, SAU_RNR, SAU_RBAR and SAU_RLAR. None of the
3
actual behaviour is implemented here; registers just
4
read back as written.
5
2
6
When the CPU definition for Cortex-M33 is eventually
3
Use this to signal when memory access alignment is required.
7
added, its initfn will set cpu->sau_sregion, in the same
4
This value comes from the CCR register for M-profile, and
8
way that we currently set cpu->pmsav7_dregion for the
5
from the SCTLR register for A-profile.
9
M3 and M4.
10
6
11
Number of SAU regions is typically a configurable
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
CPU parameter, but this patch doesn't provide a
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
QEMU CPU property for it. We can easily add one when
9
Message-id: 20210419202257.161730-11-richard.henderson@linaro.org
14
we have a board that requires it.
15
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Message-id: 1506092407-26985-14-git-send-email-peter.maydell@linaro.org
19
---
11
---
20
target/arm/cpu.h | 10 +++++
12
target/arm/cpu.h | 2 ++
21
hw/intc/armv7m_nvic.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++
13
target/arm/translate.h | 2 ++
22
target/arm/cpu.c | 27 ++++++++++++
14
target/arm/helper.c | 19 +++++++++++++++++--
23
target/arm/machine.c | 14 ++++++
15
target/arm/translate-a64.c | 1 +
24
4 files changed, 167 insertions(+)
16
target/arm/translate.c | 7 +++----
17
5 files changed, 25 insertions(+), 6 deletions(-)
25
18
26
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
27
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/cpu.h
21
--- a/target/arm/cpu.h
29
+++ b/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
30
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
23
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
31
uint32_t mair1[M_REG_NUM_BANKS];
24
FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
32
} pmsav8;
25
/* For A-profile only, target EL for debug exceptions. */
33
26
FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
34
+ /* v8M SAU */
27
+/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
35
+ struct {
28
+FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1)
36
+ uint32_t *rbar;
29
37
+ uint32_t *rlar;
30
/*
38
+ uint32_t rnr;
31
* Bit usage when in AArch32 state, both A- and M-profile.
39
+ uint32_t ctrl;
32
diff --git a/target/arm/translate.h b/target/arm/translate.h
40
+ } sau;
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/translate.h
35
+++ b/target/arm/translate.h
36
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
37
bool bt;
38
/* True if any CP15 access is trapped by HSTR_EL2 */
39
bool hstr_active;
40
+ /* True if memory operations require alignment */
41
+ bool align_mem;
42
/*
43
* >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
44
* < 0, set by the current instruction.
45
diff --git a/target/arm/helper.c b/target/arm/helper.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/helper.c
48
+++ b/target/arm/helper.c
49
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
50
ARMMMUIdx mmu_idx)
51
{
52
CPUARMTBFlags flags = {};
53
+ uint32_t ccr = env->v7m.ccr[env->v7m.secure];
41
+
54
+
42
void *nvic;
55
+ /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
43
const struct arm_boot_info *boot_info;
56
+ if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
44
/* Store GICv3CPUState to access from this struct */
57
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
45
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
58
+ }
46
bool has_mpu;
59
47
/* PMSAv7 MPU number of supported regions */
60
if (arm_v7m_is_handler_mode(env)) {
48
uint32_t pmsav7_dregion;
61
DP_TBFLAG_M32(flags, HANDLER, 1);
49
+ /* v8M SAU number of supported regions */
62
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
50
+ uint32_t sau_sregion;
63
*/
51
64
if (arm_feature(env, ARM_FEATURE_V8) &&
52
/* PSCI conduit used to invoke PSCI methods
65
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
53
* 0 - disabled, 1 - smc, 2 - hvc
66
- (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
54
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
67
+ (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
55
index XXXXXXX..XXXXXXX 100644
68
DP_TBFLAG_M32(flags, STACKCHECK, 1);
56
--- a/hw/intc/armv7m_nvic.c
69
}
57
+++ b/hw/intc/armv7m_nvic.c
70
58
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
71
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
59
goto bad_offset;
72
ARMMMUIdx mmu_idx)
60
}
73
{
61
return cpu->env.pmsav8.mair1[attrs.secure];
74
CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
62
+ case 0xdd0: /* SAU_CTRL */
75
+ int el = arm_current_el(env);
63
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
64
+ goto bad_offset;
65
+ }
66
+ if (!attrs.secure) {
67
+ return 0;
68
+ }
69
+ return cpu->env.sau.ctrl;
70
+ case 0xdd4: /* SAU_TYPE */
71
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
72
+ goto bad_offset;
73
+ }
74
+ if (!attrs.secure) {
75
+ return 0;
76
+ }
77
+ return cpu->sau_sregion;
78
+ case 0xdd8: /* SAU_RNR */
79
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
80
+ goto bad_offset;
81
+ }
82
+ if (!attrs.secure) {
83
+ return 0;
84
+ }
85
+ return cpu->env.sau.rnr;
86
+ case 0xddc: /* SAU_RBAR */
87
+ {
88
+ int region = cpu->env.sau.rnr;
89
+
76
+
90
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
77
+ if (arm_sctlr(env, el) & SCTLR_A) {
91
+ goto bad_offset;
78
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
92
+ }
93
+ if (!attrs.secure) {
94
+ return 0;
95
+ }
96
+ if (region >= cpu->sau_sregion) {
97
+ return 0;
98
+ }
99
+ return cpu->env.sau.rbar[region];
100
+ }
79
+ }
101
+ case 0xde0: /* SAU_RLAR */
80
102
+ {
81
if (arm_el_is_aa64(env, 1)) {
103
+ int region = cpu->env.sau.rnr;
82
DP_TBFLAG_A32(flags, VFPEN, 1);
104
+
105
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
106
+ goto bad_offset;
107
+ }
108
+ if (!attrs.secure) {
109
+ return 0;
110
+ }
111
+ if (region >= cpu->sau_sregion) {
112
+ return 0;
113
+ }
114
+ return cpu->env.sau.rlar[region];
115
+ }
116
case 0xde4: /* SFSR */
117
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
118
goto bad_offset;
119
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
120
* only affect cacheability, and we don't implement caching.
121
*/
122
break;
123
+ case 0xdd0: /* SAU_CTRL */
124
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
125
+ goto bad_offset;
126
+ }
127
+ if (!attrs.secure) {
128
+ return;
129
+ }
130
+ cpu->env.sau.ctrl = value & 3;
131
+ case 0xdd4: /* SAU_TYPE */
132
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
133
+ goto bad_offset;
134
+ }
135
+ break;
136
+ case 0xdd8: /* SAU_RNR */
137
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
138
+ goto bad_offset;
139
+ }
140
+ if (!attrs.secure) {
141
+ return;
142
+ }
143
+ if (value >= cpu->sau_sregion) {
144
+ qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
145
+ PRIu32 "/%" PRIu32 "\n",
146
+ value, cpu->sau_sregion);
147
+ } else {
148
+ cpu->env.sau.rnr = value;
149
+ }
150
+ break;
151
+ case 0xddc: /* SAU_RBAR */
152
+ {
153
+ int region = cpu->env.sau.rnr;
154
+
155
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
156
+ goto bad_offset;
157
+ }
158
+ if (!attrs.secure) {
159
+ return;
160
+ }
161
+ if (region >= cpu->sau_sregion) {
162
+ return;
163
+ }
164
+ cpu->env.sau.rbar[region] = value & ~0x1f;
165
+ tlb_flush(CPU(cpu));
166
+ break;
167
+ }
168
+ case 0xde0: /* SAU_RLAR */
169
+ {
170
+ int region = cpu->env.sau.rnr;
171
+
172
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
173
+ goto bad_offset;
174
+ }
175
+ if (!attrs.secure) {
176
+ return;
177
+ }
178
+ if (region >= cpu->sau_sregion) {
179
+ return;
180
+ }
181
+ cpu->env.sau.rlar[region] = value & ~0x1c;
182
+ tlb_flush(CPU(cpu));
183
+ break;
184
+ }
185
case 0xde4: /* SFSR */
186
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
187
goto bad_offset;
188
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
189
index XXXXXXX..XXXXXXX 100644
190
--- a/target/arm/cpu.c
191
+++ b/target/arm/cpu.c
192
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
193
env->pmsav8.mair1[M_REG_S] = 0;
194
}
83
}
195
84
196
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
85
- if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
197
+ if (cpu->sau_sregion > 0) {
86
+ if (el < 2 && env->cp15.hstr_el2 &&
198
+ memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
87
(arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
199
+ memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
88
DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
200
+ }
89
}
201
+ env->sau.rnr = 0;
90
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
202
+ /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
91
203
+ * the Cortex-M33 does.
92
sctlr = regime_sctlr(env, stage1);
204
+ */
93
205
+ env->sau.ctrl = 0;
94
+ if (sctlr & SCTLR_A) {
95
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
206
+ }
96
+ }
207
+
97
+
208
set_flush_to_zero(1, &env->vfp.standard_fp_status);
98
if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
209
set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
99
DP_TBFLAG_ANY(flags, BE_DATA, 1);
210
set_default_nan_mode(1, &env->vfp.standard_fp_status);
211
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
212
}
213
}
100
}
214
101
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
215
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
102
index XXXXXXX..XXXXXXX 100644
216
+ uint32_t nr = cpu->sau_sregion;
103
--- a/target/arm/translate-a64.c
217
+
104
+++ b/target/arm/translate-a64.c
218
+ if (nr > 0xff) {
105
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
219
+ error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
106
dc->user = (dc->current_el == 0);
220
+ return;
107
#endif
221
+ }
108
dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
222
+
109
+ dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
223
+ if (nr) {
110
dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
224
+ env->sau.rbar = g_new0(uint32_t, nr);
111
dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
225
+ env->sau.rlar = g_new0(uint32_t, nr);
112
dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
226
+ }
113
diff --git a/target/arm/translate.c b/target/arm/translate.c
227
+ }
114
index XXXXXXX..XXXXXXX 100644
228
+
115
--- a/target/arm/translate.c
229
if (arm_feature(env, ARM_FEATURE_EL3)) {
116
+++ b/target/arm/translate.c
230
set_feature(env, ARM_FEATURE_VBAR);
117
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
118
{
119
TCGv addr;
120
121
- if (arm_dc_feature(s, ARM_FEATURE_M) &&
122
- !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
123
+ if (s->align_mem) {
124
opc |= MO_ALIGN;
231
}
125
}
232
@@ -XXX,XX +XXX,XX @@ static void cortex_m4_initfn(Object *obj)
126
233
cpu->midr = 0x410fc240; /* r0p0 */
127
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
234
cpu->pmsav7_dregion = 8;
235
}
236
+
237
static void arm_v7m_class_init(ObjectClass *oc, void *data)
238
{
128
{
239
CPUClass *cc = CPU_CLASS(oc);
129
TCGv addr;
240
diff --git a/target/arm/machine.c b/target/arm/machine.c
130
241
index XXXXXXX..XXXXXXX 100644
131
- if (arm_dc_feature(s, ARM_FEATURE_M) &&
242
--- a/target/arm/machine.c
132
- !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
243
+++ b/target/arm/machine.c
133
+ if (s->align_mem) {
244
@@ -XXX,XX +XXX,XX @@ static bool s_rnr_vmstate_validate(void *opaque, int version_id)
134
opc |= MO_ALIGN;
245
return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
246
}
247
248
+static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
249
+{
250
+ ARMCPU *cpu = opaque;
251
+
252
+ return cpu->env.sau.rnr < cpu->sau_sregion;
253
+}
254
+
255
static bool m_security_needed(void *opaque)
256
{
257
ARMCPU *cpu = opaque;
258
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m_security = {
259
VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
260
VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
261
VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
262
+ VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
263
+ vmstate_info_uint32, uint32_t),
264
+ VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
265
+ vmstate_info_uint32, uint32_t),
266
+ VMSTATE_UINT32(env.sau.rnr, ARMCPU),
267
+ VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
268
+ VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
269
VMSTATE_END_OF_LIST()
270
}
135
}
271
};
136
137
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
138
dc->user = (dc->current_el == 0);
139
#endif
140
dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
141
+ dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
142
143
if (arm_feature(env, ARM_FEATURE_M)) {
144
dc->vfp_enabled = 1;
272
--
145
--
273
2.7.4
146
2.20.1
274
147
275
148
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Create a finalize_memop function that computes alignment and
endianness and returns the final MemOp for the operation.

Split out gen_aa32_{ld,st}_internal_i32 which bypasses any special
handling of endianness or alignment.  Adjust gen_aa32_{ld,st}_i32
so that s->be_data is not added by the callers.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
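As a quick illustration of the intended call pattern (a hypothetical
caller for this sketch, not code from the patch):

    static void example_load_word(DisasContext *s, TCGv_i32 val,
                                  TCGv_i32 a32, int index)
    {
        /* finalize_memop() ORs in MO_ALIGN when s->align_mem is set
         * and adds s->be_data, so neither appears at the call site.
         */
        MemOp opc = finalize_memop(s, MO_UL);

        gen_aa32_ld_internal_i32(s, val, a32, index, opc);
    }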
---
 target/arm/translate.h          |  24 ++++++++
 target/arm/translate.c          | 100 +++++++++++++++++---------------
 target/arm/translate-neon.c.inc |   9 +--
 3 files changed, 79 insertions(+), 54 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
     return statusptr;
 }

+/**
+ * finalize_memop:
+ * @s: DisasContext
+ * @opc: size+sign+align of the memory operation
+ *
+ * Build the complete MemOp for a memory operation, including alignment
+ * and endianness.
+ *
+ * If (op & MO_AMASK) then the operation already contains the required
+ * alignment, e.g. for AccType_ATOMIC.  Otherwise, this an optionally
+ * unaligned operation, e.g. for AccType_NORMAL.
+ *
+ * In the latter case, there are configuration bits that require alignment,
+ * and this is applied here.  Note that there is no way to indicate that
+ * no alignment should ever be enforced; this must be handled manually.
+ */
+static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
+{
+    if (s->align_mem && !(opc & MO_AMASK)) {
+        opc |= MO_ALIGN;
+    }
+    return opc | s->be_data;
+}
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
 #define IS_USER_ONLY 0
 #endif

-/* Abstractions of "generate code to do a guest load/store for
+/*
+ * Abstractions of "generate code to do a guest load/store for
  * AArch32", where a vaddr is always 32 bits (and is zero
  * extended if we're a 64 bit core) and data is also
  * 32 bits unless specifically doing a 64 bit access.
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
  * that the address argument is TCGv_i32 rather than TCGv.
  */

-static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
+static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
 {
     TCGv addr = tcg_temp_new();
     tcg_gen_extu_i32_tl(addr, a32);
@@ -XXX,XX +XXX,XX @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
     return addr;
 }

+/*
+ * Internal routines are used for NEON cases where the endianness
+ * and/or alignment has already been taken into account and manipulated.
+ */
+static void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+    tcg_gen_qemu_ld_i32(val, addr, index, opc);
+    tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+    tcg_gen_qemu_st_i32(val, addr, index, opc);
+    tcg_temp_free(addr);
+}
+
 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
-    TCGv addr;
-
-    if (s->align_mem) {
-        opc |= MO_ALIGN;
-    }
-
-    addr = gen_aa32_addr(s, a32, opc);
-    tcg_gen_qemu_ld_i32(val, addr, index, opc);
-    tcg_temp_free(addr);
+    gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
 }

 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
-    TCGv addr;
+    gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
+}

-    if (s->align_mem) {
-        opc |= MO_ALIGN;
+#define DO_GEN_LD(SUFF, OPC)                                            \
+    static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+                                         TCGv_i32 a32, int index)       \
+    {                                                                   \
+        gen_aa32_ld_i32(s, val, a32, index, OPC);                       \
     }

-    addr = gen_aa32_addr(s, a32, opc);
-    tcg_gen_qemu_st_i32(val, addr, index, opc);
-    tcg_temp_free(addr);
-}
-
-#define DO_GEN_LD(SUFF, OPC) \
-static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
-                                     TCGv_i32 a32, int index) \
-{ \
-    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
-}
-
-#define DO_GEN_ST(SUFF, OPC) \
-static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
-                                     TCGv_i32 a32, int index) \
-{ \
-    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
-}
+#define DO_GEN_ST(SUFF, OPC)                                            \
+    static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+                                         TCGv_i32 a32, int index)       \
+    {                                                                   \
+        gen_aa32_st_i32(s, val, a32, index, OPC);                       \
+    }

 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
 {
@@ -XXX,XX +XXX,XX @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
     addr = op_addr_rr_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);

     /*
@@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);
     tcg_temp_free_i32(tmp);

@@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt, tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt + 1, tmp);

     /* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = load_reg(s, a->rt + 1);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);

     op_addr_rr_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
     addr = op_addr_ri_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);

     /*
@@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);
     tcg_temp_free_i32(tmp);

@@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt, tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, rt2, tmp);

     /* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = load_reg(s, rt2);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);

     op_addr_ri_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
     addr = load_reg(s, a->rn);
     tmp = load_reg(s, a->rt);
     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);

     tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)

     addr = load_reg(s, a->rn);
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
     tcg_temp_free_i32(addr);

@@ -XXX,XX +XXX,XX @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
     addr = load_reg(s, a->rn);
     tcg_gen_add_i32(addr, addr, tmp);

-    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                    half ? MO_UW | s->be_data : MO_UB);
+    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
     tcg_temp_free_i32(addr);

     tcg_gen_add_i32(tmp, tmp, tmp);
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
     addr = tcg_temp_new_i32();
     load_reg_var(s, addr, a->rn);
     for (reg = 0; reg < nregs; reg++) {
-        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                        s->be_data | size);
+        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
         if ((vd & 1) && vec_size == 16) {
             /*
              * We cannot write 16 bytes at once because the
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
      */
     for (reg = 0; reg < nregs; reg++) {
         if (a->l) {
-            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                            s->be_data | a->size);
+            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
             neon_store_element(vd, a->reg_idx, a->size, tmp);
         } else { /* Store */
             neon_load_element(tmp, vd, a->reg_idx, a->size);
-            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
-                            s->be_data | a->size);
+            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
         }
         vd += a->stride;
         tcg_gen_addi_i32(addr, addr, 1 << a->size);
--
2.20.1
New patch
From: Richard Henderson <richard.henderson@linaro.org>

This is the only caller.  Adjust some commentary to talk
about SCTLR_B instead of the vanishing function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 37 ++++++++++++++++---------------------
 1 file changed, 16 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
         gen_aa32_st_i32(s, val, a32, index, OPC);                       \
     }

-static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
-{
-    /* Not needed for user-mode BE32, where we use MO_BE instead. */
-    if (!IS_USER_ONLY && s->sctlr_b) {
-        tcg_gen_rotri_i64(val, val, 32);
-    }
-}
-
 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
     TCGv addr = gen_aa32_addr(s, a32, opc);
     tcg_gen_qemu_ld_i64(val, addr, index, opc);
-    gen_aa32_frob64(s, val);
+
+    /* Not needed for user-mode BE32, where we use MO_BE instead. */
+    if (!IS_USER_ONLY && s->sctlr_b) {
+        tcg_gen_rotri_i64(val, val, 32);
+    }
+
     tcg_temp_free(addr);
 }

@@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         TCGv_i32 tmp2 = tcg_temp_new_i32();
         TCGv_i64 t64 = tcg_temp_new_i64();

-        /* For AArch32, architecturally the 32-bit word at the lowest
+        /*
+         * For AArch32, architecturally the 32-bit word at the lowest
          * address is always Rt and the one at addr+4 is Rt2, even if
          * the CPU is big-endian. That means we don't want to do a
-         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
-         * for an architecturally 64-bit access, but instead do a
-         * 64-bit access using MO_BE if appropriate and then split
-         * the two halves.
-         * This only makes a difference for BE32 user-mode, where
-         * frob64() must not flip the two halves of the 64-bit data
-         * but this code must treat BE32 user-mode like BE32 system.
+         * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
+         * architecturally 64-bit access, but instead do a 64-bit access
+         * using MO_BE if appropriate and then split the two halves.
          */
         TCGv taddr = gen_aa32_addr(s, addr, opc);

@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
         TCGv_i64 n64 = tcg_temp_new_i64();

         t2 = load_reg(s, rt2);
-        /* For AArch32, architecturally the 32-bit word at the lowest
+
+        /*
+         * For AArch32, architecturally the 32-bit word at the lowest
          * address is always Rt and the one at addr+4 is Rt2, even if
          * the CPU is big-endian. Since we're going to treat this as a
          * single 64-bit BE store, we need to put the two halves in the
          * opposite order for BE to LE, so that they end up in the right
-         * places.
-         * We don't want gen_aa32_frob64() because that does the wrong
-         * thing for BE32 usermode.
+         * places. We don't want gen_aa32_st_i64, because that checks
+         * SCTLR_B as if for an architectural 64-bit access.
          */
         if (s->be_data == MO_BE) {
             tcg_gen_concat_i32_i64(n64, t2, t1);
--
2.20.1
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Just because operating on a TCGv_i64 temporary does not
mean that we're performing a 64-bit operation.  Restrict
the frobbing to actual 64-bit operations.

This bug is not currently visible because all current
users of these two functions always pass MO_64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
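To make the latent bug concrete, here is a hypothetical caller (no
such caller exists in the tree today) that would have been mishandled
before this change:

    /* A 32-bit load into an i64 temporary. Without the size check,
     * BE32 system mode would rotate the 64-bit temporary by 32 bits
     * even though only 32 bits of data were loaded; with the check,
     * the rotate applies only when (opc & MO_SIZE) == MO_64.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UL);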
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
     tcg_gen_qemu_ld_i64(val, addr, index, opc);

     /* Not needed for user-mode BE32, where we use MO_BE instead. */
-    if (!IS_USER_ONLY && s->sctlr_b) {
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
         tcg_gen_rotri_i64(val, val, 32);
     }

@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
     TCGv addr = gen_aa32_addr(s, a32, opc);

     /* Not needed for user-mode BE32, where we use MO_BE instead. */
-    if (!IS_USER_ONLY && s->sctlr_b) {
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
         TCGv_i64 tmp = tcg_temp_new_i64();
         tcg_gen_rotri_i64(tmp, val, 32);
         tcg_gen_qemu_st_i64(tmp, addr, index, opc);
--
2.20.1
Currently our M profile exception return code switches to the
target stack pointer relatively early in the process, before
it tries to pop the exception frame off the stack. This is
awkward for v8M for two reasons:
 * in v8M the process vs main stack pointer is not selected
   purely by the value of CONTROL.SPSEL, so updating SPSEL
   and relying on that to switch to the right stack pointer
   won't work
 * the stack we should be reading the stack frame from and
   the stack we will eventually switch to might not be the
   same if the guest is doing strange things

Change our exception return code to use a 'frame pointer'
to read the exception frame rather than assuming that we
can switch the live stack pointer this early.
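In outline, the new shape of the code is as follows (a condensed
sketch of the diff below, details omitted):

    /* Find where the SP we must read the frame from currently lives,
     * pop via a local frame pointer, and only write the updated SP
     * back once the frame has definitely been consumed.
     */
    uint32_t *frame_sp_p = get_v7m_sp_ptr(env, return_to_secure,
                                          !return_to_handler,
                                          return_to_sp_process);
    uint32_t frameptr = *frame_sp_p;

    env->regs[0] = ldl_phys(cs->as, frameptr);  /* ... r1-r3, r12, lr, pc */
    frameptr += 0x20;                           /* commit to the frame */
    *frame_sp_p = frameptr;                     /* write back the SP */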
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1506092407-26985-3-git-send-email-peter.maydell@linaro.org
---
 target/arm/helper.c | 130 +++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 98 insertions(+), 32 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void v7m_push(CPUARMState *env, uint32_t val)
     stl_phys(cs->as, env->regs[13], val);
 }

-static uint32_t v7m_pop(CPUARMState *env)
-{
-    CPUState *cs = CPU(arm_env_get_cpu(env));
-    uint32_t val;
-
-    val = ldl_phys(cs->as, env->regs[13]);
-    env->regs[13] += 4;
-    return val;
-}
-
 /* Return true if we're using the process stack pointer (not the MSP) */
 static bool v7m_using_psp(CPUARMState *env)
 {
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
     env->regs[15] = dest & ~1;
 }

+static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
+                                bool spsel)
+{
+    /* Return a pointer to the location where we currently store the
+     * stack pointer for the requested security state and thread mode.
+     * This pointer will become invalid if the CPU state is updated
+     * such that the stack pointers are switched around (eg changing
+     * the SPSEL control bit).
+     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
+     * Unlike that pseudocode, we require the caller to pass us in the
+     * SPSEL control bit value; this is because we also use this
+     * function in handling of pushing of the callee-saves registers
+     * part of the v8M stack frame (pseudocode PushCalleeStack()),
+     * and in the tailchain codepath the SPSEL bit comes from the exception
+     * return magic LR value from the previous exception. The pseudocode
+     * opencodes the stack-selection in PushCalleeStack(), but we prefer
+     * to make this utility function generic enough to do the job.
+     */
+    bool want_psp = threadmode && spsel;
+
+    if (secure == env->v7m.secure) {
+        /* Currently switch_v7m_sp switches SP as it updates SPSEL,
+         * so the SP we want is always in regs[13].
+         * When we decouple SPSEL from the actually selected SP
+         * we need to check want_psp against v7m_using_psp()
+         * to see whether we need regs[13] or v7m.other_sp.
+         */
+        return &env->regs[13];
+    } else {
+        if (want_psp) {
+            return &env->v7m.other_ss_psp;
+        } else {
+            return &env->v7m.other_ss_msp;
+        }
+    }
+}
+
 static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
@@ -XXX,XX +XXX,XX @@ static void v7m_push_stack(ARMCPU *cpu)
 static void do_v7m_exception_exit(ARMCPU *cpu)
 {
     CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
     uint32_t excret;
     uint32_t xpsr;
     bool ufault = false;
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     bool return_to_handler = false;
     bool rettobase = false;
     bool exc_secure = false;
+    bool return_to_secure;

     /* We can only get here from an EXCP_EXCEPTION_EXIT, and
      * gen_bx_excret() enforces the architectural rule
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         g_assert_not_reached();
     }

+    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+                       (excret & R_V7M_EXCRET_S_MASK);
+
     switch (excret & 0xf) {
     case 1: /* Return to Handler */
         return_to_handler = true;
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         return;
     }

-    /* Switch to the target stack. */
+    /* Set CONTROL.SPSEL from excret.SPSEL. For QEMU this currently
+     * causes us to switch the active SP, but we will change this
+     * later to not do that so we can support v8M.
+     */
     switch_v7m_sp(env, return_to_sp_process);
-    /* Pop registers. */
-    env->regs[0] = v7m_pop(env);
-    env->regs[1] = v7m_pop(env);
-    env->regs[2] = v7m_pop(env);
-    env->regs[3] = v7m_pop(env);
-    env->regs[12] = v7m_pop(env);
-    env->regs[14] = v7m_pop(env);
-    env->regs[15] = v7m_pop(env);
-    if (env->regs[15] & 1) {
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "M profile return from interrupt with misaligned "
-                      "PC is UNPREDICTABLE\n");
-        /* Actual hardware seems to ignore the lsbit, and there are several
-         * RTOSes out there which incorrectly assume the r15 in the stack
-         * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
+
+    {
+        /* The stack pointer we should be reading the exception frame from
+         * depends on bits in the magic exception return type value (and
+         * for v8M isn't necessarily the stack pointer we will eventually
+         * end up resuming execution with). Get a pointer to the location
+         * in the CPU state struct where the SP we need is currently being
+         * stored; we will use and modify it in place.
+         * We use this limited C variable scope so we don't accidentally
+         * use 'frame_sp_p' after we do something that makes it invalid.
+         */
+        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
+                                              return_to_secure,
+                                              !return_to_handler,
+                                              return_to_sp_process);
+        uint32_t frameptr = *frame_sp_p;
+
+        /* Pop registers. TODO: make these accesses use the correct
+         * attributes and address space (S/NS, priv/unpriv) and handle
+         * memory transaction failures.
          */
-        env->regs[15] &= ~1U;
+        env->regs[0] = ldl_phys(cs->as, frameptr);
+        env->regs[1] = ldl_phys(cs->as, frameptr + 0x4);
+        env->regs[2] = ldl_phys(cs->as, frameptr + 0x8);
+        env->regs[3] = ldl_phys(cs->as, frameptr + 0xc);
+        env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
+        env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
+        env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
+        if (env->regs[15] & 1) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "M profile return from interrupt with misaligned "
+                          "PC is UNPREDICTABLE\n");
+            /* Actual hardware seems to ignore the lsbit, and there are several
+             * RTOSes out there which incorrectly assume the r15 in the stack
+             * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
+             */
+            env->regs[15] &= ~1U;
+        }
+        xpsr = ldl_phys(cs->as, frameptr + 0x1c);
+
+        /* Commit to consuming the stack frame */
+        frameptr += 0x20;
+        /* Undo stack alignment (the SPREALIGN bit indicates that the original
+         * pre-exception SP was not 8-aligned and we added a padding word to
+         * align it, so we undo this by ORing in the bit that increases it
+         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
+         * would work too but a logical OR is how the pseudocode specifies it.)
+         */
+        if (xpsr & XPSR_SPREALIGN) {
+            frameptr |= 4;
+        }
+        *frame_sp_p = frameptr;
     }
-    xpsr = v7m_pop(env);
+    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
     xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
-    /* Undo stack alignment. */
-    if (xpsr & XPSR_SPREALIGN) {
-        env->regs[13] |= 4;
-    }

     /* The restored xPSR exception field will be zero if we're
      * resuming in Thread mode. If that doesn't match what the
--
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

Adjust the interface to match what has been done to the
TCGv_i32 load/store functions.

This is less obvious, because at present the only user of
these functions, trans_VLDST_multiple, also wants to manipulate
the endianness to speed up loading multiple bytes.  Thus we
retain an "internal" interface which is identical to the
current gen_aa32_{ld,st}_i64 interface.

The "new" interface will gain users as we remove the legacy
interfaces, gen_aa32_ld64 and gen_aa32_st64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          | 78 +++++++++++++++++++--------------
 target/arm/translate-neon.c.inc |  6 ++-
 2 files changed, 49 insertions(+), 35 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
     tcg_temp_free(addr);
 }

+static void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+
+    tcg_gen_qemu_ld_i64(val, addr, index, opc);
+
+    /* Not needed for user-mode BE32, where we use MO_BE instead. */
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+        tcg_gen_rotri_i64(val, val, 32);
+    }
+    tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+
+    /* Not needed for user-mode BE32, where we use MO_BE instead. */
+    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+        TCGv_i64 tmp = tcg_temp_new_i64();
+        tcg_gen_rotri_i64(tmp, val, 32);
+        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+        tcg_temp_free_i64(tmp);
+    } else {
+        tcg_gen_qemu_st_i64(val, addr, index, opc);
+    }
+    tcg_temp_free(addr);
+}
+
 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
     gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
 }

+static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+                            int index, MemOp opc)
+{
+    gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
+static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+                            int index, MemOp opc)
+{
+    gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
 #define DO_GEN_LD(SUFF, OPC)                                            \
     static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
                                          TCGv_i32 a32, int index)       \
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
         gen_aa32_st_i32(s, val, a32, index, OPC);                       \
     }

-static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
-                            int index, MemOp opc)
-{
-    TCGv addr = gen_aa32_addr(s, a32, opc);
-    tcg_gen_qemu_ld_i64(val, addr, index, opc);
-
-    /* Not needed for user-mode BE32, where we use MO_BE instead. */
-    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
-        tcg_gen_rotri_i64(val, val, 32);
-    }
-
-    tcg_temp_free(addr);
-}
-
 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                  TCGv_i32 a32, int index)
 {
-    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
-}
-
-static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
-                            int index, MemOp opc)
-{
-    TCGv addr = gen_aa32_addr(s, a32, opc);
-
-    /* Not needed for user-mode BE32, where we use MO_BE instead. */
-    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
-        TCGv_i64 tmp = tcg_temp_new_i64();
-        tcg_gen_rotri_i64(tmp, val, 32);
-        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
-        tcg_temp_free_i64(tmp);
-    } else {
-        tcg_gen_qemu_st_i64(val, addr, index, opc);
-    }
-    tcg_temp_free(addr);
+    gen_aa32_ld_i64(s, val, a32, index, MO_Q);
 }

 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                  TCGv_i32 a32, int index)
 {
-    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
+    gen_aa32_st_i64(s, val, a32, index, MO_Q);
 }

 DO_GEN_LD(8u, MO_UB)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
                 int tt = a->vd + reg + spacing * xs;

                 if (a->l) {
-                    gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
+                    gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
+                                             endian | size);
                     neon_store_element64(tt, n, size, tmp64);
                 } else {
                     neon_load_element64(tmp64, tt, n, size);
-                    gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
+                    gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
+                                             endian | size);
                 }
                 tcg_gen_add_i32(addr, addr, tmp);
--
2.20.1
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Buglink: https://bugs.launchpad.net/qemu/+bug/1905356
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt, tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt + 1, tmp);

     /* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = load_reg(s, a->rt + 1);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);

     op_addr_rr_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, a->rt, tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     store_reg(s, rt2, tmp);

     /* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = load_reg(s, rt2);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);

     op_addr_ri_post(s, a, addr, -4);
--
2.20.1
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
     addr = load_reg(s, a->rn);
     tmp = load_reg(s, a->rt);
     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
+    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);

     tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)

     addr = load_reg(s, a->rn);
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
+    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
     tcg_temp_free_i32(addr);

--
2.20.1
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
         } else {
             tmp = load_reg(s, i);
         }
-        gen_aa32_st32(s, tmp, addr, mem_idx);
+        gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
         tcg_temp_free_i32(tmp);

         /* No need to add after the last transfer. */
@@ -XXX,XX +XXX,XX @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
         }

         tmp = tcg_temp_new_i32();
-        gen_aa32_ld32u(s, tmp, addr, mem_idx);
+        gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
         if (user) {
             tmp2 = tcg_const_i32(i);
             gen_helper_set_user_reg(cpu_env, tmp2, tmp);
--
2.20.1
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_RFE(DisasContext *s, arg_RFE *a)

     /* Load PC into tmp and CPSR into tmp2.  */
     t1 = tcg_temp_new_i32();
-    gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
+    gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
     tcg_gen_addi_i32(addr, addr, 4);
     t2 = tcg_temp_new_i32();
-    gen_aa32_ld32u(s, t2, addr, get_mem_index(s));
+    gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);

     if (a->w) {
         /* Base writeback. */
--
2.20.1
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-20-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
     }
     tcg_gen_addi_i32(addr, addr, offset);
     tmp = load_reg(s, 14);
-    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);
     tmp = load_cpu_field(spsr);
     tcg_gen_addi_i32(addr, addr, 4);
-    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
     tcg_temp_free_i32(tmp);
     if (writeback) {
         switch (amode) {
--
2.20.1
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-vfp.c.inc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
     for (i = 0; i < n; i++) {
         if (a->l) {
             /* load */
-            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
             vfp_store_reg32(tmp, a->vd + i);
         } else {
             /* store */
             vfp_load_reg32(tmp, a->vd + i);
-            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
         }
         tcg_gen_addi_i32(addr, addr, offset);
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
     for (i = 0; i < n; i++) {
         if (a->l) {
             /* load */
-            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+            gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
             vfp_store_reg64(tmp, a->vd + i);
         } else {
             /* store */
             vfp_load_reg64(tmp, a->vd + i);
-            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
+            gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
         }
         tcg_gen_addi_i32(addr, addr, offset);
     }
--
2.20.1
48
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-22-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/translate-vfp.c.inc | 12 ++++++------
9
1 file changed, 6 insertions(+), 6 deletions(-)
10
11
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-vfp.c.inc
14
+++ b/target/arm/translate-vfp.c.inc
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
16
addr = add_reg_for_lit(s, a->rn, offset);
17
tmp = tcg_temp_new_i32();
18
if (a->l) {
19
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
20
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
21
vfp_store_reg32(tmp, a->vd);
22
} else {
23
vfp_load_reg32(tmp, a->vd);
24
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
25
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
26
}
27
tcg_temp_free_i32(tmp);
28
tcg_temp_free_i32(addr);
29
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
30
addr = add_reg_for_lit(s, a->rn, offset);
31
tmp = tcg_temp_new_i32();
32
if (a->l) {
33
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
34
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
35
vfp_store_reg32(tmp, a->vd);
36
} else {
37
vfp_load_reg32(tmp, a->vd);
38
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
39
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
40
}
41
tcg_temp_free_i32(tmp);
42
tcg_temp_free_i32(addr);
43
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
44
addr = add_reg_for_lit(s, a->rn, offset);
45
tmp = tcg_temp_new_i64();
46
if (a->l) {
47
- gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
48
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
49
vfp_store_reg64(tmp, a->vd);
50
} else {
51
vfp_load_reg64(tmp, a->vd);
52
- gen_aa32_st64(s, tmp, addr, get_mem_index(s));
53
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
54
}
55
tcg_temp_free_i64(tmp);
56
tcg_temp_free_i32(addr);
57
--
58
2.20.1
59
60
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-23-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/translate.h | 1 +
9
target/arm/translate.c | 15 +++++++++++++
10
target/arm/translate-neon.c.inc | 37 +++++++++++++++++++++++++--------
11
3 files changed, 44 insertions(+), 9 deletions(-)
12
13
diff --git a/target/arm/translate.h b/target/arm/translate.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.h
16
+++ b/target/arm/translate.h
17
@@ -XXX,XX +XXX,XX @@ void arm_test_cc(DisasCompare *cmp, int cc);
18
void arm_free_cc(DisasCompare *cmp);
19
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
20
void arm_gen_test_cc(int cc, TCGLabel *label);
21
+MemOp pow2_align(unsigned i);
22
23
/* Return state of Alternate Half-precision flag, caller frees result */
24
static inline TCGv_i32 get_ahp_flag(void)
25
diff --git a/target/arm/translate.c b/target/arm/translate.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate.c
28
+++ b/target/arm/translate.c
29
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
30
#define IS_USER_ONLY 0
31
#endif
32
33
+MemOp pow2_align(unsigned i)
34
+{
35
+ static const MemOp mop_align[] = {
36
+ 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16,
37
+ /*
38
+ * FIXME: TARGET_PAGE_BITS_MIN affects TLB_FLAGS_MASK such
39
+ * that 256-bit alignment (MO_ALIGN_32) cannot be supported:
40
+ * see get_alignment_bits(). Enforce only 128-bit alignment for now.
41
+ */
42
+ MO_ALIGN_16
43
+ };
44
+ g_assert(i < ARRAY_SIZE(mop_align));
45
+ return mop_align[i];
46
+}
47
+
48
/*
49
* Abstractions of "generate code to do a guest load/store for
50
* AArch32", where a vaddr is always 32 bits (and is zero
51
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
52
index XXXXXXX..XXXXXXX 100644
53
--- a/target/arm/translate-neon.c.inc
54
+++ b/target/arm/translate-neon.c.inc
55
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
56
int size = a->size;
57
int nregs = a->n + 1;
58
TCGv_i32 addr, tmp;
59
+ MemOp mop, align;
60
61
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
62
return false;
63
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
64
return false;
65
}
66
67
+ align = 0;
68
if (size == 3) {
69
if (nregs != 4 || a->a == 0) {
70
return false;
71
}
72
/* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
73
- size = 2;
74
- }
75
- if (nregs == 1 && a->a == 1 && size == 0) {
76
- return false;
77
- }
78
- if (nregs == 3 && a->a == 1) {
79
- return false;
80
+ size = MO_32;
81
+ align = MO_ALIGN_16;
82
+ } else if (a->a) {
83
+ switch (nregs) {
84
+ case 1:
85
+ if (size == 0) {
86
+ return false;
87
+ }
88
+ align = MO_ALIGN;
89
+ break;
90
+ case 2:
91
+ align = pow2_align(size + 1);
92
+ break;
93
+ case 3:
94
+ return false;
95
+ case 4:
96
+ align = pow2_align(size + 2);
97
+ break;
98
+ default:
99
+ g_assert_not_reached();
100
+ }
101
}
102
103
if (!vfp_access_check(s)) {
104
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
105
*/
106
stride = a->t ? 2 : 1;
107
vec_size = nregs == 1 ? stride * 8 : 8;
108
-
109
+ mop = size | align;
110
tmp = tcg_temp_new_i32();
111
addr = tcg_temp_new_i32();
112
load_reg_var(s, addr, a->rn);
113
for (reg = 0; reg < nregs; reg++) {
114
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
115
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
116
if ((vd & 1) && vec_size == 16) {
117
/*
118
* We cannot write 16 bytes at once because the
119
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
120
}
121
tcg_gen_addi_i32(addr, addr, 1 << size);
122
vd += stride;
123
+
124
+ /* Subsequent memory operations inherit alignment */
125
+ mop &= ~MO_AMASK;
126
}
127
tcg_temp_free_i32(tmp);
128
tcg_temp_free_i32(addr);
129
--
130
2.20.1
131
132
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-24-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/translate-neon.c.inc | 27 ++++++++++++++++++++++-----
9
1 file changed, 22 insertions(+), 5 deletions(-)
10
11
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-neon.c.inc
14
+++ b/target/arm/translate-neon.c.inc
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
16
{
17
/* Neon load/store multiple structures */
18
int nregs, interleave, spacing, reg, n;
19
- MemOp endian = s->be_data;
20
+ MemOp mop, align, endian;
21
int mmu_idx = get_mem_index(s);
22
int size = a->size;
23
TCGv_i64 tmp64;
24
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
25
}
26
27
/* For our purposes, bytes are always little-endian. */
28
+ endian = s->be_data;
29
if (size == 0) {
30
endian = MO_LE;
31
}
32
+
33
+ /* Enforce alignment requested by the instruction */
34
+ if (a->align) {
35
+ align = pow2_align(a->align + 2); /* 4 ** a->align */
36
+ } else {
37
+ align = s->align_mem ? MO_ALIGN : 0;
38
+ }
39
+
40
/*
41
* Consecutive little-endian elements from a single register
42
* can be promoted to a larger little-endian operation.
43
*/
44
if (interleave == 1 && endian == MO_LE) {
45
+ /* Retain any natural alignment. */
46
+ if (align == MO_ALIGN) {
47
+ align = pow2_align(size);
48
+ }
49
size = 3;
50
}
51
+
52
tmp64 = tcg_temp_new_i64();
53
addr = tcg_temp_new_i32();
54
tmp = tcg_const_i32(1 << size);
55
load_reg_var(s, addr, a->rn);
56
+
57
+ mop = endian | size | align;
58
for (reg = 0; reg < nregs; reg++) {
59
for (n = 0; n < 8 >> size; n++) {
60
int xs;
61
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
62
int tt = a->vd + reg + spacing * xs;
63
64
if (a->l) {
65
- gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
66
- endian | size);
67
+ gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, mop);
68
neon_store_element64(tt, n, size, tmp64);
69
} else {
70
neon_load_element64(tmp64, tt, n, size);
71
- gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
72
- endian | size);
73
+ gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
74
}
75
tcg_gen_add_i32(addr, addr, tmp);
76
+
77
+ /* Subsequent memory operations inherit alignment */
78
+ mop &= ~MO_AMASK;
79
}
80
}
81
}
82
--
83
2.20.1
84
85
diff view generated by jsdifflib
1
In v8M, more bits are defined in the exception-return magic
1
From: Richard Henderson <richard.henderson@linaro.org>
2
values; update the code that checks these so we accept
3
the v8M values when the CPU permits them.
4
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-25-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 1506092407-26985-11-git-send-email-peter.maydell@linaro.org
8
---
7
---
9
target/arm/helper.c | 73 ++++++++++++++++++++++++++++++++++++++++++-----------
8
target/arm/translate-neon.c.inc | 48 ++++++++++++++++++++++++++++-----
10
1 file changed, 58 insertions(+), 15 deletions(-)
9
1 file changed, 42 insertions(+), 6 deletions(-)
11
10
12
diff --git a/target/arm/helper.c b/target/arm/helper.c
11
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/helper.c
13
--- a/target/arm/translate-neon.c.inc
15
+++ b/target/arm/helper.c
14
+++ b/target/arm/translate-neon.c.inc
16
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
17
uint32_t excret;
16
int nregs = a->n + 1;
18
uint32_t xpsr;
17
int vd = a->vd;
19
bool ufault = false;
18
TCGv_i32 addr, tmp;
20
- bool return_to_sp_process = false;
19
+ MemOp mop;
21
- bool return_to_handler = false;
20
22
+ bool sfault = false;
21
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
23
+ bool return_to_sp_process;
22
return false;
24
+ bool return_to_handler;
23
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
25
bool rettobase = false;
24
return true;
26
bool exc_secure = false;
27
bool return_to_secure;
28
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
29
excret);
30
}
25
}
31
26
32
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
27
+ /* Pick up SCTLR settings */
33
+ /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
28
+ mop = finalize_memop(s, a->size);
34
+ * we pick which FAULTMASK to clear.
35
+ */
36
+ if (!env->v7m.secure &&
37
+ ((excret & R_V7M_EXCRET_ES_MASK) ||
38
+ !(excret & R_V7M_EXCRET_DCRS_MASK))) {
39
+ sfault = 1;
40
+ /* For all other purposes, treat ES as 0 (R_HXSR) */
41
+ excret &= ~R_V7M_EXCRET_ES_MASK;
42
+ }
43
+ }
44
+
29
+
45
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
30
+ if (a->align) {
46
/* Auto-clear FAULTMASK on return from other than NMI.
31
+ MemOp align_op;
47
* If the security extension is implemented then this only
32
+
48
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
33
+ switch (nregs) {
49
g_assert_not_reached();
34
+ case 1:
50
}
35
+ /* For VLD1, use natural alignment. */
51
36
+ align_op = MO_ALIGN;
52
+ return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
53
+ return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
54
return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
55
(excret & R_V7M_EXCRET_S_MASK);
56
57
- switch (excret & 0xf) {
58
- case 1: /* Return to Handler */
59
- return_to_handler = true;
60
- break;
61
- case 13: /* Return to Thread using Process stack */
62
- return_to_sp_process = true;
63
- /* fall through */
64
- case 9: /* Return to Thread using Main stack */
65
- if (!rettobase &&
66
- !(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_NONBASETHRDENA_MASK)) {
67
+ if (arm_feature(env, ARM_FEATURE_V8)) {
68
+ if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
69
+ /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
70
+ * we choose to take the UsageFault.
71
+ */
72
+ if ((excret & R_V7M_EXCRET_S_MASK) ||
73
+ (excret & R_V7M_EXCRET_ES_MASK) ||
74
+ !(excret & R_V7M_EXCRET_DCRS_MASK)) {
75
+ ufault = true;
76
+ }
77
+ }
78
+ if (excret & R_V7M_EXCRET_RES0_MASK) {
79
ufault = true;
80
}
81
- break;
82
- default:
83
- ufault = true;
84
+ } else {
85
+ /* For v7M we only recognize certain combinations of the low bits */
86
+ switch (excret & 0xf) {
87
+ case 1: /* Return to Handler */
88
+ break;
37
+ break;
89
+ case 13: /* Return to Thread using Process stack */
38
+ case 2:
90
+ case 9: /* Return to Thread using Main stack */
39
+ /* For VLD2, use double alignment. */
91
+ /* We only need to check NONBASETHRDENA for v7M, because in
40
+ align_op = pow2_align(a->size + 1);
92
+ * v8M this bit does not exist (it is RES1).
41
+ break;
93
+ */
42
+ case 4:
94
+ if (!rettobase &&
43
+ if (a->size == MO_32) {
95
+ !(env->v7m.ccr[env->v7m.secure] &
44
+ /*
96
+ R_V7M_CCR_NONBASETHRDENA_MASK)) {
45
+ * For VLD4.32, align = 1 is double alignment, align = 2 is
97
+ ufault = true;
46
+ * quad alignment; align = 3 is rejected above.
47
+ */
48
+ align_op = pow2_align(a->size + a->align);
49
+ } else {
50
+ /* For VLD4.8 and VLD.16, we want quad alignment. */
51
+ align_op = pow2_align(a->size + 2);
98
+ }
52
+ }
99
+ break;
53
+ break;
100
+ default:
54
+ default:
101
+ ufault = true;
55
+ /* For VLD3, the alignment field is zero and rejected above. */
56
+ g_assert_not_reached();
102
+ }
57
+ }
58
+
59
+ mop = (mop & ~MO_AMASK) | align_op;
103
+ }
60
+ }
104
+
61
+
105
+ if (sfault) {
62
tmp = tcg_temp_new_i32();
106
+ env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
63
addr = tcg_temp_new_i32();
107
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
64
load_reg_var(s, addr, a->rn);
108
+ v7m_exception_taken(cpu, excret);
65
- /*
109
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
66
- * TODO: if we implemented alignment exceptions, we should check
110
+ "stackframe: failed EXC_RETURN.ES validity check\n");
67
- * addr against the alignment encoded in a->align here.
111
+ return;
68
- */
69
+
70
for (reg = 0; reg < nregs; reg++) {
71
if (a->l) {
72
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
73
+ gen_aa32_ld_internal_i32(s, tmp, addr, get_mem_index(s), mop);
74
neon_store_element(vd, a->reg_idx, a->size, tmp);
75
} else { /* Store */
76
neon_load_element(tmp, vd, a->reg_idx, a->size);
77
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
78
+ gen_aa32_st_internal_i32(s, tmp, addr, get_mem_index(s), mop);
79
}
80
vd += a->stride;
81
tcg_gen_addi_i32(addr, addr, 1 << a->size);
82
+
83
+ /* Subsequent memory operations inherit alignment */
84
+ mop &= ~MO_AMASK;
112
}
85
}
113
86
tcg_temp_free_i32(addr);
114
if (ufault) {
87
tcg_temp_free_i32(tmp);
115
--
88
--
116
2.7.4
89
2.20.1
117
90
118
91
diff view generated by jsdifflib
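The Neon hunk above turns the instruction's align field into an alignment requirement via pow2_align(). A standalone sketch of that arithmetic follows; the MO_* constants are simplified stand-ins for QEMU's MemOp encoding, and "natural alignment" is modeled here as pow2_align(size) rather than QEMU's MO_ALIGN marker:

    /* Sketch of the alignment choice for VLDn/VSTn (single structure).
     * Simplified stand-in encoding, not QEMU's headers.
     */
    #include <assert.h>
    #include <stdio.h>

    enum {
        MO_8 = 0, MO_16 = 1, MO_32 = 2,    /* log2 of element size */
        MO_ASHIFT = 4,
        MO_AMASK = 7 << MO_ASHIFT,         /* alignment field */
    };

    /* Encode "must be aligned to 2**i bytes" in the alignment field. */
    static int pow2_align(unsigned i)
    {
        assert(i <= 6);
        return i << MO_ASHIFT;
    }

    static unsigned align_bytes(int nregs, int size, int align)
    {
        int align_op;

        switch (nregs) {
        case 1:                   /* VLD1: natural alignment */
            align_op = pow2_align(size);
            break;
        case 2:                   /* VLD2: double alignment */
            align_op = pow2_align(size + 1);
            break;
        case 4:
            if (size == MO_32) {  /* VLD4.32: align field picks 8 or 16 */
                align_op = pow2_align(size + align);
            } else {              /* VLD4.8 / VLD4.16: quad alignment */
                align_op = pow2_align(size + 2);
            }
            break;
        default:                  /* VLD3 never encodes an alignment hint */
            assert(0);
        }
        return 1u << ((align_op & MO_AMASK) >> MO_ASHIFT);
    }

    int main(void)
    {
        printf("VLD1.16: %u bytes\n", align_bytes(1, MO_16, 1));        /* 2 */
        printf("VLD2.32: %u bytes\n", align_bytes(2, MO_32, 1));        /* 8 */
        printf("VLD4.32 align=2: %u bytes\n", align_bytes(4, MO_32, 2)); /* 16 */
        return 0;
    }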
New patch
From: Richard Henderson <richard.henderson@linaro.org>

In the case of gpr load, merge the size and is_signed arguments;
otherwise, simply convert size to memop.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-26-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 78 ++++++++++++++++----------------------
 1 file changed, 33 insertions(+), 45 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
  * Store from GPR register to memory.
  */
 static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
-                             TCGv_i64 tcg_addr, int size, int memidx,
+                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                              bool iss_valid,
                              unsigned int iss_srt,
                              bool iss_sf, bool iss_ar)
 {
-    g_assert(size <= 3);
-    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
+    memop = finalize_memop(s, memop);
+    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
 
     if (iss_valid) {
         uint32_t syn;
 
         syn = syn_data_abort_with_iss(0,
-                                      size,
+                                      (memop & MO_SIZE),
                                       false,
                                       iss_srt,
                                       iss_sf,
@@ -XXX,XX +XXX,XX @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
 }
 
 static void do_gpr_st(DisasContext *s, TCGv_i64 source,
-                      TCGv_i64 tcg_addr, int size,
+                      TCGv_i64 tcg_addr, MemOp memop,
                       bool iss_valid,
                       unsigned int iss_srt,
                       bool iss_sf, bool iss_ar)
 {
-    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
+    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                      iss_valid, iss_srt, iss_sf, iss_ar);
 }
 
 /*
  * Load from memory to GPR register
  */
-static void do_gpr_ld_memidx(DisasContext *s,
-                             TCGv_i64 dest, TCGv_i64 tcg_addr,
-                             int size, bool is_signed,
-                             bool extend, int memidx,
+static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+                             MemOp memop, bool extend, int memidx,
                              bool iss_valid, unsigned int iss_srt,
                              bool iss_sf, bool iss_ar)
 {
-    MemOp memop = s->be_data + size;
-
-    g_assert(size <= 3);
-
-    if (is_signed) {
-        memop += MO_SIGN;
-    }
-
+    memop = finalize_memop(s, memop);
     tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
 
-    if (extend && is_signed) {
-        g_assert(size < 3);
+    if (extend && (memop & MO_SIGN)) {
+        g_assert((memop & MO_SIZE) <= MO_32);
         tcg_gen_ext32u_i64(dest, dest);
     }
 
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s,
         uint32_t syn;
 
         syn = syn_data_abort_with_iss(0,
-                                      size,
-                                      is_signed,
+                                      (memop & MO_SIZE),
+                                      (memop & MO_SIGN) != 0,
                                       iss_srt,
                                       iss_sf,
                                       iss_ar,
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s,
     }
 }
 
-static void do_gpr_ld(DisasContext *s,
-                      TCGv_i64 dest, TCGv_i64 tcg_addr,
-                      int size, bool is_signed, bool extend,
+static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+                      MemOp memop, bool extend,
                       bool iss_valid, unsigned int iss_srt,
                       bool iss_sf, bool iss_ar)
 {
-    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
-                     get_mem_index(s),
+    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                      iss_valid, iss_srt, iss_sf, iss_ar);
 }
 
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         }
         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                     false, rn != 31, size);
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
         return;
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
         /* Only unsigned 32bit loads target 32bit registers. */
         bool iss_sf = opc != 0;
 
-        do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
-                  true, rt, iss_sf, false);
+        do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+                  false, true, rt, iss_sf, false);
     }
     tcg_temp_free_i64(clean_addr);
 }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
             /* Do not modify tcg_rt before recognizing any exception
              * from the second load.
              */
-            do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
-                      false, 0, false, false);
+            do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
+                      false, false, 0, false, false);
             tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
-            do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
-                      false, 0, false, false);
+            do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
+                      false, false, 0, false, false);
 
             tcg_gen_mov_i64(tcg_rt, tmp);
             tcg_temp_free_i64(tmp);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
             do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                              iss_valid, rt, iss_sf, false);
         } else {
-            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
-                             is_signed, is_extended, memidx,
+            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+                             is_extended, memidx,
                              iss_valid, rt, iss_sf, false);
         }
     }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
             do_gpr_st(s, tcg_rt, clean_addr, size,
                       true, rt, iss_sf, false);
         } else {
-            do_gpr_ld(s, tcg_rt, clean_addr, size,
-                      is_signed, is_extended,
-                      true, rt, iss_sf, false);
+            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+                      is_extended, true, rt, iss_sf, false);
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
             do_gpr_st(s, tcg_rt, clean_addr, size,
                       true, rt, iss_sf, false);
         } else {
-            do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
-                      true, rt, iss_sf, false);
+            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+                      is_extended, true, rt, iss_sf, false);
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
      * full load-acquire (we only need "load-acquire processor consistent"),
      * but we choose to implement them as full LDAQ.
      */
-    do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
+    do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
               true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
     return;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                                 is_wback || rn != 31, size);
 
     tcg_rt = cpu_reg(s, rt);
-    do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
+    do_gpr_ld(s, tcg_rt, clean_addr, size,
               /* extend */ false, /* iss_valid */ !is_wback,
               /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
 
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
          * Load-AcquirePC semantics; we implement as the slightly more
          * restrictive Load-Acquire.
          */
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend,
-                  true, rt, iss_sf, true);
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
+                  extend, true, rt, iss_sf, true);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
     }
 }
-- 
2.20.1
diff view generated by jsdifflib
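The shape of the new call sites, "size + is_signed * MO_SIGN", is easier to see in isolation. A minimal sketch with stand-in values for the MemOp bits (not QEMU's headers, though the values happen to match the common layout):

    #include <stdbool.h>
    #include <stdio.h>

    enum {
        MO_SIZE = 3,    /* low bits: log2 of access size */
        MO_SIGN = 4,    /* flag: sign-extend the loaded value */
    };

    static void describe_load(int memop)
    {
        int size = memop & MO_SIZE;
        bool is_signed = (memop & MO_SIGN) != 0;

        printf("%d-byte %s load\n", 1 << size,
               is_signed ? "signed" : "unsigned");
    }

    int main(void)
    {
        int size = 1;          /* 16-bit access */
        bool is_signed = true;

        /* The call-site idiom from the patch above: */
        describe_load(size + is_signed * MO_SIGN);
        describe_load(2);      /* plain 32-bit unsigned load */
        return 0;
    }

Folding the sign flag into the memop is what later lets the syndrome computation recover both size and signedness from a single argument.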
From: Michael Olbrich <m.olbrich@pengutronix.de>

The current code checks if the next block exceeds the size of the card.
This generates an error while reading the last block of the card.
Do the out-of-bounds check when starting to read a new block to fix this.

This issue became visible with increased error checking in Linux 4.13.

Cc: qemu-stable@nongnu.org
Signed-off-by: Michael Olbrich <m.olbrich@pengutronix.de>
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
Message-id: 20170916091611.10241-1-m.olbrich@pengutronix.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/sd/sd.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/hw/sd/sd.c b/hw/sd/sd.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/sd/sd.c
+++ b/hw/sd/sd.c
@@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd)
         break;
 
     case 18:    /* CMD18: READ_MULTIPLE_BLOCK */
-        if (sd->data_offset == 0)
+        if (sd->data_offset == 0) {
+            if (sd->data_start + io_len > sd->size) {
+                sd->card_status |= ADDRESS_ERROR;
+                return 0x00;
+            }
             BLK_READ_BLOCK(sd->data_start, io_len);
+        }
         ret = sd->data[sd->data_offset ++];
 
         if (sd->data_offset >= io_len) {
@@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd)
                     break;
                 }
             }
-
-            if (sd->data_start + io_len > sd->size) {
-                sd->card_status |= ADDRESS_ERROR;
-                break;
-            }
         }
         break;
-- 
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

For 128-bit load/store, use 16-byte alignment. This
requires that we perform the two operations in the
correct order so that we generate the alignment fault
before modifying memory.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-27-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 42 +++++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
 {
     /* This writes the bottom N bits of a 128 bit wide vector to memory */
-    TCGv_i64 tmp = tcg_temp_new_i64();
-    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+    TCGv_i64 tmplo = tcg_temp_new_i64();
+    MemOp mop;
+
+    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+
     if (size < 4) {
-        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
-                            s->be_data + size);
+        mop = finalize_memop(s, size);
+        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
     } else {
         bool be = s->be_data == MO_BE;
         TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
+        TCGv_i64 tmphi = tcg_temp_new_i64();
 
+        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
+
+        mop = s->be_data | MO_Q;
+        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
+                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
         tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
-        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
-                            s->be_data | MO_Q);
-        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
-        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
-                            s->be_data | MO_Q);
+        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
+                            get_mem_index(s), mop);
+
         tcg_temp_free_i64(tcg_hiaddr);
+        tcg_temp_free_i64(tmphi);
     }
 
-    tcg_temp_free_i64(tmp);
+    tcg_temp_free_i64(tmplo);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
     /* This always zero-extends and writes to a full 128 bit wide vector */
     TCGv_i64 tmplo = tcg_temp_new_i64();
     TCGv_i64 tmphi = NULL;
+    MemOp mop;
 
     if (size < 4) {
-        MemOp memop = s->be_data + size;
-        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
+        mop = finalize_memop(s, size);
+        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
     } else {
         bool be = s->be_data == MO_BE;
         TCGv_i64 tcg_hiaddr;
@@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
         tmphi = tcg_temp_new_i64();
         tcg_hiaddr = tcg_temp_new_i64();
 
+        mop = s->be_data | MO_Q;
+        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
+                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
         tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
-        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
-                            s->be_data | MO_Q);
-        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
-                            s->be_data | MO_Q);
+        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
+                            get_mem_index(s), mop);
         tcg_temp_free_i64(tcg_hiaddr);
     }
 
-- 
2.20.1
diff view generated by jsdifflib
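The commit message's point about operation order is the whole trick here: the access that carries the 16-byte alignment check must be issued first, so a misaligned address faults before any byte of memory has been written. A toy model of that ordering, with store8() standing in for the qemu_st op and longjmp standing in for the alignment fault:

    #include <setjmp.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static jmp_buf fault;
    static uint8_t guest_mem[32];

    static void store8(uint64_t addr, uint64_t val, int align_bytes)
    {
        if (addr % align_bytes) {
            longjmp(fault, 1);       /* alignment fault */
        }
        memcpy(&guest_mem[addr], &val, 8);
    }

    static void store16(uint64_t addr, uint64_t lo, uint64_t hi)
    {
        store8(addr, lo, 16);        /* alignment checked on the first access */
        store8(addr + 8, hi, 1);     /* second half: no further check needed */
    }

    int main(void)
    {
        if (setjmp(fault) == 0) {
            store16(4, 0x1111111111111111ull, 0x2222222222222222ull);
            printf("stored\n");
        } else {
            /* The fault fired before store16 touched guest_mem. */
            printf("alignment fault, mem[4] = %d\n", guest_mem[4]);
        }
        return 0;
    }

Had the unchecked half been stored first, a fault on the checked half would leave guest memory partially updated, which the architecture forbids.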
From: Jan Kiszka <jan.kiszka@siemens.com>

This properly forwards SMC events to EL2 when PSCI is provided by QEMU
itself and, thus, ARM_FEATURE_EL3 is off.

Found and tested with the Jailhouse hypervisor. Solution based on
suggestions by Peter Maydell.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Message-id: 4f243068-aaea-776f-d18f-f9e05e7be9cd@siemens.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c    |  9 ++++++++-
 target/arm/op_helper.c | 27 +++++++++++++++++----------
 2 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 
     if (arm_feature(env, ARM_FEATURE_EL3)) {
         valid_mask &= ~HCR_HCD;
-    } else {
+    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
+         * However, if we're using the SMC PSCI conduit then QEMU is
+         * effectively acting like EL3 firmware and so the guest at
+         * EL2 should retain the ability to prevent EL1 from being
+         * able to make SMC calls into the ersatz firmware, so in
+         * that case HCR.TSC should be read/write.
+         */
         valid_mask &= ~HCR_TSC;
     }
 
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
      */
     bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;
 
-    if (arm_is_psci_call(cpu, EXCP_SMC)) {
-        /* If PSCI is enabled and this looks like a valid PSCI call then
-         * that overrides the architecturally mandated SMC behaviour.
+    if (!arm_feature(env, ARM_FEATURE_EL3) &&
+        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+        /* If we have no EL3 then SMC always UNDEFs and can't be
+         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
+         * firmware within QEMU, and we want an EL2 guest to be able
+         * to forbid its EL1 from making PSCI calls into QEMU's
+         * "firmware" via HCR.TSC, so for these purposes treat
+         * PSCI-via-SMC as implying an EL3.
          */
-        return;
-    }
-
-    if (!arm_feature(env, ARM_FEATURE_EL3)) {
-        /* If we have no EL3 then SMC always UNDEFs */
         undef = true;
     } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
-        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
+        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
+         * We also want an EL2 guest to be able to forbid its EL1 from
+         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
+         */
         raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
     }
 
-    if (undef) {
+    /* If PSCI is enabled and this looks like a valid PSCI call then
+     * suppress the UNDEF -- we'll catch the SMC exception and
+     * implement the PSCI call behaviour there.
+     */
+    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
         raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                         exception_target_el(env));
     }
 }
-- 
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-28-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                     true, rn != 31, size);
-        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
+        /* TODO: ARMv8.4-LSE SCTLR.nAA */
+        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         return;
 
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         }
         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                     false, rn != 31, size);
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
-                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
+        /* TODO: ARMv8.4-LSE SCTLR.nAA */
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
+                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
         return;
 
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
     int size = extract32(insn, 30, 2);
     TCGv_i64 clean_addr, dirty_addr;
     bool is_store = false;
-    bool is_signed = false;
     bool extend = false;
     bool iss_sf;
+    MemOp mop;
 
     if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
         unallocated_encoding(s);
         return;
     }
 
+    /* TODO: ARMv8.4-LSE SCTLR.nAA */
+    mop = size | MO_ALIGN;
+
     switch (opc) {
     case 0: /* STLURB */
         is_store = true;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
-        is_signed = true;
+        mop |= MO_SIGN;
         break;
     case 3: /* LDAPURS* 32-bit variant */
         if (size > 1) {
             unallocated_encoding(s);
             return;
         }
-        is_signed = true;
+        mop |= MO_SIGN;
         extend = true; /* zero-extend 32->64 after signed load */
         break;
     default:
         g_assert_not_reached();
     }
 
-    iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
 
     if (rn == 31) {
         gen_check_sp_alignment(s);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
     if (is_store) {
         /* Store-Release semantics */
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
+        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
     } else {
         /*
          * Load-AcquirePC semantics; we implement as the slightly more
          * restrictive Load-Acquire.
          */
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                   extend, true, rt, iss_sf, true);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
     }
 }
-- 
2.20.1
diff view generated by jsdifflib
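These hunks rely on the MemOp convention that an all-ones alignment field means "naturally aligned for the access size", while values like MO_ALIGN_16 request an explicit alignment. A simplified sketch of that decoding, loosely modeled on QEMU's get_alignment_bits() but with stand-in constants:

    #include <stdio.h>

    enum {
        MO_SIZE = 3,
        MO_ASHIFT = 4,
        MO_AMASK = 7 << MO_ASHIFT,
        MO_ALIGN = MO_AMASK,           /* "natural" alignment marker */
        MO_ALIGN_16 = 4 << MO_ASHIFT,  /* explicit 16-byte alignment */
    };

    static unsigned alignment_bytes(int memop)
    {
        int a = memop & MO_AMASK;

        if (a == MO_ALIGN) {
            /* natural: alignment follows the access size */
            a = (memop & MO_SIZE) << MO_ASHIFT;
        }
        return 1u << (a >> MO_ASHIFT);
    }

    int main(void)
    {
        printf("LDAR word: %u-byte aligned\n", alignment_bytes(2 | MO_ALIGN));
        printf("16-byte op: %u-byte aligned\n", alignment_bytes(3 | MO_ALIGN_16));
        return 0;
    }

The repeated "TODO: ARMv8.4-LSE SCTLR.nAA" comments flag the one architectural knob (relaxed alignment for these insns) that the series deliberately leaves unimplemented.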
From: Thomas Huth <thuth@redhat.com>

The device uses serial_hds in its realize function and thus can't be
used twice. Apart from that, the comma in its name makes it quite hard
to use for the user anyway, since a comma is normally used to separate
the device name from its properties when using the "-device" parameter
or the "device_add" HMP command.

Signed-off-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
Message-id: 1506441116-16627-1-git-send-email-thuth@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/xlnx-zynqmp.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_class_init(ObjectClass *oc, void *data)
 
     dc->props = xlnx_zynqmp_props;
     dc->realize = xlnx_zynqmp_realize;
+    /* Reason: Uses serial_hds in realize function, thus can't be used twice */
+    dc->user_creatable = false;
 }
 
 static const TypeInfo xlnx_zynqmp_type_info = {
-- 
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-29-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
 
 /* Store from vector register to memory */
 static void do_vec_st(DisasContext *s, int srcidx, int element,
-                      TCGv_i64 tcg_addr, int size, MemOp endian)
+                      TCGv_i64 tcg_addr, MemOp mop)
 {
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
-    read_vec_element(s, tcg_tmp, srcidx, element, size);
-    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
+    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
+    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
 
     tcg_temp_free_i64(tcg_tmp);
 }
 
 /* Load from memory to vector register */
 static void do_vec_ld(DisasContext *s, int destidx, int element,
-                      TCGv_i64 tcg_addr, int size, MemOp endian)
+                      TCGv_i64 tcg_addr, MemOp mop)
 {
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
-    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
-    write_vec_element(s, tcg_tmp, destidx, element, size);
+    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
+    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
 
     tcg_temp_free_i64(tcg_tmp);
 }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
             for (xs = 0; xs < selem; xs++) {
                 int tt = (rt + r + xs) % 32;
                 if (is_store) {
-                    do_vec_st(s, tt, e, clean_addr, size, endian);
+                    do_vec_st(s, tt, e, clean_addr, size | endian);
                 } else {
-                    do_vec_ld(s, tt, e, clean_addr, size, endian);
+                    do_vec_ld(s, tt, e, clean_addr, size | endian);
                 }
                 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
             }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         } else {
             /* Load/store one element per register */
             if (is_load) {
-                do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
+                do_vec_ld(s, rt, index, clean_addr, scale | s->be_data);
             } else {
-                do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
+                do_vec_st(s, rt, index, clean_addr, scale | s->be_data);
             }
         }
         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
-- 
2.20.1
diff view generated by jsdifflib
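The call sites above switch from passing separate (size, endian) arguments to OR-ing the two fields into one MemOp. OR and the older "s->be_data + size" idiom agree only because the bitfields are disjoint; a tiny standalone demonstration with stand-in constants:

    #include <stdio.h>

    enum {
        MO_16 = 1, MO_64 = 3,
        MO_SIZE = 3,
        MO_BSWAP = 8,   /* stand-in endianness-swap flag */
    };

    int main(void)
    {
        int be_data = MO_BSWAP;   /* pretend the guest is cross-endian */

        /* Disjoint bits: + and | give the same combined value... */
        printf("%d %d\n", be_data + MO_64, be_data | MO_64);   /* 11 11 */

        /* ...but only | stays correct if the flag is already set. */
        int mop = be_data | MO_16;
        printf("%d %d\n", mop + MO_BSWAP, mop | MO_BSWAP);     /* 17 9 */
        return 0;
    }

Moving to a single OR-combined argument removes the chance of double-adding a field once alignment bits join the mix in the later patches.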
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-30-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     bool is_postidx = extract32(insn, 23, 1);
     bool is_q = extract32(insn, 30, 1);
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
-    MemOp endian = s->be_data;
+    MemOp endian, align, mop;
 
     int total;    /* total bytes */
     int elements; /* elements per vector */
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     }
 
     /* For our purposes, bytes are always little-endian. */
+    endian = s->be_data;
     if (size == 0) {
         endian = MO_LE;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
      * Consecutive little-endian elements from a single register
      * can be promoted to a larger little-endian operation.
      */
+    align = MO_ALIGN;
     if (selem == 1 && endian == MO_LE) {
+        align = pow2_align(size);
         size = 3;
     }
-    elements = (is_q ? 16 : 8) >> size;
+    if (!s->align_mem) {
+        align = 0;
+    }
+    mop = endian | size | align;
 
+    elements = (is_q ? 16 : 8) >> size;
     tcg_ebytes = tcg_const_i64(1 << size);
     for (r = 0; r < rpt; r++) {
         int e;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
             for (xs = 0; xs < selem; xs++) {
                 int tt = (rt + r + xs) % 32;
                 if (is_store) {
-                    do_vec_st(s, tt, e, clean_addr, size | endian);
+                    do_vec_st(s, tt, e, clean_addr, mop);
                 } else {
-                    do_vec_ld(s, tt, e, clean_addr, size | endian);
+                    do_vec_ld(s, tt, e, clean_addr, mop);
                 }
                 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
             }
-- 
2.20.1
diff view generated by jsdifflib
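The subtle line in that hunk is that align is captured from the element size before size is promoted to MO_64, so the merged 8-byte access still only demands element-sized alignment. Sketched standalone, using the same stand-in encoding as the earlier examples:

    #include <stdio.h>

    enum {
        MO_16 = 1, MO_64 = 3,
        MO_ASHIFT = 4,
    };

    static int pow2_align(unsigned i) { return i << MO_ASHIFT; }

    int main(void)
    {
        int size = MO_16;                /* LD1 of 16-bit elements */
        int align = pow2_align(size);    /* remember: 2-byte alignment */

        size = MO_64;                    /* promote to one 8-byte access */

        printf("access %d bytes, require %d-byte alignment\n",
               1 << size, 1 << (align >> MO_ASHIFT));
        return 0;
    }

Requiring 8-byte alignment after the promotion would wrongly fault guests doing legal 2-byte-aligned LD1 of halfwords.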
When we added support for the new SHCSR bits in v8M in commit
437d59c17e9 the code to support writing to the new HARDFAULTPENDED
bit was accidentally only added for non-secure writes; the
secure banked version of the bit should also be writable.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1506092407-26985-21-git-send-email-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
             s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
             s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
                 (value & (1 << 18)) != 0;
+            s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
             /* SecureFault not banked, but RAZ/WI to NS */
             s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
             s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
-- 
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-31-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     int index = is_q << 3 | S << 2 | size;
     int xs, total;
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
+    MemOp mop;
 
     if (extract32(insn, 31, 1)) {
         unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
 
     clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                 total);
+    mop = finalize_memop(s, scale);
 
     tcg_ebytes = tcg_const_i64(1 << scale);
     for (xs = 0; xs < selem; xs++) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
             /* Load and replicate to all elements */
             TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
-            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr,
-                                get_mem_index(s), s->be_data + scale);
+            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
             tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                  (is_q + 1) * 8, vec_full_reg_size(s),
                                  tcg_tmp);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         } else {
             /* Load/store one element per register */
             if (is_load) {
-                do_vec_ld(s, rt, index, clean_addr, scale | s->be_data);
+                do_vec_ld(s, rt, index, clean_addr, mop);
             } else {
-                do_vec_st(s, rt, index, clean_addr, scale | s->be_data);
+                do_vec_st(s, rt, index, clean_addr, mop);
             }
         }
         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
-- 
2.20.1
diff view generated by jsdifflib
Reset for devices does not include an automatic clear of the
device state (unlike CPU state, where most of the state
structure is cleared to zero). Add some missing initialization
of NVIC state that meant that the device was left in the wrong
state if the guest did a warm reset.

(In particular, since we were resetting the computed state like
s->exception_prio but not all the state it was computed
from like s->vectors[x].active, the NVIC wound up in an
inconsistent state that could later trigger assertion failures.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1506092407-26985-2-git-send-email-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev)
     int resetprio;
     NVICState *s = NVIC(dev);
 
+    memset(s->vectors, 0, sizeof(s->vectors));
+    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
+    s->prigroup[M_REG_NS] = 0;
+    s->prigroup[M_REG_S] = 0;
+
     s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
     /* MEM, BUS, and USAGE are enabled through
      * the System Handler Control register
-- 
2.7.4

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-32-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-sve.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
     clean_addr = gen_mte_check1(s, temp, false, true, msz);
 
     tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
-                        s->be_data | dtype_mop[a->dtype]);
+                        finalize_memop(s, dtype_mop[a->dtype]));
 
     /* Broadcast to *all* elements. */
     tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
-- 
2.20.1
diff view generated by jsdifflib
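The failure mode the NVIC reset fix describes is a generic one: derived state cleared without the inputs it is derived from. A standalone illustration of the inconsistency and the fix (illustrative structure only, not the NVIC code):

    #include <stdio.h>
    #include <string.h>

    struct dev {
        int active[8];       /* source state */
        int computed_prio;   /* derived from active[] */
    };

    static int recompute(struct dev *d)
    {
        for (int i = 0; i < 8; i++) {
            if (d->active[i]) {
                return i;    /* highest-priority active exception */
            }
        }
        return 0x100;        /* "no exception" sentinel, NVIC_NOEXC_PRIO-style */
    }

    static void bad_reset(struct dev *d)
    {
        d->computed_prio = 0x100;   /* derived state cleared... */
        /* ...but active[] left alone: d is now inconsistent */
    }

    static void good_reset(struct dev *d)
    {
        memset(d->active, 0, sizeof(d->active));
        d->computed_prio = 0x100;   /* now agrees with recompute() */
    }

    int main(void)
    {
        struct dev d = { .active = { [3] = 1 } };

        bad_reset(&d);
        printf("bad:  %d vs %d\n", d.computed_prio, recompute(&d));
        good_reset(&d);
        printf("good: %d vs %d\n", d.computed_prio, recompute(&d));
        return 0;
    }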
New patch
From: Cornelia Huck <cohuck@redhat.com>

Add 6.1 machine types for arm/i440fx/q35/s390x/spapr.

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Acked-by: Greg Kurz <groug@kaod.org>
Message-id: 20210331111900.118274-1-cohuck@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/boards.h        |  3 +++
 include/hw/i386/pc.h       |  3 +++
 hw/arm/virt.c              |  7 ++++++-
 hw/core/machine.c          |  3 +++
 hw/i386/pc.c               |  3 +++
 hw/i386/pc_piix.c          | 14 +++++++++++++-
 hw/i386/pc_q35.c           | 13 ++++++++++++-
 hw/ppc/spapr.c             | 17 ++++++++++++++---
 hw/s390x/s390-virtio-ccw.c | 14 +++++++++++++-
 9 files changed, 70 insertions(+), 7 deletions(-)

diff --git a/include/hw/boards.h b/include/hw/boards.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -XXX,XX +XXX,XX @@ struct MachineState {
     } \
     type_init(machine_initfn##_register_types)
 
+extern GlobalProperty hw_compat_6_0[];
+extern const size_t hw_compat_6_0_len;
+
 extern GlobalProperty hw_compat_5_2[];
 extern const size_t hw_compat_5_2_len;
 
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -XXX,XX +XXX,XX @@ bool pc_system_ovmf_table_find(const char *entry, uint8_t **data,
 void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
                        const CPUArchIdList *apic_ids, GArray *entry);
 
+extern GlobalProperty pc_compat_6_0[];
+extern const size_t pc_compat_6_0_len;
+
 extern GlobalProperty pc_compat_5_2[];
 extern const size_t pc_compat_5_2_len;
 
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@ static void machvirt_machine_init(void)
 }
 type_init(machvirt_machine_init);
 
+static void virt_machine_6_1_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(6, 1)
+
 static void virt_machine_6_0_options(MachineClass *mc)
 {
 }
-DEFINE_VIRT_MACHINE_AS_LATEST(6, 0)
+DEFINE_VIRT_MACHINE(6, 0)
 
 static void virt_machine_5_2_options(MachineClass *mc)
 {
diff --git a/hw/core/machine.c b/hw/core/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/virtio/virtio.h"
 #include "hw/virtio/virtio-pci.h"
 
+GlobalProperty hw_compat_6_0[] = {};
+const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);
+
 GlobalProperty hw_compat_5_2[] = {
     { "ICH9-LPC", "smm-compat", "on"},
     { "PIIX4_PM", "smm-compat", "on"},
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -XXX,XX +XXX,XX @@
 #include "trace.h"
 #include CONFIG_DEVICES
 
+GlobalProperty pc_compat_6_0[] = {};
+const size_t pc_compat_6_0_len = G_N_ELEMENTS(pc_compat_6_0);
+
 GlobalProperty pc_compat_5_2[] = {
     { "ICH9-LPC", "x-smi-cpu-hotunplug", "off" },
 };
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -XXX,XX +XXX,XX @@ static void pc_i440fx_machine_options(MachineClass *m)
     machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
 }
 
-static void pc_i440fx_6_0_machine_options(MachineClass *m)
+static void pc_i440fx_6_1_machine_options(MachineClass *m)
 {
     PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
     pc_i440fx_machine_options(m);
@@ -XXX,XX +XXX,XX @@ static void pc_i440fx_6_0_machine_options(MachineClass *m)
     pcmc->default_cpu_version = 1;
 }
 
+DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL,
+                      pc_i440fx_6_1_machine_options);
+
+static void pc_i440fx_6_0_machine_options(MachineClass *m)
+{
+    pc_i440fx_6_1_machine_options(m);
+    m->alias = NULL;
+    m->is_default = false;
+    compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+    compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
+}
+
 DEFINE_I440FX_MACHINE(v6_0, "pc-i440fx-6.0", NULL,
                       pc_i440fx_6_0_machine_options);
 
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -XXX,XX +XXX,XX @@ static void pc_q35_machine_options(MachineClass *m)
     m->max_cpus = 288;
 }
 
-static void pc_q35_6_0_machine_options(MachineClass *m)
+static void pc_q35_6_1_machine_options(MachineClass *m)
 {
     PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
     pc_q35_machine_options(m);
@@ -XXX,XX +XXX,XX @@ static void pc_q35_6_0_machine_options(MachineClass *m)
     pcmc->default_cpu_version = 1;
 }
 
+DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL,
+                   pc_q35_6_1_machine_options);
+
+static void pc_q35_6_0_machine_options(MachineClass *m)
+{
+    pc_q35_6_1_machine_options(m);
+    m->alias = NULL;
+    compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+    compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
+}
+
 DEFINE_Q35_MACHINE(v6_0, "pc-q35-6.0", NULL,
                    pc_q35_6_0_machine_options);
 
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -XXX,XX +XXX,XX @@ static void spapr_machine_latest_class_options(MachineClass *mc)
     type_init(spapr_machine_register_##suffix)
 
 /*
- * pseries-6.0
+ * pseries-6.1
 */
-static void spapr_machine_6_0_class_options(MachineClass *mc)
+static void spapr_machine_6_1_class_options(MachineClass *mc)
 {
     /* Defaults for the latest behaviour inherited from the base class */
 }
 
-DEFINE_SPAPR_MACHINE(6_0, "6.0", true);
+DEFINE_SPAPR_MACHINE(6_1, "6.1", true);
+
+/*
+ * pseries-6.0
+ */
+static void spapr_machine_6_0_class_options(MachineClass *mc)
+{
+    spapr_machine_6_1_class_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+}
+
+DEFINE_SPAPR_MACHINE(6_0, "6.0", false);
 
 /*
  * pseries-5.2
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -XXX,XX +XXX,XX @@ bool css_migration_enabled(void)
     } \
     type_init(ccw_machine_register_##suffix)
 
+static void ccw_machine_6_1_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_6_1_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(6_1, "6.1", true);
+
 static void ccw_machine_6_0_instance_options(MachineState *machine)
 {
+    ccw_machine_6_1_instance_options(machine);
 }
 
 static void ccw_machine_6_0_class_options(MachineClass *mc)
 {
+    ccw_machine_6_1_class_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
 }
-DEFINE_CCW_MACHINE(6_0, "6.0", true);
+DEFINE_CCW_MACHINE(6_0, "6.0", false);
 
 static void ccw_machine_5_2_instance_options(MachineState *machine)
 {
-- 
2.20.1
diff view generated by jsdifflib
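The machine-type boilerplate above follows the usual chaining pattern: each versioned options function calls the next-newer version's options first and then appends that release's compat properties, so an older machine type accumulates every behaviour tweak newer than itself. Sketched with illustrative names (not the QEMU API):

    #include <stdio.h>

    static void add_compat_props(const char *name)
    {
        printf("applied %s\n", name);
    }

    static void machine_6_1_options(void)
    {
        /* latest release: nothing to compensate for */
    }

    static void machine_6_0_options(void)
    {
        machine_6_1_options();
        add_compat_props("hw_compat_6_0");
    }

    static void machine_5_2_options(void)
    {
        machine_6_0_options();
        add_compat_props("hw_compat_5_2");
    }

    int main(void)
    {
        puts("-machine virt-5.2:");
        machine_5_2_options();   /* picks up both 6.0 and 5.2 compat props */
        return 0;
    }

This is why hw_compat_6_0 starts out empty here: it only gains entries once a post-6.0 change needs to be masked for the older machine types, as the next patch demonstrates.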
On exception return for v8M, the SPSEL bit in the EXC_RETURN magic
value should be restored to the SPSEL bit in the CONTROL register
bank specified by the EXC_RETURN.ES bit.

Add write_v7m_control_spsel_for_secstate() which behaves like
write_v7m_control_spsel() but allows the caller to specify which
CONTROL bank to use, reimplement write_v7m_control_spsel() in
terms of it, and use it in exception return.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1506092407-26985-6-git-send-email-peter.maydell@linaro.org
---
 target/arm/helper.c | 40 +++++++++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 13 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static bool v7m_using_psp(CPUARMState *env)
         env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
 }
 
-/* Write to v7M CONTROL.SPSEL bit. This may change the current
- * stack pointer between Main and Process stack pointers.
+/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
+ * This may change the current stack pointer between Main and Process
+ * stack pointers if it is done for the CONTROL register for the current
+ * security state.
  */
-static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
+static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
+                                                 bool new_spsel,
+                                                 bool secstate)
 {
-    uint32_t tmp;
-    bool new_is_psp, old_is_psp = v7m_using_psp(env);
+    bool old_is_psp = v7m_using_psp(env);
 
-    env->v7m.control[env->v7m.secure] =
-        deposit32(env->v7m.control[env->v7m.secure],
+    env->v7m.control[secstate] =
+        deposit32(env->v7m.control[secstate],
                   R_V7M_CONTROL_SPSEL_SHIFT,
                   R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
 
-    new_is_psp = v7m_using_psp(env);
+    if (secstate == env->v7m.secure) {
+        bool new_is_psp = v7m_using_psp(env);
+        uint32_t tmp;
 
-    if (old_is_psp != new_is_psp) {
-        tmp = env->v7m.other_sp;
-        env->v7m.other_sp = env->regs[13];
-        env->regs[13] = tmp;
+        if (old_is_psp != new_is_psp) {
+            tmp = env->v7m.other_sp;
+            env->v7m.other_sp = env->regs[13];
+            env->regs[13] = tmp;
+        }
     }
 }
 
+/* Write to v7M CONTROL.SPSEL bit. This may change the current
+ * stack pointer between Main and Process stack pointers.
+ */
+static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
+{
+    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
+}
+
 void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
 {
     /* Write a new value to v7m.exception, thus transitioning into or out
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
      * Handler mode (and will be until we write the new XPSR.Interrupt
      * field) this does not switch around the current stack pointer.
      */
-    write_v7m_control_spsel(env, return_to_sp_process);
+    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
 
     switch_v7m_security_state(env, return_to_secure);
 
-- 
2.7.4

Currently the gpex PCI controller implements no special behaviour for
guest accesses to areas of the PIO and MMIO where it has not mapped
any PCI devices, which means that for Arm you end up with a CPU
exception due to a data abort.

Most host OSes expect "like an x86 PC" behaviour, where bad accesses
like this return -1 for reads and ignore writes. In the interests of
not being surprising, make host CPU accesses to these windows behave
as -1/discard where there's no mapped PCI device.

The old behaviour generally didn't cause any problems, because
almost always the guest OS will map the PCI devices and then only
access where it has mapped them. One corner case where you will see
this kind of access is if Linux attempts to probe legacy ISA
devices via a PIO window access. So far the only case where we've
seen this has been via the syzkaller fuzzer.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Message-id: 20210325163315.27724-1-peter.maydell@linaro.org
Fixes: https://bugs.launchpad.net/qemu/+bug/1918917
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/pci-host/gpex.h |  4 +++
 hw/core/machine.c          |  4 ++-
 hw/pci-host/gpex.c         | 56 ++++++++++++++++++++++++++++++++++++--
 3 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/pci-host/gpex.h
+++ b/include/hw/pci-host/gpex.h
@@ -XXX,XX +XXX,XX @@ struct GPEXHost {
 
     MemoryRegion io_ioport;
     MemoryRegion io_mmio;
+    MemoryRegion io_ioport_window;
+    MemoryRegion io_mmio_window;
     qemu_irq irq[GPEX_NUM_IRQS];
     int irq_num[GPEX_NUM_IRQS];
+
+    bool allow_unmapped_accesses;
 };
 
 struct GPEXConfig {
diff --git a/hw/core/machine.c b/hw/core/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/virtio/virtio.h"
 #include "hw/virtio/virtio-pci.h"
 
-GlobalProperty hw_compat_6_0[] = {};
+GlobalProperty hw_compat_6_0[] = {
+    { "gpex-pcihost", "allow-unmapped-accesses", "false" },
+};
 const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);
 
 GlobalProperty hw_compat_5_2[] = {
diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/pci-host/gpex.c
+++ b/hw/pci-host/gpex.c
@@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp)
     int i;
 
     pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX);
+    sysbus_init_mmio(sbd, &pex->mmio);
+
+    /*
+     * Note that the MemoryRegions io_mmio and io_ioport that we pass
+     * to pci_register_root_bus() are not the same as the
+     * MemoryRegions io_mmio_window and io_ioport_window that we
+     * expose as SysBus MRs. The difference is in the behaviour of
+     * accesses to addresses where no PCI device has been mapped.
+     *
+     * io_mmio and io_ioport are the underlying PCI view of the PCI
+     * address space, and when a PCI device does a bus master access
+     * to a bad address this is reported back to it as a transaction
+     * failure.
+     *
+     * io_mmio_window and io_ioport_window implement "unmapped
+     * addresses read as -1 and ignore writes"; this is traditional
+     * x86 PC behaviour, which is not mandated by the PCI spec proper
+     * but expected by much PCI-using guest software, including Linux.
+     *
+     * In the interests of not being unnecessarily surprising, we
+     * implement it in the gpex PCI host controller, by providing the
+     * _window MRs, which are containers with io ops that implement
+     * the 'background' behaviour and which hold the real PCI MRs as
+     * subregions.
+     */
     memory_region_init(&s->io_mmio, OBJECT(s), "gpex_mmio", UINT64_MAX);
     memory_region_init(&s->io_ioport, OBJECT(s), "gpex_ioport", 64 * 1024);
 
-    sysbus_init_mmio(sbd, &pex->mmio);
-    sysbus_init_mmio(sbd, &s->io_mmio);
-    sysbus_init_mmio(sbd, &s->io_ioport);
+    if (s->allow_unmapped_accesses) {
+        memory_region_init_io(&s->io_mmio_window, OBJECT(s),
+                              &unassigned_io_ops, OBJECT(s),
+                              "gpex_mmio_window", UINT64_MAX);
+        memory_region_init_io(&s->io_ioport_window, OBJECT(s),
+                              &unassigned_io_ops, OBJECT(s),
+                              "gpex_ioport_window", 64 * 1024);
+
+        memory_region_add_subregion(&s->io_mmio_window, 0, &s->io_mmio);
+        memory_region_add_subregion(&s->io_ioport_window, 0, &s->io_ioport);
+        sysbus_init_mmio(sbd, &s->io_mmio_window);
+        sysbus_init_mmio(sbd, &s->io_ioport_window);
+    } else {
+        sysbus_init_mmio(sbd, &s->io_mmio);
+        sysbus_init_mmio(sbd, &s->io_ioport);
+    }
+
     for (i = 0; i < GPEX_NUM_IRQS; i++) {
         sysbus_init_irq(sbd, &s->irq[i]);
         s->irq_num[i] = -1;
@@ -XXX,XX +XXX,XX @@ static const char *gpex_host_root_bus_path(PCIHostState *host_bridge,
     return "0000:00";
 }
 
+static Property gpex_host_properties[] = {
+    /*
+     * Permit CPU accesses to unmapped areas of the PIO and MMIO windows
+     * (discarding writes and returning -1 for reads) rather than aborting.
+     */
+    DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost,
+                     allow_unmapped_accesses, true),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void gpex_host_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void gpex_host_class_init(ObjectClass *klass, void *data)
     dc->realize = gpex_host_realize;
     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
     dc->fw_name = "pci";
+    device_class_set_props(dc, gpex_host_properties);
 }
 
 static void gpex_host_initfn(Object *obj)
-- 
2.20.1
diff view generated by jsdifflib
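The net effect of the _window containers is a "background" region whose ops read as -1 and discard writes, which is roughly what QEMU's unassigned_io_ops provides. A standalone sketch of that behaviour (a model of the semantics, not the actual implementation):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t window_read(uint64_t addr, unsigned size)
    {
        (void)addr;
        /* -1 truncated to the access size, e.g. 0xffffffff for 4 bytes */
        return (size == 8) ? UINT64_MAX : (1ull << (size * 8)) - 1;
    }

    static void window_write(uint64_t addr, uint64_t val, unsigned size)
    {
        (void)addr; (void)val; (void)size;   /* discarded, no fault */
    }

    int main(void)
    {
        printf("read4 -> 0x%llx\n",
               (unsigned long long)window_read(0x1000, 4));
        window_write(0x1000, 0x55, 4);       /* no effect, no data abort */
        printf("read8 -> 0x%llx\n",
               (unsigned long long)window_read(0x2000, 8));
        return 0;
    }

Because a mapped PCI BAR is a subregion of the window, it takes priority for dispatch; only accesses that miss every device fall through to the background ops, and the hw_compat_6_0 entry keeps 6.0 machine types on the old faulting behaviour.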