First arm pullreq for 6.1 cycle. The big stuff here is RTH's alignment series.

thanks
-- PMM

The following changes since commit ccdf06c1db192152ac70a1dd974c624f566cb7d4:

  Open 6.1 development tree (2021-04-30 11:15:40 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210430

for you to fetch changes up to a6091108aa44e9017af4ca13c43f55a629e3744c:

  hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows (2021-04-30 11:16:52 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows
 * hw: add compat machines for 6.1
 * Fault misaligned accesses where the architecture requires it
 * Fix some corner cases of MTE faults (notably with misaligned accesses)
 * Make Thumb store insns UNDEF for Rn==1111
 * hw/arm/smmuv3: Support 16K translation granule

----------------------------------------------------------------
Cornelia Huck (1):
      hw: add compat machines for 6.1

Kunkun Jiang (1):
      hw/arm/smmuv3: Support 16K translation granule

Peter Maydell (2):
      target/arm: Make Thumb store insns UNDEF for Rn==1111
      hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows

Richard Henderson (39):
      target/arm: Fix mte_checkN
      target/arm: Split out mte_probe_int
      target/arm: Fix unaligned checks for mte_check1, mte_probe1
      test/tcg/aarch64: Add mte-5
      target/arm: Replace MTEDESC ESIZE+TSIZE with SIZEM1
      target/arm: Merge mte_check1, mte_checkN
      target/arm: Rename mte_probe1 to mte_probe
      target/arm: Simplify sve mte checking
      target/arm: Remove log2_esize parameter to gen_mte_checkN
      target/arm: Fix decode of align in VLDST_single
      target/arm: Rename TBFLAG_A32, SCTLR_B
      target/arm: Rename TBFLAG_ANY, PSTATE_SS
      target/arm: Add wrapper macros for accessing tbflags
      target/arm: Introduce CPUARMTBFlags
      target/arm: Move mode specific TB flags to tb->cs_base
      target/arm: Move TBFLAG_AM32 bits to the top
      target/arm: Move TBFLAG_ANY bits to the bottom
      target/arm: Add ALIGN_MEM to TBFLAG_ANY
      target/arm: Adjust gen_aa32_{ld, st}_i32 for align+endianness
      target/arm: Merge gen_aa32_frob64 into gen_aa32_ld_i64
      target/arm: Fix SCTLR_B test for TCGv_i64 load/store
      target/arm: Adjust gen_aa32_{ld, st}_i64 for align+endianness
      target/arm: Enforce word alignment for LDRD/STRD
      target/arm: Enforce alignment for LDA/LDAH/STL/STLH
      target/arm: Enforce alignment for LDM/STM
      target/arm: Enforce alignment for RFE
      target/arm: Enforce alignment for SRS
      target/arm: Enforce alignment for VLDM/VSTM
      target/arm: Enforce alignment for VLDR/VSTR
      target/arm: Enforce alignment for VLDn (all lanes)
      target/arm: Enforce alignment for VLDn/VSTn (multiple)
      target/arm: Enforce alignment for VLDn/VSTn (single)
      target/arm: Use finalize_memop for aa64 gpr load/store
      target/arm: Use finalize_memop for aa64 fpr load/store
      target/arm: Enforce alignment for aa64 load-acq/store-rel
      target/arm: Use MemOp for size + endian in aa64 vector ld/st
      target/arm: Enforce alignment for aa64 vector LDn/STn (multiple)
      target/arm: Enforce alignment for aa64 vector LDn/STn (single)
      target/arm: Enforce alignment for sve LD1R

 include/hw/boards.h               |   3 +
 include/hw/i386/pc.h              |   3 +
 include/hw/pci-host/gpex.h        |   4 +
 target/arm/cpu.h                  | 105 ++++++++++-----
 target/arm/helper-a64.h           |   3 +-
 target/arm/internals.h            |  11 +-
 target/arm/translate-a64.h        |   2 +-
 target/arm/translate.h            |  38 ++++++
 target/arm/neon-ls.decode         |   4 +-
 hw/arm/smmuv3.c                   |   6 +-
 hw/arm/virt.c                     |   7 +-
 hw/core/machine.c                 |   5 +
 hw/i386/pc.c                      |   3 +
 hw/i386/pc_piix.c                 |  14 +-
 hw/i386/pc_q35.c                  |  13 +-
 hw/pci-host/gpex.c                |  56 +++++++-
 hw/ppc/spapr.c                    |  17 ++-
 hw/s390x/s390-virtio-ccw.c        |  14 +-
 target/arm/helper-a64.c           |   2 +-
 target/arm/helper.c               | 162 ++++++++++++----------
 target/arm/mte_helper.c           | 185 ++++++++++---------------
 target/arm/sve_helper.c           | 100 +++++---------
 target/arm/translate-a64.c        | 236 ++++++++++++++++----------------
 target/arm/translate-sve.c        |  11 +-
 target/arm/translate.c            | 274 ++++++++++++++++++++++----------------
 tests/tcg/aarch64/mte-5.c         |  44 ++++++
 target/arm/translate-neon.c.inc   | 117 ++++++++++++----
 target/arm/translate-vfp.c.inc    |  20 +--
 tests/tcg/aarch64/Makefile.target |   2 +-
 29 files changed, 878 insertions(+), 583 deletions(-)
 create mode 100644 tests/tcg/aarch64/mte-5.c

From: Kunkun Jiang <jiangkunkun@huawei.com>

The driver can query some bits in SMMUv3 IDR5 to learn which
translation granules are supported. Arm recommends that SMMUv3
implementations support at least 4K and 64K granules. But in
the vSMMUv3, there seems to be no reason not to support the 16K
translation granule. In addition, if 16K is not supported,
vSVA will fail to be enabled in the future for 16K guest
kernels. So it'd be better to support it.

Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/smmuv3.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -XXX,XX +XXX,XX @@ static void smmuv3_init_regs(SMMUv3State *s)
     s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
     s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);

-    /* 4K and 64K granule support */
+    /* 4K, 16K and 64K granule support */
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
+    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

@@ -XXX,XX +XXX,XX @@ static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)

         tg = CD_TG(cd, i);
         tt->granule_sz = tg2granule(tg, i);
-        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
+        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
+             tt->granule_sz != 16) || CD_ENDI(cd)) {
             goto bad_cd;
         }

--
2.20.1

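A note on the numbers in decode_cd() above: tt->granule_sz is log2 of the
translation granule in bytes, so the newly accepted value 14 is the 16K
granule (12 is 4K, 16 is 64K). A minimal standalone sketch of that
relationship; granule_bytes() is an invented helper for illustration, not
QEMU's tg2granule:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative only: granule_sz is log2 of the granule size in bytes. */
    static uint64_t granule_bytes(unsigned granule_sz)
    {
        return UINT64_C(1) << granule_sz;
    }

    int main(void)
    {
        assert(granule_bytes(12) == 4 * 1024);    /* 4K */
        assert(granule_bytes(14) == 16 * 1024);   /* 16K */
        assert(granule_bytes(16) == 64 * 1024);   /* 64K */
        return 0;
    }
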
The Arm ARM specifies that for Thumb encodings of the various plain
store insns, if the Rn field is 1111 then we must UNDEF. This is
different from the Arm encodings, where this case is either
UNPREDICTABLE or has well-defined behaviour. The exclusive stores,
store-release and STRD do not have this UNDEF case for any encoding.

Enforce the UNDEF for this case in the Thumb plain store insns.

Fixes: https://bugs.launchpad.net/qemu/+bug/1922887
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210408162402.5822-1-peter.maydell@linaro.org
---
 target/arm/translate.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
     TCGv_i32 addr, tmp;

+    /*
+     * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
+     * is either UNPREDICTABLE or has defined behaviour
+     */
+    if (s->thumb && a->rn == 15) {
+        return false;
+    }
+
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
@@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
     TCGv_i32 addr, tmp;

+    /*
+     * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
+     * is either UNPREDICTABLE or has defined behaviour
+     */
+    if (s->thumb && a->rn == 15) {
+        return false;
+    }
+
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
--
2.20.1

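The mechanics here rest on the decodetree convention that a trans_*
handler returning false rejects the encoding, after which the translator
takes its usual illegal-instruction path and the guest sees an UNDEF
exception. A toy model of that convention; every name below is invented
for illustration and none of this is QEMU's actual decoder:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy handler: returning false rejects the encoding, as trans_* does. */
    static bool trans_store_toy(bool thumb, int rn)
    {
        if (thumb && rn == 15) {
            return false;   /* Thumb store with Rn=1111: reject -> UNDEF */
        }
        /* a real handler would emit the store here */
        return true;
    }

    static void decode_toy(bool thumb, int rn)
    {
        if (!trans_store_toy(thumb, rn)) {
            /* stand-in for the translator's illegal-instruction path */
            puts("UNDEF");
        } else {
            puts("store accepted");
        }
    }

    int main(void)
    {
        decode_toy(true, 15);    /* prints UNDEF */
        decode_toy(false, 15);   /* Arm encoding: unaffected by the patch */
        return 0;
    }
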
From: Richard Henderson <richard.henderson@linaro.org>

We were incorrectly assuming that only the first byte of an MTE access
is checked against the tags. But per the ARM, unaligned accesses are
pre-decomposed into single-byte accesses. So by the time we reach the
actual MTE check in the ARM pseudocode, all accesses are aligned.

Therefore, the first failure is always either the first byte of the
access, or the first byte of the granule.

In addition, some of the arithmetic is off for last-first -> count.
This does not become directly visible until a later patch that passes
single bytes into this function, so ptr == ptr_last.

Buglink: https://bugs.launchpad.net/bugs/1921948
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: tweaked a comment]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 40 ++++++++++++++++++----------------------
 1 file changed, 18 insertions(+), 22 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                     uint64_t ptr, uintptr_t ra)
 {
     int mmu_idx, ptr_tag, bit55;
-    uint64_t ptr_last, ptr_end, prev_page, next_page;
-    uint64_t tag_first, tag_end;
-    uint64_t tag_byte_first, tag_byte_end;
-    uint32_t esize, total, tag_count, tag_size, n, c;
+    uint64_t ptr_last, prev_page, next_page;
+    uint64_t tag_first, tag_last;
+    uint64_t tag_byte_first, tag_byte_last;
+    uint32_t total, tag_count, tag_size, n, c;
     uint8_t *mem1, *mem2;
     MMUAccessType type;

@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,

     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    esize = FIELD_EX32(desc, MTEDESC, ESIZE);
     total = FIELD_EX32(desc, MTEDESC, TSIZE);

-    /* Find the addr of the end of the access, and of the last element. */
-    ptr_end = ptr + total;
-    ptr_last = ptr_end - esize;
+    /* Find the addr of the end of the access */
+    ptr_last = ptr + total - 1;

     /* Round the bounds to the tag granule, and compute the number of tags. */
     tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
-    tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
-    tag_count = (tag_end - tag_first) / TAG_GRANULE;
+    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
+    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

     /* Round the bounds to twice the tag granule, and compute the bytes. */
     tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
-    tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);
+    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

     /* Locate the page boundaries. */
     prev_page = ptr & TARGET_PAGE_MASK;
     next_page = prev_page + TARGET_PAGE_SIZE;

-    if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
+    if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
         /* Memory access stays on one page. */
-        tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
+        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                   MMU_DATA_LOAD, tag_size, ra);
         if (!mem1) {
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                   MMU_DATA_LOAD, tag_size, ra);

-        tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
+        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
         mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
-                                  ptr_end - next_page,
+                                  ptr_last - next_page + 1,
                                   MMU_DATA_LOAD, tag_size, ra);

         /*
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
     }

     /*
-     * If we failed, we know which granule. Compute the element that
-     * is first in that granule, and signal failure on that element.
+     * If we failed, we know which granule. For the first granule, the
+     * failure address is @ptr, the first byte accessed. Otherwise the
+     * failure address is the first byte of the nth granule.
      */
     if (unlikely(n < tag_count)) {
-        uint64_t fail_ofs;
-
-        fail_ofs = tag_first + n * TAG_GRANULE - ptr;
-        fail_ofs = ROUND_UP(fail_ofs, esize);
-        mte_check_fail(env, desc, ptr + fail_ofs, ra);
+        uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
+        mte_check_fail(env, desc, fault, ra);
     }

  done:
--
2.20.1

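The switch from exclusive end bounds to the inclusive ptr_last/tag_last
bounds can be sanity-checked in isolation: a 4-byte access that straddles
a 16-byte granule boundary must count two tags, and a single aligned byte
must count one (the old ALIGN_UP form counted zero tags in that case).
A standalone check mirroring the patched arithmetic; this is illustrative
C, not QEMU code:

    #include <assert.h>
    #include <stdint.h>

    #define TAG_GRANULE 16
    #define ALIGN_DOWN(x, a) ((x) & ~(uint64_t)((a) - 1))

    /* Inclusive-bound tag count, as in the patched mte_checkN. */
    static uint64_t tag_count(uint64_t ptr, uint32_t total)
    {
        uint64_t ptr_last = ptr + total - 1;
        uint64_t tag_first = ALIGN_DOWN(ptr, TAG_GRANULE);
        uint64_t tag_last = ALIGN_DOWN(ptr_last, TAG_GRANULE);
        return (tag_last - tag_first) / TAG_GRANULE + 1;
    }

    int main(void)
    {
        assert(tag_count(0x1000, 16) == 1);  /* exactly one granule */
        assert(tag_count(0x100e, 4) == 2);   /* straddles the boundary at 0x1010 */
        assert(tag_count(0x1010, 1) == 1);   /* aligned single byte: the old
                                                ALIGN_UP form counted zero tags */
        return 0;
    }
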
From: Richard Henderson <richard.henderson@linaro.org>

Split out a helper function from mte_checkN to perform
all of the checking and address manipulation. So far,
just use this in mte_checkN itself.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/mte_helper.c | 52 +++++++++++++++++++++++++++++++----------
 1 file changed, 40 insertions(+), 12 deletions(-)

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
     return n;
 }

-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
-                    uint64_t ptr, uintptr_t ra)
+/**
+ * mte_probe_int() - helper for mte_probe and mte_check
+ * @env: CPU environment
+ * @desc: MTEDESC descriptor
+ * @ptr: virtual address of the base of the access
+ * @fault: return virtual address of the first check failure
+ *
+ * Internal routine for both mte_probe and mte_check.
+ * Return zero on failure, filling in *fault.
+ * Return negative on trivial success for tbi disabled.
+ * Return positive on success with tbi enabled.
+ */
+static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
+                         uintptr_t ra, uint32_t total, uint64_t *fault)
 {
     int mmu_idx, ptr_tag, bit55;
     uint64_t ptr_last, prev_page, next_page;
     uint64_t tag_first, tag_last;
     uint64_t tag_byte_first, tag_byte_last;
-    uint32_t total, tag_count, tag_size, n, c;
+    uint32_t tag_count, tag_size, n, c;
     uint8_t *mem1, *mem2;
     MMUAccessType type;

     bit55 = extract64(ptr, 55, 1);
+    *fault = ptr;

     /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
     if (unlikely(!tbi_check(desc, bit55))) {
-        return ptr;
+        return -1;
     }

     ptr_tag = allocation_tag_from_addr(ptr);

     if (tcma_check(desc, bit55, ptr_tag)) {
-        goto done;
+        return 1;
     }

     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
-    total = FIELD_EX32(desc, MTEDESC, TSIZE);

     /* Find the addr of the end of the access */
     ptr_last = ptr + total - 1;
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
     mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                               MMU_DATA_LOAD, tag_size, ra);
     if (!mem1) {
-        goto done;
+        return 1;
     }
     /* Perform all of the comparisons. */
     n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
         }
         if (n == c) {
             if (!mem2) {
-                goto done;
+                return 1;
             }
             n += checkN(mem2, 0, ptr_tag, tag_count - c);
         }
     }

+    if (likely(n == tag_count)) {
+        return 1;
+    }
+
     /*
      * If we failed, we know which granule. For the first granule, the
      * failure address is @ptr, the first byte accessed. Otherwise the
      * failure address is the first byte of the nth granule.
      */
-    if (unlikely(n < tag_count)) {
-        uint64_t fault = (n == 0 ? ptr : tag_first + n * TAG_GRANULE);
-        mte_check_fail(env, desc, fault, ra);
+    if (n > 0) {
+        *fault = tag_first + n * TAG_GRANULE;
     }
+    return 0;
+}

- done:
+uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
+                    uint64_t ptr, uintptr_t ra)
+{
+    uint64_t fault;
+    uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
+    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
+
+    if (unlikely(ret == 0)) {
+        mte_check_fail(env, desc, fault, ra);
+    } else if (ret < 0) {
+        return ptr;
+    }
     return useronly_clean_ptr(ptr);
 }

--
2.20.1

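The tri-state return keeps fault-address reporting in one place while
letting each caller choose its own failure action. A compact model of
that convention; probe() below is an invented stand-in for mte_probe_int,
not a QEMU function:

    #include <assert.h>
    #include <stdint.h>

    /* Convention from the patch: 0 = check failed (*fault filled in),
     * negative = trivially unchecked, positive = checked and passed. */
    static int probe(uint64_t ptr, int tbi_enabled, int tags_match,
                     uint64_t *fault)
    {
        *fault = ptr;
        if (!tbi_enabled) {
            return -1;
        }
        if (!tags_match) {
            return 0;        /* a real probe would refine *fault here */
        }
        return 1;
    }

    int main(void)
    {
        uint64_t fault;

        assert(probe(0x1000, 0, 0, &fault) < 0);   /* TBI off: unchecked */
        assert(probe(0x1000, 1, 1, &fault) > 0);   /* checked, passed */
        assert(probe(0x1000, 1, 0, &fault) == 0 && fault == 0x1000);
        return 0;
    }
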
1
From: Vikram Garhwal <fnu.vikram@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The QTests perform five tests on the Xilinx ZynqMP CAN controller:
3
We were incorrectly assuming that only the first byte of an MTE access
4
Tests the CAN controller in loopback, sleep and snoop mode.
4
is checked against the tags. But per the ARM, unaligned accesses are
5
Tests filtering of incoming CAN messages.
5
pre-decomposed into single-byte accesses. So by the time we reach the
6
actual MTE check in the ARM pseudocode, all accesses are aligned.
6
7
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
We cannot tell a priori whether or not a given scalar access is aligned,
8
Reviewed-by: Francisco Iglesias <francisco.iglesias@xilinx.com>
9
therefore we must at least check. Use mte_probe_int, which is already
9
Signed-off-by: Vikram Garhwal <fnu.vikram@xilinx.com>
10
set up for checking multiple granules.
10
Message-id: 1605728926-352690-4-git-send-email-fnu.vikram@xilinx.com
11
12
Buglink: https://bugs.launchpad.net/bugs/1921948
13
Tested-by: Alex Bennée <alex.bennee@linaro.org>
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20210416183106.1516563-4-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
18
---
13
tests/qtest/xlnx-can-test.c | 360 ++++++++++++++++++++++++++++++++++++
19
target/arm/mte_helper.c | 109 +++++++++++++---------------------------
14
tests/qtest/meson.build | 1 +
20
1 file changed, 35 insertions(+), 74 deletions(-)
15
2 files changed, 361 insertions(+)
16
create mode 100644 tests/qtest/xlnx-can-test.c
17
21
18
diff --git a/tests/qtest/xlnx-can-test.c b/tests/qtest/xlnx-can-test.c
22
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
19
new file mode 100644
23
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX
24
--- a/target/arm/mte_helper.c
21
--- /dev/null
25
+++ b/target/arm/mte_helper.c
22
+++ b/tests/qtest/xlnx-can-test.c
26
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
23
@@ -XXX,XX +XXX,XX @@
27
}
24
+/*
28
}
25
+ * QTests for the Xilinx ZynqMP CAN controller.
29
26
+ *
30
-/*
27
+ * Copyright (c) 2020 Xilinx Inc.
31
- * Perform an MTE checked access for a single logical or atomic access.
28
+ *
32
- */
29
+ * Written-by: Vikram Garhwal<fnu.vikram@xilinx.com>
33
-static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
30
+ *
34
- uintptr_t ra, int bit55)
31
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
35
-{
32
+ * of this software and associated documentation files (the "Software"), to deal
36
- int mem_tag, mmu_idx, ptr_tag, size;
33
+ * in the Software without restriction, including without limitation the rights
37
- MMUAccessType type;
34
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
38
- uint8_t *mem;
35
+ * copies of the Software, and to permit persons to whom the Software is
39
-
36
+ * furnished to do so, subject to the following conditions:
40
- ptr_tag = allocation_tag_from_addr(ptr);
37
+ *
41
-
38
+ * The above copyright notice and this permission notice shall be included in
42
- if (tcma_check(desc, bit55, ptr_tag)) {
39
+ * all copies or substantial portions of the Software.
43
- return true;
40
+ *
44
- }
41
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
45
-
42
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
46
- mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
43
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
47
- type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
44
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
48
- size = FIELD_EX32(desc, MTEDESC, ESIZE);
45
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
49
-
46
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
50
- mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
47
+ * THE SOFTWARE.
51
- MMU_DATA_LOAD, 1, ra);
48
+ */
52
- if (!mem) {
53
- return true;
54
- }
55
-
56
- mem_tag = load_tag1(ptr, mem);
57
- return ptr_tag == mem_tag;
58
-}
59
-
60
-/*
61
- * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
62
- * Returns false if the access is Checked and the check failed. This
63
- * is only intended to probe the tag -- the validity of the page must
64
- * be checked beforehand.
65
- */
66
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
67
-{
68
- int bit55 = extract64(ptr, 55, 1);
69
-
70
- /* If TBI is disabled, the access is unchecked. */
71
- if (unlikely(!tbi_check(desc, bit55))) {
72
- return true;
73
- }
74
-
75
- return mte_probe1_int(env, desc, ptr, 0, bit55);
76
-}
77
-
78
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
79
- uint64_t ptr, uintptr_t ra)
80
-{
81
- int bit55 = extract64(ptr, 55, 1);
82
-
83
- /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
84
- if (unlikely(!tbi_check(desc, bit55))) {
85
- return ptr;
86
- }
87
-
88
- if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
89
- mte_check_fail(env, desc, ptr, ra);
90
- }
91
-
92
- return useronly_clean_ptr(ptr);
93
-}
94
-
95
-uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
96
-{
97
- return mte_check1(env, desc, ptr, GETPC());
98
-}
99
-
100
-/*
101
- * Perform an MTE checked access for multiple logical accesses.
102
- */
103
-
104
/**
105
* checkN:
106
* @tag: tag memory to test
107
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
108
return mte_checkN(env, desc, ptr, GETPC());
109
}
110
111
+uint64_t mte_check1(CPUARMState *env, uint32_t desc,
112
+ uint64_t ptr, uintptr_t ra)
113
+{
114
+ uint64_t fault;
115
+ uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
116
+ int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
49
+
117
+
50
+#include "qemu/osdep.h"
118
+ if (unlikely(ret == 0)) {
51
+#include "libqos/libqtest.h"
119
+ mte_check_fail(env, desc, fault, ra);
52
+
120
+ } else if (ret < 0) {
53
+/* Base address. */
121
+ return ptr;
54
+#define CAN0_BASE_ADDR 0xFF060000
55
+#define CAN1_BASE_ADDR 0xFF070000
56
+
57
+/* Register addresses. */
58
+#define R_SRR_OFFSET 0x00
59
+#define R_MSR_OFFSET 0x04
60
+#define R_SR_OFFSET 0x18
61
+#define R_ISR_OFFSET 0x1C
62
+#define R_ICR_OFFSET 0x24
63
+#define R_TXID_OFFSET 0x30
64
+#define R_TXDLC_OFFSET 0x34
65
+#define R_TXDATA1_OFFSET 0x38
66
+#define R_TXDATA2_OFFSET 0x3C
67
+#define R_RXID_OFFSET 0x50
68
+#define R_RXDLC_OFFSET 0x54
69
+#define R_RXDATA1_OFFSET 0x58
70
+#define R_RXDATA2_OFFSET 0x5C
71
+#define R_AFR 0x60
72
+#define R_AFMR1 0x64
73
+#define R_AFIR1 0x68
74
+#define R_AFMR2 0x6C
75
+#define R_AFIR2 0x70
76
+#define R_AFMR3 0x74
77
+#define R_AFIR3 0x78
78
+#define R_AFMR4 0x7C
79
+#define R_AFIR4 0x80
80
+
81
+/* CAN modes. */
82
+#define CONFIG_MODE 0x00
83
+#define NORMAL_MODE 0x00
84
+#define LOOPBACK_MODE 0x02
85
+#define SNOOP_MODE 0x04
86
+#define SLEEP_MODE 0x01
87
+#define ENABLE_CAN (1 << 1)
88
+#define STATUS_NORMAL_MODE (1 << 3)
89
+#define STATUS_LOOPBACK_MODE (1 << 1)
90
+#define STATUS_SNOOP_MODE (1 << 12)
91
+#define STATUS_SLEEP_MODE (1 << 2)
92
+#define ISR_TXOK (1 << 1)
93
+#define ISR_RXOK (1 << 4)
94
+
95
+static void match_rx_tx_data(const uint32_t *buf_tx, const uint32_t *buf_rx,
96
+ uint8_t can_timestamp)
97
+{
98
+ uint16_t size = 0;
99
+ uint8_t len = 4;
100
+
101
+ while (size < len) {
102
+ if (R_RXID_OFFSET + 4 * size == R_RXDLC_OFFSET) {
103
+ g_assert_cmpint(buf_rx[size], ==, buf_tx[size] + can_timestamp);
104
+ } else {
105
+ g_assert_cmpint(buf_rx[size], ==, buf_tx[size]);
106
+ }
107
+
108
+ size++;
109
+ }
122
+ }
123
+ return useronly_clean_ptr(ptr);
110
+}
124
+}
111
+
125
+
112
+static void read_data(QTestState *qts, uint64_t can_base_addr, uint32_t *buf_rx)
126
+uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
113
+{
127
+{
114
+ uint32_t int_status;
128
+ return mte_check1(env, desc, ptr, GETPC());
115
+
116
+ /* Read the interrupt on CAN rx. */
117
+ int_status = qtest_readl(qts, can_base_addr + R_ISR_OFFSET) & ISR_RXOK;
118
+
119
+ g_assert_cmpint(int_status, ==, ISR_RXOK);
120
+
121
+ /* Read the RX register data for CAN. */
122
+ buf_rx[0] = qtest_readl(qts, can_base_addr + R_RXID_OFFSET);
123
+ buf_rx[1] = qtest_readl(qts, can_base_addr + R_RXDLC_OFFSET);
124
+ buf_rx[2] = qtest_readl(qts, can_base_addr + R_RXDATA1_OFFSET);
125
+ buf_rx[3] = qtest_readl(qts, can_base_addr + R_RXDATA2_OFFSET);
126
+
127
+ /* Clear the RX interrupt. */
128
+ qtest_writel(qts, CAN1_BASE_ADDR + R_ICR_OFFSET, ISR_RXOK);
129
+}
130
+
131
+static void send_data(QTestState *qts, uint64_t can_base_addr,
132
+ const uint32_t *buf_tx)
133
+{
134
+ uint32_t int_status;
135
+
136
+ /* Write the TX register data for CAN. */
137
+ qtest_writel(qts, can_base_addr + R_TXID_OFFSET, buf_tx[0]);
138
+ qtest_writel(qts, can_base_addr + R_TXDLC_OFFSET, buf_tx[1]);
139
+ qtest_writel(qts, can_base_addr + R_TXDATA1_OFFSET, buf_tx[2]);
140
+ qtest_writel(qts, can_base_addr + R_TXDATA2_OFFSET, buf_tx[3]);
141
+
142
+ /* Read the interrupt on CAN for tx. */
143
+ int_status = qtest_readl(qts, can_base_addr + R_ISR_OFFSET) & ISR_TXOK;
144
+
145
+ g_assert_cmpint(int_status, ==, ISR_TXOK);
146
+
147
+ /* Clear the interrupt for tx. */
148
+ qtest_writel(qts, CAN0_BASE_ADDR + R_ICR_OFFSET, ISR_TXOK);
149
+}
129
+}
150
+
130
+
151
+/*
131
+/*
152
+ * This test will be transferring data from CAN0 and CAN1 through canbus. CAN0
132
+ * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
153
+ * initiate the data transfer to can-bus, CAN1 receives the data. Test compares
133
+ * Returns false if the access is Checked and the check failed. This
154
+ * the data sent from CAN0 with received on CAN1.
134
+ * is only intended to probe the tag -- the validity of the page must
135
+ * be checked beforehand.
155
+ */
136
+ */
156
+static void test_can_bus(void)
137
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
157
+{
138
+{
158
+ const uint32_t buf_tx[4] = { 0xFF, 0x80000000, 0x12345678, 0x87654321 };
139
+ uint64_t fault;
159
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
140
+ uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
160
+ uint32_t status = 0;
141
+ int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);
161
+ uint8_t can_timestamp = 1;
162
+
142
+
163
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
143
+ return ret != 0;
164
+ " -object can-bus,id=canbus0"
165
+ " -machine xlnx-zcu102.canbus0=canbus0"
166
+ " -machine xlnx-zcu102.canbus1=canbus0"
167
+ );
168
+
169
+ /* Configure the CAN0 and CAN1. */
170
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
171
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
172
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
173
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
174
+
175
+ /* Check here if CAN0 and CAN1 are in normal mode. */
176
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
177
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
178
+
179
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
180
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
181
+
182
+ send_data(qts, CAN0_BASE_ADDR, buf_tx);
183
+
184
+ read_data(qts, CAN1_BASE_ADDR, buf_rx);
185
+ match_rx_tx_data(buf_tx, buf_rx, can_timestamp);
186
+
187
+ qtest_quit(qts);
188
+}
144
+}
189
+
145
+
190
+/*
146
/*
191
+ * This test is performing loopback mode on CAN0 and CAN1. Data sent from TX of
147
* Perform an MTE checked access for DC_ZVA.
192
+ * each CAN0 and CAN1 are compared with RX register data for respective CAN.
148
*/
193
+ */
194
+static void test_can_loopback(void)
195
+{
196
+ uint32_t buf_tx[4] = { 0xFF, 0x80000000, 0x12345678, 0x87654321 };
197
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
198
+ uint32_t status = 0;
199
+
200
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
201
+ " -object can-bus,id=canbus0"
202
+ " -machine xlnx-zcu102.canbus0=canbus0"
203
+ " -machine xlnx-zcu102.canbus1=canbus0"
204
+ );
205
+
206
+ /* Configure the CAN0 in loopback mode. */
207
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, CONFIG_MODE);
208
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, LOOPBACK_MODE);
209
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
210
+
211
+ /* Check here if CAN0 is set in loopback mode. */
212
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
213
+
214
+ g_assert_cmpint(status, ==, STATUS_LOOPBACK_MODE);
215
+
216
+ send_data(qts, CAN0_BASE_ADDR, buf_tx);
217
+ read_data(qts, CAN0_BASE_ADDR, buf_rx);
218
+ match_rx_tx_data(buf_tx, buf_rx, 0);
219
+
220
+ /* Configure the CAN1 in loopback mode. */
221
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, CONFIG_MODE);
222
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, LOOPBACK_MODE);
223
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
224
+
225
+ /* Check here if CAN1 is set in loopback mode. */
226
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
227
+
228
+ g_assert_cmpint(status, ==, STATUS_LOOPBACK_MODE);
229
+
230
+ send_data(qts, CAN1_BASE_ADDR, buf_tx);
231
+ read_data(qts, CAN1_BASE_ADDR, buf_rx);
232
+ match_rx_tx_data(buf_tx, buf_rx, 0);
233
+
234
+ qtest_quit(qts);
235
+}
236
+
237
+/*
238
+ * Enable filters for CAN1. This will filter incoming messages with ID. In this
239
+ * test message will pass through filter 2.
240
+ */
241
+static void test_can_filter(void)
242
+{
243
+ uint32_t buf_tx[4] = { 0x14, 0x80000000, 0x12345678, 0x87654321 };
244
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
245
+ uint32_t status = 0;
246
+ uint8_t can_timestamp = 1;
247
+
248
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
249
+ " -object can-bus,id=canbus0"
250
+ " -machine xlnx-zcu102.canbus0=canbus0"
251
+ " -machine xlnx-zcu102.canbus1=canbus0"
252
+ );
253
+
254
+ /* Configure the CAN0 and CAN1. */
255
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
256
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
257
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
258
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
259
+
260
+ /* Check here if CAN0 and CAN1 are in normal mode. */
261
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
262
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
263
+
264
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
265
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
266
+
267
+ /* Set filter for CAN1 for incoming messages. */
268
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFR, 0x0);
269
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFMR1, 0xF7);
270
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFIR1, 0x121F);
271
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFMR2, 0x5431);
272
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFIR2, 0x14);
273
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFMR3, 0x1234);
274
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFIR3, 0x5431);
275
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFMR4, 0xFFF);
276
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFIR4, 0x1234);
277
+
278
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFR, 0xF);
279
+
280
+ send_data(qts, CAN0_BASE_ADDR, buf_tx);
+
+ read_data(qts, CAN1_BASE_ADDR, buf_rx);
+ match_rx_tx_data(buf_tx, buf_rx, can_timestamp);
+
+ qtest_quit(qts);
+}
+
+/* Testing sleep mode on CAN0 while CAN1 is in normal mode. */
+static void test_can_sleepmode(void)
+{
+ uint32_t buf_tx[4] = { 0x14, 0x80000000, 0x12345678, 0x87654321 };
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
+ uint32_t status = 0;
+ uint8_t can_timestamp = 1;
+
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
+ " -object can-bus,id=canbus0"
+ " -machine xlnx-zcu102.canbus0=canbus0"
+ " -machine xlnx-zcu102.canbus1=canbus0"
+ );
+
+ /* Configure the CAN0. */
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, CONFIG_MODE);
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, SLEEP_MODE);
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
+
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
+
+ /* Check here if CAN0 is in SLEEP mode and CAN1 in normal mode. */
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
+ g_assert_cmpint(status, ==, STATUS_SLEEP_MODE);
+
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
+
+ send_data(qts, CAN1_BASE_ADDR, buf_tx);
+
+ /*
+ * Once CAN1 sends data on the bus, CAN0 should exit sleep mode and
+ * receive the incoming data. Check the CAN0 status now.
+ */
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
+
+ read_data(qts, CAN0_BASE_ADDR, buf_rx);
+
+ match_rx_tx_data(buf_tx, buf_rx, can_timestamp);
+
+ qtest_quit(qts);
+}
+
+/* Testing Snoop mode on CAN0 while CAN1 is in normal mode. */
+static void test_can_snoopmode(void)
+{
+ uint32_t buf_tx[4] = { 0x14, 0x80000000, 0x12345678, 0x87654321 };
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
+ uint32_t status = 0;
+ uint8_t can_timestamp = 1;
+
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
+ " -object can-bus,id=canbus0"
+ " -machine xlnx-zcu102.canbus0=canbus0"
+ " -machine xlnx-zcu102.canbus1=canbus0"
+ );
+
+ /* Configure the CAN0. */
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, CONFIG_MODE);
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, SNOOP_MODE);
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
+
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
+
+ /* Check here if CAN0 is in SNOOP mode and CAN1 in normal mode. */
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
+ g_assert_cmpint(status, ==, STATUS_SNOOP_MODE);
+
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
+
+ send_data(qts, CAN1_BASE_ADDR, buf_tx);
+
+ read_data(qts, CAN0_BASE_ADDR, buf_rx);
+
+ match_rx_tx_data(buf_tx, buf_rx, can_timestamp);
+
+ qtest_quit(qts);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ qtest_add_func("/net/can/can_bus", test_can_bus);
+ qtest_add_func("/net/can/can_loopback", test_can_loopback);
+ qtest_add_func("/net/can/can_filter", test_can_filter);
+ qtest_add_func("/net/can/can_test_snoopmode", test_can_snoopmode);
+ qtest_add_func("/net/can/can_test_sleepmode", test_can_sleepmode);
+
+ return g_test_run();
+}
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -XXX,XX +XXX,XX @@ qtests_aarch64 = \
 ['arm-cpu-features',
 'numa-test',
 'boot-serial-test',
+ 'xlnx-can-test',
 'migration-test']

qtests_s390x = \
--
2.20.1

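For readers unfamiliar with the qtest harness these tests build on, a minimal
sketch of the same init/read/assert pattern follows. This is illustrative only
and not part of the series: the 0xff060000 base and 0x18 status-register offset
are assumptions for the example (the real test takes them from its own
CAN0_BASE_ADDR and R_SR_OFFSET constants), and the libqtest include path
differs between QEMU versions.

    #include "qemu/osdep.h"
    #include "libqos/libqtest.h"

    /*
     * Sketch only: out of reset the ZynqMP CAN controller is expected to
     * sit in configuration mode, i.e. STATUS_REGISTER bit 0 (CONFIG) set.
     */
    static void test_can_config_mode(void)
    {
        QTestState *qts = qtest_init("-machine xlnx-zcu102");
        uint32_t status = qtest_readl(qts, 0xff060000 + 0x18);

        g_assert_cmpint(status & 1, ==, 1);
        qtest_quit(qts);
    }

    int main(int argc, char **argv)
    {
        g_test_init(&argc, &argv, NULL);
        qtest_add_func("/net/can/can_config_mode_sketch", test_can_config_mode);
        return g_test_run();
    }
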
From: Vikram Garhwal <fnu.vikram@xilinx.com>

The Xilinx ZynqMP CAN controller model is developed based on the SocketCAN
and QEMU CAN bus implementation. The bus connection and SocketCAN connection
for each CAN module can be set through the command line.

Example for using a single CAN:
 -object can-bus,id=canbus0 \
 -machine xlnx-zcu102.canbus0=canbus0 \
 -object can-host-socketcan,id=socketcan0,if=vcan0,canbus=canbus0

Example for connecting both CANs to the same virtual CAN on the host machine:
 -object can-bus,id=canbus0 -object can-bus,id=canbus1 \
 -machine xlnx-zcu102.canbus0=canbus0 \
 -machine xlnx-zcu102.canbus1=canbus1 \
 -object can-host-socketcan,id=socketcan0,if=vcan0,canbus=canbus0 \
 -object can-host-socketcan,id=socketcan1,if=vcan0,canbus=canbus1

To create a virtual CAN on the host machine, please check the QEMU CAN docs:
 https://github.com/qemu/qemu/blob/master/docs/can.txt

Signed-off-by: Vikram Garhwal <fnu.vikram@xilinx.com>
Message-id: 1605728926-352690-2-git-send-email-fnu.vikram@xilinx.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 meson.build | 1 +
 hw/net/can/trace.h | 1 +
 include/hw/net/xlnx-zynqmp-can.h | 78 ++
 hw/net/can/xlnx-zynqmp-can.c | 1161 ++++++++++++++++++++++++++++++
 hw/Kconfig | 1 +
 hw/net/can/meson.build | 1 +
 hw/net/can/trace-events | 9 +
 7 files changed, 1252 insertions(+)
 create mode 100644 hw/net/can/trace.h
 create mode 100644 include/hw/net/xlnx-zynqmp-can.h
 create mode 100644 hw/net/can/xlnx-zynqmp-can.c
 create mode 100644 hw/net/can/trace-events

diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ if have_system
 'hw/misc',
 'hw/misc/macio',
 'hw/net',
+ 'hw/net/can',
 'hw/nvram',
 'hw/pci',
 'hw/pci-host',
diff --git a/hw/net/can/trace.h b/hw/net/can/trace.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/net/can/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_net_can.h"
diff --git a/include/hw/net/xlnx-zynqmp-can.h b/include/hw/net/xlnx-zynqmp-can.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/hw/net/xlnx-zynqmp-can.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * QEMU model of the Xilinx ZynqMP CAN controller.
+ *
+ * Copyright (c) 2020 Xilinx Inc.
+ *
+ * Written-by: Vikram Garhwal <fnu.vikram@xilinx.com>
+ *
+ * Based on QEMU CAN Device emulation implemented by Jin Yang, Deniz Eren and
+ * Pavel Pisa.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_ZYNQMP_CAN_H
+#define XLNX_ZYNQMP_CAN_H
+
+#include "hw/register.h"
+#include "net/can_emu.h"
+#include "net/can_host.h"
100
+#include "qemu/fifo32.h"
101
+#include "hw/ptimer.h"
102
+#include "hw/qdev-clock.h"
103
+
104
+#define TYPE_XLNX_ZYNQMP_CAN "xlnx.zynqmp-can"
105
+
106
+#define XLNX_ZYNQMP_CAN(obj) \
107
+ OBJECT_CHECK(XlnxZynqMPCANState, (obj), TYPE_XLNX_ZYNQMP_CAN)
108
+
109
+#define MAX_CAN_CTRLS 2
110
+#define XLNX_ZYNQMP_CAN_R_MAX (0x84 / 4)
111
+#define MAILBOX_CAPACITY 64
112
+#define CAN_TIMER_MAX 0XFFFFUL
113
+#define CAN_DEFAULT_CLOCK (24 * 1000 * 1000)
114
+
115
+/* Each CAN_FRAME will have 4 * 32bit size. */
116
+#define CAN_FRAME_SIZE 4
117
+#define RXFIFO_SIZE (MAILBOX_CAPACITY * CAN_FRAME_SIZE)
118
+
119
+typedef struct XlnxZynqMPCANState {
120
+ SysBusDevice parent_obj;
121
+ MemoryRegion iomem;
122
+
123
+ qemu_irq irq;
124
+
125
+ CanBusClientState bus_client;
126
+ CanBusState *canbus;
127
+
128
+ struct {
129
+ uint32_t ext_clk_freq;
130
+ } cfg;
131
+
132
+ RegisterInfo reg_info[XLNX_ZYNQMP_CAN_R_MAX];
133
+ uint32_t regs[XLNX_ZYNQMP_CAN_R_MAX];
134
+
135
+ Fifo32 rx_fifo;
136
+ Fifo32 tx_fifo;
137
+ Fifo32 txhpb_fifo;
138
+
139
+ ptimer_state *can_timer;
140
+} XlnxZynqMPCANState;
141
+
142
+#endif
143
diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c
144
new file mode 100644
145
index XXXXXXX..XXXXXXX
146
--- /dev/null
147
+++ b/hw/net/can/xlnx-zynqmp-can.c
148
@@ -XXX,XX +XXX,XX @@
149
+/*
150
+ * QEMU model of the Xilinx ZynqMP CAN controller.
151
+ * This implementation is based on the following datasheet:
152
+ * https://www.xilinx.com/support/documentation/user_guides/ug1085-zynq-ultrascale-trm.pdf
153
+ *
154
+ * Copyright (c) 2020 Xilinx Inc.
155
+ *
156
+ * Written-by: Vikram Garhwal <fnu.vikram@xilinx.com>
157
+ *
158
+ * Based on QEMU CAN Device emulation implemented by Jin Yang, Deniz Eren and
159
+ * Pavel Pisa
160
+ *
161
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
162
+ * of this software and associated documentation files (the "Software"), to deal
163
+ * in the Software without restriction, including without limitation the rights
164
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
165
+ * copies of the Software, and to permit persons to whom the Software is
166
+ * furnished to do so, subject to the following conditions:
167
+ *
168
+ * The above copyright notice and this permission notice shall be included in
169
+ * all copies or substantial portions of the Software.
170
+ *
171
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
172
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
173
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
174
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
175
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
176
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
177
+ * THE SOFTWARE.
178
+ */
179
+
180
+#include "qemu/osdep.h"
181
+#include "hw/sysbus.h"
182
+#include "hw/register.h"
183
+#include "hw/irq.h"
184
+#include "qapi/error.h"
185
+#include "qemu/bitops.h"
186
+#include "qemu/log.h"
187
+#include "qemu/cutils.h"
188
+#include "sysemu/sysemu.h"
189
+#include "migration/vmstate.h"
190
+#include "hw/qdev-properties.h"
191
+#include "net/can_emu.h"
192
+#include "net/can_host.h"
193
+#include "qemu/event_notifier.h"
194
+#include "qom/object_interfaces.h"
195
+#include "hw/net/xlnx-zynqmp-can.h"
196
+#include "trace.h"
197
+
198
+#ifndef XLNX_ZYNQMP_CAN_ERR_DEBUG
199
+#define XLNX_ZYNQMP_CAN_ERR_DEBUG 0
200
+#endif
201
+
202
+#define MAX_DLC 8
203
+#undef ERROR
204
+
205
+REG32(SOFTWARE_RESET_REGISTER, 0x0)
206
+ FIELD(SOFTWARE_RESET_REGISTER, CEN, 1, 1)
207
+ FIELD(SOFTWARE_RESET_REGISTER, SRST, 0, 1)
208
+REG32(MODE_SELECT_REGISTER, 0x4)
209
+ FIELD(MODE_SELECT_REGISTER, SNOOP, 2, 1)
210
+ FIELD(MODE_SELECT_REGISTER, LBACK, 1, 1)
211
+ FIELD(MODE_SELECT_REGISTER, SLEEP, 0, 1)
212
+REG32(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, 0x8)
213
+ FIELD(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, BRP, 0, 8)
214
+REG32(ARBITRATION_PHASE_BIT_TIMING_REGISTER, 0xc)
215
+ FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, SJW, 7, 2)
216
+ FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS2, 4, 3)
217
+ FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS1, 0, 4)
218
+REG32(ERROR_COUNTER_REGISTER, 0x10)
219
+ FIELD(ERROR_COUNTER_REGISTER, REC, 8, 8)
220
+ FIELD(ERROR_COUNTER_REGISTER, TEC, 0, 8)
221
+REG32(ERROR_STATUS_REGISTER, 0x14)
222
+ FIELD(ERROR_STATUS_REGISTER, ACKER, 4, 1)
223
+ FIELD(ERROR_STATUS_REGISTER, BERR, 3, 1)
224
+ FIELD(ERROR_STATUS_REGISTER, STER, 2, 1)
225
+ FIELD(ERROR_STATUS_REGISTER, FMER, 1, 1)
226
+ FIELD(ERROR_STATUS_REGISTER, CRCER, 0, 1)
227
+REG32(STATUS_REGISTER, 0x18)
228
+ FIELD(STATUS_REGISTER, SNOOP, 12, 1)
229
+ FIELD(STATUS_REGISTER, ACFBSY, 11, 1)
230
+ FIELD(STATUS_REGISTER, TXFLL, 10, 1)
231
+ FIELD(STATUS_REGISTER, TXBFLL, 9, 1)
232
+ FIELD(STATUS_REGISTER, ESTAT, 7, 2)
233
+ FIELD(STATUS_REGISTER, ERRWRN, 6, 1)
234
+ FIELD(STATUS_REGISTER, BBSY, 5, 1)
235
+ FIELD(STATUS_REGISTER, BIDLE, 4, 1)
236
+ FIELD(STATUS_REGISTER, NORMAL, 3, 1)
237
+ FIELD(STATUS_REGISTER, SLEEP, 2, 1)
238
+ FIELD(STATUS_REGISTER, LBACK, 1, 1)
239
+ FIELD(STATUS_REGISTER, CONFIG, 0, 1)
240
+REG32(INTERRUPT_STATUS_REGISTER, 0x1c)
241
+ FIELD(INTERRUPT_STATUS_REGISTER, TXFEMP, 14, 1)
242
+ FIELD(INTERRUPT_STATUS_REGISTER, TXFWMEMP, 13, 1)
243
+ FIELD(INTERRUPT_STATUS_REGISTER, RXFWMFLL, 12, 1)
244
+ FIELD(INTERRUPT_STATUS_REGISTER, WKUP, 11, 1)
245
+ FIELD(INTERRUPT_STATUS_REGISTER, SLP, 10, 1)
246
+ FIELD(INTERRUPT_STATUS_REGISTER, BSOFF, 9, 1)
247
+ FIELD(INTERRUPT_STATUS_REGISTER, ERROR, 8, 1)
248
+ FIELD(INTERRUPT_STATUS_REGISTER, RXNEMP, 7, 1)
249
+ FIELD(INTERRUPT_STATUS_REGISTER, RXOFLW, 6, 1)
250
+ FIELD(INTERRUPT_STATUS_REGISTER, RXUFLW, 5, 1)
251
+ FIELD(INTERRUPT_STATUS_REGISTER, RXOK, 4, 1)
252
+ FIELD(INTERRUPT_STATUS_REGISTER, TXBFLL, 3, 1)
253
+ FIELD(INTERRUPT_STATUS_REGISTER, TXFLL, 2, 1)
254
+ FIELD(INTERRUPT_STATUS_REGISTER, TXOK, 1, 1)
255
+ FIELD(INTERRUPT_STATUS_REGISTER, ARBLST, 0, 1)
256
+REG32(INTERRUPT_ENABLE_REGISTER, 0x20)
257
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXFEMP, 14, 1)
258
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXFWMEMP, 13, 1)
259
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXFWMFLL, 12, 1)
260
+ FIELD(INTERRUPT_ENABLE_REGISTER, EWKUP, 11, 1)
261
+ FIELD(INTERRUPT_ENABLE_REGISTER, ESLP, 10, 1)
262
+ FIELD(INTERRUPT_ENABLE_REGISTER, EBSOFF, 9, 1)
263
+ FIELD(INTERRUPT_ENABLE_REGISTER, EERROR, 8, 1)
264
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXNEMP, 7, 1)
265
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXOFLW, 6, 1)
266
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXUFLW, 5, 1)
267
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXOK, 4, 1)
268
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXBFLL, 3, 1)
269
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXFLL, 2, 1)
270
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXOK, 1, 1)
271
+ FIELD(INTERRUPT_ENABLE_REGISTER, EARBLST, 0, 1)
272
+REG32(INTERRUPT_CLEAR_REGISTER, 0x24)
273
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXFEMP, 14, 1)
274
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXFWMEMP, 13, 1)
275
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXFWMFLL, 12, 1)
276
+ FIELD(INTERRUPT_CLEAR_REGISTER, CWKUP, 11, 1)
277
+ FIELD(INTERRUPT_CLEAR_REGISTER, CSLP, 10, 1)
278
+ FIELD(INTERRUPT_CLEAR_REGISTER, CBSOFF, 9, 1)
279
+ FIELD(INTERRUPT_CLEAR_REGISTER, CERROR, 8, 1)
280
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXNEMP, 7, 1)
281
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXOFLW, 6, 1)
282
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXUFLW, 5, 1)
283
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXOK, 4, 1)
284
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXBFLL, 3, 1)
285
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXFLL, 2, 1)
286
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXOK, 1, 1)
287
+ FIELD(INTERRUPT_CLEAR_REGISTER, CARBLST, 0, 1)
288
+REG32(TIMESTAMP_REGISTER, 0x28)
289
+ FIELD(TIMESTAMP_REGISTER, CTS, 0, 1)
290
+REG32(WIR, 0x2c)
291
+ FIELD(WIR, EW, 8, 8)
292
+ FIELD(WIR, FW, 0, 8)
293
+REG32(TXFIFO_ID, 0x30)
294
+ FIELD(TXFIFO_ID, IDH, 21, 11)
295
+ FIELD(TXFIFO_ID, SRRRTR, 20, 1)
296
+ FIELD(TXFIFO_ID, IDE, 19, 1)
297
+ FIELD(TXFIFO_ID, IDL, 1, 18)
298
+ FIELD(TXFIFO_ID, RTR, 0, 1)
299
+REG32(TXFIFO_DLC, 0x34)
300
+ FIELD(TXFIFO_DLC, DLC, 28, 4)
301
+REG32(TXFIFO_DATA1, 0x38)
302
+ FIELD(TXFIFO_DATA1, DB0, 24, 8)
303
+ FIELD(TXFIFO_DATA1, DB1, 16, 8)
304
+ FIELD(TXFIFO_DATA1, DB2, 8, 8)
305
+ FIELD(TXFIFO_DATA1, DB3, 0, 8)
306
+REG32(TXFIFO_DATA2, 0x3c)
307
+ FIELD(TXFIFO_DATA2, DB4, 24, 8)
308
+ FIELD(TXFIFO_DATA2, DB5, 16, 8)
309
+ FIELD(TXFIFO_DATA2, DB6, 8, 8)
310
+ FIELD(TXFIFO_DATA2, DB7, 0, 8)
311
+REG32(TXHPB_ID, 0x40)
312
+ FIELD(TXHPB_ID, IDH, 21, 11)
313
+ FIELD(TXHPB_ID, SRRRTR, 20, 1)
314
+ FIELD(TXHPB_ID, IDE, 19, 1)
315
+ FIELD(TXHPB_ID, IDL, 1, 18)
316
+ FIELD(TXHPB_ID, RTR, 0, 1)
317
+REG32(TXHPB_DLC, 0x44)
318
+ FIELD(TXHPB_DLC, DLC, 28, 4)
319
+REG32(TXHPB_DATA1, 0x48)
320
+ FIELD(TXHPB_DATA1, DB0, 24, 8)
321
+ FIELD(TXHPB_DATA1, DB1, 16, 8)
322
+ FIELD(TXHPB_DATA1, DB2, 8, 8)
323
+ FIELD(TXHPB_DATA1, DB3, 0, 8)
324
+REG32(TXHPB_DATA2, 0x4c)
325
+ FIELD(TXHPB_DATA2, DB4, 24, 8)
326
+ FIELD(TXHPB_DATA2, DB5, 16, 8)
327
+ FIELD(TXHPB_DATA2, DB6, 8, 8)
328
+ FIELD(TXHPB_DATA2, DB7, 0, 8)
329
+REG32(RXFIFO_ID, 0x50)
330
+ FIELD(RXFIFO_ID, IDH, 21, 11)
331
+ FIELD(RXFIFO_ID, SRRRTR, 20, 1)
332
+ FIELD(RXFIFO_ID, IDE, 19, 1)
333
+ FIELD(RXFIFO_ID, IDL, 1, 18)
334
+ FIELD(RXFIFO_ID, RTR, 0, 1)
335
+REG32(RXFIFO_DLC, 0x54)
336
+ FIELD(RXFIFO_DLC, DLC, 28, 4)
337
+ FIELD(RXFIFO_DLC, RXT, 0, 16)
338
+REG32(RXFIFO_DATA1, 0x58)
339
+ FIELD(RXFIFO_DATA1, DB0, 24, 8)
340
+ FIELD(RXFIFO_DATA1, DB1, 16, 8)
341
+ FIELD(RXFIFO_DATA1, DB2, 8, 8)
342
+ FIELD(RXFIFO_DATA1, DB3, 0, 8)
343
+REG32(RXFIFO_DATA2, 0x5c)
344
+ FIELD(RXFIFO_DATA2, DB4, 24, 8)
345
+ FIELD(RXFIFO_DATA2, DB5, 16, 8)
346
+ FIELD(RXFIFO_DATA2, DB6, 8, 8)
347
+ FIELD(RXFIFO_DATA2, DB7, 0, 8)
348
+REG32(AFR, 0x60)
349
+ FIELD(AFR, UAF4, 3, 1)
350
+ FIELD(AFR, UAF3, 2, 1)
351
+ FIELD(AFR, UAF2, 1, 1)
352
+ FIELD(AFR, UAF1, 0, 1)
353
+REG32(AFMR1, 0x64)
354
+ FIELD(AFMR1, AMIDH, 21, 11)
355
+ FIELD(AFMR1, AMSRR, 20, 1)
356
+ FIELD(AFMR1, AMIDE, 19, 1)
357
+ FIELD(AFMR1, AMIDL, 1, 18)
358
+ FIELD(AFMR1, AMRTR, 0, 1)
359
+REG32(AFIR1, 0x68)
360
+ FIELD(AFIR1, AIIDH, 21, 11)
361
+ FIELD(AFIR1, AISRR, 20, 1)
362
+ FIELD(AFIR1, AIIDE, 19, 1)
363
+ FIELD(AFIR1, AIIDL, 1, 18)
364
+ FIELD(AFIR1, AIRTR, 0, 1)
365
+REG32(AFMR2, 0x6c)
366
+ FIELD(AFMR2, AMIDH, 21, 11)
367
+ FIELD(AFMR2, AMSRR, 20, 1)
368
+ FIELD(AFMR2, AMIDE, 19, 1)
369
+ FIELD(AFMR2, AMIDL, 1, 18)
370
+ FIELD(AFMR2, AMRTR, 0, 1)
371
+REG32(AFIR2, 0x70)
372
+ FIELD(AFIR2, AIIDH, 21, 11)
373
+ FIELD(AFIR2, AISRR, 20, 1)
374
+ FIELD(AFIR2, AIIDE, 19, 1)
375
+ FIELD(AFIR2, AIIDL, 1, 18)
376
+ FIELD(AFIR2, AIRTR, 0, 1)
377
+REG32(AFMR3, 0x74)
378
+ FIELD(AFMR3, AMIDH, 21, 11)
379
+ FIELD(AFMR3, AMSRR, 20, 1)
380
+ FIELD(AFMR3, AMIDE, 19, 1)
381
+ FIELD(AFMR3, AMIDL, 1, 18)
382
+ FIELD(AFMR3, AMRTR, 0, 1)
383
+REG32(AFIR3, 0x78)
384
+ FIELD(AFIR3, AIIDH, 21, 11)
385
+ FIELD(AFIR3, AISRR, 20, 1)
386
+ FIELD(AFIR3, AIIDE, 19, 1)
387
+ FIELD(AFIR3, AIIDL, 1, 18)
388
+ FIELD(AFIR3, AIRTR, 0, 1)
389
+REG32(AFMR4, 0x7c)
390
+ FIELD(AFMR4, AMIDH, 21, 11)
391
+ FIELD(AFMR4, AMSRR, 20, 1)
392
+ FIELD(AFMR4, AMIDE, 19, 1)
393
+ FIELD(AFMR4, AMIDL, 1, 18)
394
+ FIELD(AFMR4, AMRTR, 0, 1)
395
+REG32(AFIR4, 0x80)
396
+ FIELD(AFIR4, AIIDH, 21, 11)
397
+ FIELD(AFIR4, AISRR, 20, 1)
398
+ FIELD(AFIR4, AIIDE, 19, 1)
399
+ FIELD(AFIR4, AIIDL, 1, 18)
400
+ FIELD(AFIR4, AIRTR, 0, 1)
401
+
402
+static void can_update_irq(XlnxZynqMPCANState *s)
403
+{
+ uint32_t irq;
+
+ /* Watermark register interrupts. */
+ if ((fifo32_num_free(&s->tx_fifo) / CAN_FRAME_SIZE) >
+ ARRAY_FIELD_EX32(s->regs, WIR, EW)) {
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFWMEMP, 1);
+ }
+
+ if ((fifo32_num_used(&s->rx_fifo) / CAN_FRAME_SIZE) >
+ ARRAY_FIELD_EX32(s->regs, WIR, FW)) {
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL, 1);
+ }
+
+ /* RX Interrupts. */
+ if (fifo32_num_used(&s->rx_fifo) >= CAN_FRAME_SIZE) {
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXNEMP, 1);
+ }
+
+ /* TX interrupts. */
+ if (fifo32_is_empty(&s->tx_fifo)) {
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFEMP, 1);
+ }
+
+ if (fifo32_is_full(&s->tx_fifo)) {
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFLL, 1);
+ }
+
+ if (fifo32_is_full(&s->txhpb_fifo)) {
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXBFLL, 1);
+ }
+
+ irq = s->regs[R_INTERRUPT_STATUS_REGISTER];
+ irq &= s->regs[R_INTERRUPT_ENABLE_REGISTER];
+
+ trace_xlnx_can_update_irq(s->regs[R_INTERRUPT_STATUS_REGISTER],
+ s->regs[R_INTERRUPT_ENABLE_REGISTER], irq);
+ qemu_set_irq(s->irq, irq);
+}
+
+static void can_ier_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
+
+ can_update_irq(s);
+}
+
+static uint64_t can_icr_pre_write(RegisterInfo *reg, uint64_t val)
451
+{
452
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
453
+
454
+ s->regs[R_INTERRUPT_STATUS_REGISTER] &= ~val;
455
+ can_update_irq(s);
456
+
457
+ return 0;
458
+}
459
+
460
+static void can_config_reset(XlnxZynqMPCANState *s)
461
+{
462
+ /* Reset all the configuration registers. */
463
+ register_reset(&s->reg_info[R_SOFTWARE_RESET_REGISTER]);
464
+ register_reset(&s->reg_info[R_MODE_SELECT_REGISTER]);
465
+ register_reset(
466
+ &s->reg_info[R_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER]);
467
+ register_reset(&s->reg_info[R_ARBITRATION_PHASE_BIT_TIMING_REGISTER]);
468
+ register_reset(&s->reg_info[R_STATUS_REGISTER]);
469
+ register_reset(&s->reg_info[R_INTERRUPT_STATUS_REGISTER]);
470
+ register_reset(&s->reg_info[R_INTERRUPT_ENABLE_REGISTER]);
471
+ register_reset(&s->reg_info[R_INTERRUPT_CLEAR_REGISTER]);
472
+ register_reset(&s->reg_info[R_WIR]);
473
+}
474
+
475
+static void can_config_mode(XlnxZynqMPCANState *s)
476
+{
477
+ register_reset(&s->reg_info[R_ERROR_COUNTER_REGISTER]);
478
+ register_reset(&s->reg_info[R_ERROR_STATUS_REGISTER]);
479
+
480
+ /* Put XlnxZynqMPCAN in configuration mode. */
481
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 1);
482
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP, 0);
483
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP, 0);
484
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, BSOFF, 0);
485
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ERROR, 0);
486
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 0);
487
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 0);
488
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 0);
489
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ARBLST, 0);
490
+
491
+ can_update_irq(s);
492
+}
493
+
494
+static void update_status_register_mode_bits(XlnxZynqMPCANState *s)
495
+{
496
+ bool sleep_status = ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP);
497
+ bool sleep_mode = ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP);
498
+ /* Wake up interrupt bit. */
499
+ bool wakeup_irq_val = sleep_status && (sleep_mode == 0);
500
+ /* Sleep interrupt bit. */
501
+ bool sleep_irq_val = sleep_mode && (sleep_status == 0);
502
+
503
+ /* Clear previous core mode status bits. */
504
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 0);
505
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 0);
506
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 0);
507
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 0);
508
+
509
+ /* set current mode bit and generate irqs accordingly. */
510
+ if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, LBACK)) {
511
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 1);
512
+ } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP)) {
513
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 1);
514
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP,
515
+ sleep_irq_val);
516
+ } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) {
517
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 1);
518
+ } else {
519
+ /*
520
+ * If all bits are zero then XlnxZynqMPCAN is set in normal mode.
521
+ */
522
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 1);
523
+ /* Set wakeup interrupt bit. */
524
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP,
525
+ wakeup_irq_val);
526
+ }
527
+
528
+ can_update_irq(s);
529
+}
530
+
531
+static void can_exit_sleep_mode(XlnxZynqMPCANState *s)
532
+{
533
+ ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, 0);
534
+ update_status_register_mode_bits(s);
535
+}
536
+
537
+static void generate_frame(qemu_can_frame *frame, uint32_t *data)
538
+{
539
+ frame->can_id = data[0];
540
+ frame->can_dlc = FIELD_EX32(data[1], TXFIFO_DLC, DLC);
541
+
542
+ frame->data[0] = FIELD_EX32(data[2], TXFIFO_DATA1, DB3);
543
+ frame->data[1] = FIELD_EX32(data[2], TXFIFO_DATA1, DB2);
544
+ frame->data[2] = FIELD_EX32(data[2], TXFIFO_DATA1, DB1);
545
+ frame->data[3] = FIELD_EX32(data[2], TXFIFO_DATA1, DB0);
546
+
547
+ frame->data[4] = FIELD_EX32(data[3], TXFIFO_DATA2, DB7);
548
+ frame->data[5] = FIELD_EX32(data[3], TXFIFO_DATA2, DB6);
549
+ frame->data[6] = FIELD_EX32(data[3], TXFIFO_DATA2, DB5);
550
+ frame->data[7] = FIELD_EX32(data[3], TXFIFO_DATA2, DB4);
551
+}
552
+
553
+static bool tx_ready_check(XlnxZynqMPCANState *s)
554
+{
555
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST)) {
556
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
557
+
558
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data"
+ " while controller is in reset mode.\n",
560
+ path);
561
+ return false;
562
+ }
563
+
564
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) {
565
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
566
+
567
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer"
568
+ " data while controller is in configuration mode. Reset"
569
+ " the core so operations can start fresh.\n",
570
+ path);
571
+ return false;
572
+ }
573
+
574
+ if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SNOOP)) {
575
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
576
+
577
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer"
578
+ " data while controller is in SNOOP MODE.\n",
579
+ path);
580
+ return false;
581
+ }
582
+
583
+ return true;
584
+}
585
+
586
+static void transfer_fifo(XlnxZynqMPCANState *s, Fifo32 *fifo)
587
+{
588
+ qemu_can_frame frame;
589
+ uint32_t data[CAN_FRAME_SIZE];
590
+ int i;
591
+ bool can_tx = tx_ready_check(s);
592
+
593
+ if (!can_tx) {
594
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
595
+
596
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is not enabled for data"
597
+ " transfer.\n", path);
598
+ can_update_irq(s);
599
+ return;
600
+ }
601
+
602
+ while (!fifo32_is_empty(fifo)) {
603
+ for (i = 0; i < CAN_FRAME_SIZE; i++) {
604
+ data[i] = fifo32_pop(fifo);
605
+ }
606
+
607
+ if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, LBACK)) {
608
+ /*
609
+ * Controller is in loopback. In Loopback mode, the CAN core
610
+ * transmits a recessive bitstream on to the XlnxZynqMPCAN Bus.
611
+ * Any message transmitted is looped back to the RX line and
612
+ * acknowledged. The XlnxZynqMPCAN core receives any message
613
+ * that it transmits.
614
+ */
615
+ if (fifo32_is_full(&s->rx_fifo)) {
616
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 1);
617
+ } else {
618
+ for (i = 0; i < CAN_FRAME_SIZE; i++) {
619
+ fifo32_push(&s->rx_fifo, data[i]);
620
+ }
621
+
622
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1);
623
+ }
624
+ } else {
625
+ /* Normal mode Tx. */
626
+ generate_frame(&frame, data);
627
+
628
+ trace_xlnx_can_tx_data(frame.can_id, frame.can_dlc,
629
+ frame.data[0], frame.data[1],
630
+ frame.data[2], frame.data[3],
631
+ frame.data[4], frame.data[5],
632
+ frame.data[6], frame.data[7]);
633
+ can_bus_client_send(&s->bus_client, &frame, 1);
634
+ }
635
+ }
636
+
637
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 1);
638
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, TXBFLL, 0);
639
+
640
+ if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP)) {
641
+ can_exit_sleep_mode(s);
642
+ }
643
+
644
+ can_update_irq(s);
645
+}
646
+
647
+static uint64_t can_srr_pre_write(RegisterInfo *reg, uint64_t val)
648
+{
649
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
650
+
651
+ ARRAY_FIELD_DP32(s->regs, SOFTWARE_RESET_REGISTER, CEN,
652
+ FIELD_EX32(val, SOFTWARE_RESET_REGISTER, CEN));
653
+
654
+ if (FIELD_EX32(val, SOFTWARE_RESET_REGISTER, SRST)) {
655
+ trace_xlnx_can_reset(val);
656
+
657
+ /* First, core will do software reset then will enter in config mode. */
658
+ can_config_reset(s);
659
+ }
660
+
661
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) {
662
+ can_config_mode(s);
663
+ } else {
664
+ /*
665
+ * Leave config mode. Now XlnxZynqMPCAN core will enter normal,
666
+ * sleep, snoop or loopback mode depending upon LBACK, SLEEP, SNOOP
667
+ * register states.
668
+ */
669
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 0);
670
+
671
+ ptimer_transaction_begin(s->can_timer);
672
+ ptimer_set_count(s->can_timer, 0);
673
+ ptimer_transaction_commit(s->can_timer);
674
+
675
+ /* XlnxZynqMPCAN is out of config mode. It will send pending data. */
676
+ transfer_fifo(s, &s->txhpb_fifo);
677
+ transfer_fifo(s, &s->tx_fifo);
678
+ }
679
+
680
+ update_status_register_mode_bits(s);
681
+
682
+ return s->regs[R_SOFTWARE_RESET_REGISTER];
683
+}
684
+
685
+static uint64_t can_msr_pre_write(RegisterInfo *reg, uint64_t val)
686
+{
687
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
688
+ uint8_t multi_mode;
689
+
690
+ /*
691
+ * Multiple mode set check. This is done to make sure user doesn't set
692
+ * multiple modes.
693
+ */
694
+ multi_mode = FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK) +
695
+ FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP) +
696
+ FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP);
697
+
698
+ if (multi_mode > 1) {
699
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
700
+
701
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to config"
702
+ " several modes simultaneously. One mode will be selected"
703
+ " according to their priority: LBACK > SLEEP > SNOOP.\n",
704
+ path);
705
+ }
706
+
707
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) {
708
+ /* We are in configuration mode, any mode can be selected. */
709
+ s->regs[R_MODE_SELECT_REGISTER] = val;
710
+ } else {
711
+ bool sleep_mode_bit = FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP);
712
+
713
+ ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, sleep_mode_bit);
714
+
715
+ if (FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK)) {
716
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
717
+
718
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to set"
719
+ " LBACK mode without setting CEN bit as 0.\n",
720
+ path);
721
+ } else if (FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP)) {
722
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
723
+
724
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to set"
725
+ " SNOOP mode without setting CEN bit as 0.\n",
726
+ path);
727
+ }
728
+
729
+ update_status_register_mode_bits(s);
730
+ }
731
+
732
+ return s->regs[R_MODE_SELECT_REGISTER];
733
+}
734
+
735
+static uint64_t can_brpr_pre_write(RegisterInfo *reg, uint64_t val)
736
+{
737
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
738
+
739
+ /* Only allow writes when in config mode. */
740
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) {
741
+ return s->regs[R_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER];
742
+ }
743
+
744
+ return val;
745
+}
746
+
747
+static uint64_t can_btr_pre_write(RegisterInfo *reg, uint64_t val)
748
+{
749
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
750
+
751
+ /* Only allow writes when in config mode. */
752
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) {
753
+ return s->regs[R_ARBITRATION_PHASE_BIT_TIMING_REGISTER];
754
+ }
755
+
756
+ return val;
757
+}
758
+
759
+static uint64_t can_tcr_pre_write(RegisterInfo *reg, uint64_t val)
760
+{
761
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
762
+
763
+ if (FIELD_EX32(val, TIMESTAMP_REGISTER, CTS)) {
764
+ ptimer_transaction_begin(s->can_timer);
765
+ ptimer_set_count(s->can_timer, 0);
766
+ ptimer_transaction_commit(s->can_timer);
767
+ }
768
+
769
+ return 0;
770
+}
771
+
772
+static void update_rx_fifo(XlnxZynqMPCANState *s, const qemu_can_frame *frame)
773
+{
774
+ bool filter_pass = false;
775
+ uint16_t timestamp = 0;
776
+
777
+ /* If no filter is enabled. Message will be stored in FIFO. */
778
+ if (!((ARRAY_FIELD_EX32(s->regs, AFR, UAF1)) |
779
+ (ARRAY_FIELD_EX32(s->regs, AFR, UAF2)) |
780
+ (ARRAY_FIELD_EX32(s->regs, AFR, UAF3)) |
781
+ (ARRAY_FIELD_EX32(s->regs, AFR, UAF4)))) {
782
+ filter_pass = true;
783
+ }
784
+
785
+ /*
786
+ * Messages that pass any of the acceptance filters will be stored in
787
+ * the RX FIFO.
788
+ */
789
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF1)) {
790
+ uint32_t id_masked = s->regs[R_AFMR1] & frame->can_id;
791
+ uint32_t filter_id_masked = s->regs[R_AFMR1] & s->regs[R_AFIR1];
792
+
793
+ if (filter_id_masked == id_masked) {
794
+ filter_pass = true;
795
+ }
796
+ }
797
+
798
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF2)) {
799
+ uint32_t id_masked = s->regs[R_AFMR2] & frame->can_id;
800
+ uint32_t filter_id_masked = s->regs[R_AFMR2] & s->regs[R_AFIR2];
801
+
802
+ if (filter_id_masked == id_masked) {
803
+ filter_pass = true;
804
+ }
805
+ }
806
+
807
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF3)) {
808
+ uint32_t id_masked = s->regs[R_AFMR3] & frame->can_id;
809
+ uint32_t filter_id_masked = s->regs[R_AFMR3] & s->regs[R_AFIR3];
810
+
811
+ if (filter_id_masked == id_masked) {
812
+ filter_pass = true;
813
+ }
814
+ }
815
+
816
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF4)) {
817
+ uint32_t id_masked = s->regs[R_AFMR4] & frame->can_id;
818
+ uint32_t filter_id_masked = s->regs[R_AFMR4] & s->regs[R_AFIR4];
819
+
820
+ if (filter_id_masked == id_masked) {
821
+ filter_pass = true;
822
+ }
823
+ }
824
+
825
+ if (!filter_pass) {
826
+ trace_xlnx_can_rx_fifo_filter_reject(frame->can_id, frame->can_dlc);
827
+ return;
828
+ }
829
+
830
+ /* Store the message in fifo if it passed through any of the filters. */
831
+ if (filter_pass && frame->can_dlc <= MAX_DLC) {
832
+
833
+ if (fifo32_is_full(&s->rx_fifo)) {
834
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 1);
835
+ } else {
836
+ timestamp = CAN_TIMER_MAX - ptimer_get_count(s->can_timer);
837
+
838
+ fifo32_push(&s->rx_fifo, frame->can_id);
839
+
840
+ fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DLC_DLC_SHIFT,
841
+ R_RXFIFO_DLC_DLC_LENGTH,
842
+ frame->can_dlc) |
843
+ deposit32(0, R_RXFIFO_DLC_RXT_SHIFT,
844
+ R_RXFIFO_DLC_RXT_LENGTH,
845
+ timestamp));
846
+
847
+ /* First 32 bit of the data. */
848
+ fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA1_DB3_SHIFT,
849
+ R_TXFIFO_DATA1_DB3_LENGTH,
850
+ frame->data[0]) |
851
+ deposit32(0, R_TXFIFO_DATA1_DB2_SHIFT,
852
+ R_TXFIFO_DATA1_DB2_LENGTH,
853
+ frame->data[1]) |
854
+ deposit32(0, R_TXFIFO_DATA1_DB1_SHIFT,
855
+ R_TXFIFO_DATA1_DB1_LENGTH,
856
+ frame->data[2]) |
857
+ deposit32(0, R_TXFIFO_DATA1_DB0_SHIFT,
858
+ R_TXFIFO_DATA1_DB0_LENGTH,
859
+ frame->data[3]));
860
+ /* Last 32 bit of the data. */
861
+ fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA2_DB7_SHIFT,
862
+ R_TXFIFO_DATA2_DB7_LENGTH,
863
+ frame->data[4]) |
864
+ deposit32(0, R_TXFIFO_DATA2_DB6_SHIFT,
865
+ R_TXFIFO_DATA2_DB6_LENGTH,
866
+ frame->data[5]) |
867
+ deposit32(0, R_TXFIFO_DATA2_DB5_SHIFT,
868
+ R_TXFIFO_DATA2_DB5_LENGTH,
869
+ frame->data[6]) |
870
+ deposit32(0, R_TXFIFO_DATA2_DB4_SHIFT,
871
+ R_TXFIFO_DATA2_DB4_LENGTH,
872
+ frame->data[7]));
873
+
874
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1);
875
+ trace_xlnx_can_rx_data(frame->can_id, frame->can_dlc,
876
+ frame->data[0], frame->data[1],
877
+ frame->data[2], frame->data[3],
878
+ frame->data[4], frame->data[5],
879
+ frame->data[6], frame->data[7]);
880
+ }
881
+
882
+ can_update_irq(s);
883
+ }
884
+}
885
+
886
+static uint64_t can_rxfifo_pre_read(RegisterInfo *reg, uint64_t val)
887
+{
888
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
889
+
890
+ if (!fifo32_is_empty(&s->rx_fifo)) {
891
+ val = fifo32_pop(&s->rx_fifo);
892
+ } else {
893
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXUFLW, 1);
894
+ }
895
+
896
+ can_update_irq(s);
897
+ return val;
898
+}
899
+
900
+static void can_filter_enable_post_write(RegisterInfo *reg, uint64_t val)
901
+{
902
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
903
+
904
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF1) &&
905
+ ARRAY_FIELD_EX32(s->regs, AFR, UAF2) &&
906
+ ARRAY_FIELD_EX32(s->regs, AFR, UAF3) &&
907
+ ARRAY_FIELD_EX32(s->regs, AFR, UAF4)) {
908
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, ACFBSY, 1);
909
+ } else {
910
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, ACFBSY, 0);
911
+ }
912
+}
913
+
914
+static uint64_t can_filter_mask_pre_write(RegisterInfo *reg, uint64_t val)
915
+{
916
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
917
+ uint32_t reg_idx = (reg->access->addr) / 4;
918
+ uint32_t filter_number = (reg_idx - R_AFMR1) / 2;
919
+
920
+ /* To modify an acceptance filter, the corresponding UAF bit should be '0'. */
921
+ if (!(s->regs[R_AFR] & (1 << filter_number))) {
922
+ s->regs[reg_idx] = val;
923
+
924
+ trace_xlnx_can_filter_mask_pre_write(filter_number, s->regs[reg_idx]);
925
+ } else {
926
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
927
+
928
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d"
929
+ " mask is not set as corresponding UAF bit is not 0.\n",
930
+ path, filter_number + 1);
931
+ }
932
+
933
+ return s->regs[reg_idx];
934
+}
935
+
936
+static uint64_t can_filter_id_pre_write(RegisterInfo *reg, uint64_t val)
937
+{
938
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
939
+ uint32_t reg_idx = (reg->access->addr) / 4;
940
+ uint32_t filter_number = (reg_idx - R_AFIR1) / 2;
941
+
942
+ if (!(s->regs[R_AFR] & (1 << filter_number))) {
943
+ s->regs[reg_idx] = val;
944
+
945
+ trace_xlnx_can_filter_id_pre_write(filter_number, s->regs[reg_idx]);
946
+ } else {
947
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
948
+
949
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d"
950
+ " id is not set as corresponding UAF bit is not 0.\n",
951
+ path, filter_number + 1);
952
+ }
953
+
954
+ return s->regs[reg_idx];
955
+}
956
+
957
+static void can_tx_post_write(RegisterInfo *reg, uint64_t val)
958
+{
959
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
960
+
961
+ bool is_txhpb = reg->access->addr > A_TXFIFO_DATA2;
962
+
963
+ bool initiate_transfer = (reg->access->addr == A_TXFIFO_DATA2) ||
964
+ (reg->access->addr == A_TXHPB_DATA2);
965
+
966
+ Fifo32 *f = is_txhpb ? &s->txhpb_fifo : &s->tx_fifo;
967
+
968
+ if (!fifo32_is_full(f)) {
969
+ fifo32_push(f, val);
970
+ } else {
971
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
972
+
973
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: TX FIFO is full.\n", path);
974
+ }
975
+
976
+ /* Initiate the message send if TX register is written. */
977
+ if (initiate_transfer &&
978
+ ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) {
979
+ transfer_fifo(s, f);
980
+ }
981
+
982
+ can_update_irq(s);
983
+}
984
+
985
+static const RegisterAccessInfo can_regs_info[] = {
986
+ { .name = "SOFTWARE_RESET_REGISTER",
987
+ .addr = A_SOFTWARE_RESET_REGISTER,
988
+ .rsvd = 0xfffffffc,
989
+ .pre_write = can_srr_pre_write,
990
+ },{ .name = "MODE_SELECT_REGISTER",
991
+ .addr = A_MODE_SELECT_REGISTER,
992
+ .rsvd = 0xfffffff8,
993
+ .pre_write = can_msr_pre_write,
994
+ },{ .name = "ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER",
995
+ .addr = A_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER,
996
+ .rsvd = 0xffffff00,
997
+ .pre_write = can_brpr_pre_write,
998
+ },{ .name = "ARBITRATION_PHASE_BIT_TIMING_REGISTER",
999
+ .addr = A_ARBITRATION_PHASE_BIT_TIMING_REGISTER,
1000
+ .rsvd = 0xfffffe00,
1001
+ .pre_write = can_btr_pre_write,
1002
+ },{ .name = "ERROR_COUNTER_REGISTER",
1003
+ .addr = A_ERROR_COUNTER_REGISTER,
1004
+ .rsvd = 0xffff0000,
1005
+ .ro = 0xffffffff,
1006
+ },{ .name = "ERROR_STATUS_REGISTER",
1007
+ .addr = A_ERROR_STATUS_REGISTER,
1008
+ .rsvd = 0xffffffe0,
1009
+ .w1c = 0x1f,
1010
+ },{ .name = "STATUS_REGISTER", .addr = A_STATUS_REGISTER,
1011
+ .reset = 0x1,
1012
+ .rsvd = 0xffffe000,
1013
+ .ro = 0x1fff,
1014
+ },{ .name = "INTERRUPT_STATUS_REGISTER",
1015
+ .addr = A_INTERRUPT_STATUS_REGISTER,
1016
+ .reset = 0x6000,
1017
+ .rsvd = 0xffff8000,
1018
+ .ro = 0x7fff,
1019
+ },{ .name = "INTERRUPT_ENABLE_REGISTER",
1020
+ .addr = A_INTERRUPT_ENABLE_REGISTER,
1021
+ .rsvd = 0xffff8000,
1022
+ .post_write = can_ier_post_write,
1023
+ },{ .name = "INTERRUPT_CLEAR_REGISTER",
1024
+ .addr = A_INTERRUPT_CLEAR_REGISTER,
1025
+ .rsvd = 0xffff8000,
1026
+ .pre_write = can_icr_pre_write,
1027
+ },{ .name = "TIMESTAMP_REGISTER",
1028
+ .addr = A_TIMESTAMP_REGISTER,
1029
+ .rsvd = 0xfffffffe,
1030
+ .pre_write = can_tcr_pre_write,
1031
+ },{ .name = "WIR", .addr = A_WIR,
1032
+ .reset = 0x3f3f,
1033
+ .rsvd = 0xffff0000,
1034
+ },{ .name = "TXFIFO_ID", .addr = A_TXFIFO_ID,
1035
+ .post_write = can_tx_post_write,
1036
+ },{ .name = "TXFIFO_DLC", .addr = A_TXFIFO_DLC,
1037
+ .rsvd = 0xfffffff,
1038
+ .post_write = can_tx_post_write,
1039
+ },{ .name = "TXFIFO_DATA1", .addr = A_TXFIFO_DATA1,
1040
+ .post_write = can_tx_post_write,
1041
+ },{ .name = "TXFIFO_DATA2", .addr = A_TXFIFO_DATA2,
1042
+ .post_write = can_tx_post_write,
1043
+ },{ .name = "TXHPB_ID", .addr = A_TXHPB_ID,
1044
+ .post_write = can_tx_post_write,
1045
+ },{ .name = "TXHPB_DLC", .addr = A_TXHPB_DLC,
1046
+ .rsvd = 0xfffffff,
1047
+ .post_write = can_tx_post_write,
1048
+ },{ .name = "TXHPB_DATA1", .addr = A_TXHPB_DATA1,
1049
+ .post_write = can_tx_post_write,
1050
+ },{ .name = "TXHPB_DATA2", .addr = A_TXHPB_DATA2,
1051
+ .post_write = can_tx_post_write,
1052
+ },{ .name = "RXFIFO_ID", .addr = A_RXFIFO_ID,
1053
+ .ro = 0xffffffff,
1054
+ .post_read = can_rxfifo_pre_read,
1055
+ },{ .name = "RXFIFO_DLC", .addr = A_RXFIFO_DLC,
1056
+ .rsvd = 0xfff0000,
1057
+ .post_read = can_rxfifo_pre_read,
1058
+ },{ .name = "RXFIFO_DATA1", .addr = A_RXFIFO_DATA1,
1059
+ .post_read = can_rxfifo_pre_read,
1060
+ },{ .name = "RXFIFO_DATA2", .addr = A_RXFIFO_DATA2,
1061
+ .post_read = can_rxfifo_pre_read,
1062
+ },{ .name = "AFR", .addr = A_AFR,
1063
+ .rsvd = 0xfffffff0,
1064
+ .post_write = can_filter_enable_post_write,
1065
+ },{ .name = "AFMR1", .addr = A_AFMR1,
1066
+ .pre_write = can_filter_mask_pre_write,
1067
+ },{ .name = "AFIR1", .addr = A_AFIR1,
1068
+ .pre_write = can_filter_id_pre_write,
1069
+ },{ .name = "AFMR2", .addr = A_AFMR2,
1070
+ .pre_write = can_filter_mask_pre_write,
1071
+ },{ .name = "AFIR2", .addr = A_AFIR2,
1072
+ .pre_write = can_filter_id_pre_write,
1073
+ },{ .name = "AFMR3", .addr = A_AFMR3,
1074
+ .pre_write = can_filter_mask_pre_write,
1075
+ },{ .name = "AFIR3", .addr = A_AFIR3,
1076
+ .pre_write = can_filter_id_pre_write,
1077
+ },{ .name = "AFMR4", .addr = A_AFMR4,
1078
+ .pre_write = can_filter_mask_pre_write,
1079
+ },{ .name = "AFIR4", .addr = A_AFIR4,
1080
+ .pre_write = can_filter_id_pre_write,
1081
+ }
1082
+};
1083
+
1084
+static void xlnx_zynqmp_can_ptimer_cb(void *opaque)
1085
+{
1086
+ /* No action required on the timer rollover. */
1087
+}
1088
+
1089
+static const MemoryRegionOps can_ops = {
1090
+ .read = register_read_memory,
1091
+ .write = register_write_memory,
1092
+ .endianness = DEVICE_LITTLE_ENDIAN,
1093
+ .valid = {
1094
+ .min_access_size = 4,
1095
+ .max_access_size = 4,
1096
+ },
1097
+};
1098
+
1099
+static void xlnx_zynqmp_can_reset_init(Object *obj, ResetType type)
1100
+{
1101
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj);
1102
+ unsigned int i;
1103
+
1104
+ for (i = R_RXFIFO_ID; i < ARRAY_SIZE(s->reg_info); ++i) {
1105
+ register_reset(&s->reg_info[i]);
1106
+ }
1107
+
1108
+ ptimer_transaction_begin(s->can_timer);
1109
+ ptimer_set_count(s->can_timer, 0);
1110
+ ptimer_transaction_commit(s->can_timer);
1111
+}
1112
+
1113
+static void xlnx_zynqmp_can_reset_hold(Object *obj)
1114
+{
1115
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj);
1116
+ unsigned int i;
1117
+
1118
+ for (i = 0; i < R_RXFIFO_ID; ++i) {
1119
+ register_reset(&s->reg_info[i]);
1120
+ }
1121
+
1122
+ /*
1123
+ * Reset FIFOs when CAN model is reset. This will clear the fifo writes
1124
+ * done by post_write which gets called from register_reset function,
1125
+ * post_write handle will not be able to trigger tx because CAN will be
1126
+ * disabled when software_reset_register is cleared first.
1127
+ */
1128
+ fifo32_reset(&s->rx_fifo);
1129
+ fifo32_reset(&s->tx_fifo);
1130
+ fifo32_reset(&s->txhpb_fifo);
1131
+}
1132
+
1133
+static bool xlnx_zynqmp_can_can_receive(CanBusClientState *client)
1134
+{
1135
+ XlnxZynqMPCANState *s = container_of(client, XlnxZynqMPCANState,
1136
+ bus_client);
1137
+
1138
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST)) {
1139
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
1140
+
1141
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is in reset state.\n",
1142
+ path);
1143
+ return false;
1144
+ }
1145
+
1146
+ if ((ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) == 0) {
1147
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
1148
+
1149
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is disabled. Incoming"
1150
+ " messages will be discarded.\n", path);
1151
+ return false;
1152
+ }
1153
+
1154
+ return true;
1155
+}
1156
+
1157
+static ssize_t xlnx_zynqmp_can_receive(CanBusClientState *client,
1158
+ const qemu_can_frame *buf, size_t buf_size) {
1159
+ XlnxZynqMPCANState *s = container_of(client, XlnxZynqMPCANState,
1160
+ bus_client);
1161
+ const qemu_can_frame *frame = buf;
1162
+
1163
+ if (buf_size <= 0) {
1164
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
1165
+
1166
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Error in the data received.\n",
1167
+ path);
1168
+ return 0;
1169
+ }
1170
+
1171
+ if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SNOOP)) {
1172
+ /* Snoop mode: just keep the data; no response back. */
1173
+ update_rx_fifo(s, frame);
1174
+ } else if ((ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP))) {
1175
+ /*
1176
+ * XlnxZynqMPCAN is in sleep mode. Any data on bus will bring it to wake
1177
+ * up state.
1178
+ */
1179
+ can_exit_sleep_mode(s);
1180
+ update_rx_fifo(s, frame);
1181
+ } else if ((ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP)) == 0) {
1182
+ update_rx_fifo(s, frame);
1183
+ } else {
1184
+ /*
1185
+ * XlnxZynqMPCAN will not participate in normal bus communication
1186
+ * and will not receive any messages transmitted by other CAN nodes.
1187
+ */
1188
+ trace_xlnx_can_rx_discard(s->regs[R_STATUS_REGISTER]);
1189
+ }
1190
+
1191
+ return 1;
1192
+}
1193
+
1194
+static CanBusClientInfo can_xilinx_bus_client_info = {
1195
+ .can_receive = xlnx_zynqmp_can_can_receive,
1196
+ .receive = xlnx_zynqmp_can_receive,
1197
+};
1198
+
1199
+static int xlnx_zynqmp_can_connect_to_bus(XlnxZynqMPCANState *s,
1200
+ CanBusState *bus)
1201
+{
1202
+ s->bus_client.info = &can_xilinx_bus_client_info;
1203
+
1204
+ if (can_bus_insert_client(bus, &s->bus_client) < 0) {
1205
+ return -1;
1206
+ }
1207
+ return 0;
1208
+}
1209
+
1210
+static void xlnx_zynqmp_can_realize(DeviceState *dev, Error **errp)
1211
+{
1212
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(dev);
1213
+
1214
+ if (s->canbus) {
1215
+ if (xlnx_zynqmp_can_connect_to_bus(s, s->canbus) < 0) {
1216
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
1217
+
1218
+ error_setg(errp, "%s: xlnx_zynqmp_can_connect_to_bus"
1219
+ " failed.", path);
1220
+ return;
1221
+ }
1222
+ }
1223
+
1224
+ /* Create RX FIFO, TXFIFO, TXHPB storage. */
1225
+ fifo32_create(&s->rx_fifo, RXFIFO_SIZE);
1226
+ fifo32_create(&s->tx_fifo, RXFIFO_SIZE);
1227
+ fifo32_create(&s->txhpb_fifo, CAN_FRAME_SIZE);
1228
+
1229
+ /* Allocate a new timer. */
1230
+ s->can_timer = ptimer_init(xlnx_zynqmp_can_ptimer_cb, s,
1231
+ PTIMER_POLICY_DEFAULT);
1232
+
1233
+ ptimer_transaction_begin(s->can_timer);
1234
+
1235
+ ptimer_set_freq(s->can_timer, s->cfg.ext_clk_freq);
1236
+ ptimer_set_limit(s->can_timer, CAN_TIMER_MAX, 1);
1237
+ ptimer_run(s->can_timer, 0);
1238
+ ptimer_transaction_commit(s->can_timer);
1239
+}
1240
+
1241
+static void xlnx_zynqmp_can_init(Object *obj)
1242
+{
1243
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj);
1244
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
1245
+
1246
+ RegisterInfoArray *reg_array;
1247
+
1248
+ memory_region_init(&s->iomem, obj, TYPE_XLNX_ZYNQMP_CAN,
1249
+ XLNX_ZYNQMP_CAN_R_MAX * 4);
1250
+ reg_array = register_init_block32(DEVICE(obj), can_regs_info,
1251
+ ARRAY_SIZE(can_regs_info),
1252
+ s->reg_info, s->regs,
1253
+ &can_ops,
1254
+ XLNX_ZYNQMP_CAN_ERR_DEBUG,
1255
+ XLNX_ZYNQMP_CAN_R_MAX * 4);
1256
+
1257
+ memory_region_add_subregion(&s->iomem, 0x00, &reg_array->mem);
1258
+ sysbus_init_mmio(sbd, &s->iomem);
1259
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
1260
+}
1261
+
1262
+static const VMStateDescription vmstate_can = {
1263
+ .name = TYPE_XLNX_ZYNQMP_CAN,
1264
+ .version_id = 1,
1265
+ .minimum_version_id = 1,
1266
+ .fields = (VMStateField[]) {
1267
+ VMSTATE_FIFO32(rx_fifo, XlnxZynqMPCANState),
1268
+ VMSTATE_FIFO32(tx_fifo, XlnxZynqMPCANState),
1269
+ VMSTATE_FIFO32(txhpb_fifo, XlnxZynqMPCANState),
1270
+ VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPCANState, XLNX_ZYNQMP_CAN_R_MAX),
1271
+ VMSTATE_PTIMER(can_timer, XlnxZynqMPCANState),
1272
+ VMSTATE_END_OF_LIST(),
1273
+ }
1274
+};
1275
+
1276
+static Property xlnx_zynqmp_can_properties[] = {
1277
+ DEFINE_PROP_UINT32("ext_clk_freq", XlnxZynqMPCANState, cfg.ext_clk_freq,
1278
+ CAN_DEFAULT_CLOCK),
1279
+ DEFINE_PROP_LINK("canbus", XlnxZynqMPCANState, canbus, TYPE_CAN_BUS,
1280
+ CanBusState *),
1281
+ DEFINE_PROP_END_OF_LIST(),
1282
+};
1283
+
1284
+static void xlnx_zynqmp_can_class_init(ObjectClass *klass, void *data)
1285
+{
1286
+ DeviceClass *dc = DEVICE_CLASS(klass);
1287
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
1288
+
1289
+ rc->phases.enter = xlnx_zynqmp_can_reset_init;
1290
+ rc->phases.hold = xlnx_zynqmp_can_reset_hold;
1291
+ dc->realize = xlnx_zynqmp_can_realize;
1292
+ device_class_set_props(dc, xlnx_zynqmp_can_properties);
1293
+ dc->vmsd = &vmstate_can;
1294
+}
1295
+
1296
+static const TypeInfo can_info = {
1297
+ .name = TYPE_XLNX_ZYNQMP_CAN,
1298
+ .parent = TYPE_SYS_BUS_DEVICE,
1299
+ .instance_size = sizeof(XlnxZynqMPCANState),
1300
+ .class_init = xlnx_zynqmp_can_class_init,
1301
+ .instance_init = xlnx_zynqmp_can_init,
1302
+};
1303
+
1304
+static void can_register_types(void)
1305
+{
1306
+ type_register_static(&can_info);
1307
+}
1308
+
1309
+type_init(can_register_types)
diff --git a/hw/Kconfig b/hw/Kconfig
index XXXXXXX..XXXXXXX 100644
--- a/hw/Kconfig
+++ b/hw/Kconfig
@@ -XXX,XX +XXX,XX @@ config XILINX_AXI
 config XLNX_ZYNQMP
 bool
 select REGISTER
+ select CAN_BUS
diff --git a/hw/net/can/meson.build b/hw/net/can/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/can/meson.build
+++ b/hw/net/can/meson.build
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_pcm3680_pci.c'))
softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_mioe3680_pci.c'))
softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD', if_true: files('ctucan_core.c'))
softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD_PCI', if_true: files('ctucan_pci.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-can.c'))
diff --git a/hw/net/can/trace-events b/hw/net/can/trace-events
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/net/can/trace-events
@@ -XXX,XX +XXX,XX @@
+# xlnx-zynqmp-can.c
+xlnx_can_update_irq(uint32_t isr, uint32_t ier, uint32_t irq) "ISR: 0x%08x IER: 0x%08x IRQ: 0x%08x"
+xlnx_can_reset(uint32_t val) "Resetting controller with value = 0x%08x"
+xlnx_can_rx_fifo_filter_reject(uint32_t id, uint8_t dlc) "Frame: ID: 0x%08x DLC: 0x%02x"
+xlnx_can_filter_id_pre_write(uint8_t filter_num, uint32_t value) "Filter%d ID: 0x%08x"
+xlnx_can_filter_mask_pre_write(uint8_t filter_num, uint32_t value) "Filter%d MASK: 0x%08x"
+xlnx_can_tx_data(uint32_t id, uint8_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x"
+xlnx_can_rx_data(uint32_t id, uint32_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x"
+xlnx_can_rx_discard(uint32_t status) "Controller is not enabled for bus communication. Status Register: 0x%08x"
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Buglink: https://bugs.launchpad.net/bugs/1921948
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210416183106.1516563-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/tcg/aarch64/mte-5.c | 44 +++++++++++++++++++++++++++++++
 tests/tcg/aarch64/Makefile.target | 2 +-
 2 files changed, 45 insertions(+), 1 deletion(-)
 create mode 100644 tests/tcg/aarch64/mte-5.c

diff --git a/tests/tcg/aarch64/mte-5.c b/tests/tcg/aarch64/mte-5.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/aarch64/mte-5.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Memory tagging, faulting unaligned access.
+ *
+ * Copyright (c) 2021 Linaro Ltd
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "mte.h"
+
+void pass(int sig, siginfo_t *info, void *uc)
+{
+ assert(info->si_code == SEGV_MTESERR);
+ exit(0);
+}
+
+int main(int ac, char **av)
+{
+ struct sigaction sa;
+ void *p0, *p1, *p2;
+ long excl = 1;
+
+ enable_mte(PR_MTE_TCF_SYNC);
+ p0 = alloc_mte_mem(sizeof(*p0));
+
+ /* Create two differently tagged pointers. */
+ asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
+ asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1));
+ assert(excl != 1);
+ asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));
+ assert(p1 != p2);
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = pass;
+ sa.sa_flags = SA_SIGINFO;
+ sigaction(SIGSEGV, &sa, NULL);
+
+ /* Store two different tags in sequential granules. */
+ asm("stg %0, [%0]" : : "r"(p1));
+ asm("stg %0, [%0]" : : "r"(p2 + 16));
+
+ /* Perform an unaligned load crossing the granules. */
+ asm volatile("ldr %0, [%1]" : "=r"(p0) : "r"(p1 + 12));
+ abort();
+}
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -XXX,XX +XXX,XX @@ AARCH64_TESTS += bti-2

# MTE Tests
ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_MTE),)
-AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-6
+AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6
mte-%: CFLAGS += -march=armv8.5-a+memtag
endif

--
2.20.1
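A short standalone sketch of the granule arithmetic the mte-5 test relies on
may help here; it assumes the architectural 16-byte MTE tag granule and is
illustrative only, not part of the patch:

    #include <stdio.h>

    /*
     * An 8-byte load at offset 12 of a granule-aligned buffer spans bytes
     * 12..19, i.e. granule 0 (tagged via p1) and granule 1 (tagged via p2),
     * so a synchronous tag-check fault is the expected outcome.
     */
    int main(void)
    {
        unsigned granule = 16;         /* MTE tag granule size */
        unsigned lo = 12;              /* first byte of the load */
        unsigned hi = lo + 8 - 1;      /* last byte of the load  */

        printf("load covers granules %u..%u\n", lo / granule, hi / granule);
        return 0;                      /* prints "0..1" */
    }
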
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
After recent changes, mte_checkN does not use ESIZE,
4
and mte_check1 never used TSIZE. We can combine the
5
two into a single field: SIZEM1.
6
7
Choose to pass size - 1 because size == 0 is never used,
8
our immediate need in mte_probe_int is for the address
9
of the last byte (ptr + size - 1), and since almost all
10
operations are powers of 2, this makes the immediate
11
constant one bit smaller.
12
13
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20210416183106.1516563-6-richard.henderson@linaro.org
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
18
target/arm/internals.h | 4 ++--
19
target/arm/mte_helper.c | 18 ++++++++----------
20
target/arm/translate-a64.c | 5 ++---
21
target/arm/translate-sve.c | 5 ++---
22
4 files changed, 14 insertions(+), 18 deletions(-)
23
24
diff --git a/target/arm/internals.h b/target/arm/internals.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/internals.h
27
+++ b/target/arm/internals.h
28
@@ -XXX,XX +XXX,XX @@
29
#define TARGET_ARM_INTERNALS_H
30
31
#include "hw/registerfields.h"
32
+#include "tcg/tcg-gvec-desc.h"
33
#include "syndrome.h"
34
35
/* register banks for CPU modes */
36
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, MIDX, 0, 4)
37
FIELD(MTEDESC, TBI, 4, 2)
38
FIELD(MTEDESC, TCMA, 6, 2)
39
FIELD(MTEDESC, WRITE, 8, 1)
40
-FIELD(MTEDESC, ESIZE, 9, 5)
41
-FIELD(MTEDESC, TSIZE, 14, 10) /* mte_checkN only */
42
+FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */
43
44
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
45
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
46
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/mte_helper.c
49
+++ b/target/arm/mte_helper.c
50
@@ -XXX,XX +XXX,XX @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
51
* Return positive on success with tbi enabled.
52
*/
53
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
54
- uintptr_t ra, uint32_t total, uint64_t *fault)
55
+ uintptr_t ra, uint64_t *fault)
56
{
57
int mmu_idx, ptr_tag, bit55;
58
uint64_t ptr_last, prev_page, next_page;
59
uint64_t tag_first, tag_last;
60
uint64_t tag_byte_first, tag_byte_last;
61
- uint32_t tag_count, tag_size, n, c;
62
+ uint32_t sizem1, tag_count, tag_size, n, c;
63
uint8_t *mem1, *mem2;
64
MMUAccessType type;
65
66
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
67
68
mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
69
type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
70
+ sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);
71
72
/* Find the addr of the end of the access */
73
- ptr_last = ptr + total - 1;
74
+ ptr_last = ptr + sizem1;
75
76
/* Round the bounds to the tag granule, and compute the number of tags. */
77
tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
78
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
79
if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
80
/* Memory access stays on one page. */
81
tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
82
- mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
83
+ mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
84
MMU_DATA_LOAD, tag_size, ra);
85
if (!mem1) {
86
return 1;
87
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
88
uint64_t ptr, uintptr_t ra)
89
{
90
uint64_t fault;
91
- uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
92
- int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
93
+ int ret = mte_probe_int(env, desc, ptr, ra, &fault);
94
95
if (unlikely(ret == 0)) {
96
mte_check_fail(env, desc, fault, ra);
97
@@ -XXX,XX +XXX,XX @@ uint64_t mte_check1(CPUARMState *env, uint32_t desc,
98
uint64_t ptr, uintptr_t ra)
99
{
100
uint64_t fault;
101
- uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
102
- int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);
103
+ int ret = mte_probe_int(env, desc, ptr, ra, &fault);
104
105
if (unlikely(ret == 0)) {
106
mte_check_fail(env, desc, fault, ra);
107
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
108
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
109
{
110
uint64_t fault;
111
- uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
112
- int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);
113
+ int ret = mte_probe_int(env, desc, ptr, 0, &fault);
114
115
return ret != 0;
116
}
117
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
118
index XXXXXXX..XXXXXXX 100644
119
--- a/target/arm/translate-a64.c
120
+++ b/target/arm/translate-a64.c
121
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
122
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
123
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
124
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
125
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size);
126
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
127
tcg_desc = tcg_const_i32(desc);
128
129
ret = new_tmp_a64(s);
130
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
131
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
132
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
133
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
134
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize);
135
- desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size);
136
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
137
tcg_desc = tcg_const_i32(desc);
138
139
ret = new_tmp_a64(s);
140
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
141
index XXXXXXX..XXXXXXX 100644
142
--- a/target/arm/translate-sve.c
143
+++ b/target/arm/translate-sve.c
144
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
145
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
146
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
147
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
148
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
149
- desc = FIELD_DP32(desc, MTEDESC, TSIZE, mte_n << msz);
150
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
151
desc <<= SVE_MTEDESC_SHIFT;
152
} else {
153
addr = clean_data_tbi(s, addr);
154
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
155
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
156
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
157
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
158
- desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << msz);
159
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
160
desc <<= SVE_MTEDESC_SHIFT;
161
}
162
desc = simd_desc(vsz, vsz, desc | scale);
163
--
164
2.20.1
165
166
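A note on the SIZEM1 encoding introduced above: a zero-length access never reaches an MTE check, so storing size - 1 lets a field of SIMD_DATA_BITS - 9 bits describe sizes 1..2^N rather than 0..2^N - 1, and "ptr + sizem1" directly yields the inclusive address of the last byte. A minimal standalone sketch of the bias (the 13-bit width below is a stand-in for SIMD_DATA_BITS - 9, and the helper names are invented):

    #include <assert.h>
    #include <stdint.h>

    #define SIZEM1_SHIFT 9
    #define SIZEM1_LEN   13    /* stand-in for SIMD_DATA_BITS - 9 */

    /* Pack an access size of 1 .. 2^SIZEM1_LEN bytes into desc. */
    static uint32_t encode_sizem1(uint32_t desc, uint32_t size)
    {
        assert(size >= 1 && size <= (1u << SIZEM1_LEN));
        return desc | ((size - 1u) << SIZEM1_SHIFT);
    }

    /* Recover the size; the +1 undoes the bias. */
    static uint32_t decode_size(uint32_t desc)
    {
        return ((desc >> SIZEM1_SHIFT) & ((1u << SIZEM1_LEN) - 1u)) + 1u;
    }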
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
The mte_check1 and mte_checkN functions are now identical.
4
Drop mte_check1 and rename mte_checkN to mte_check.
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210416183106.1516563-7-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/helper-a64.h | 3 +--
12
target/arm/internals.h | 5 +----
13
target/arm/mte_helper.c | 26 +++-----------------------
14
target/arm/sve_helper.c | 14 +++++++-------
15
target/arm/translate-a64.c | 4 ++--
16
5 files changed, 14 insertions(+), 38 deletions(-)
17
18
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-a64.h
21
+++ b/target/arm/helper-a64.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
23
DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
24
DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
25
26
-DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
27
-DEF_HELPER_FLAGS_3(mte_checkN, TCG_CALL_NO_WG, i64, env, i32, i64)
28
+DEF_HELPER_FLAGS_3(mte_check, TCG_CALL_NO_WG, i64, env, i32, i64)
29
DEF_HELPER_FLAGS_3(mte_check_zva, TCG_CALL_NO_WG, i64, env, i32, i64)
30
DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
31
DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
32
diff --git a/target/arm/internals.h b/target/arm/internals.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/internals.h
35
+++ b/target/arm/internals.h
36
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, WRITE, 8, 1)
37
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */
38
39
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
40
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
41
- uint64_t ptr, uintptr_t ra);
42
-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
43
- uint64_t ptr, uintptr_t ra);
44
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
45
46
static inline int allocation_tag_from_addr(uint64_t ptr)
47
{
48
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/target/arm/mte_helper.c
51
+++ b/target/arm/mte_helper.c
52
@@ -XXX,XX +XXX,XX @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
53
return 0;
54
}
55
56
-uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
57
- uint64_t ptr, uintptr_t ra)
58
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
59
{
60
uint64_t fault;
61
int ret = mte_probe_int(env, desc, ptr, ra, &fault);
62
@@ -XXX,XX +XXX,XX @@ uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
63
return useronly_clean_ptr(ptr);
64
}
65
66
-uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
67
+uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
68
{
69
- return mte_checkN(env, desc, ptr, GETPC());
70
-}
71
-
72
-uint64_t mte_check1(CPUARMState *env, uint32_t desc,
73
- uint64_t ptr, uintptr_t ra)
74
-{
75
- uint64_t fault;
76
- int ret = mte_probe_int(env, desc, ptr, ra, &fault);
77
-
78
- if (unlikely(ret == 0)) {
79
- mte_check_fail(env, desc, fault, ra);
80
- } else if (ret < 0) {
81
- return ptr;
82
- }
83
- return useronly_clean_ptr(ptr);
84
-}
85
-
86
-uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
87
-{
88
- return mte_check1(env, desc, ptr, GETPC());
89
+ return mte_check(env, desc, ptr, GETPC());
90
}
91
92
/*
93
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/sve_helper.c
96
+++ b/target/arm/sve_helper.c
97
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env,
98
uintptr_t ra)
99
{
100
sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
101
- mtedesc, ra, mte_check1);
102
+ mtedesc, ra, mte_check);
103
}
104
105
static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
106
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
107
uintptr_t ra)
108
{
109
sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
110
- mtedesc, ra, mte_checkN);
111
+ mtedesc, ra, mte_check);
112
}
113
114
115
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
116
if (fault == FAULT_FIRST) {
117
/* Trapping mte check for the first-fault element. */
118
if (mtedesc) {
119
- mte_check1(env, mtedesc, addr + mem_off, retaddr);
120
+ mte_check(env, mtedesc, addr + mem_off, retaddr);
121
}
122
123
/*
124
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
125
info.attrs, BP_MEM_READ, retaddr);
126
}
127
if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
128
- mte_check1(env, mtedesc, addr, retaddr);
129
+ mte_check(env, mtedesc, addr, retaddr);
130
}
131
host_fn(&scratch, reg_off, info.host);
132
} else {
133
@@ -XXX,XX +XXX,XX @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
134
BP_MEM_READ, retaddr);
135
}
136
if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
137
- mte_check1(env, mtedesc, addr, retaddr);
138
+ mte_check(env, mtedesc, addr, retaddr);
139
}
140
tlb_fn(env, &scratch, reg_off, addr, retaddr);
141
}
142
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
143
*/
144
addr = base + (off_fn(vm, reg_off) << scale);
145
if (mtedesc) {
146
- mte_check1(env, mtedesc, addr, retaddr);
147
+ mte_check(env, mtedesc, addr, retaddr);
148
}
149
tlb_fn(env, vd, reg_off, addr, retaddr);
150
151
@@ -XXX,XX +XXX,XX @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
152
}
153
154
if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
155
- mte_check1(env, mtedesc, addr, retaddr);
156
+ mte_check(env, mtedesc, addr, retaddr);
157
}
158
}
159
i += 1;
160
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/arm/translate-a64.c
163
+++ b/target/arm/translate-a64.c
164
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
165
tcg_desc = tcg_const_i32(desc);
166
167
ret = new_tmp_a64(s);
168
- gen_helper_mte_check1(ret, cpu_env, tcg_desc, addr);
169
+ gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
170
tcg_temp_free_i32(tcg_desc);
171
172
return ret;
173
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
174
tcg_desc = tcg_const_i32(desc);
175
176
ret = new_tmp_a64(s);
177
- gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr);
178
+ gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
179
tcg_temp_free_i32(tcg_desc);
180
181
return ret;
182
--
183
2.20.1
184
185
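With the size carried inside desc, a single-element and a multi-element access follow exactly the same path, which is why mte_check1 and mte_checkN could collapse into one helper. A hypothetical caller-side sketch (the wrapper is invented for illustration and assumes the internals.h context; mte_check and the MTEDESC fields are those from the patch):

    /* Invented wrapper, not QEMU code: one helper serves all sizes. */
    static uint64_t checked_access(CPUARMState *env, uint32_t desc_base,
                                   uint64_t ptr, unsigned size, uintptr_t ra)
    {
        uint32_t desc = FIELD_DP32(desc_base, MTEDESC, SIZEM1, size - 1);
        return mte_check(env, desc, ptr, ra);
    }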
1
From: Vikram Garhwal <fnu.vikram@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Connect CAN0 and CAN1 on the ZynqMP.
3
For consistency with the mte_check1 + mte_checkN merge
4
to mte_check, rename the probe function as well.
4
5
5
Reviewed-by: Francisco Iglesias <francisco.iglesias@xilinx.com>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Vikram Garhwal <fnu.vikram@xilinx.com>
8
Message-id: 20210416183106.1516563-8-richard.henderson@linaro.org
8
Message-id: 1605728926-352690-3-git-send-email-fnu.vikram@xilinx.com
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
include/hw/arm/xlnx-zynqmp.h | 8 ++++++++
11
target/arm/internals.h | 2 +-
12
hw/arm/xlnx-zcu102.c | 20 ++++++++++++++++++++
12
target/arm/mte_helper.c | 6 +++---
13
hw/arm/xlnx-zynqmp.c | 34 ++++++++++++++++++++++++++++++++++
13
target/arm/sve_helper.c | 6 +++---
14
3 files changed, 62 insertions(+)
14
3 files changed, 7 insertions(+), 7 deletions(-)
15
15
16
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
16
diff --git a/target/arm/internals.h b/target/arm/internals.h
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/hw/arm/xlnx-zynqmp.h
18
--- a/target/arm/internals.h
19
+++ b/include/hw/arm/xlnx-zynqmp.h
19
+++ b/target/arm/internals.h
20
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ FIELD(MTEDESC, TCMA, 6, 2)
21
#include "hw/intc/arm_gic.h"
21
FIELD(MTEDESC, WRITE, 8, 1)
22
#include "hw/net/cadence_gem.h"
22
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */
23
#include "hw/char/cadence_uart.h"
23
24
+#include "hw/net/xlnx-zynqmp-can.h"
24
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
25
#include "hw/ide/ahci.h"
25
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
26
#include "hw/sd/sdhci.h"
26
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
27
#include "hw/ssi/xilinx_spips.h"
27
28
@@ -XXX,XX +XXX,XX @@
28
static inline int allocation_tag_from_addr(uint64_t ptr)
29
#include "hw/cpu/cluster.h"
29
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
30
#include "target/arm/cpu.h"
31
#include "qom/object.h"
32
+#include "net/can_emu.h"
33
34
#define TYPE_XLNX_ZYNQMP "xlnx,zynqmp"
35
OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
36
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
37
#define XLNX_ZYNQMP_NUM_RPU_CPUS 2
38
#define XLNX_ZYNQMP_NUM_GEMS 4
39
#define XLNX_ZYNQMP_NUM_UARTS 2
40
+#define XLNX_ZYNQMP_NUM_CAN 2
41
+#define XLNX_ZYNQMP_CAN_REF_CLK (24 * 1000 * 1000)
42
#define XLNX_ZYNQMP_NUM_SDHCI 2
43
#define XLNX_ZYNQMP_NUM_SPIS 2
44
#define XLNX_ZYNQMP_NUM_GDMA_CH 8
45
@@ -XXX,XX +XXX,XX @@ struct XlnxZynqMPState {
46
47
CadenceGEMState gem[XLNX_ZYNQMP_NUM_GEMS];
48
CadenceUARTState uart[XLNX_ZYNQMP_NUM_UARTS];
49
+ XlnxZynqMPCANState can[XLNX_ZYNQMP_NUM_CAN];
50
SysbusAHCIState sata;
51
SDHCIState sdhci[XLNX_ZYNQMP_NUM_SDHCI];
52
XilinxSPIPS spi[XLNX_ZYNQMP_NUM_SPIS];
53
@@ -XXX,XX +XXX,XX @@ struct XlnxZynqMPState {
54
bool virt;
55
/* Has the RPU subsystem? */
56
bool has_rpu;
57
+
58
+ /* CAN bus. */
59
+ CanBusState *canbus[XLNX_ZYNQMP_NUM_CAN];
60
};
61
62
#endif
63
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
64
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
65
--- a/hw/arm/xlnx-zcu102.c
31
--- a/target/arm/mte_helper.c
66
+++ b/hw/arm/xlnx-zcu102.c
32
+++ b/target/arm/mte_helper.c
67
@@ -XXX,XX +XXX,XX @@
33
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
68
#include "sysemu/qtest.h"
34
* exception for inaccessible pages, and resolves the virtual address
69
#include "sysemu/device_tree.h"
35
* into the softmmu tlb.
70
#include "qom/object.h"
36
*
71
+#include "net/can_emu.h"
37
- * When RA == 0, this is for mte_probe1. The page is expected to be
72
38
+ * When RA == 0, this is for mte_probe. The page is expected to be
73
struct XlnxZCU102 {
39
* valid. Indicate to probe_access_flags no-fault, then assert that
74
MachineState parent_obj;
40
* we received a valid page.
75
@@ -XXX,XX +XXX,XX @@ struct XlnxZCU102 {
41
*/
76
bool secure;
42
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
77
bool virt;
78
79
+ CanBusState *canbus[XLNX_ZYNQMP_NUM_CAN];
80
+
81
struct arm_boot_info binfo;
82
};
83
84
@@ -XXX,XX +XXX,XX @@ static void xlnx_zcu102_init(MachineState *machine)
85
object_property_set_bool(OBJECT(&s->soc), "virtualization", s->virt,
86
&error_fatal);
87
88
+ for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) {
89
+ gchar *bus_name = g_strdup_printf("canbus%d", i);
90
+
91
+ object_property_set_link(OBJECT(&s->soc), bus_name,
92
+ OBJECT(s->canbus[i]), &error_fatal);
93
+ g_free(bus_name);
94
+ }
95
+
96
qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
97
98
/* Create and plug in the SD cards */
99
@@ -XXX,XX +XXX,XX @@ static void xlnx_zcu102_machine_instance_init(Object *obj)
100
s->secure = false;
101
/* Default to virt (EL2) being disabled */
102
s->virt = false;
103
+ object_property_add_link(obj, "xlnx-zcu102.canbus0", TYPE_CAN_BUS,
104
+ (Object **)&s->canbus[0],
105
+ object_property_allow_set_link,
106
+ 0);
107
+
108
+ object_property_add_link(obj, "xlnx-zcu102.canbus1", TYPE_CAN_BUS,
109
+ (Object **)&s->canbus[1],
110
+ object_property_allow_set_link,
111
+ 0);
112
}
43
}
113
44
114
static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data)
45
/*
115
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
46
- * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
47
+ * No-fault version of mte_check, to be used by SVE for MemSingleNF.
48
* Returns false if the access is Checked and the check failed. This
49
* is only intended to probe the tag -- the validity of the page must
50
* be checked beforehand.
51
*/
52
-bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
53
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
54
{
55
uint64_t fault;
56
int ret = mte_probe_int(env, desc, ptr, 0, &fault);
57
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
116
index XXXXXXX..XXXXXXX 100644
58
index XXXXXXX..XXXXXXX 100644
117
--- a/hw/arm/xlnx-zynqmp.c
59
--- a/target/arm/sve_helper.c
118
+++ b/hw/arm/xlnx-zynqmp.c
60
+++ b/target/arm/sve_helper.c
119
@@ -XXX,XX +XXX,XX @@ static const int uart_intr[XLNX_ZYNQMP_NUM_UARTS] = {
61
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
120
21, 22,
62
/* Watchpoint hit, see below. */
121
};
63
goto do_fault;
122
64
}
123
+static const uint64_t can_addr[XLNX_ZYNQMP_NUM_CAN] = {
65
- if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) {
124
+ 0xFF060000, 0xFF070000,
66
+ if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
125
+};
67
goto do_fault;
126
+
68
}
127
+static const int can_intr[XLNX_ZYNQMP_NUM_CAN] = {
69
/*
128
+ 23, 24,
70
@@ -XXX,XX +XXX,XX @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
129
+};
71
& BP_MEM_READ)) {
130
+
72
goto do_fault;
131
static const uint64_t sdhci_addr[XLNX_ZYNQMP_NUM_SDHCI] = {
73
}
132
0xFF160000, 0xFF170000,
74
- if (mtedesc && !mte_probe1(env, mtedesc, addr + mem_off)) {
133
};
75
+ if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
134
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_init(Object *obj)
76
goto do_fault;
135
TYPE_CADENCE_UART);
77
}
136
}
78
host_fn(vd, reg_off, host + mem_off);
137
79
@@ -XXX,XX +XXX,XX @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
138
+ for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) {
80
}
139
+ object_initialize_child(obj, "can[*]", &s->can[i],
81
if (mtedesc &&
140
+ TYPE_XLNX_ZYNQMP_CAN);
82
arm_tlb_mte_tagged(&info.attrs) &&
141
+ }
83
- !mte_probe1(env, mtedesc, addr)) {
142
+
84
+ !mte_probe(env, mtedesc, addr)) {
143
object_initialize_child(obj, "sata", &s->sata, TYPE_SYSBUS_AHCI);
85
goto fault;
144
86
}
145
for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) {
146
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
147
gic_spi[uart_intr[i]]);
148
}
149
150
+ for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) {
151
+ object_property_set_int(OBJECT(&s->can[i]), "ext_clk_freq",
152
+ XLNX_ZYNQMP_CAN_REF_CLK, &error_abort);
153
+
154
+ object_property_set_link(OBJECT(&s->can[i]), "canbus",
155
+ OBJECT(s->canbus[i]), &error_fatal);
156
+
157
+ sysbus_realize(SYS_BUS_DEVICE(&s->can[i]), &err);
158
+ if (err) {
159
+ error_propagate(errp, err);
160
+ return;
161
+ }
162
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->can[i]), 0, can_addr[i]);
163
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->can[i]), 0,
164
+ gic_spi[can_intr[i]]);
165
+ }
166
+
167
object_property_set_int(OBJECT(&s->sata), "num-ports", SATA_NUM_PORTS,
168
&error_abort);
169
if (!sysbus_realize(SYS_BUS_DEVICE(&s->sata), errp)) {
170
@@ -XXX,XX +XXX,XX @@ static Property xlnx_zynqmp_props[] = {
171
DEFINE_PROP_BOOL("has_rpu", XlnxZynqMPState, has_rpu, false),
172
DEFINE_PROP_LINK("ddr-ram", XlnxZynqMPState, ddr_ram, TYPE_MEMORY_REGION,
173
MemoryRegion *),
174
+ DEFINE_PROP_LINK("canbus0", XlnxZynqMPState, canbus[0], TYPE_CAN_BUS,
175
+ CanBusState *),
176
+ DEFINE_PROP_LINK("canbus1", XlnxZynqMPState, canbus[1], TYPE_CAN_BUS,
177
+ CanBusState *),
178
DEFINE_PROP_END_OF_LIST()
179
};
180
87
181
--
88
--
182
2.20.1
89
2.20.1
183
90
184
91
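The probe/check split that this rename keeps is worth noting: mte_check raises the tag-check fault itself, while mte_probe passes ra == 0 down to mte_probe_int and only reports failure, which lets SVE first-fault and no-fault loads suppress elements instead of trapping. A hypothetical helper sketching that usage (the wrapper is invented; the mte_probe prototype is from the patch):

    /* Invented helper: true if the element may be consumed without
     * raising a tag-check fault, as in sve_ldnfff1_r above. */
    static bool nofault_element_ok(CPUARMState *env, uint32_t mtedesc,
                                   uint64_t addr)
    {
        return !mtedesc || mte_probe(env, mtedesc, addr);
    }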
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Now that mte_check1 and mte_checkN have been merged, we can
4
merge sve_cont_ldst_mte_check1 and sve_cont_ldst_mte_checkN.
5
6
Which means that we can eliminate the function pointer into
7
sve_ldN_r and sve_stN_r, calling sve_cont_ldst_mte_check directly.
8
9
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20210416183106.1516563-9-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
target/arm/sve_helper.c | 84 +++++++++++++----------------------------
15
1 file changed, 26 insertions(+), 58 deletions(-)
16
17
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/sve_helper.c
20
+++ b/target/arm/sve_helper.c
21
@@ -XXX,XX +XXX,XX @@ static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
22
#endif
23
}
24
25
-typedef uint64_t mte_check_fn(CPUARMState *, uint32_t, uint64_t, uintptr_t);
26
-
27
-static inline QEMU_ALWAYS_INLINE
28
-void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
29
- uint64_t *vg, target_ulong addr, int esize,
30
- int msize, uint32_t mtedesc, uintptr_t ra,
31
- mte_check_fn *check)
32
+static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
33
+ uint64_t *vg, target_ulong addr, int esize,
34
+ int msize, uint32_t mtedesc, uintptr_t ra)
35
{
36
intptr_t mem_off, reg_off, reg_last;
37
38
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
39
uint64_t pg = vg[reg_off >> 6];
40
do {
41
if ((pg >> (reg_off & 63)) & 1) {
42
- check(env, mtedesc, addr, ra);
43
+ mte_check(env, mtedesc, addr, ra);
44
}
45
reg_off += esize;
46
mem_off += msize;
47
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
48
uint64_t pg = vg[reg_off >> 6];
49
do {
50
if ((pg >> (reg_off & 63)) & 1) {
51
- check(env, mtedesc, addr, ra);
52
+ mte_check(env, mtedesc, addr, ra);
53
}
54
reg_off += esize;
55
mem_off += msize;
56
@@ -XXX,XX +XXX,XX @@ void sve_cont_ldst_mte_check_int(SVEContLdSt *info, CPUARMState *env,
57
}
58
}
59
60
-typedef void sve_cont_ldst_mte_check_fn(SVEContLdSt *info, CPUARMState *env,
61
- uint64_t *vg, target_ulong addr,
62
- int esize, int msize, uint32_t mtedesc,
63
- uintptr_t ra);
64
-
65
-static void sve_cont_ldst_mte_check1(SVEContLdSt *info, CPUARMState *env,
66
- uint64_t *vg, target_ulong addr,
67
- int esize, int msize, uint32_t mtedesc,
68
- uintptr_t ra)
69
-{
70
- sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
71
- mtedesc, ra, mte_check);
72
-}
73
-
74
-static void sve_cont_ldst_mte_checkN(SVEContLdSt *info, CPUARMState *env,
75
- uint64_t *vg, target_ulong addr,
76
- int esize, int msize, uint32_t mtedesc,
77
- uintptr_t ra)
78
-{
79
- sve_cont_ldst_mte_check_int(info, env, vg, addr, esize, msize,
80
- mtedesc, ra, mte_check);
81
-}
82
-
83
-
84
/*
85
* Common helper for all contiguous 1,2,3,4-register predicated stores.
86
*/
87
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
88
uint32_t desc, const uintptr_t retaddr,
89
const int esz, const int msz, const int N, uint32_t mtedesc,
90
sve_ldst1_host_fn *host_fn,
91
- sve_ldst1_tlb_fn *tlb_fn,
92
- sve_cont_ldst_mte_check_fn *mte_check_fn)
93
+ sve_ldst1_tlb_fn *tlb_fn)
94
{
95
const unsigned rd = simd_data(desc);
96
const intptr_t reg_max = simd_oprsz(desc);
97
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
98
* Handle mte checks for all active elements.
99
* Since TBI must be set for MTE, !mtedesc => !mte_active.
100
*/
101
- if (mte_check_fn && mtedesc) {
102
- mte_check_fn(&info, env, vg, addr, 1 << esz, N << msz,
103
- mtedesc, retaddr);
104
+ if (mtedesc) {
105
+ sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
106
+ mtedesc, retaddr);
107
}
108
109
flags = info.page[0].flags | info.page[1].flags;
110
@@ -XXX,XX +XXX,XX @@ void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
111
mtedesc = 0;
112
}
113
114
- sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn,
115
- N == 1 ? sve_cont_ldst_mte_check1 : sve_cont_ldst_mte_checkN);
116
+ sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
117
}
118
119
#define DO_LD1_1(NAME, ESZ) \
120
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
121
target_ulong addr, uint32_t desc) \
122
{ \
123
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, 0, \
124
- sve_##NAME##_host, sve_##NAME##_tlb, NULL); \
125
+ sve_##NAME##_host, sve_##NAME##_tlb); \
126
} \
127
void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \
128
target_ulong addr, uint32_t desc) \
129
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \
130
target_ulong addr, uint32_t desc) \
131
{ \
132
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
133
- sve_##NAME##_le_host, sve_##NAME##_le_tlb, NULL); \
134
+ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
135
} \
136
void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \
137
target_ulong addr, uint32_t desc) \
138
{ \
139
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
140
- sve_##NAME##_be_host, sve_##NAME##_be_tlb, NULL); \
141
+ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
142
} \
143
void HELPER(sve_##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
144
- target_ulong addr, uint32_t desc) \
145
+ target_ulong addr, uint32_t desc) \
146
{ \
147
sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
148
sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
149
} \
150
void HELPER(sve_##NAME##_be_r_mte)(CPUARMState *env, void *vg, \
151
- target_ulong addr, uint32_t desc) \
152
+ target_ulong addr, uint32_t desc) \
153
{ \
154
sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
155
sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
156
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \
157
target_ulong addr, uint32_t desc) \
158
{ \
159
sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, 0, \
160
- sve_ld1bb_host, sve_ld1bb_tlb, NULL); \
161
+ sve_ld1bb_host, sve_ld1bb_tlb); \
162
} \
163
void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \
164
target_ulong addr, uint32_t desc) \
165
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \
166
target_ulong addr, uint32_t desc) \
167
{ \
168
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
169
- sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb, NULL); \
170
+ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
171
} \
172
void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \
173
target_ulong addr, uint32_t desc) \
174
{ \
175
sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
176
- sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb, NULL); \
177
+ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
178
} \
179
void HELPER(sve_ld##N##SUFF##_le_r_mte)(CPUARMState *env, void *vg, \
180
target_ulong addr, uint32_t desc) \
181
@@ -XXX,XX +XXX,XX @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
182
uint32_t desc, const uintptr_t retaddr,
183
const int esz, const int msz, const int N, uint32_t mtedesc,
184
sve_ldst1_host_fn *host_fn,
185
- sve_ldst1_tlb_fn *tlb_fn,
186
- sve_cont_ldst_mte_check_fn *mte_check_fn)
187
+ sve_ldst1_tlb_fn *tlb_fn)
188
{
189
const unsigned rd = simd_data(desc);
190
const intptr_t reg_max = simd_oprsz(desc);
191
@@ -XXX,XX +XXX,XX @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
192
* Handle mte checks for all active elements.
193
* Since TBI must be set for MTE, !mtedesc => !mte_active.
194
*/
195
- if (mte_check_fn && mtedesc) {
196
- mte_check_fn(&info, env, vg, addr, 1 << esz, N << msz,
197
- mtedesc, retaddr);
198
+ if (mtedesc) {
199
+ sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
200
+ mtedesc, retaddr);
201
}
202
203
flags = info.page[0].flags | info.page[1].flags;
204
@@ -XXX,XX +XXX,XX @@ void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
205
mtedesc = 0;
206
}
207
208
- sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn,
209
- N == 1 ? sve_cont_ldst_mte_check1 : sve_cont_ldst_mte_checkN);
210
+ sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
211
}
212
213
#define DO_STN_1(N, NAME, ESZ) \
214
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_st##N##NAME##_r)(CPUARMState *env, void *vg, \
215
target_ulong addr, uint32_t desc) \
216
{ \
217
sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, 0, \
218
- sve_st1##NAME##_host, sve_st1##NAME##_tlb, NULL); \
219
+ sve_st1##NAME##_host, sve_st1##NAME##_tlb); \
220
} \
221
void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \
222
target_ulong addr, uint32_t desc) \
223
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_st##N##NAME##_le_r)(CPUARMState *env, void *vg, \
224
target_ulong addr, uint32_t desc) \
225
{ \
226
sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
227
- sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb, NULL); \
228
+ sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \
229
} \
230
void HELPER(sve_st##N##NAME##_be_r)(CPUARMState *env, void *vg, \
231
target_ulong addr, uint32_t desc) \
232
{ \
233
sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
234
- sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb, NULL); \
235
+ sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \
236
} \
237
void HELPER(sve_st##N##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
238
target_ulong addr, uint32_t desc) \
239
--
240
2.20.1
241
242
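The simplification generalizes beyond SVE: once every caller passes the same callback, the function-pointer parameter and its NULL guard can be dropped and the compiler is free to inline the callee. A minimal standalone illustration of the before/after shape (generic invented names, not QEMU code):

    typedef void check_fn(int elem);

    static void check_elem(int elem) { (void)elem; /* ... */ }

    /* Before: indirect call, guarded at each use. */
    static void walk_indirect(int n, check_fn *fn)
    {
        for (int i = 0; i < n; i++) {
            if (fn) {
                fn(i);
            }
        }
    }

    /* After: the only callback is known statically. */
    static void walk_direct(int n)
    {
        for (int i = 0; i < n; i++) {
            check_elem(i);
        }
    }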
1
From: Alex Chen <alex.chen@huawei.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
We should use printf format specifier "%u" instead of "%d" for
3
The log2_esize parameter is not used except trivially.
4
argument of type "unsigned int".
4
Drop the parameter and the deferral to gen_mte_check1.
5
5
6
Reported-by: Euler Robot <euler.robot@huawei.com>
6
This fixes a bug in that the parameters as documented
7
Signed-off-by: Alex Chen <alex.chen@huawei.com>
7
in the header file were the reverse from those in the
8
Message-id: 20201126111109.112238-3-alex.chen@huawei.com
8
implementation. Which meant that translate-sve.c was
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
passing the parameters in the wrong order.
10
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20210416183106.1516563-10-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
15
---
12
hw/misc/imx31_ccm.c | 14 +++++++-------
16
target/arm/translate-a64.h | 2 +-
13
hw/misc/imx_ccm.c | 4 ++--
17
target/arm/translate-a64.c | 15 +++++++--------
14
2 files changed, 9 insertions(+), 9 deletions(-)
18
target/arm/translate-sve.c | 4 ++--
19
3 files changed, 10 insertions(+), 11 deletions(-)
15
20
16
diff --git a/hw/misc/imx31_ccm.c b/hw/misc/imx31_ccm.c
21
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
17
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/misc/imx31_ccm.c
23
--- a/target/arm/translate-a64.h
19
+++ b/hw/misc/imx31_ccm.c
24
+++ b/target/arm/translate-a64.h
20
@@ -XXX,XX +XXX,XX @@ static const char *imx31_ccm_reg_name(uint32_t reg)
25
@@ -XXX,XX +XXX,XX @@ TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
21
case IMX31_CCM_PDR2_REG:
26
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
22
return "PDR2";
27
bool tag_checked, int log2_size);
23
default:
28
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
24
- sprintf(unknown, "[%d ?]", reg);
29
- bool tag_checked, int count, int log2_esize);
25
+ sprintf(unknown, "[%u ?]", reg);
30
+ bool tag_checked, int size);
26
return unknown;
31
32
/* We should have at some point before trying to access an FP register
33
* done the necessary access check, so assert that
34
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/translate-a64.c
37
+++ b/target/arm/translate-a64.c
38
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
39
* For MTE, check multiple logical sequential accesses.
40
*/
41
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
42
- bool tag_checked, int log2_esize, int total_size)
43
+ bool tag_checked, int size)
44
{
45
- if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) {
46
+ if (tag_checked && s->mte_active[0]) {
47
TCGv_i32 tcg_desc;
48
TCGv_i64 ret;
49
int desc = 0;
50
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
51
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
52
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
53
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
54
- desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
55
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
56
tcg_desc = tcg_const_i32(desc);
57
58
ret = new_tmp_a64(s);
59
@@ -XXX,XX +XXX,XX @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
60
61
return ret;
27
}
62
}
63
- return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize);
64
+ return clean_data_tbi(s, addr);
28
}
65
}
29
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_pll_ref_clk(IMXCCMState *dev)
66
30
freq = CKIH_FREQ;
67
typedef struct DisasCompare64 {
68
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
31
}
69
}
32
70
33
- DPRINTF("freq = %d\n", freq);
71
clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
34
+ DPRINTF("freq = %u\n", freq);
72
- (wback || rn != 31) && !set_tag,
35
73
- size, 2 << size);
36
return freq;
74
+ (wback || rn != 31) && !set_tag, 2 << size);
37
}
75
38
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_mpll_clk(IMXCCMState *dev)
76
if (is_vector) {
39
freq = imx_ccm_calc_pll(s->reg[IMX31_CCM_MPCTL_REG],
77
if (is_load) {
40
imx31_ccm_get_pll_ref_clk(dev));
78
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
41
79
* promote consecutive little-endian elements below.
42
- DPRINTF("freq = %d\n", freq);
80
*/
43
+ DPRINTF("freq = %u\n", freq);
81
clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
44
82
- size, total);
45
return freq;
83
+ total);
46
}
84
47
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_mcu_main_clk(IMXCCMState *dev)
85
/*
48
freq = imx31_ccm_get_mpll_clk(dev);
86
* Consecutive little-endian elements from a single register
49
}
87
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
50
88
tcg_rn = cpu_reg_sp(s, rn);
51
- DPRINTF("freq = %d\n", freq);
89
52
+ DPRINTF("freq = %u\n", freq);
90
clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
53
91
- scale, total);
54
return freq;
92
+ total);
55
}
93
56
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_hclk_clk(IMXCCMState *dev)
94
tcg_ebytes = tcg_const_i64(1 << scale);
57
freq = imx31_ccm_get_mcu_main_clk(dev)
95
for (xs = 0; xs < selem; xs++) {
58
/ (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], MAX));
96
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
59
60
- DPRINTF("freq = %d\n", freq);
61
+ DPRINTF("freq = %u\n", freq);
62
63
return freq;
64
}
65
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_ipg_clk(IMXCCMState *dev)
66
freq = imx31_ccm_get_hclk_clk(dev)
67
/ (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], IPG));
68
69
- DPRINTF("freq = %d\n", freq);
70
+ DPRINTF("freq = %u\n", freq);
71
72
return freq;
73
}
74
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
75
break;
76
}
77
78
- DPRINTF("Clock = %d) = %d\n", clock, freq);
79
+ DPRINTF("Clock = %d) = %u\n", clock, freq);
80
81
return freq;
82
}
83
diff --git a/hw/misc/imx_ccm.c b/hw/misc/imx_ccm.c
84
index XXXXXXX..XXXXXXX 100644
97
index XXXXXXX..XXXXXXX 100644
85
--- a/hw/misc/imx_ccm.c
98
--- a/target/arm/translate-sve.c
86
+++ b/hw/misc/imx_ccm.c
99
+++ b/target/arm/translate-sve.c
87
@@ -XXX,XX +XXX,XX @@ uint32_t imx_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
100
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
88
freq = klass->get_clock_frequency(dev, clock);
101
89
}
102
dirty_addr = tcg_temp_new_i64();
90
103
tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
91
- DPRINTF("(clock = %d) = %d\n", clock, freq);
104
- clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
92
+ DPRINTF("(clock = %d) = %u\n", clock, freq);
105
+ clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
93
106
tcg_temp_free_i64(dirty_addr);
94
return freq;
107
95
}
108
/*
96
@@ -XXX,XX +XXX,XX @@ uint32_t imx_ccm_calc_pll(uint32_t pllreg, uint32_t base_freq)
109
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
97
freq = ((2 * (base_freq >> 10) * (mfi * mfd + mfn)) /
110
98
(mfd * pd)) << 10;
111
dirty_addr = tcg_temp_new_i64();
99
112
tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
100
- DPRINTF("(pllreg = 0x%08x, base_freq = %d) = %d\n", pllreg, base_freq,
113
- clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
101
+ DPRINTF("(pllreg = 0x%08x, base_freq = %u) = %d\n", pllreg, base_freq,
114
+ clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
102
freq);
115
tcg_temp_free_i64(dirty_addr);
103
116
104
return freq;
117
/* Note that unpredicated load/store of vector/predicate registers
105
--
118
--
106
2.20.1
119
2.20.1
107
120
108
121
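It is worth spelling out why the reversed parameters went unnoticed: a C prototype and definition that agree on parameter types but swap the names are fully compatible, so neither translation unit produces a diagnostic. A toy reproduction with invented names (not the QEMU functions):

    #include <stdio.h>

    /* What the "header" documents: */
    int bytes_for(int count, int log2_esize);

    /* What the "implementation" actually does: */
    int bytes_for(int log2_esize, int count)
    {
        return count << log2_esize;
    }

    int main(void)
    {
        /* A caller trusting the header passes count=4, log2_esize=3
         * and expects 4 << 3 = 32, but gets 3 << 4 = 48. */
        printf("%d\n", bytes_for(4, 3));
        return 0;
    }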
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
The encoding of size = 2 and size = 3 had the incorrect decode
4
for align, overlapping the stride field. This error was hidden
5
by what should have been unnecessary masking in translate.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210419202257.161730-2-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/neon-ls.decode | 4 ++--
13
target/arm/translate-neon.c.inc | 4 ++--
14
2 files changed, 4 insertions(+), 4 deletions(-)
15
16
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/neon-ls.decode
19
+++ b/target/arm/neon-ls.decode
20
@@ -XXX,XX +XXX,XX @@ VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
21
22
VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 00 n:2 reg_idx:3 align:1 rm:4 \
23
vd=%vd_dp size=0 stride=1
24
-VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 align:2 rm:4 \
25
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 . align:1 rm:4 \
26
vd=%vd_dp size=1 stride=%imm1_5_p1
27
-VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 align:3 rm:4 \
28
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 . align:2 rm:4 \
29
vd=%vd_dp size=2 stride=%imm1_6_p1
30
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/translate-neon.c.inc
33
+++ b/target/arm/translate-neon.c.inc
34
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
35
switch (nregs) {
36
case 1:
37
if (((a->align & (1 << a->size)) != 0) ||
38
- (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
39
+ (a->size == 2 && (a->align == 1 || a->align == 2))) {
40
return false;
41
}
42
break;
43
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
44
}
45
break;
46
case 4:
47
- if ((a->size == 2) && ((a->align & 3) == 3)) {
48
+ if (a->size == 2 && a->align == 3) {
49
return false;
50
}
51
break;
52
--
53
2.20.1
54
55
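To make the overlap concrete for the size=2 pattern above: rm occupies bits [3:0], align bits [5:4], the stride bit (%imm1_6_p1) is bit 6, and reg_idx is bit 7, so decoding align as three bits swallowed the stride bit. A small sketch of the two decodes (extract32 is reimplemented locally to keep this standalone; bit positions follow the decode lines above):

    #include <stdint.h>

    static inline uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    /* Fixed decode: align is bits [5:4], stride comes from bit 6. */
    uint32_t align_fixed(uint32_t insn) { return extract32(insn, 4, 2); }
    uint32_t stride_bit(uint32_t insn)  { return extract32(insn, 6, 1); }

    /* Buggy decode: align:3 at [6:4] overlaps the stride bit. */
    uint32_t align_buggy(uint32_t insn) { return extract32(insn, 4, 3); }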
1
The FPDSCR register has a similar layout to the FPSCR. In v8.1M it
1
From: Richard Henderson <richard.henderson@linaro.org>
2
gains new fields FZ16 (if half-precision floating point is supported)
3
and LTPSIZE (always reads as 4). Update the reset value and the code
4
that handles writes to this register accordingly.
5
2
3
We're about to rearrange the macro expansion surrounding tbflags,
4
and this field name will be expanded using the bit definition of
5
the same name, resulting in a token pasting error.
6
7
So SCTLR_B -> SCTLR__B in the 3 uses, and document it.
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20210419202257.161730-3-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20201119215617.29887-16-peter.maydell@linaro.org
9
---
13
---
10
target/arm/cpu.h | 5 +++++
14
target/arm/cpu.h | 2 +-
11
hw/intc/armv7m_nvic.c | 9 ++++++++-
15
target/arm/helper.c | 2 +-
12
target/arm/cpu.c | 3 +++
16
target/arm/translate.c | 2 +-
13
3 files changed, 16 insertions(+), 1 deletion(-)
17
3 files changed, 3 insertions(+), 3 deletions(-)
14
18
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
21
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
23
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */
20
#define FPCR_IXE (1 << 12) /* Inexact exception trap enable */
24
*/
21
#define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */
25
FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
22
#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
26
FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */
23
+#define FPCR_RMODE_MASK (3 << 22) /* Rounding mode */
27
-FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
24
#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
28
+FIELD(TBFLAG_A32, SCTLR__B, 15, 1) /* Cannot overlap with SCTLR_B */
25
#define FPCR_DN (1 << 25) /* Default NaN enable bit */
29
FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
26
+#define FPCR_AHP (1 << 26) /* Alternative half-precision */
30
/*
27
#define FPCR_QC (1 << 27) /* Cumulative saturation bit */
31
* Indicates whether cp register reads and writes by guest code should access
28
#define FPCR_V (1 << 28) /* FP overflow flag */
32
diff --git a/target/arm/helper.c b/target/arm/helper.c
29
#define FPCR_C (1 << 29) /* FP carry flag */
30
#define FPCR_Z (1 << 30) /* FP zero flag */
31
#define FPCR_N (1 << 31) /* FP negative flag */
32
33
+#define FPCR_LTPSIZE_SHIFT 16 /* LTPSIZE, M-profile only */
34
+#define FPCR_LTPSIZE_MASK (7 << FPCR_LTPSIZE_SHIFT)
35
+
36
#define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
37
#define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)
38
39
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
40
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
41
--- a/hw/intc/armv7m_nvic.c
34
--- a/target/arm/helper.c
42
+++ b/hw/intc/armv7m_nvic.c
35
+++ b/target/arm/helper.c
43
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
36
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
44
break;
37
bool sctlr_b = arm_sctlr_b(env);
45
case 0xf3c: /* FPDSCR */
38
46
if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
39
if (sctlr_b) {
47
- value &= 0x07c00000;
40
- flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
48
+ uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;
41
+ flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR__B, 1);
49
+ if (cpu_isar_feature(any_fp16, cpu)) {
42
}
50
+ mask |= FPCR_FZ16;
43
if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
51
+ }
44
flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
52
+ value &= mask;
45
diff --git a/target/arm/translate.c b/target/arm/translate.c
53
+ if (cpu_isar_feature(aa32_lob, cpu)) {
54
+ value |= 4 << FPCR_LTPSIZE_SHIFT;
55
+ }
56
cpu->env.v7m.fpdscr[attrs.secure] = value;
57
}
58
break;
59
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
60
index XXXXXXX..XXXXXXX 100644
46
index XXXXXXX..XXXXXXX 100644
61
--- a/target/arm/cpu.c
47
--- a/target/arm/translate.c
62
+++ b/target/arm/cpu.c
48
+++ b/target/arm/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
49
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
64
* always reset to 4.
50
FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
65
*/
51
dc->debug_target_el =
66
env->v7m.ltpsize = 4;
52
FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
67
+ /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
53
- dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
68
+ env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
54
+ dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR__B);
69
+ env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
55
dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
70
}
56
dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
71
57
dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
72
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
73
--
58
--
74
2.20.1
59
2.20.1
75
60
76
61
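The failure mode being avoided is a classic preprocessor trap: SCTLR_B is already an object-like macro for the SCTLR.B bit, and a macro argument that is not adjacent to # or ## is fully expanded before substitution, so passing the field name through a wrapper macro would paste "(1U << 7)" into an identifier. A reduced sketch (deliberately simplified macros, not the QEMU ones verbatim):

    #define SCTLR_B (1U << 7)            /* pre-existing bit definition */

    #define FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
    #define DP_FLAG(reg, field)     FIELD_SHIFT(reg, field) /* args expand here */

    #define TBFLAG_A32_SCTLR__B_SHIFT 15

    int ok = DP_FLAG(TBFLAG_A32, SCTLR__B); /* -> TBFLAG_A32_SCTLR__B_SHIFT */

    /* DP_FLAG(TBFLAG_A32, SCTLR_B) would expand to
     * FIELD_SHIFT(TBFLAG_A32, (1U << 7)) and fail: "(1U << 7)" cannot
     * be pasted into an identifier. Hence the SCTLR__B spelling. */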
1
In v8.1M the PXN architecture extension adds a new PXN bit to the
1
From: Richard Henderson <richard.henderson@linaro.org>
2
MPU_RLAR registers, which forbids execution of code in the region
3
from a privileged mode.
4
2
5
This is another feature which is just in the generic "in v8.1M" set
3
We're about to rearrange the macro expansion surrounding tbflags,
6
and has no ID register field indicating its presence.
4
and this field name will be expanded using the bit definition of
5
the same name, resulting in a token pasting error.
7
6
7
So PSTATE_SS -> PSTATE__SS in the uses, and document it.
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20210419202257.161730-4-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20201119215617.29887-3-peter.maydell@linaro.org
11
---
13
---
12
target/arm/helper.c | 7 ++++++-
14
target/arm/cpu.h | 2 +-
13
1 file changed, 6 insertions(+), 1 deletion(-)
15
target/arm/helper.c | 4 ++--
16
target/arm/translate-a64.c | 2 +-
17
target/arm/translate.c | 2 +-
18
4 files changed, 5 insertions(+), 5 deletions(-)
14
19
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
25
*/
26
FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
27
FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
28
-FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1) /* Not cached. */
29
+FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1) /* Not cached. */
30
FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
31
FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
32
/* Target EL if we take a floating-point-disabled exception */
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
33
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
35
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
36
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
37
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
20
} else {
38
* 0 x Inactive (the TB flag for SS is always 0)
21
uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
39
* 1 0 Active-pending
22
uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
40
* 1 1 Active-not-pending
23
+ bool pxn = false;
41
- * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
24
+
42
+ * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
25
+ if (arm_feature(env, ARM_FEATURE_V8_1M)) {
43
*/
26
+ pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
44
if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
27
+ }
45
(env->pstate & PSTATE_SS)) {
28
46
- flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
29
if (m_is_system_region(env, address)) {
47
+ flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1);
30
/* System space is always execute never */
48
}
31
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
49
32
}
50
*pflags = flags;
33
51
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
34
*prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
52
index XXXXXXX..XXXXXXX 100644
35
- if (*prot && !xn) {
53
--- a/target/arm/translate-a64.c
36
+ if (*prot && !xn && !(pxn && !is_user)) {
54
+++ b/target/arm/translate-a64.c
37
*prot |= PAGE_EXEC;
55
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
38
}
56
* end the TB
39
/* We don't need to look the attribute up in the MAIR0/MAIR1
57
*/
58
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
59
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
60
+ dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
61
dc->is_ldex = false;
62
dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
63
64
diff --git a/target/arm/translate.c b/target/arm/translate.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/translate.c
67
+++ b/target/arm/translate.c
68
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
69
* end the TB
70
*/
71
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
72
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
73
+ dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
74
dc->is_ldex = false;
75
76
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
40
--
77
--
41
2.20.1
78
2.20.1
42
79
43
80
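The SS_ACTIVE/PSTATE.SS table quoted in the comment above decodes into three states; a minimal sketch (state names invented, the 0x/10/11 encoding is from the comment):

    #include <stdbool.h>

    typedef enum {
        SS_INACTIVE,            /* SS_ACTIVE == 0, PSTATE.SS ignored */
        SS_ACTIVE_PENDING,      /* SS_ACTIVE == 1, PSTATE.SS == 0 */
        SS_ACTIVE_NOT_PENDING   /* SS_ACTIVE == 1, PSTATE.SS == 1 */
    } SSState;

    static SSState ss_state(bool ss_active, bool pstate_ss)
    {
        if (!ss_active) {
            return SS_INACTIVE;
        }
        return pstate_ss ? SS_ACTIVE_NOT_PENDING : SS_ACTIVE_PENDING;
    }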
1
Implement the v8.1M VSCCLRM insn, which zeros floating point
1
From: Richard Henderson <richard.henderson@linaro.org>
2
registers if there is an active floating point context.
3
This requires support in write_neon_element32() for the MO_32
4
element size, so add it.
5
2
6
Because we want to use arm_gen_condlabel(), we need to move
3
We're about to split tbflags into two parts. These macros
7
the definition of that function up in translate.c so it is
4
will ensure that the correct part is used with the correct
8
before the #include of translate-vfp.c.inc.
5
set of bits.
9
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210419202257.161730-5-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20201119215617.29887-5-peter.maydell@linaro.org
13
---
11
---
14
target/arm/cpu.h | 9 ++++
12
target/arm/cpu.h | 22 +++++++++-
15
target/arm/m-nocp.decode | 8 +++-
13
target/arm/helper-a64.c | 2 +-
16
target/arm/translate.c | 21 +++++----
14
target/arm/helper.c | 85 +++++++++++++++++---------------------
17
target/arm/translate-vfp.c.inc | 84 ++++++++++++++++++++++++++++++++++
15
target/arm/translate-a64.c | 36 ++++++++--------
18
4 files changed, 111 insertions(+), 11 deletions(-)
16
target/arm/translate.c | 48 ++++++++++-----------
17
5 files changed, 101 insertions(+), 92 deletions(-)
19
18
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
21
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id)
23
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, TCMA, 16, 2)
25
return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0;
24
FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
25
FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
26
27
+/*
28
+ * Helpers for using the above.
29
+ */
30
+#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
31
+ (DST = FIELD_DP32(DST, TBFLAG_ANY, WHICH, VAL))
32
+#define DP_TBFLAG_A64(DST, WHICH, VAL) \
33
+ (DST = FIELD_DP32(DST, TBFLAG_A64, WHICH, VAL))
34
+#define DP_TBFLAG_A32(DST, WHICH, VAL) \
35
+ (DST = FIELD_DP32(DST, TBFLAG_A32, WHICH, VAL))
36
+#define DP_TBFLAG_M32(DST, WHICH, VAL) \
37
+ (DST = FIELD_DP32(DST, TBFLAG_M32, WHICH, VAL))
38
+#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
39
+ (DST = FIELD_DP32(DST, TBFLAG_AM32, WHICH, VAL))
40
+
41
+#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN, TBFLAG_ANY, WHICH)
42
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN, TBFLAG_A64, WHICH)
43
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN, TBFLAG_A32, WHICH)
44
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN, TBFLAG_M32, WHICH)
45
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN, TBFLAG_AM32, WHICH)
46
+
47
/**
48
* cpu_mmu_index:
49
* @env: The cpu environment
50
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
51
*/
52
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
53
{
54
- return FIELD_EX32(env->hflags, TBFLAG_ANY, MMUIDX);
55
+ return EX_TBFLAG_ANY(env->hflags, MMUIDX);
26
}
56
}
27
57
28
+static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id)
58
static inline bool bswap_code(bool sctlr_b)
29
+{
59
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
30
+ /*
60
index XXXXXXX..XXXXXXX 100644
31
+ * Return true if M-profile state handling insns
61
--- a/target/arm/helper-a64.c
32
+ * (VSCCLRM, CLRM, FPCTX access insns) are implemented
62
+++ b/target/arm/helper-a64.c
33
+ */
63
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
34
+ return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3;
64
* the hflags rebuild, since we can pull the composite TBII field
35
+}
65
* from there.
36
+
66
*/
37
static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
67
- tbii = FIELD_EX32(env->hflags, TBFLAG_A64, TBII);
68
+ tbii = EX_TBFLAG_A64(env->hflags, TBII);
69
if ((tbii >> extract64(new_pc, 55, 1)) & 1) {
70
/* TBI is enabled. */
71
int core_mmu_idx = cpu_mmu_index(env, false);
72
diff --git a/target/arm/helper.c b/target/arm/helper.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/target/arm/helper.c
75
+++ b/target/arm/helper.c
76
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
77
static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
78
ARMMMUIdx mmu_idx, uint32_t flags)
38
{
79
{
39
/* Sadly this is encoded differently for A-profile and M-profile */
80
- flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
40
diff --git a/target/arm/m-nocp.decode b/target/arm/m-nocp.decode
81
- flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
82
- arm_to_core_mmu_idx(mmu_idx));
83
+ DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
84
+ DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
85
86
if (arm_singlestep_active(env)) {
87
- flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
88
+ DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
89
}
90
return flags;
91
}
92
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
93
bool sctlr_b = arm_sctlr_b(env);
94
95
if (sctlr_b) {
96
- flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR__B, 1);
97
+ DP_TBFLAG_A32(flags, SCTLR__B, 1);
98
}
99
if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
100
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
101
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
102
}
103
- flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
104
+ DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
105
106
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
107
}
108
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
109
uint32_t flags = 0;
110
111
if (arm_v7m_is_handler_mode(env)) {
112
- flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
113
+ DP_TBFLAG_M32(flags, HANDLER, 1);
114
}
115
116
/*
117
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
118
if (arm_feature(env, ARM_FEATURE_V8) &&
119
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
120
(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
121
- flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
122
+ DP_TBFLAG_M32(flags, STACKCHECK, 1);
123
}
124
125
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
126
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
127
{
128
int flags = 0;
129
130
- flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
131
- arm_debug_target_el(env));
132
+ DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
133
return flags;
134
}
135
136
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
137
uint32_t flags = rebuild_hflags_aprofile(env);
138
139
if (arm_el_is_aa64(env, 1)) {
140
- flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
141
+ DP_TBFLAG_A32(flags, VFPEN, 1);
142
}
143
144
if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
145
(arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
146
- flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
147
+ DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
148
}
149
150
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
151
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
152
uint64_t sctlr;
153
int tbii, tbid;
154
155
- flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
156
+ DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
157
158
/* Get control bits for tagged addresses. */
159
tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
160
tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
161
162
- flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
163
- flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
164
+ DP_TBFLAG_A64(flags, TBII, tbii);
165
+ DP_TBFLAG_A64(flags, TBID, tbid);
166
167
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
168
int sve_el = sve_exception_el(env, el);
169
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
170
} else {
171
zcr_len = sve_zcr_len_for_el(env, el);
172
}
173
- flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
174
- flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
175
+ DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
176
+ DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
177
}
178
179
sctlr = regime_sctlr(env, stage1);
180
181
if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
182
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
183
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
184
}
185
186
if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
187
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
188
* The decision of which action to take is left to a helper.
189
*/
190
if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
191
- flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
192
+ DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
193
}
194
}
195
196
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
197
/* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
198
if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
199
- flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
200
+ DP_TBFLAG_A64(flags, BT, 1);
201
}
202
}
203
204
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
205
case ARMMMUIdx_SE10_1:
206
case ARMMMUIdx_SE10_1_PAN:
207
/* TODO: ARMv8.3-NV */
208
- flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
209
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
210
break;
211
case ARMMMUIdx_E20_2:
212
case ARMMMUIdx_E20_2_PAN:
213
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
214
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
215
*/
216
if (env->cp15.hcr_el2 & HCR_TGE) {
217
- flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
218
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
219
}
220
break;
221
default:
222
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
223
* 4) If no Allocation Tag Access, then all accesses are Unchecked.
224
*/
225
if (allocation_tag_access_enabled(env, el, sctlr)) {
226
- flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1);
227
+ DP_TBFLAG_A64(flags, ATA, 1);
228
if (tbid
229
&& !(env->pstate & PSTATE_TCO)
230
&& (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
231
- flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1);
232
+ DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
233
}
234
}
235
/* And again for unprivileged accesses, if required. */
236
- if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV)
237
+ if (EX_TBFLAG_A64(flags, UNPRIV)
238
&& tbid
239
&& !(env->pstate & PSTATE_TCO)
240
&& (sctlr & SCTLR_TCF0)
241
&& allocation_tag_access_enabled(env, 0, sctlr)) {
242
- flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1);
243
+ DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
244
}
245
/* Cache TCMA as well as TBI. */
246
- flags = FIELD_DP32(flags, TBFLAG_A64, TCMA,
247
- aa64_va_parameter_tcma(tcr, mmu_idx));
248
+ DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
249
}
250
251
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
252
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
253
*cs_base = 0;
254
assert_hflags_rebuild_correctly(env);
255
256
- if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
257
+ if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
258
*pc = env->pc;
259
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
260
- flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
261
+ DP_TBFLAG_A64(flags, BTYPE, env->btype);
262
}
263
} else {
264
*pc = env->regs[15];
265
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
266
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
267
FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
268
!= env->v7m.secure) {
269
- flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
270
+ DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
271
}
272
273
if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
274
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
275
* active FP context; we must create a new FP context before
276
* executing any FP insn.
277
*/
278
- flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
279
+ DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
280
}
281
282
bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
283
if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
284
- flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
285
+ DP_TBFLAG_M32(flags, LSPACT, 1);
286
}
287
} else {
288
/*
289
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
290
* Note that VECLEN+VECSTRIDE are RES0 for M-profile.
291
*/
292
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
293
- flags = FIELD_DP32(flags, TBFLAG_A32,
294
- XSCALE_CPAR, env->cp15.c15_cpar);
295
+ DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
296
} else {
297
- flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
298
- env->vfp.vec_len);
299
- flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
300
- env->vfp.vec_stride);
301
+ DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
302
+ DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
303
}
304
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
305
- flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
306
+ DP_TBFLAG_A32(flags, VFPEN, 1);
307
}
308
}
309
310
- flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
311
- flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
312
+ DP_TBFLAG_AM32(flags, THUMB, env->thumb);
313
+ DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
314
}
315
316
/*
317
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
318
* 1 1 Active-not-pending
319
* SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
320
*/
321
- if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
322
- (env->pstate & PSTATE_SS)) {
323
- flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE__SS, 1);
324
+ if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
325
+ DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
326
}
327
328
*pflags = flags;
329
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
41
index XXXXXXX..XXXXXXX 100644
330
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/m-nocp.decode
331
--- a/target/arm/translate-a64.c
43
+++ b/target/arm/m-nocp.decode
332
+++ b/target/arm/translate-a64.c
44
@@ -XXX,XX +XXX,XX @@
333
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
45
# If the coprocessor is not present or disabled then we will generate
334
!arm_el_is_aa64(env, 3);
46
# the NOCP exception; otherwise we let the insn through to the main decode.
335
dc->thumb = 0;
47
336
dc->sctlr_b = 0;
48
+%vd_dp 22:1 12:4
337
- dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
49
+%vd_sp 12:4 22:1
338
+ dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
50
+
339
dc->condexec_mask = 0;
51
&nocp cp
340
dc->condexec_cond = 0;
52
341
- core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
53
{
342
+ core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
54
# Special cases which do not take an early NOCP: VLLDM and VLSTM
343
dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
55
VLLDM_VLSTM 1110 1100 001 l:1 rn:4 0000 1010 0000 0000
344
- dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII);
56
- # TODO: VSCCLRM (new in v8.1M) is similar:
345
- dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID);
57
- #VSCCLRM 1110 1100 1-01 1111 ---- 1011 ---- ---0
346
- dc->tcma = FIELD_EX32(tb_flags, TBFLAG_A64, TCMA);
58
+ # VSCCLRM (new in v8.1M) is similar:
347
+ dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
59
+ VSCCLRM 1110 1100 1.01 1111 .... 1011 imm:7 0 vd=%vd_dp size=3
348
+ dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
60
+ VSCCLRM 1110 1100 1.01 1111 .... 1010 imm:8 vd=%vd_sp size=2
349
+ dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
61
350
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
62
NOCP 111- 1110 ---- ---- ---- cp:4 ---- ---- &nocp
351
#if !defined(CONFIG_USER_ONLY)
63
NOCP 111- 110- ---- ---- ---- cp:4 ---- ---- &nocp
352
dc->user = (dc->current_el == 0);
353
#endif
354
- dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
355
- dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
356
- dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
357
- dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
358
- dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
359
- dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
360
- dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV);
361
- dc->ata = FIELD_EX32(tb_flags, TBFLAG_A64, ATA);
362
- dc->mte_active[0] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE_ACTIVE);
363
- dc->mte_active[1] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE0_ACTIVE);
364
+ dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
365
+ dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
366
+ dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
367
+ dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
368
+ dc->bt = EX_TBFLAG_A64(tb_flags, BT);
369
+ dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
370
+ dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
371
+ dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
372
+ dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
373
+ dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
374
dc->vec_len = 0;
375
dc->vec_stride = 0;
376
dc->cp_regs = arm_cpu->cp_regs;
377
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
378
* emit code to generate a software step exception
379
* end the TB
380
*/
381
- dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
382
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
383
+ dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
384
+ dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
385
dc->is_ldex = false;
386
- dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
387
+ dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
388
389
/* Bound the number of insns to execute to those left on the page. */
390
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
64
diff --git a/target/arm/translate.c b/target/arm/translate.c
391
diff --git a/target/arm/translate.c b/target/arm/translate.c
65
index XXXXXXX..XXXXXXX 100644
392
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/translate.c
393
--- a/target/arm/translate.c
67
+++ b/target/arm/translate.c
394
+++ b/target/arm/translate.c
68
@@ -XXX,XX +XXX,XX @@ void arm_translate_init(void)
395
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
69
a64_translate_init();
396
*/
70
}
397
dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
71
398
!arm_el_is_aa64(env, 3);
72
+/* Generate a label used for skipping this instruction */
399
- dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
73
+static void arm_gen_condlabel(DisasContext *s)
400
- dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
74
+{
401
- condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
75
+ if (!s->condjmp) {
402
+ dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
76
+ s->condlabel = gen_new_label();
403
+ dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
77
+ s->condjmp = 1;
404
+ condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC);
78
+ }
405
dc->condexec_mask = (condexec & 0xf) << 1;
79
+}
406
dc->condexec_cond = condexec >> 4;
80
+
407
81
/* Flags for the disas_set_da_iss info argument:
408
- core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
82
* lower bits hold the Rt register number, higher bits are flags.
409
+ core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
83
*/
410
dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
84
@@ -XXX,XX +XXX,XX @@ static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
411
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
85
long off = neon_element_offset(reg, ele, memop);
412
#if !defined(CONFIG_USER_ONLY)
86
413
dc->user = (dc->current_el == 0);
87
switch (memop) {
414
#endif
88
+ case MO_32:
415
- dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
89
+ tcg_gen_st32_i64(src, cpu_env, off);
416
+ dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
90
+ break;
417
91
case MO_64:
418
if (arm_feature(env, ARM_FEATURE_M)) {
92
tcg_gen_st_i64(src, cpu_env, off);
419
dc->vfp_enabled = 1;
93
break;
420
dc->be_data = MO_TE;
94
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
421
- dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
95
s->base.is_jmp = DISAS_UPDATE_EXIT;
422
+ dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
96
}
423
dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
97
424
regime_is_secure(env, dc->mmu_idx);
98
-/* Generate a label used for skipping this instruction */
425
- dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
99
-static void arm_gen_condlabel(DisasContext *s)
426
- dc->v8m_fpccr_s_wrong =
100
-{
427
- FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
101
- if (!s->condjmp) {
428
+ dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
102
- s->condlabel = gen_new_label();
429
+ dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
103
- s->condjmp = 1;
430
dc->v7m_new_fp_ctxt_needed =
104
- }
431
- FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
105
-}
432
- dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
106
-
433
+ EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
107
/* Skip this instruction if the ARM condition is false */
434
+ dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
108
static void arm_skip_unless(DisasContext *s, uint32_t cond)
435
} else {
109
{
436
- dc->be_data =
110
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
437
- FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
111
index XXXXXXX..XXXXXXX 100644
438
- dc->debug_target_el =
112
--- a/target/arm/translate-vfp.c.inc
439
- FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
113
+++ b/target/arm/translate-vfp.c.inc
440
- dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR__B);
114
@@ -XXX,XX +XXX,XX @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
441
- dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
115
return true;
442
- dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
116
}
443
- dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
117
444
+ dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
118
+static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
445
+ dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
119
+{
446
+ dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE);
120
+ int btmreg, topreg;
447
+ dc->ns = EX_TBFLAG_A32(tb_flags, NS);
121
+ TCGv_i64 zero;
448
+ dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN);
122
+ TCGv_i32 aspen, sfpa;
449
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
123
+
450
- dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
124
+ if (!dc_isar_feature(aa32_m_sec_state, s)) {
451
+ dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR);
125
+ /* Before v8.1M, fall through in decode to the NOCP check */
452
} else {
126
+ return false;
453
- dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
127
+ }
454
- dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
128
+
455
+ dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
129
+ /* Explicitly UNDEF because this takes precedence over NOCP */
456
+ dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
130
+ if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
457
}
131
+ unallocated_encoding(s);
458
}
132
+ return true;
459
dc->cp_regs = cpu->cp_regs;
133
+ }
460
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
134
+
461
* emit code to generate a software step exception
135
+ if (!dc_isar_feature(aa32_vfp_simd, s)) {
462
* end the TB
136
+ /* NOP if we have neither FP nor MVE */
463
*/
137
+ return true;
464
- dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
138
+ }
465
- dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE__SS);
139
+
466
+ dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
140
+ /*
467
+ dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
141
+ * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
468
dc->is_ldex = false;
142
+ * active floating point context so we must NOP (without doing
469
143
+ * any lazy state preservation or the NOCP check).
470
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
144
+ */
471
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
145
+ aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
472
DisasContext dc = { };
146
+ sfpa = load_cpu_field(v7m.control[M_REG_S]);
473
const TranslatorOps *ops = &arm_translator_ops;
147
+ tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
474
148
+ tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
475
- if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
149
+ tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
476
+ if (EX_TBFLAG_AM32(tb->flags, THUMB)) {
150
+ tcg_gen_or_i32(sfpa, sfpa, aspen);
477
ops = &thumb_translator_ops;
151
+ arm_gen_condlabel(s);
478
}
152
+ tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);
479
#ifdef TARGET_AARCH64
153
+
480
- if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
154
+ if (s->fp_excp_el != 0) {
481
+ if (EX_TBFLAG_ANY(tb->flags, AARCH64_STATE)) {
155
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
482
ops = &aarch64_translator_ops;
156
+ syn_uncategorized(), s->fp_excp_el);
483
}
157
+ return true;
484
#endif
158
+ }
159
+
160
+ topreg = a->vd + a->imm - 1;
161
+ btmreg = a->vd;
162
+
163
+ /* Convert to Sreg numbers if the insn specified Dregs */
164
+ if (a->size == 3) {
165
+ topreg = topreg * 2 + 1;
166
+ btmreg *= 2;
167
+ }
168
+
169
+ if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
170
+ /* UNPREDICTABLE: we choose to undef */
171
+ unallocated_encoding(s);
172
+ return true;
173
+ }
174
+
175
+ /* Silently ignore requests to clear D16-D31 if they don't exist */
176
+ if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
177
+ topreg = 31;
178
+ }
179
+
180
+ if (!vfp_access_check(s)) {
181
+ return true;
182
+ }
183
+
184
+ /* Zero the Sregs from btmreg to topreg inclusive. */
185
+ zero = tcg_const_i64(0);
186
+ if (btmreg & 1) {
187
+ write_neon_element64(zero, btmreg >> 1, 1, MO_32);
188
+ btmreg++;
189
+ }
190
+ for (; btmreg + 1 <= topreg; btmreg += 2) {
191
+ write_neon_element64(zero, btmreg >> 1, 0, MO_64);
192
+ }
193
+ if (btmreg == topreg) {
194
+ write_neon_element64(zero, btmreg >> 1, 0, MO_32);
195
+ btmreg++;
196
+ }
197
+ assert(btmreg == topreg + 1);
198
+ /* TODO: when MVE is implemented, zero VPR here */
199
+ return true;
200
+}
201
+
202
static bool trans_NOCP(DisasContext *s, arg_nocp *a)
203
{
204
/*
205
--
485
--
206
2.20.1
486
2.20.1
207
487
208
488
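As a standalone illustration (not part of the patch): the Sreg-zeroing
loop in trans_VSCCLRM() above clears a leading odd Sreg and a trailing
even Sreg individually, and everything in between a whole Dreg at a
time. The sketch below models the same logic in plain C; the sregs[]
array and zero_sregs() helper are invented stand-ins for the TCG
register-file accesses, not QEMU functions.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t sregs[64];             /* S0..S63, i.e. D0..D31 */

    static void zero_sregs(int btmreg, int topreg)
    {
        if (btmreg & 1) {                  /* odd start: one Sreg */
            sregs[btmreg++] = 0;
        }
        for (; btmreg + 1 <= topreg; btmreg += 2) {
            /* aligned pair: clear a whole Dreg (two Sregs) at once */
            memset(&sregs[btmreg], 0, 2 * sizeof(uint32_t));
        }
        if (btmreg == topreg) {            /* even trailing Sreg */
            sregs[btmreg++] = 0;
        }
        assert(btmreg == topreg + 1);      /* the range was covered */
    }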
Factor out the code which handles M-profile lazy FP state preservation
from full_vfp_access_check(); accesses to the FPCXT_NS register are a
special case which needs to do just this part (corresponding in the
pseudocode to the PreserveFPState() function), and not the full set of
actions matching the pseudocode ExecuteFPCheck() which normal FP
instructions need to do.

From: Richard Henderson <richard.henderson@linaro.org>

In preparation for splitting tb->flags across multiple fields,
introduce a structure to hold the value(s). So far this only migrates
the one uint32_t and fixes all of the places that require adjustment
to match.
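A compressed illustration of the pattern this sets up (these are
simplified stand-ins, not the actual QEMU macros, which are generated
from FIELD() definitions): once the hflags value is wrapped in a
one-member struct, any leftover code that still treats it as a bare
integer no longer compiles, so every access site is forced through the
deposit/extract accessors.

    #include <stdint.h>

    typedef struct CPUARMTBFlags {
        uint32_t flags;
    } CPUARMTBFlags;

    /* Simplified deposit/extract, standing in for FIELD_DP32/FIELD_EX32. */
    static inline uint32_t dp32(uint32_t x, unsigned sh, unsigned len,
                                uint32_t v)
    {
        uint32_t mask = ((1u << len) - 1) << sh;
        return (x & ~mask) | ((v << sh) & mask);
    }

    static inline uint32_t ex32(uint32_t x, unsigned sh, unsigned len)
    {
        return (x >> sh) & ((1u << len) - 1);
    }

    /* Accessors reach inside the struct; a bare "flags | bit" is now
     * a compile error rather than a silent mistake. */
    #define DP_TBFLAG(DST, SH, LEN, VAL) \
        ((DST).flags = dp32((DST).flags, (SH), (LEN), (VAL)))
    #define EX_TBFLAG(IN, SH, LEN) ex32((IN).flags, (SH), (LEN))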
7
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210419202257.161730-6-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Message-id: 20201119215617.29887-13-peter.maydell@linaro.org
12
---
12
---
13
target/arm/translate-vfp.c.inc | 45 ++++++++++++++++++++--------------
13
target/arm/cpu.h | 26 ++++++++++++---------
14
1 file changed, 27 insertions(+), 18 deletions(-)
14
target/arm/translate.h | 11 +++++++++
15
15
target/arm/helper.c | 48 +++++++++++++++++++++-----------------
16
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
16
target/arm/translate-a64.c | 2 +-
17
index XXXXXXX..XXXXXXX 100644
17
target/arm/translate.c | 7 +++---
18
--- a/target/arm/translate-vfp.c.inc
18
5 files changed, 57 insertions(+), 37 deletions(-)
19
+++ b/target/arm/translate-vfp.c.inc
19
20
@@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top)
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
return offs;
21
index XXXXXXX..XXXXXXX 100644
22
}
22
--- a/target/arm/cpu.h
23
23
+++ b/target/arm/cpu.h
24
+/*
24
@@ -XXX,XX +XXX,XX @@ typedef struct ARMPACKey {
25
+ * Generate code for M-profile lazy FP state preservation if needed;
25
} ARMPACKey;
26
+ * this corresponds to the pseudocode PreserveFPState() function.
26
#endif
27
28
+/* See the commentary above the TBFLAG field definitions. */
29
+typedef struct CPUARMTBFlags {
30
+ uint32_t flags;
31
+} CPUARMTBFlags;
32
33
typedef struct CPUARMState {
34
/* Regs for current mode. */
35
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
36
uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
37
38
/* Cached TBFLAGS state. See below for which bits are included. */
39
- uint32_t hflags;
40
+ CPUARMTBFlags hflags;
41
42
/* Frequently accessed CPSR bits are stored separately for efficiency.
43
This contains all the other bits. Use cpsr_{read,write} to access
44
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
45
* Helpers for using the above.
46
*/
47
#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
48
- (DST = FIELD_DP32(DST, TBFLAG_ANY, WHICH, VAL))
49
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
50
#define DP_TBFLAG_A64(DST, WHICH, VAL) \
51
- (DST = FIELD_DP32(DST, TBFLAG_A64, WHICH, VAL))
52
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A64, WHICH, VAL))
53
#define DP_TBFLAG_A32(DST, WHICH, VAL) \
54
- (DST = FIELD_DP32(DST, TBFLAG_A32, WHICH, VAL))
55
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A32, WHICH, VAL))
56
#define DP_TBFLAG_M32(DST, WHICH, VAL) \
57
- (DST = FIELD_DP32(DST, TBFLAG_M32, WHICH, VAL))
58
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_M32, WHICH, VAL))
59
#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
60
- (DST = FIELD_DP32(DST, TBFLAG_AM32, WHICH, VAL))
61
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_AM32, WHICH, VAL))
62
63
-#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN, TBFLAG_ANY, WHICH)
64
-#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN, TBFLAG_A64, WHICH)
65
-#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN, TBFLAG_A32, WHICH)
66
-#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN, TBFLAG_M32, WHICH)
67
-#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN, TBFLAG_AM32, WHICH)
68
+#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
69
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A64, WHICH)
70
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A32, WHICH)
71
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_M32, WHICH)
72
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_AM32, WHICH)
73
74
/**
75
* cpu_mmu_index:
76
diff --git a/target/arm/translate.h b/target/arm/translate.h
77
index XXXXXXX..XXXXXXX 100644
78
--- a/target/arm/translate.h
79
+++ b/target/arm/translate.h
80
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
81
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
82
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
83
84
+/**
85
+ * arm_tbflags_from_tb:
86
+ * @tb: the TranslationBlock
87
+ *
88
+ * Extract the flag values from @tb.
27
+ */
89
+ */
28
+static void gen_preserve_fp_state(DisasContext *s)
90
+static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
29
+{
91
+{
30
+ if (s->v7m_lspact) {
92
+ return (CPUARMTBFlags){ tb->flags };
31
+ /*
32
+ * Lazy state saving affects external memory and also the NVIC,
33
+ * so we must mark it as an IO operation for icount (and cause
34
+ * this to be the last insn in the TB).
35
+ */
36
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
37
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
38
+ gen_io_start();
39
+ }
40
+ gen_helper_v7m_preserve_fp_state(cpu_env);
41
+ /*
42
+ * If the preserve_fp_state helper doesn't throw an exception
43
+ * then it will clear LSPACT; we don't need to repeat this for
44
+ * any further FP insns in this TB.
45
+ */
46
+ s->v7m_lspact = false;
47
+ }
48
+}
93
+}
49
+
94
+
50
/*
95
/*
51
* Check that VFP access is enabled. If it is, do the necessary
96
* Enum for argument to fpstatus_ptr().
52
* M-profile lazy-FP handling and then return true.
97
*/
53
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
98
diff --git a/target/arm/helper.c b/target/arm/helper.c
54
/* Handle M-profile lazy FP state mechanics */
99
index XXXXXXX..XXXXXXX 100644
55
100
--- a/target/arm/helper.c
56
/* Trigger lazy-state preservation if necessary */
101
+++ b/target/arm/helper.c
57
- if (s->v7m_lspact) {
102
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
58
- /*
103
}
59
- * Lazy state saving affects external memory and also the NVIC,
104
#endif
60
- * so we must mark it as an IO operation for icount (and cause
105
61
- * this to be the last insn in the TB).
106
-static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
62
- */
107
- ARMMMUIdx mmu_idx, uint32_t flags)
63
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
108
+static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
64
- s->base.is_jmp = DISAS_UPDATE_EXIT;
109
+ ARMMMUIdx mmu_idx,
65
- gen_io_start();
110
+ CPUARMTBFlags flags)
66
- }
111
{
67
- gen_helper_v7m_preserve_fp_state(cpu_env);
112
DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
68
- /*
113
DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
69
- * If the preserve_fp_state helper doesn't throw an exception
114
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
70
- * then it will clear LSPACT; we don't need to repeat this for
115
return flags;
71
- * any further FP insns in this TB.
116
}
72
- */
117
73
- s->v7m_lspact = false;
118
-static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
74
- }
119
- ARMMMUIdx mmu_idx, uint32_t flags)
75
+ gen_preserve_fp_state(s);
120
+static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
76
121
+ ARMMMUIdx mmu_idx,
77
/* Update ownership of FP context: set FPCCR.S to match current state */
122
+ CPUARMTBFlags flags)
78
if (s->v8m_fpccr_s_wrong) {
123
{
124
bool sctlr_b = arm_sctlr_b(env);
125
126
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
127
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
128
}
129
130
-static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
131
- ARMMMUIdx mmu_idx)
132
+static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
133
+ ARMMMUIdx mmu_idx)
134
{
135
- uint32_t flags = 0;
136
+ CPUARMTBFlags flags = {};
137
138
if (arm_v7m_is_handler_mode(env)) {
139
DP_TBFLAG_M32(flags, HANDLER, 1);
140
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
141
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
142
}
143
144
-static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
145
+static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
146
{
147
- int flags = 0;
148
+ CPUARMTBFlags flags = {};
149
150
DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
151
return flags;
152
}
153
154
-static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
155
- ARMMMUIdx mmu_idx)
156
+static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
157
+ ARMMMUIdx mmu_idx)
158
{
159
- uint32_t flags = rebuild_hflags_aprofile(env);
160
+ CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
161
162
if (arm_el_is_aa64(env, 1)) {
163
DP_TBFLAG_A32(flags, VFPEN, 1);
164
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
165
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
166
}
167
168
-static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
169
- ARMMMUIdx mmu_idx)
170
+static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
171
+ ARMMMUIdx mmu_idx)
172
{
173
- uint32_t flags = rebuild_hflags_aprofile(env);
174
+ CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
175
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
176
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
177
uint64_t sctlr;
178
@@ -XXX,XX +XXX,XX @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
179
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
180
}
181
182
-static uint32_t rebuild_hflags_internal(CPUARMState *env)
183
+static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
184
{
185
int el = arm_current_el(env);
186
int fp_el = fp_exception_el(env, el);
187
@@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
188
int el = arm_current_el(env);
189
int fp_el = fp_exception_el(env, el);
190
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
191
+
192
env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
193
}
194
195
@@ -XXX,XX +XXX,XX @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
196
static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
197
{
198
#ifdef CONFIG_DEBUG_TCG
199
- uint32_t env_flags_current = env->hflags;
200
- uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);
201
+ CPUARMTBFlags c = env->hflags;
202
+ CPUARMTBFlags r = rebuild_hflags_internal(env);
203
204
- if (unlikely(env_flags_current != env_flags_rebuilt)) {
205
+ if (unlikely(c.flags != r.flags)) {
206
fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
207
- env_flags_current, env_flags_rebuilt);
208
+ c.flags, r.flags);
209
abort();
210
}
211
#endif
212
@@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
213
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
214
target_ulong *cs_base, uint32_t *pflags)
215
{
216
- uint32_t flags = env->hflags;
217
+ CPUARMTBFlags flags;
218
219
*cs_base = 0;
220
assert_hflags_rebuild_correctly(env);
221
+ flags = env->hflags;
222
223
if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
224
*pc = env->pc;
225
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
226
DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
227
}
228
229
- *pflags = flags;
230
+ *pflags = flags.flags;
231
}
232
233
#ifdef TARGET_AARCH64
234
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
235
index XXXXXXX..XXXXXXX 100644
236
--- a/target/arm/translate-a64.c
237
+++ b/target/arm/translate-a64.c
238
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
239
DisasContext *dc = container_of(dcbase, DisasContext, base);
240
CPUARMState *env = cpu->env_ptr;
241
ARMCPU *arm_cpu = env_archcpu(env);
242
- uint32_t tb_flags = dc->base.tb->flags;
243
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
244
int bound, core_mmu_idx;
245
246
dc->isar = &arm_cpu->isar;
247
diff --git a/target/arm/translate.c b/target/arm/translate.c
248
index XXXXXXX..XXXXXXX 100644
249
--- a/target/arm/translate.c
250
+++ b/target/arm/translate.c
251
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
252
DisasContext *dc = container_of(dcbase, DisasContext, base);
253
CPUARMState *env = cs->env_ptr;
254
ARMCPU *cpu = env_archcpu(env);
255
- uint32_t tb_flags = dc->base.tb->flags;
256
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
257
uint32_t condexec, core_mmu_idx;
258
259
dc->isar = &cpu->isar;
260
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
261
{
262
DisasContext dc = { };
263
const TranslatorOps *ops = &arm_translator_ops;
264
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb);
265
266
- if (EX_TBFLAG_AM32(tb->flags, THUMB)) {
267
+ if (EX_TBFLAG_AM32(tb_flags, THUMB)) {
268
ops = &thumb_translator_ops;
269
}
270
#ifdef TARGET_AARCH64
271
- if (EX_TBFLAG_ANY(tb->flags, AARCH64_STATE)) {
272
+ if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) {
273
ops = &aarch64_translator_ops;
274
}
275
#endif
79
--
276
--
80
2.20.1
277
2.20.1
81
278
82
279
For v8.1M the architecture mandates that CPUs must provide at
least the "minimal RAS implementation" from the Reliability,
Availability and Serviceability extension. This consists of:
 * an ESB instruction which is a NOP
   -- since it is in the HINT space we need only add a comment
 * an RFSR register which will RAZ/WI
 * a RAZ/WI AIRCR.IESB bit
   -- the code which handles writes to AIRCR does not allow setting
      of RES0 bits, so we already treat this as RAZ/WI; add a comment
      noting that this is deliberate
 * minimal implementation of the RAS register block at 0xe0005000
   -- this will be in a subsequent commit
 * setting the ID_PFR0.RAS field to 0b0010
   -- we will do this when we add the Cortex-M55 CPU model

From: Richard Henderson <richard.henderson@linaro.org>

Now that we have all of the proper macros defined, expanding
the CPUARMTBFlags structure and populating the two TB fields
is relatively simple.
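To make the shape concrete, here is a sketch of the round trip this
enables. The struct members match the patch; the two-field
TranslationBlock and the pack helper are simplified stand-ins -- in
QEMU the packing happens via the *pflags and *cs_base out-parameters
of cpu_get_tb_cpu_state().

    #include <stdint.h>

    typedef uint64_t target_ulong;     /* assuming a 64-bit target */

    typedef struct CPUARMTBFlags {
        uint32_t flags;                /* TBFLAG_ANY: shared bits */
        target_ulong flags2;           /* TBFLAG_A64/A32/M32/AM32 bits */
    } CPUARMTBFlags;

    typedef struct TranslationBlock {  /* reduced to the two fields used */
        uint32_t flags;
        target_ulong cs_base;          /* otherwise unused for ARM */
    } TranslationBlock;

    /* Pack: what cpu_get_tb_cpu_state() does via its out-parameters. */
    static inline void pack_tbflags(CPUARMTBFlags f, TranslationBlock *tb)
    {
        tb->flags = f.flags;
        tb->cs_base = f.flags2;
    }

    /* Unpack: matches the arm_tbflags_from_tb() added in translate.h. */
    static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
    {
        return (CPUARMTBFlags){ tb->flags, tb->cs_base };
    }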
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210419202257.161730-7-richard.henderson@linaro.org
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Message-id: 20201119215617.29887-26-peter.maydell@linaro.org
19
---
11
---
20
target/arm/cpu.h | 14 ++++++++++++++
12
target/arm/cpu.h | 49 ++++++++++++++++++++++++------------------
21
target/arm/t32.decode | 4 ++++
13
target/arm/translate.h | 2 +-
22
hw/intc/armv7m_nvic.c | 13 +++++++++++++
14
target/arm/helper.c | 10 +++++----
23
3 files changed, 31 insertions(+)
15
3 files changed, 35 insertions(+), 26 deletions(-)
24
16
25
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
26
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/cpu.h
19
--- a/target/arm/cpu.h
28
+++ b/target/arm/cpu.h
20
+++ b/target/arm/cpu.h
29
@@ -XXX,XX +XXX,XX @@ FIELD(ID_MMFR4, LSM, 20, 4)
21
@@ -XXX,XX +XXX,XX @@ typedef struct ARMPACKey {
30
FIELD(ID_MMFR4, CCIDX, 24, 4)
22
/* See the commentary above the TBFLAG field definitions. */
31
FIELD(ID_MMFR4, EVT, 28, 4)
23
typedef struct CPUARMTBFlags {
32
24
uint32_t flags;
33
+FIELD(ID_PFR0, STATE0, 0, 4)
25
+ target_ulong flags2;
34
+FIELD(ID_PFR0, STATE1, 4, 4)
26
} CPUARMTBFlags;
35
+FIELD(ID_PFR0, STATE2, 8, 4)
27
36
+FIELD(ID_PFR0, STATE3, 12, 4)
28
typedef struct CPUARMState {
37
+FIELD(ID_PFR0, CSV2, 16, 4)
29
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
38
+FIELD(ID_PFR0, AMU, 20, 4)
30
#include "exec/cpu-all.h"
39
+FIELD(ID_PFR0, DIT, 24, 4)
31
40
+FIELD(ID_PFR0, RAS, 28, 4)
32
/*
41
+
33
- * Bit usage in the TB flags field: bit 31 indicates whether we are
42
FIELD(ID_PFR1, PROGMOD, 0, 4)
34
- * in 32 or 64 bit mode. The meaning of the other bits depends on that.
43
FIELD(ID_PFR1, SECURITY, 4, 4)
35
- * We put flags which are shared between 32 and 64 bit mode at the top
44
FIELD(ID_PFR1, MPROGMOD, 8, 4)
36
- * of the word, and flags which apply to only one mode at the bottom.
45
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
37
+ * We have more than 32-bits worth of state per TB, so we split the data
46
return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
38
+ * between tb->flags and tb->cs_base, which is otherwise unused for ARM.
39
+ * We collect these two parts in CPUARMTBFlags where they are named
40
+ * flags and flags2 respectively.
41
*
42
- * 31 20 18 14 9 0
43
- * +--------------+-----+-----+----------+--------------+
44
- * | | | TBFLAG_A32 | |
45
- * | | +-----+----------+ TBFLAG_AM32 |
46
- * | TBFLAG_ANY | |TBFLAG_M32| |
47
- * | +-----------+----------+--------------|
48
- * | | TBFLAG_A64 |
49
- * +--------------+-------------------------------------+
50
- * 31 20 0
51
+ * The flags that are shared between all execution modes, TBFLAG_ANY,
52
+ * are stored in flags. The flags that are specific to a given mode
53
+ * are stored in flags2.  Since cs_base is sized on the configured
54
+ * address size, flags2 always has 64-bits for A64, and a minimum of
55
+ * 32-bits for A32 and M32.
56
+ *
57
+ * The bits for 32-bit A-profile and M-profile partially overlap:
58
+ *
59
+ * 18 9 0
60
+ * +----------------+--------------+
61
+ * | TBFLAG_A32 | |
62
+ * +-----+----------+ TBFLAG_AM32 |
63
+ * | |TBFLAG_M32| |
64
+ * +-----+----------+--------------+
65
+ * 14 9 0
66
*
67
* Unless otherwise noted, these bits are cached in env->hflags.
68
*/
69
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
70
#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
71
(DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
72
#define DP_TBFLAG_A64(DST, WHICH, VAL) \
73
- (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A64, WHICH, VAL))
74
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A64, WHICH, VAL))
75
#define DP_TBFLAG_A32(DST, WHICH, VAL) \
76
- (DST.flags = FIELD_DP32(DST.flags, TBFLAG_A32, WHICH, VAL))
77
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL))
78
#define DP_TBFLAG_M32(DST, WHICH, VAL) \
79
- (DST.flags = FIELD_DP32(DST.flags, TBFLAG_M32, WHICH, VAL))
80
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_M32, WHICH, VAL))
81
#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
82
- (DST.flags = FIELD_DP32(DST.flags, TBFLAG_AM32, WHICH, VAL))
83
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL))
84
85
#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
86
-#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A64, WHICH)
87
-#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_A32, WHICH)
88
-#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_M32, WHICH)
89
-#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_AM32, WHICH)
90
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A64, WHICH)
91
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH)
92
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH)
93
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH)
94
95
/**
96
* cpu_mmu_index:
97
diff --git a/target/arm/translate.h b/target/arm/translate.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/target/arm/translate.h
100
+++ b/target/arm/translate.h
101
@@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
102
*/
103
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
104
{
105
- return (CPUARMTBFlags){ tb->flags };
106
+ return (CPUARMTBFlags){ tb->flags, tb->cs_base };
47
}
107
}
48
108
49
+static inline bool isar_feature_aa32_ras(const ARMISARegisters *id)
109
/*
50
+{
110
diff --git a/target/arm/helper.c b/target/arm/helper.c
51
+ return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0;
111
index XXXXXXX..XXXXXXX 100644
52
+}
112
--- a/target/arm/helper.c
53
+
113
+++ b/target/arm/helper.c
54
static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id)
114
@@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
115
CPUARMTBFlags c = env->hflags;
116
CPUARMTBFlags r = rebuild_hflags_internal(env);
117
118
- if (unlikely(c.flags != r.flags)) {
119
- fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
120
- c.flags, r.flags);
121
+ if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
122
+ fprintf(stderr, "TCG hflags mismatch "
123
+ "(current:(0x%08x,0x" TARGET_FMT_lx ")"
124
+ " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
125
+ c.flags, c.flags2, r.flags, r.flags2);
126
abort();
127
}
128
#endif
129
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
55
{
130
{
56
return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0;
131
CPUARMTBFlags flags;
57
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
132
58
index XXXXXXX..XXXXXXX 100644
133
- *cs_base = 0;
59
--- a/target/arm/t32.decode
134
assert_hflags_rebuild_correctly(env);
60
+++ b/target/arm/t32.decode
135
flags = env->hflags;
61
@@ -XXX,XX +XXX,XX @@ CLZ 1111 1010 1011 ---- 1111 .... 1000 .... @rdm
136
62
# SEV 1111 0011 1010 1111 1000 0000 0000 0100
137
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
63
# SEVL 1111 0011 1010 1111 1000 0000 0000 0101
64
65
+ # For M-profile minimal-RAS ESB can be a NOP, which is the
66
+ # default behaviour since it is in the hint space.
67
+ # ESB 1111 0011 1010 1111 1000 0000 0001 0000
68
+
69
# The canonical nop ends in 0000 0000, but the whole rest
70
# of the space is "reserved hint, behaves as nop".
71
NOP 1111 0011 1010 1111 1000 0000 ---- ----
72
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/hw/intc/armv7m_nvic.c
75
+++ b/hw/intc/armv7m_nvic.c
76
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
77
return 0;
78
}
79
return cpu->env.v7m.sfar;
80
+ case 0xf04: /* RFSR */
81
+ if (!cpu_isar_feature(aa32_ras, cpu)) {
82
+ goto bad_offset;
83
+ }
84
+ /* We provide minimal-RAS only: RFSR is RAZ/WI */
85
+ return 0;
86
case 0xf34: /* FPCCR */
87
if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
88
return 0;
89
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
90
R_V7M_AIRCR_PRIGROUP_SHIFT,
91
R_V7M_AIRCR_PRIGROUP_LENGTH);
92
}
93
+ /* AIRCR.IESB is RAZ/WI because we implement only minimal RAS */
94
if (attrs.secure) {
95
/* These bits are only writable by secure */
96
cpu->env.v7m.aircr = value &
97
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
98
}
99
break;
100
}
138
}
101
+ case 0xf04: /* RFSR */
139
102
+ if (!cpu_isar_feature(aa32_ras, cpu)) {
140
*pflags = flags.flags;
103
+ goto bad_offset;
141
+ *cs_base = flags.flags2;
104
+ }
142
}
105
+ /* We provide minimal-RAS only: RFSR is RAZ/WI */
143
106
+ break;
144
#ifdef TARGET_AARCH64
107
case 0xf34: /* FPCCR */
108
if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
109
/* Not all bits here are banked. */
110
--
145
--
111
2.20.1
146
2.20.1
112
147
113
148
From: Richard Henderson <richard.henderson@linaro.org>

Now that these bits have been moved out of tb->flags,
where TBFLAG_ANY was filling from the top, move AM32
to fill from the top, and A32 and M32 to fill from the
bottom. This means fewer changes when adding new bits.

v8.1M introduces a new TRD flag in the CCR register, which enables
checking for stack frame integrity signatures on SG instructions.
This bit is not banked, and is always RAZ/WI to Non-secure code.
Adjust the code for handling CCR reads and writes to handle this.
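The CCR.TRD behaviour described above boils down to building a write
mask that only ever contains the TRD bit for the Secure view on a
v8.1M CPU. A minimal sketch, assuming plain booleans in place of
QEMU's arm_feature() and attrs.secure checks, and showing only one of
the other writable bits:

    #include <stdbool.h>
    #include <stdint.h>

    #define R_V7M_CCR_STKALIGN_MASK (1u << 9)
    #define R_V7M_CCR_TRD_MASK      (1u << 20)

    static uint32_t v7m_ccr_masked_write(uint32_t value, bool is_v8_1m,
                                         bool secure)
    {
        uint32_t mask = R_V7M_CCR_STKALIGN_MASK; /* | ...other RW bits */

        if (is_v8_1m && secure) {
            /* TRD is only ever writable via the Secure view... */
            mask |= R_V7M_CCR_TRD_MASK;
        }
        /* ...so a Non-secure write never has it in the mask, i.e. it
         * is RAZ/WI. Reserved and must-RAZ/WI bits are simply dropped,
         * as in the nvic_writel() hunk below. */
        return value & mask;
    }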
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210419202257.161730-9-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20201119215617.29887-23-peter.maydell@linaro.org
9
---
12
---
10
target/arm/cpu.h | 2 ++
13
target/arm/cpu.h | 42 +++++++++++++++++++++---------------------
11
hw/intc/armv7m_nvic.c | 26 ++++++++++++++++++--------
14
1 file changed, 21 insertions(+), 21 deletions(-)
12
2 files changed, 20 insertions(+), 8 deletions(-)
13
15
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.h
18
--- a/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
18
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
20
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
19
FIELD(V7M_CCR, DC, 16, 1)
21
*
20
FIELD(V7M_CCR, IC, 17, 1)
22
* The bits for 32-bit A-profile and M-profile partially overlap:
21
FIELD(V7M_CCR, BP, 18, 1)
23
*
22
+FIELD(V7M_CCR, LOB, 19, 1)
24
- * 18 9 0
23
+FIELD(V7M_CCR, TRD, 20, 1)
25
- * +----------------+--------------+
24
26
- * | TBFLAG_A32 | |
25
/* V7M SCR bits */
27
- * +-----+----------+ TBFLAG_AM32 |
26
FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
28
- * | |TBFLAG_M32| |
27
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
29
- * +-----+----------+--------------+
28
index XXXXXXX..XXXXXXX 100644
30
- * 14 9 0
29
--- a/hw/intc/armv7m_nvic.c
31
+ * 31 23 11 10 0
30
+++ b/hw/intc/armv7m_nvic.c
32
+ * +-------------+----------+----------------+
31
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
33
+ * | | | TBFLAG_A32 |
32
}
34
+ * | TBFLAG_AM32 | +-----+----------+
33
return cpu->env.v7m.scr[attrs.secure];
35
+ * | | |TBFLAG_M32|
34
case 0xd14: /* Configuration Control. */
36
+ * +-------------+----------------+----------+
35
- /* The BFHFNMIGN bit is the only non-banked bit; we
37
+ * 31 23 5 4 0
36
- * keep it in the non-secure copy of the register.
38
*
37
+ /*
39
* Unless otherwise noted, these bits are cached in env->hflags.
38
+ * Non-banked bits: BFHFNMIGN (stored in the NS copy of the register)
40
*/
39
+ * and TRD (stored in the S copy of the register)
41
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
40
*/
42
/*
41
val = cpu->env.v7m.ccr[attrs.secure];
43
* Bit usage when in AArch32 state, both A- and M-profile.
42
val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
44
*/
43
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
45
-FIELD(TBFLAG_AM32, CONDEXEC, 0, 8) /* Not cached. */
44
cpu->env.v7m.scr[attrs.secure] = value;
46
-FIELD(TBFLAG_AM32, THUMB, 8, 1) /* Not cached. */
45
break;
47
+FIELD(TBFLAG_AM32, CONDEXEC, 24, 8) /* Not cached. */
46
case 0xd14: /* Configuration Control. */
48
+FIELD(TBFLAG_AM32, THUMB, 23, 1) /* Not cached. */
47
+ {
49
48
+ uint32_t mask;
50
/*
49
+
51
* Bit usage when in AArch32 state, for A-profile only.
50
if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
52
*/
51
goto bad_offset;
53
-FIELD(TBFLAG_A32, VECLEN, 9, 3) /* Not cached. */
52
}
54
-FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */
53
55
+FIELD(TBFLAG_A32, VECLEN, 0, 3) /* Not cached. */
54
/* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
56
+FIELD(TBFLAG_A32, VECSTRIDE, 3, 2) /* Not cached. */
55
- value &= (R_V7M_CCR_STKALIGN_MASK |
57
/*
56
- R_V7M_CCR_BFHFNMIGN_MASK |
58
* We store the bottom two bits of the CPAR as TB flags and handle
57
- R_V7M_CCR_DIV_0_TRP_MASK |
59
* checks on the other bits at runtime. This shares the same bits as
58
- R_V7M_CCR_UNALIGN_TRP_MASK |
60
* VECSTRIDE, which is OK as no XScale CPU has VFP.
59
- R_V7M_CCR_USERSETMPEND_MASK |
61
* Not cached, because VECLEN+VECSTRIDE are not cached.
60
- R_V7M_CCR_NONBASETHRDENA_MASK);
62
*/
61
+ mask = R_V7M_CCR_STKALIGN_MASK |
63
-FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
62
+ R_V7M_CCR_BFHFNMIGN_MASK |
64
-FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */
63
+ R_V7M_CCR_DIV_0_TRP_MASK |
65
-FIELD(TBFLAG_A32, SCTLR__B, 15, 1) /* Cannot overlap with SCTLR_B */
64
+ R_V7M_CCR_UNALIGN_TRP_MASK |
66
-FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
65
+ R_V7M_CCR_USERSETMPEND_MASK |
67
+FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
66
+ R_V7M_CCR_NONBASETHRDENA_MASK;
68
+FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Partially cached, minus FPEXC. */
67
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) {
69
+FIELD(TBFLAG_A32, SCTLR__B, 8, 1) /* Cannot overlap with SCTLR_B */
68
+ /* TRD is always RAZ/WI from NS */
70
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
69
+ mask |= R_V7M_CCR_TRD_MASK;
71
/*
70
+ }
72
* Indicates whether cp register reads and writes by guest code should access
71
+ value &= mask;
73
* the secure or nonsecure bank of banked registers; note that this is not
72
74
* the same thing as the current security state of the processor!
73
if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
75
*/
74
/* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
76
-FIELD(TBFLAG_A32, NS, 17, 1)
75
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
77
+FIELD(TBFLAG_A32, NS, 10, 1)
76
78
77
cpu->env.v7m.ccr[attrs.secure] = value;
79
/*
78
break;
80
* Bit usage when in AArch32 state, for M-profile only.
79
+ }
81
*/
80
case 0xd24: /* System Handler Control and State (SHCSR) */
82
/* Handler (ie not Thread) mode */
81
if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
83
-FIELD(TBFLAG_M32, HANDLER, 9, 1)
82
goto bad_offset;
84
+FIELD(TBFLAG_M32, HANDLER, 0, 1)
85
/* Whether we should generate stack-limit checks */
86
-FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
87
+FIELD(TBFLAG_M32, STACKCHECK, 1, 1)
88
/* Set if FPCCR.LSPACT is set */
89
-FIELD(TBFLAG_M32, LSPACT, 11, 1) /* Not cached. */
90
+FIELD(TBFLAG_M32, LSPACT, 2, 1) /* Not cached. */
91
/* Set if we must create a new FP context */
92
-FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1) /* Not cached. */
93
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */
94
/* Set if FPCCR.S does not match current security state */
95
-FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */
96
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */
97
98
/*
99
* Bit usage when in AArch64 state
83
--
100
--
84
2.20.1
101
2.20.1
85
102
86
103
From: Richard Henderson <richard.henderson@linaro.org>

Now that other bits have been moved out of tb->flags,
there's no point in filling from the top.

v8.1M defines a new FP system register FPSCR_nzcvqc; this behaves
like the existing FPSCR, except that it reads and writes only bits
[31:27] of the FPSCR (the N, Z, C, V and QC flag bits). (Unlike the
FPSCR, the special case for Rt=15 of writing the CPSR.NZCV is not
permitted.)

Implement the register. Since we don't yet implement MVE, we handle
the QC bit as RES0, with TODO comments for where we will need to add
support later.
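A sketch of those FPSCR_nzcvqc semantics, operating on a plain
uint32_t FPSCR image rather than TCG values (the FPCR_* constants
mirror the ones added in the cpu.h hunk below):

    #include <stdint.h>

    #define FPCR_QC          (1u << 27)
    #define FPCR_V           (1u << 28)
    #define FPCR_C           (1u << 29)
    #define FPCR_Z           (1u << 30)
    #define FPCR_N           (1u << 31)
    #define FPCR_NZCV_MASK   (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
    #define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)

    static uint32_t read_fpscr_nzcvqc(uint32_t fpscr)
    {
        /* Without MVE, QC is RES0, so only the NZCV flags are seen. */
        return fpscr & FPCR_NZCV_MASK;
    }

    static uint32_t write_fpscr_nzcvqc(uint32_t fpscr, uint32_t val)
    {
        /* Only bits [31:28] are deposited; QC is RES0 on write too,
         * and everything below bit 27 is untouched. */
        return (fpscr & ~FPCR_NZCV_MASK) | (val & FPCR_NZCV_MASK);
    }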
10
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210419202257.161730-10-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20201119215617.29887-11-peter.maydell@linaro.org
14
---
10
---
15
target/arm/cpu.h | 13 +++++++++++++
11
target/arm/cpu.h | 14 +++++++-------
16
target/arm/translate-vfp.c.inc | 27 +++++++++++++++++++++++++++
12
1 file changed, 7 insertions(+), 7 deletions(-)
17
2 files changed, 40 insertions(+)
18
13
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
16
--- a/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
23
@@ -XXX,XX +XXX,XX @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
18
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
24
#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
19
*
25
#define FPCR_DN (1 << 25) /* Default NaN enable bit */
20
* Unless otherwise noted, these bits are cached in env->hflags.
26
#define FPCR_QC (1 << 27) /* Cumulative saturation bit */
21
*/
27
+#define FPCR_V (1 << 28) /* FP overflow flag */
22
-FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
28
+#define FPCR_C (1 << 29) /* FP carry flag */
23
-FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
29
+#define FPCR_Z (1 << 30) /* FP zero flag */
24
-FIELD(TBFLAG_ANY, PSTATE__SS, 29, 1) /* Not cached. */
30
+#define FPCR_N (1 << 31) /* FP negative flag */
25
-FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
31
+
26
-FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
32
+#define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
27
+FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1)
33
+#define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)
28
+FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1)
34
29
+FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1) /* Not cached. */
35
static inline uint32_t vfp_get_fpsr(CPUARMState *env)
30
+FIELD(TBFLAG_ANY, BE_DATA, 3, 1)
36
{
31
+FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
37
@@ -XXX,XX +XXX,XX @@ enum arm_cpu_mode {
32
/* Target EL if we take a floating-point-disabled exception */
38
#define ARM_VFP_FPEXC 8
33
-FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2)
39
#define ARM_VFP_FPINST 9
34
+FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
40
#define ARM_VFP_FPINST2 10
35
/* For A-profile only, target EL for debug exceptions. */
41
+/* These ones are M-profile only */
36
-FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
42
+#define ARM_VFP_FPSCR_NZCVQC 2
37
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
43
+#define ARM_VFP_VPR 12
38
44
+#define ARM_VFP_P0 13
39
/*
45
+#define ARM_VFP_FPCXT_NS 14
40
* Bit usage when in AArch32 state, both A- and M-profile.
46
+#define ARM_VFP_FPCXT_S 15
47
48
/* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
49
#define QEMU_VFP_FPSCR_NZCV 0xffff
50
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/translate-vfp.c.inc
53
+++ b/target/arm/translate-vfp.c.inc
54
@@ -XXX,XX +XXX,XX @@ static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
55
case ARM_VFP_FPSCR:
56
case QEMU_VFP_FPSCR_NZCV:
57
break;
58
+ case ARM_VFP_FPSCR_NZCVQC:
59
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
60
+ return false;
61
+ }
62
+ break;
63
default:
64
return FPSysRegCheckFailed;
65
}
66
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
67
tcg_temp_free_i32(tmp);
68
gen_lookup_tb(s);
69
break;
70
+ case ARM_VFP_FPSCR_NZCVQC:
71
+ {
72
+ TCGv_i32 fpscr;
73
+ tmp = loadfn(s, opaque);
74
+ /*
75
+ * TODO: when we implement MVE, write the QC bit.
76
+ * For non-MVE, QC is RES0.
77
+ */
78
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
79
+ fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
80
+ tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
81
+ tcg_gen_or_i32(fpscr, fpscr, tmp);
82
+ store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
83
+ tcg_temp_free_i32(tmp);
84
+ break;
85
+ }
86
default:
87
g_assert_not_reached();
88
}
89
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
90
gen_helper_vfp_get_fpscr(tmp, cpu_env);
91
storefn(s, opaque, tmp);
92
break;
93
+ case ARM_VFP_FPSCR_NZCVQC:
94
+ /*
95
+ * TODO: MVE has a QC bit, which we probably won't store
96
+ * in the xregs[] field. For non-MVE, where QC is RES0,
97
+ * we can just fall through to the FPSCR_NZCV case.
98
+ */
99
case QEMU_VFP_FPSCR_NZCV:
100
/*
101
* Read just NZCV; this is a special case to avoid the
102
--
41
--
103
2.20.1
42
2.20.1
104
43
105
44
From: Richard Henderson <richard.henderson@linaro.org>

Use this to signal when memory access alignment is required.
This value comes from the CCR register for M-profile, and
from the SCTLR register for A-profile.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-11-richard.henderson@linaro.org

Currently M-profile borrows the A-profile code for VMSR and VMRS
(access to the FP system registers), because all it needs to support
is the FPSCR. In v8.1M things become significantly more complicated
in two ways:

 * there are several new FP system registers; some have side effects
   on read, and one (FPCXT_NS) needs to avoid the usual
   vfp_access_check() and the "only if FPU implemented" check

 * all sysregs are now accessible both by VMRS/VMSR (which
   reads/writes a general purpose register) and also by VLDR/VSTR
   (which reads/writes them directly to memory)

Refactor the structure of how we handle VMSR/VMRS to cope with this:

 * keep the M-profile code entirely separate from the A-profile code

 * abstract out the "read or write the general purpose register" part
   of the code into a loadfn or storefn function pointer, so we can
   reuse it for VLDR/VSTR (sketched below)
21
22
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
24
Message-id: 20201119215617.29887-8-peter.maydell@linaro.org
25
---
11
---
26
target/arm/cpu.h | 3 +
12
target/arm/cpu.h | 2 ++
27
target/arm/translate-vfp.c.inc | 182 ++++++++++++++++++++++++++++++---
13
target/arm/translate.h | 2 ++
28
2 files changed, 171 insertions(+), 14 deletions(-)
14
target/arm/helper.c | 19 +++++++++++++++++--
15
target/arm/translate-a64.c | 1 +
16
target/arm/translate.c | 7 +++----
17
5 files changed, 25 insertions(+), 6 deletions(-)
29
18
30
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
31
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/cpu.h
21
--- a/target/arm/cpu.h
33
+++ b/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
34
@@ -XXX,XX +XXX,XX @@ enum arm_cpu_mode {
23
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
35
#define ARM_VFP_FPINST 9
24
FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
36
#define ARM_VFP_FPINST2 10
25
/* For A-profile only, target EL for debug exceptions. */
37
26
FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
38
+/* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
27
+/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
39
+#define QEMU_VFP_FPSCR_NZCV 0xffff
28
+FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1)
29
30
/*
31
* Bit usage when in AArch32 state, both A- and M-profile.
32
diff --git a/target/arm/translate.h b/target/arm/translate.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/translate.h
35
+++ b/target/arm/translate.h
36
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
37
bool bt;
38
/* True if any CP15 access is trapped by HSTR_EL2 */
39
bool hstr_active;
40
+ /* True if memory operations require alignment */
41
+ bool align_mem;
42
/*
43
* >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
44
* < 0, set by the current instruction.
45
diff --git a/target/arm/helper.c b/target/arm/helper.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/helper.c
48
+++ b/target/arm/helper.c
49
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
50
ARMMMUIdx mmu_idx)
51
{
52
CPUARMTBFlags flags = {};
53
+ uint32_t ccr = env->v7m.ccr[env->v7m.secure];
40
+
54
+
41
/* iwMMXt coprocessor control registers. */
55
+ /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
42
#define ARM_IWMMXT_wCID 0
56
+ if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
43
#define ARM_IWMMXT_wCon 1
57
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
44
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
58
+ }
45
index XXXXXXX..XXXXXXX 100644
59
46
--- a/target/arm/translate-vfp.c.inc
60
if (arm_v7m_is_handler_mode(env)) {
47
+++ b/target/arm/translate-vfp.c.inc
61
DP_TBFLAG_M32(flags, HANDLER, 1);
48
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
62
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
49
return true;
63
*/
50
}
64
if (arm_feature(env, ARM_FEATURE_V8) &&
51
65
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
52
+/*
66
- (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
53
+ * M-profile provides two different sets of instructions that can
67
+ (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
54
+ * access floating point system registers: VMSR/VMRS (which move
68
DP_TBFLAG_M32(flags, STACKCHECK, 1);
55
+ * to/from a general purpose register) and VLDR/VSTR sysreg (which
69
}
56
+ * move directly to/from memory). In some cases there are also side
70
57
+ * effects which must happen after any write to memory (which could
71
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
58
+ * cause an exception). So we implement the common logic for the
72
ARMMMUIdx mmu_idx)
59
+ * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
73
{
60
+ * which take pointers to callback functions which will perform the
74
CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
61
+ * actual "read/write general purpose register" and "read/write
75
+ int el = arm_current_el(env);
62
+ * memory" operations.
63
+ */
64
+
76
+
65
+/*
77
+ if (arm_sctlr(env, el) & SCTLR_A) {
66
+ * Emit code to store the sysreg to its final destination; frees the
78
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
67
+ * TCG temp 'value' it is passed.
79
+ }
68
+ */
80
69
+typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
81
if (arm_el_is_aa64(env, 1)) {
70
+/*
82
DP_TBFLAG_A32(flags, VFPEN, 1);
71
+ * Emit code to load the value to be copied to the sysreg; returns
83
}
72
+ * a new TCG temporary
84
73
+ */
85
- if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
74
+typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
86
+ if (el < 2 && env->cp15.hstr_el2 &&
75
+
87
(arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
76
+/* Common decode/access checks for fp sysreg read/write */
88
DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
77
+typedef enum FPSysRegCheckResult {
89
}
78
+ FPSysRegCheckFailed, /* caller should return false */
90
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
79
+ FPSysRegCheckDone, /* caller should return true */
91
80
+ FPSysRegCheckContinue, /* caller should continue generating code */
92
sctlr = regime_sctlr(env, stage1);
81
+} FPSysRegCheckResult;
93
82
+
94
+ if (sctlr & SCTLR_A) {
83
+static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
95
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
84
+{
85
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
86
+ return FPSysRegCheckFailed;
87
+ }
96
+ }
88
+
97
+
89
+ switch (regno) {
98
if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
90
+ case ARM_VFP_FPSCR:
99
DP_TBFLAG_ANY(flags, BE_DATA, 1);
91
+ case QEMU_VFP_FPSCR_NZCV:
100
}
92
+ break;
101
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
93
+ default:
102
index XXXXXXX..XXXXXXX 100644
94
+ return FPSysRegCheckFailed;
103
--- a/target/arm/translate-a64.c
95
+ }
104
+++ b/target/arm/translate-a64.c
96
+
105
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
97
+ if (!vfp_access_check(s)) {
106
dc->user = (dc->current_el == 0);
98
+ return FPSysRegCheckDone;
107
#endif
99
+ }
108
dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
100
+
109
+ dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
101
+ return FPSysRegCheckContinue;
110
dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
102
+}
111
dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
103
+
112
dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
104
+static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
113
diff --git a/target/arm/translate.c b/target/arm/translate.c
105
+
114
index XXXXXXX..XXXXXXX 100644
106
+ fp_sysreg_loadfn *loadfn,
115
--- a/target/arm/translate.c
107
+ void *opaque)
116
+++ b/target/arm/translate.c
108
+{
117
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
109
+ /* Do a write to an M-profile floating point system register */
110
+ TCGv_i32 tmp;
111
+
112
+ switch (fp_sysreg_checks(s, regno)) {
113
+ case FPSysRegCheckFailed:
114
+ return false;
115
+ case FPSysRegCheckDone:
116
+ return true;
117
+ case FPSysRegCheckContinue:
118
+ break;
119
+ }
120
+
121
+ switch (regno) {
122
+ case ARM_VFP_FPSCR:
123
+ tmp = loadfn(s, opaque);
124
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
125
+ tcg_temp_free_i32(tmp);
126
+ gen_lookup_tb(s);
127
+ break;
128
+ default:
129
+ g_assert_not_reached();
130
+ }
131
+ return true;
132
+}
133
+
134
+static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
135
+ fp_sysreg_storefn *storefn,
136
+ void *opaque)
137
+{
138
+ /* Do a read from an M-profile floating point system register */
139
+ TCGv_i32 tmp;
140
+
141
+ switch (fp_sysreg_checks(s, regno)) {
142
+ case FPSysRegCheckFailed:
143
+ return false;
144
+ case FPSysRegCheckDone:
145
+ return true;
146
+ case FPSysRegCheckContinue:
147
+ break;
148
+ }
149
+
150
+ switch (regno) {
151
+ case ARM_VFP_FPSCR:
152
+ tmp = tcg_temp_new_i32();
153
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
154
+ storefn(s, opaque, tmp);
155
+ break;
156
+ case QEMU_VFP_FPSCR_NZCV:
157
+ /*
158
+ * Read just NZCV; this is a special case to avoid the
159
+ * helper call for the "VMRS to CPSR.NZCV" insn.
160
+ */
161
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
162
+ tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
163
+ storefn(s, opaque, tmp);
164
+ break;
165
+ default:
166
+ g_assert_not_reached();
167
+ }
168
+ return true;
169
+}
170
+
171
+static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
172
+{
173
+ arg_VMSR_VMRS *a = opaque;
174
+
175
+ if (a->rt == 15) {
176
+ /* Set the 4 flag bits in the CPSR */
177
+ gen_set_nzcv(value);
178
+ tcg_temp_free_i32(value);
179
+ } else {
180
+ store_reg(s, a->rt, value);
181
+ }
182
+}
183
+
184
+static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
185
+{
186
+ arg_VMSR_VMRS *a = opaque;
187
+
188
+ return load_reg(s, a->rt);
189
+}
190
+
191
+static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
192
+{
193
+ /*
194
+ * Accesses to R15 are UNPREDICTABLE; we choose to undef.
195
+ * FPSCR -> r15 is a special case which writes to the PSR flags;
196
+ * set a->reg to a special value to tell gen_M_fp_sysreg_read()
197
+ * we only care about the top 4 bits of FPSCR there.
198
+ */
199
+ if (a->rt == 15) {
200
+ if (a->l && a->reg == ARM_VFP_FPSCR) {
201
+ a->reg = QEMU_VFP_FPSCR_NZCV;
202
+ } else {
203
+ return false;
204
+ }
205
+ }
206
+
207
+ if (a->l) {
208
+ /* VMRS, move FP system register to gp register */
209
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
210
+ } else {
211
+ /* VMSR, move gp register to FP system register */
212
+ return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
213
+ }
214
+}
215
+
216
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
217
{
118
{
218
TCGv_i32 tmp;
119
TCGv addr;
219
bool ignore_vfp_enabled = false;
120
220
121
- if (arm_dc_feature(s, ARM_FEATURE_M) &&
221
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
122
- !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
222
- return false;
123
+ if (s->align_mem) {
223
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
124
opc |= MO_ALIGN;
224
+ return gen_M_VMSR_VMRS(s, a);
225
}
125
}
226
126
227
- if (arm_dc_feature(s, ARM_FEATURE_M)) {
127
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
228
- /*
128
{
229
- * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
129
TCGv addr;
230
- * Accesses to R15 are UNPREDICTABLE; we choose to undef.
130
231
- * (FPSCR -> r15 is a special case which writes to the PSR flags.)
131
- if (arm_dc_feature(s, ARM_FEATURE_M) &&
232
- */
132
- !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
233
- if (a->reg != ARM_VFP_FPSCR) {
133
+ if (s->align_mem) {
234
- return false;
134
opc |= MO_ALIGN;
235
- }
236
- if (a->rt == 15 && !a->l) {
237
- return false;
238
- }
239
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
240
+ return false;
241
}
135
}
242
136
243
switch (a->reg) {
137
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
138
dc->user = (dc->current_el == 0);
139
#endif
140
dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
141
+ dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
142
143
if (arm_feature(env, ARM_FEATURE_M)) {
144
dc->vfp_enabled = 1;
244
--
145
--
245
2.20.1
146
2.20.1
246
147
247
148
diff view generated by jsdifflib
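The pattern this patch sets up is: compute one bit when the hflags are
rebuilt, then read it once per TB at translate time. A minimal
self-contained sketch of that round trip (hypothetical names; QEMU's real
FIELD()/DP_TBFLAG_ANY/EX_TBFLAG_ANY macros generate the equivalent):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One-bit flag at bit 12 of the cached TB flags, mirroring
 * FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1). */
#define ALIGN_MEM_SHIFT 12

/* Set when hflags are rebuilt (cf. rebuild_hflags_m32/_a32/_a64 above):
 * the bit is 1 if SCTLR.A (A-profile) or CCR.UNALIGN_TRP (M-profile)
 * demands alignment checking. */
static uint32_t rebuild_flags(bool align_required)
{
    return align_required ? (1u << ALIGN_MEM_SHIFT) : 0;
}

/* Read once per TB (cf. dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM)). */
static bool align_mem(uint32_t tb_flags)
{
    return (tb_flags >> ALIGN_MEM_SHIFT) & 1;
}

int main(void)
{
    printf("align required: %d\n", align_mem(rebuild_flags(true))); /* 1 */
    return 0;
}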
Implement the new-in-v8.1M VLDR/VSTR variants which directly
read or write FP system registers to memory.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-10-peter.maydell@linaro.org
---
target/arm/vfp.decode | 14 ++++++
target/arm/translate-vfp.c.inc | 91 ++++++++++++++++++++++++++++++++++
2 files changed, 105 insertions(+)

diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR_hp ---- 1101 u:1 .0 l:1 rn:4 .... 1001 imm:8 vd=%vd_sp
VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 vd=%vd_sp
VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 vd=%vd_dp

+# M-profile VLDR/VSTR to sysreg
+%vldr_sysreg 22:1 13:3
+%imm7_0x4 0:7 !function=times_4
+
+&vldr_sysreg rn reg imm a w p
+@vldr_sysreg .... ... . a:1 . . . rn:4 ... . ... .. ....... \
+ reg=%vldr_sysreg imm=%imm7_0x4 &vldr_sysreg
+
+# P=0 W=0 is SEE "Related encodings", so split into two patterns
+VLDR_sysreg ---- 110 1 . . w:1 1 .... ... 0 111 11 ....... @vldr_sysreg p=1
+VLDR_sysreg ---- 110 0 . . 1 1 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
+VSTR_sysreg ---- 110 1 . . w:1 0 .... ... 0 111 11 ....... @vldr_sysreg p=1
+VSTR_sysreg ---- 110 0 . . 1 0 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
+
# We split the load/store multiple up into two patterns to avoid
# overlap with other insns in the "Advanced SIMD load/store and 64-bit move"
# grouping:
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
return true;
}

+static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
+{
+ arg_vldr_sysreg *a = opaque;
+ uint32_t offset = a->imm;
+ TCGv_i32 addr;
+
+ if (!a->a) {
+ offset = - offset;
+ }
+
+ addr = load_reg(s, a->rn);
+ if (a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ gen_aa32_st_i32(s, value, addr, get_mem_index(s),
+ MO_UL | MO_ALIGN | s->be_data);
+ tcg_temp_free_i32(value);
+
+ if (a->w) {
+ /* writeback */
+ if (!a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+}
+
+static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
+{
+ arg_vldr_sysreg *a = opaque;
+ uint32_t offset = a->imm;
+ TCGv_i32 addr;
+ TCGv_i32 value = tcg_temp_new_i32();
+
+ if (!a->a) {
+ offset = - offset;
+ }
+
+ addr = load_reg(s, a->rn);
+ if (a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
+ MO_UL | MO_ALIGN | s->be_data);
+
+ if (a->w) {
+ /* writeback */
+ if (!a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ return value;
+}
+
+static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return false;
+ }
+ if (a->rn == 15) {
+ return false;
+ }
+ return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
+}
+
+static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return false;
+ }
+ if (a->rn == 15) {
+ return false;
+ }
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
+}
+
static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
{
TCGv_i32 tmp;
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Create a finalize_memop function that computes alignment and
endianness and returns the final MemOp for the operation.

Split out gen_aa32_{ld,st}_internal_i32 which bypasses any special
handling of endianness or alignment. Adjust gen_aa32_{ld,st}_i32
so that s->be_data is not added by the callers.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.h | 24 ++++++++
target/arm/translate.c | 100 +++++++++++++++++---------------
target/arm/translate-neon.c.inc | 9 +--
3 files changed, 79 insertions(+), 54 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
return statusptr;
}

+/**
+ * finalize_memop:
+ * @s: DisasContext
+ * @opc: size+sign+align of the memory operation
+ *
+ * Build the complete MemOp for a memory operation, including alignment
+ * and endianness.
+ *
+ * If (op & MO_AMASK) then the operation already contains the required
+ * alignment, e.g. for AccType_ATOMIC. Otherwise, this is an optionally
+ * unaligned operation, e.g. for AccType_NORMAL.
+ *
+ * In the latter case, there are configuration bits that require alignment,
+ * and this is applied here. Note that there is no way to indicate that
+ * no alignment should ever be enforced; this must be handled manually.
+ */
+static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
+{
+ if (s->align_mem && !(opc & MO_AMASK)) {
+ opc |= MO_ALIGN;
+ }
+ return opc | s->be_data;
+}
+
#endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
#define IS_USER_ONLY 0
#endif

-/* Abstractions of "generate code to do a guest load/store for
+/*
+ * Abstractions of "generate code to do a guest load/store for
* AArch32", where a vaddr is always 32 bits (and is zero
* extended if we're a 64 bit core) and data is also
* 32 bits unless specifically doing a 64 bit access.
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
* that the address argument is TCGv_i32 rather than TCGv.
*/

-static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
+static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
{
TCGv addr = tcg_temp_new();
tcg_gen_extu_i32_tl(addr, a32);
@@ -XXX,XX +XXX,XX @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
return addr;
}

+/*
+ * Internal routines are used for NEON cases where the endianness
+ * and/or alignment has already been taken into account and manipulated.
+ */
+static void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_ld_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_st_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
+
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, MemOp opc)
{
- TCGv addr;
-
- if (s->align_mem) {
- opc |= MO_ALIGN;
- }
-
- addr = gen_aa32_addr(s, a32, opc);
- tcg_gen_qemu_ld_i32(val, addr, index, opc);
- tcg_temp_free(addr);
+ gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, MemOp opc)
{
- TCGv addr;
+ gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
+}

- if (s->align_mem) {
- opc |= MO_ALIGN;
+#define DO_GEN_LD(SUFF, OPC) \
+ static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_ld_i32(s, val, a32, index, OPC); \
}

- addr = gen_aa32_addr(s, a32, opc);
- tcg_gen_qemu_st_i32(val, addr, index, opc);
- tcg_temp_free(addr);
-}
-
-#define DO_GEN_LD(SUFF, OPC) \
-static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 a32, int index) \
-{ \
- gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
-}
-
-#define DO_GEN_ST(SUFF, OPC) \
-static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 a32, int index) \
-{ \
- gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
-}
+#define DO_GEN_ST(SUFF, OPC) \
+ static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_st_i32(s, val, a32, index, OPC); \
+ }

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
@@ -XXX,XX +XXX,XX @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
addr = op_addr_rr_pre(s, a);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);

/*
@@ -XXX,XX +XXX,XX @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
addr = op_addr_rr_pre(s, a);

tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);
tcg_temp_free_i32(tmp);

@@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
addr = op_addr_rr_pre(s, a);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
store_reg(s, a->rt, tmp);

tcg_gen_addi_i32(addr, addr, 4);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
store_reg(s, a->rt + 1, tmp);

/* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
addr = op_addr_rr_pre(s, a);

tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
tcg_temp_free_i32(tmp);

tcg_gen_addi_i32(addr, addr, 4);

tmp = load_reg(s, a->rt + 1);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
tcg_temp_free_i32(tmp);

op_addr_rr_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
addr = op_addr_ri_pre(s, a);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);

/*
@@ -XXX,XX +XXX,XX @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
addr = op_addr_ri_pre(s, a);

tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
disas_set_da_iss(s, mop, issinfo);
tcg_temp_free_i32(tmp);

@@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
addr = op_addr_ri_pre(s, a);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
store_reg(s, a->rt, tmp);

tcg_gen_addi_i32(addr, addr, 4);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
store_reg(s, rt2, tmp);

/* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
addr = op_addr_ri_pre(s, a);

tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
tcg_temp_free_i32(tmp);

tcg_gen_addi_i32(addr, addr, 4);

tmp = load_reg(s, rt2);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
tcg_temp_free_i32(tmp);

op_addr_ri_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
addr = load_reg(s, a->rn);
tmp = load_reg(s, a->rt);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);

tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)

addr = load_reg(s, a->rn);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
tcg_temp_free_i32(addr);

@@ -XXX,XX +XXX,XX @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
addr = load_reg(s, a->rn);
tcg_gen_add_i32(addr, addr, tmp);

- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
- half ? MO_UW | s->be_data : MO_UB);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
tcg_temp_free_i32(addr);

tcg_gen_add_i32(tmp, tmp, tmp);
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
addr = tcg_temp_new_i32();
load_reg_var(s, addr, a->rn);
for (reg = 0; reg < nregs; reg++) {
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
- s->be_data | size);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
if ((vd & 1) && vec_size == 16) {
/*
* We cannot write 16 bytes at once because the
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
*/
for (reg = 0; reg < nregs; reg++) {
if (a->l) {
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
- s->be_data | a->size);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
neon_store_element(vd, a->reg_idx, a->size, tmp);
} else { /* Store */
neon_load_element(tmp, vd, a->reg_idx, a->size);
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
- s->be_data | a->size);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
}
vd += a->stride;
tcg_gen_addi_i32(addr, addr, 1 << a->size);
--
2.20.1
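To see what finalize_memop buys callers, here is a self-contained model of
the composition it performs; the enum values are illustrative stand-ins,
not TCG's real MemOp encoding:

#include <stdio.h>

typedef enum {
    MO_UL    = 2,        /* 32-bit access, as an example size */
    MO_BE    = 1 << 2,   /* stands in for s->be_data */
    MO_ALIGN = 1 << 3,   /* alignment required */
    MO_AMASK = 1 << 3,   /* the alignment bits, collapsed to one here */
} MemOpModel;

/*
 * If the caller already asked for explicit alignment, keep it; otherwise
 * add MO_ALIGN when the CPU configuration requires it; always fold in the
 * endianness so callers no longer OR in be_data themselves.
 */
static int finalize_memop_model(int align_mem, int be_data, int opc)
{
    if (align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | be_data;
}

int main(void)
{
    int op = finalize_memop_model(1, MO_BE, MO_UL);
    printf("align enforced: %d\n", (op & MO_ALIGN) != 0); /* 1 */
    return 0;
}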
v8.1M introduces a new TRD flag in the CCR register, which enables
checking for stack frame integrity signatures on SG instructions.
Add the code in the SG insn implementation for the new behaviour.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-24-peter.maydell@linaro.org
---
target/arm/m_helper.c | 86 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 86 insertions(+)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
return true;
}

+static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+ uint32_t addr, uint32_t *spdata)
+{
+ /*
+ * Read a word of data from the stack for the SG instruction,
+ * writing the value into *spdata. If the load succeeds, return
+ * true; otherwise pend an appropriate exception and return false.
+ * (We can't use data load helpers here that throw an exception
+ * because of the context we're called in, which is halfway through
+ * arm_v7m_cpu_do_interrupt().)
+ */
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+ ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
+ uint32_t value;
+
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
+ /* MPU/SAU lookup failed */
+ if (fi.type == ARMFault_QEMU_SFault) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault during stack word read\n");
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+ env->v7m.sfar = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...MemManageFault during stack word read\n");
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
+ R_V7M_CFSR_MMARVALID_MASK;
+ env->v7m.mmfar[M_REG_S] = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
+ }
+ return false;
+ }
+ value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ /* BusFault trying to read the data */
+ qemu_log_mask(CPU_LOG_INT,
+ "...BusFault during stack word read\n");
+ env->v7m.cfsr[M_REG_NS] |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.bfar = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+ return false;
+ }
+
+ *spdata = value;
+ return true;
+}
+
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
/*
@@ -XXX,XX +XXX,XX @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
*/
qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
", executing it\n", env->regs[15]);
+
+ if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
+ !arm_v7m_is_handler_mode(env)) {
+ /*
+ * v8.1M exception stack frame integrity check. Note that we
+ * must perform the memory access even if CCR_S.TRD is zero
+ * and we aren't going to check what the data loaded is.
+ */
+ uint32_t spdata, sp;
+
+ /*
+ * We know we are currently NS, so the S stack pointers must be
+ * in other_ss_{psp,msp}, not in regs[13]/other_sp.
+ */
+ sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
+ if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
+ /* Stack access failed and an exception has been pended */
+ return false;
+ }
+
+ if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
+ if (((spdata & ~1) == 0xfefa125a) ||
+ !(env->v7m.control[M_REG_S] & 1)) {
+ goto gen_invep;
+ }
+ }
+ }
+
env->regs[14] &= ~1;
env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
switch_v7m_security_state(env, true);
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

This is the only caller. Adjust some commentary to talk
about SCTLR_B instead of the vanishing function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 37 ++++++++++++++++---------------------
1 file changed, 16 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
gen_aa32_st_i32(s, val, a32, index, OPC); \
}

-static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
-{
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b) {
- tcg_gen_rotri_i64(val, val, 32);
- }
-}
-
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
int index, MemOp opc)
{
TCGv addr = gen_aa32_addr(s, a32, opc);
tcg_gen_qemu_ld_i64(val, addr, index, opc);
- gen_aa32_frob64(s, val);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
+
tcg_temp_free(addr);
}

@@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i32 tmp2 = tcg_temp_new_i32();
TCGv_i64 t64 = tcg_temp_new_i64();

- /* For AArch32, architecturally the 32-bit word at the lowest
+ /*
+ * For AArch32, architecturally the 32-bit word at the lowest
* address is always Rt and the one at addr+4 is Rt2, even if
* the CPU is big-endian. That means we don't want to do a
- * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
- * for an architecturally 64-bit access, but instead do a
- * 64-bit access using MO_BE if appropriate and then split
- * the two halves.
- * This only makes a difference for BE32 user-mode, where
- * frob64() must not flip the two halves of the 64-bit data
- * but this code must treat BE32 user-mode like BE32 system.
+ * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
+ * architecturally 64-bit access, but instead do a 64-bit access
+ * using MO_BE if appropriate and then split the two halves.
*/
TCGv taddr = gen_aa32_addr(s, addr, opc);

@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i64 n64 = tcg_temp_new_i64();

t2 = load_reg(s, rt2);
- /* For AArch32, architecturally the 32-bit word at the lowest
+
+ /*
+ * For AArch32, architecturally the 32-bit word at the lowest
* address is always Rt and the one at addr+4 is Rt2, even if
* the CPU is big-endian. Since we're going to treat this as a
* single 64-bit BE store, we need to put the two halves in the
* opposite order for BE to LE, so that they end up in the right
- * places.
- * We don't want gen_aa32_frob64() because that does the wrong
- * thing for BE32 usermode.
+ * places. We don't want gen_aa32_st_i64, because that checks
+ * SCTLR_B as if for an architectural 64-bit access.
*/
if (s->be_data == MO_BE) {
tcg_gen_concat_i32_i64(n64, t2, t1);
--
2.20.1
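The "frobbing" this patch inlines is nothing more than a rotate by 32 bits,
which swaps the two 32-bit halves of a 64-bit value; a short C demonstration
of why that is the right BE32 fixup:

#include <stdint.h>
#include <stdio.h>

/* Rotating a 64-bit value by 32 swaps its halves, which is exactly what
 * tcg_gen_rotri_i64(val, val, 32) emits for the SCTLR_B case. */
static uint64_t rotr32(uint64_t v)
{
    return (v >> 32) | (v << 32);
}

int main(void)
{
    printf("%016llx\n", (unsigned long long)rotr32(0x1122334455667788ull));
    /* prints 5566778811223344 */
    return 0;
}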
From: Richard Henderson <richard.henderson@linaro.org>

Just because we are operating on a TCGv_i64 temporary does not
mean that we're performing a 64-bit operation. Restrict
the frobbing to actual 64-bit operations.

This bug is not currently visible because all current
users of these two functions always pass MO_64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
tcg_gen_qemu_ld_i64(val, addr, index, opc);

/* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b) {
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
tcg_gen_rotri_i64(val, val, 32);
}

@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
TCGv addr = gen_aa32_addr(s, a32, opc);

/* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b) {
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
TCGv_i64 tmp = tcg_temp_new_i64();
tcg_gen_rotri_i64(tmp, val, 32);
tcg_gen_qemu_st_i64(tmp, addr, index, opc);
--
2.20.1
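In the same illustrative style as above, the fix is a size-field test: the
half-swap must depend on the architectural access size carried in the MemOp,
not on the width of the temporary holding the value:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative encoding: the low two bits carry log2(size), as TCG's
 * MO_SIZE field does. */
#define MO_SIZE 0x3
#define MO_64   3

/* Only an architecturally 64-bit access needs the BE32 half-swap, even
 * though the value always travels in a TCGv_i64. */
static bool needs_be32_swap(bool sctlr_b, unsigned opc)
{
    return sctlr_b && (opc & MO_SIZE) == MO_64;
}

int main(void)
{
    printf("%d %d\n", needs_be32_swap(true, MO_64), needs_be32_swap(true, 2));
    /* 1 0: a 32-bit op in an i64 temporary is no longer rotated */
    return 0;
}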
The constant-expander functions like negate, plus_2, etc, are
generally useful; move them up in translate.c so we can use them in
the VFP/Neon decoders as well as in the A32/T32/T16 decoders.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-9-peter.maydell@linaro.org
---
target/arm/translate.c | 46 +++++++++++++++++++++++-------------------
1 file changed, 25 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_gen_condlabel(DisasContext *s)
}
}

+/*
+ * Constant expanders for the decoders.
+ */
+
+static int negate(DisasContext *s, int x)
+{
+ return -x;
+}
+
+static int plus_2(DisasContext *s, int x)
+{
+ return x + 2;
+}
+
+static int times_2(DisasContext *s, int x)
+{
+ return x * 2;
+}
+
+static int times_4(DisasContext *s, int x)
+{
+ return x * 4;
+}
+
/* Flags for the disas_set_da_iss info argument:
* lower bits hold the Rt register number, higher bits are flags.
*/
@@ -XXX,XX +XXX,XX @@ static void arm_skip_unless(DisasContext *s, uint32_t cond)


/*
- * Constant expanders for the decoders.
+ * Constant expanders used by T16/T32 decode
*/

-static int negate(DisasContext *s, int x)
-{
- return -x;
-}
-
-static int plus_2(DisasContext *s, int x)
-{
- return x + 2;
-}
-
-static int times_2(DisasContext *s, int x)
-{
- return x * 2;
-}
-
-static int times_4(DisasContext *s, int x)
-{
- return x * 4;
-}
-
/* Return only the rotation part of T32ExpandImm. */
static int t32_expandimm_rot(DisasContext *s, int x)
{
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Adjust the interface to match what has been done to the
TCGv_i32 load/store functions.

This is less obvious, because at present the only user of
these functions, trans_VLDST_multiple, also wants to manipulate
the endianness to speed up loading multiple bytes. Thus we
retain an "internal" interface which is identical to the
current gen_aa32_{ld,st}_i64 interface.

The "new" interface will gain users as we remove the legacy
interfaces, gen_aa32_ld64 and gen_aa32_st64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 78 +++++++++++++++++++--------------
target/arm/translate-neon.c.inc | 6 ++-
2 files changed, 49 insertions(+), 35 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
tcg_temp_free(addr);
}

+static void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ tcg_gen_qemu_ld_i64(val, addr, index, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
+ tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+ tcg_temp_free_i64(tmp);
+ } else {
+ tcg_gen_qemu_st_i64(val, addr, index, opc);
+ }
+ tcg_temp_free(addr);
+}
+
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, MemOp opc)
{
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
}

+static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
+static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
TCGv_i32 a32, int index) \
@@ -XXX,XX +XXX,XX @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
gen_aa32_st_i32(s, val, a32, index, OPC); \
}

-static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
- int index, MemOp opc)
-{
- TCGv addr = gen_aa32_addr(s, a32, opc);
- tcg_gen_qemu_ld_i64(val, addr, index, opc);
-
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
- tcg_gen_rotri_i64(val, val, 32);
- }
-
- tcg_temp_free(addr);
-}
-
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
TCGv_i32 a32, int index)
{
- gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
-}
-
-static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
- int index, MemOp opc)
-{
- TCGv addr = gen_aa32_addr(s, a32, opc);
-
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_rotri_i64(tmp, val, 32);
- tcg_gen_qemu_st_i64(tmp, addr, index, opc);
- tcg_temp_free_i64(tmp);
- } else {
- tcg_gen_qemu_st_i64(val, addr, index, opc);
- }
- tcg_temp_free(addr);
+ gen_aa32_ld_i64(s, val, a32, index, MO_Q);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
TCGv_i32 a32, int index)
{
- gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
+ gen_aa32_st_i64(s, val, a32, index, MO_Q);
}

DO_GEN_LD(8u, MO_UB)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
int tt = a->vd + reg + spacing * xs;

if (a->l) {
- gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
+ gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
+ endian | size);
neon_store_element64(tt, n, size, tmp64);
} else {
neon_load_element64(tmp64, tt, n, size);
- gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
+ gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
+ endian | size);
}
tcg_gen_add_i32(addr, addr, tmp);
}
--
2.20.1
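On the constant expanders moved by the first patch of this pair: decodetree
applies a !function expander to the raw bitfield before the trans_*
callback sees it, which is how the earlier vfp.decode change turns a 7-bit
word offset into a byte offset with "%imm7_0x4 0:7 !function=times_4".
A standalone C model (the generated decoder is more elaborate):

#include <stdio.h>

/* The expander itself: the raw field value is transformed at decode time. */
static int times_4(int x)
{
    return x * 4;
}

int main(void)
{
    unsigned insn = 0x0000007f;      /* an imm7 field in bits [6:0] */
    int imm = times_4(insn & 0x7f);  /* what %imm7_0x4 produces */
    printf("byte offset %d\n", imm); /* 508 */
    return 0;
}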
From: Richard Henderson <richard.henderson@linaro.org>

Buglink: https://bugs.launchpad.net/qemu/+bug/1905356
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
addr = op_addr_rr_pre(s, a);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
store_reg(s, a->rt, tmp);

tcg_gen_addi_i32(addr, addr, 4);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
store_reg(s, a->rt + 1, tmp);

/* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
addr = op_addr_rr_pre(s, a);

tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
tcg_temp_free_i32(tmp);

tcg_gen_addi_i32(addr, addr, 4);

tmp = load_reg(s, a->rt + 1);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
tcg_temp_free_i32(tmp);

op_addr_rr_post(s, a, addr, -4);
@@ -XXX,XX +XXX,XX @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
addr = op_addr_ri_pre(s, a);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
store_reg(s, a->rt, tmp);

tcg_gen_addi_i32(addr, addr, 4);

tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
store_reg(s, rt2, tmp);

/* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -XXX,XX +XXX,XX @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
addr = op_addr_ri_pre(s, a);

tmp = load_reg(s, a->rt);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
tcg_temp_free_i32(tmp);

tcg_gen_addi_i32(addr, addr, 4);

tmp = load_reg(s, rt2);
- gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
tcg_temp_free_i32(tmp);

op_addr_ri_post(s, a, addr, -4);
--
2.20.1
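The behavioural change is easy to model: with MO_ALIGN on each 32-bit half,
an LDRD/STRD whose address is not 4-aligned now takes an alignment fault
rather than being performed unaligned. A minimal model of the check
(illustrative only; the real check happens inside the TCG memory ops):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* MO_ALIGN on an access of 'size' bytes means: fault unless the address
 * is a multiple of the access size. LDRD/STRD are emitted as two 4-byte
 * accesses, so the required alignment is 4. */
static bool align_fault(uint32_t addr, unsigned size)
{
    return (addr & (size - 1)) != 0;
}

int main(void)
{
    printf("%d %d\n", align_fault(0x2000, 4), align_fault(0x2002, 4));
    /* 0 1: the second address now faults */
    return 0;
}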
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
addr = load_reg(s, a->rn);
tmp = load_reg(s, a->rt);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);

tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)

addr = load_reg(s, a->rn);
tmp = tcg_temp_new_i32();
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
tcg_temp_free_i32(addr);

--
2.20.1
In v8.1M a new exception return check is added which may cause a NOCP
UsageFault (see rule R_XLTP): before we clear s0..s15 and the FPSCR
we must check whether access to CP10 from the Security state of the
returning exception is disabled; if it is then we must take a fault.

(Note that for our implementation CPPWR is always RAZ/WI and so can
never cause CP10 accesses to fail.)

The other v8.1M change to this register-clearing code is that if MVE
is implemented VPR must also be cleared, so add a TODO comment to
that effect.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-20-peter.maydell@linaro.org
---
target/arm/m_helper.c | 22 +++++++++++++++++++++-
1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
v7m_exception_taken(cpu, excret, true, false);
return;
} else {
- /* Clear s0..s15 and FPSCR */
+ if (arm_feature(env, ARM_FEATURE_V8_1M)) {
+ /* v8.1M adds this NOCP check */
+ bool nsacr_pass = exc_secure ||
+ extract32(env->v7m.nsacr, 10, 1);
+ bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
+ if (!nsacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: NSACR prevents clearing FPU registers\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ } else if (!cpacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ exc_secure);
+ env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: CPACR prevents clearing FPU registers\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ }
+ }
+ /* Clear s0..s15 and FPSCR; TODO also VPR when MVE is implemented */
int i;

for (i = 0; i < 16; i += 2) {
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
} else {
tmp = load_reg(s, i);
}
- gen_aa32_st32(s, tmp, addr, mem_idx);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
tcg_temp_free_i32(tmp);

/* No need to add after the last transfer. */
@@ -XXX,XX +XXX,XX @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
}

tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(s, tmp, addr, mem_idx);
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
if (user) {
tmp2 = tcg_const_i32(i);
gen_helper_set_user_reg(cpu_env, tmp2, tmp);
--
2.20.1
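The NOCP gate added by the first patch of this pair is two plain predicate
checks over NSACR.CP10 and CPACR; a self-contained sketch of just that
decision logic (names borrowed from the patch, everything else simplified):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

/* Simplified model of the v8.1M R_XLTP check on exception return: FP
 * register clearing is only permitted if NSACR.CP10 allows Non-secure
 * CP10 access (or the return is to Secure state), and then only if
 * CPACR allows it too. */
static const char *fp_clear_outcome(bool exc_secure, uint32_t nsacr,
                                    bool cpacr_pass)
{
    bool nsacr_pass = exc_secure || extract32(nsacr, 10, 1);
    if (!nsacr_pass) {
        return "UsageFault (Secure): NSACR blocks CP10";
    } else if (!cpacr_pass) {
        return "UsageFault: CPACR blocks CP10";
    }
    return "clear s0..s15 and FPSCR";
}

int main(void)
{
    printf("%s\n", fp_clear_outcome(false, 0, true));
    return 0;
}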
In v8.1M a REVIDR register is defined, which is at address 0xe00ecfc
and is a read-only IMPDEF register providing implementation specific
minor revision information, like the v8A REVIDR_EL1. Implement this.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-19-peter.maydell@linaro.org
---
hw/intc/armv7m_nvic.c | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
}
return val;
}
+ case 0xcfc:
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) {
+ goto bad_offset;
+ }
+ return cpu->revidr;
case 0xd00: /* CPUID Base. */
return cpu->midr;
case 0xd04: /* Interrupt Control State (ICSR) */
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_RFE(DisasContext *s, arg_RFE *a)

/* Load PC into tmp and CPSR into tmp2. */
t1 = tcg_temp_new_i32();
- gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
+ gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
tcg_gen_addi_i32(addr, addr, 4);
t2 = tcg_temp_new_i32();
- gen_aa32_ld32u(s, t2, addr, get_mem_index(s));
+ gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);

if (a->w) {
/* Base writeback. */
--
2.20.1
1
From: Alex Chen <alex.chen@huawei.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
We should use printf format specifier "%u" instead of "%d" for
4
argument of type "unsigned int".
5
6
Reported-by: Euler Robot <euler.robot@huawei.com>
7
Signed-off-by: Alex Chen <alex.chen@huawei.com>
8
Message-id: 20201126111109.112238-5-alex.chen@huawei.com
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-20-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
7
---
12
hw/misc/imx6ul_ccm.c | 4 ++--
8
target/arm/translate.c | 4 ++--
13
1 file changed, 2 insertions(+), 2 deletions(-)
9
1 file changed, 2 insertions(+), 2 deletions(-)
14
10
15
diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/misc/imx6ul_ccm.c
13
--- a/target/arm/translate.c
18
+++ b/hw/misc/imx6ul_ccm.c
14
+++ b/target/arm/translate.c
19
@@ -XXX,XX +XXX,XX @@ static const char *imx6ul_ccm_reg_name(uint32_t reg)
15
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
20
case CCM_CMEOR:
21
return "CMEOR";
22
default:
23
- sprintf(unknown, "%d ?", reg);
24
+ sprintf(unknown, "%u ?", reg);
25
return unknown;
26
}
16
}
27
}
17
tcg_gen_addi_i32(addr, addr, offset);
28
@@ -XXX,XX +XXX,XX @@ static const char *imx6ul_analog_reg_name(uint32_t reg)
18
tmp = load_reg(s, 14);
29
case USB_ANALOG_DIGPROG:
19
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
30
return "USB_ANALOG_DIGPROG";
20
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
31
default:
21
tcg_temp_free_i32(tmp);
32
- sprintf(unknown, "%d ?", reg);
22
tmp = load_cpu_field(spsr);
33
+ sprintf(unknown, "%u ?", reg);
23
tcg_gen_addi_i32(addr, addr, 4);
34
return unknown;
24
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
35
}
25
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
36
}
26
tcg_temp_free_i32(tmp);
27
if (writeback) {
28
switch (amode) {
37
--
29
--
38
2.20.1
30
2.20.1
39
31
40
32
1
For M-profile before v8.1M, the only valid register for VMSR/VMRS is
1
From: Richard Henderson <richard.henderson@linaro.org>
2
the FPSCR. We have a comment that states this, but the actual logic
3
to forbid accesses for any other register value is missing, so we
4
would end up with A-profile-style behaviour. Add the missing check.
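A sketch of the shape of the added check (the hunk below shows the
actual placement; this just restates the rule):

    /* Sketch: pre-v8.1M M-profile allows only FPSCR for VMRS/VMSR */
    if (a->reg != ARM_VFP_FPSCR) {
        return false;  /* UNDEF */
    }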
5
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-21-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20201119215617.29887-7-peter.maydell@linaro.org
9
---
7
---
10
target/arm/translate-vfp.c.inc | 5 ++++-
8
target/arm/translate-vfp.c.inc | 8 ++++----
11
1 file changed, 4 insertions(+), 1 deletion(-)
9
1 file changed, 4 insertions(+), 4 deletions(-)
12
10
13
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
11
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-vfp.c.inc
13
--- a/target/arm/translate-vfp.c.inc
16
+++ b/target/arm/translate-vfp.c.inc
14
+++ b/target/arm/translate-vfp.c.inc
17
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
18
* Accesses to R15 are UNPREDICTABLE; we choose to undef.
16
for (i = 0; i < n; i++) {
19
* (FPSCR -> r15 is a special case which writes to the PSR flags.)
17
if (a->l) {
20
*/
18
/* load */
21
- if (a->rt == 15 && (!a->l || a->reg != ARM_VFP_FPSCR)) {
19
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
22
+ if (a->reg != ARM_VFP_FPSCR) {
20
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
23
+ return false;
21
vfp_store_reg32(tmp, a->vd + i);
24
+ }
22
} else {
25
+ if (a->rt == 15 && !a->l) {
23
/* store */
26
return false;
24
vfp_load_reg32(tmp, a->vd + i);
25
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
26
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
27
}
27
}
28
tcg_gen_addi_i32(addr, addr, offset);
29
}
30
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
31
for (i = 0; i < n; i++) {
32
if (a->l) {
33
/* load */
34
- gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
35
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
36
vfp_store_reg64(tmp, a->vd + i);
37
} else {
38
/* store */
39
vfp_load_reg64(tmp, a->vd + i);
40
- gen_aa32_st64(s, tmp, addr, get_mem_index(s));
41
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
42
}
43
tcg_gen_addi_i32(addr, addr, offset);
28
}
44
}
29
--
45
--
30
2.20.1
46
2.20.1
31
47
32
48
1
We defined a constant name for the mask of NZCV bits in the FPCR/FPSCR
1
From: Richard Henderson <richard.henderson@linaro.org>
2
in the previous commit; use it in a couple of places in existing code,
3
where we're masking out everything except NZCV for the "load to Rt=15
4
sets CPSR.NZCV" special case.
5
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-22-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20201119215617.29887-12-peter.maydell@linaro.org
9
---
7
---
10
target/arm/translate-vfp.c.inc | 4 ++--
8
target/arm/translate-vfp.c.inc | 12 ++++++------
11
1 file changed, 2 insertions(+), 2 deletions(-)
9
1 file changed, 6 insertions(+), 6 deletions(-)
12
10
13
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
11
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-vfp.c.inc
13
--- a/target/arm/translate-vfp.c.inc
16
+++ b/target/arm/translate-vfp.c.inc
14
+++ b/target/arm/translate-vfp.c.inc
17
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
18
* helper call for the "VMRS to CPSR.NZCV" insn.
16
addr = add_reg_for_lit(s, a->rn, offset);
19
*/
17
tmp = tcg_temp_new_i32();
20
tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
18
if (a->l) {
21
- tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
19
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
22
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
20
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
23
storefn(s, opaque, tmp);
21
vfp_store_reg32(tmp, a->vd);
24
break;
22
} else {
25
default:
23
vfp_load_reg32(tmp, a->vd);
26
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
24
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
27
case ARM_VFP_FPSCR:
25
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
28
if (a->rt == 15) {
26
}
29
tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
27
tcg_temp_free_i32(tmp);
30
- tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
28
tcg_temp_free_i32(addr);
31
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
29
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
32
} else {
30
addr = add_reg_for_lit(s, a->rn, offset);
33
tmp = tcg_temp_new_i32();
31
tmp = tcg_temp_new_i32();
34
gen_helper_vfp_get_fpscr(tmp, cpu_env);
32
if (a->l) {
33
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
34
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
35
vfp_store_reg32(tmp, a->vd);
36
} else {
37
vfp_load_reg32(tmp, a->vd);
38
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
39
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
40
}
41
tcg_temp_free_i32(tmp);
42
tcg_temp_free_i32(addr);
43
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
44
addr = add_reg_for_lit(s, a->rn, offset);
45
tmp = tcg_temp_new_i64();
46
if (a->l) {
47
- gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
48
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
49
vfp_store_reg64(tmp, a->vd);
50
} else {
51
vfp_load_reg64(tmp, a->vd);
52
- gen_aa32_st64(s, tmp, addr, get_mem_index(s));
53
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
54
}
55
tcg_temp_free_i64(tmp);
56
tcg_temp_free_i32(addr);
35
--
57
--
36
2.20.1
58
2.20.1
37
59
38
60
1
From: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Trusted Firmware now supports A72 on sbsa-ref by default [1], so enable
4
it for QEMU as well. A53 was already enabled there.
5
6
1. https://review.trustedfirmware.org/c/TF-A/trusted-firmware-a/+/7117
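With this in place the machine should start with any of the three
cores, along the lines of:

    qemu-system-aarch64 -M sbsa-ref -cpu cortex-a72 ...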
7
8
Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20201120141705.246690-1-marcin.juszkiewicz@linaro.org
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-23-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
7
---
14
hw/arm/sbsa-ref.c | 23 ++++++++++++++++++++---
8
target/arm/translate.h | 1 +
15
1 file changed, 20 insertions(+), 3 deletions(-)
9
target/arm/translate.c | 15 +++++++++++++
10
target/arm/translate-neon.c.inc | 37 +++++++++++++++++++++++++--------
11
3 files changed, 44 insertions(+), 9 deletions(-)
16
12
17
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
13
diff --git a/target/arm/translate.h b/target/arm/translate.h
18
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/arm/sbsa-ref.c
15
--- a/target/arm/translate.h
20
+++ b/hw/arm/sbsa-ref.c
16
+++ b/target/arm/translate.h
21
@@ -XXX,XX +XXX,XX @@ static const int sbsa_ref_irqmap[] = {
17
@@ -XXX,XX +XXX,XX @@ void arm_test_cc(DisasCompare *cmp, int cc);
22
[SBSA_GWDT] = 16,
18
void arm_free_cc(DisasCompare *cmp);
23
};
19
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
24
20
void arm_gen_test_cc(int cc, TCGLabel *label);
25
+static const char * const valid_cpus[] = {
21
+MemOp pow2_align(unsigned i);
26
+ ARM_CPU_TYPE_NAME("cortex-a53"),
22
27
+ ARM_CPU_TYPE_NAME("cortex-a57"),
23
/* Return state of Alternate Half-precision flag, caller frees result */
28
+ ARM_CPU_TYPE_NAME("cortex-a72"),
24
static inline TCGv_i32 get_ahp_flag(void)
29
+};
25
diff --git a/target/arm/translate.c b/target/arm/translate.c
30
+
26
index XXXXXXX..XXXXXXX 100644
31
+static bool cpu_type_valid(const char *cpu)
27
--- a/target/arm/translate.c
28
+++ b/target/arm/translate.c
29
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
30
#define IS_USER_ONLY 0
31
#endif
32
33
+MemOp pow2_align(unsigned i)
32
+{
34
+{
33
+ int i;
35
+ static const MemOp mop_align[] = {
34
+
36
+ 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16,
35
+ for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) {
37
+ /*
36
+ if (strcmp(cpu, valid_cpus[i]) == 0) {
38
+ * FIXME: TARGET_PAGE_BITS_MIN affects TLB_FLAGS_MASK such
37
+ return true;
39
+ * that 256-bit alignment (MO_ALIGN_32) cannot be supported:
38
+ }
40
+ * see get_alignment_bits(). Enforce only 128-bit alignment for now.
39
+ }
41
+ */
40
+ return false;
42
+ MO_ALIGN_16
43
+ };
44
+ g_assert(i < ARRAY_SIZE(mop_align));
45
+ return mop_align[i];
41
+}
46
+}
42
+
47
+
43
static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
48
/*
44
{
49
* Abstractions of "generate code to do a guest load/store for
45
uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
50
* AArch32", where a vaddr is always 32 bits (and is zero
46
@@ -XXX,XX +XXX,XX @@ static void sbsa_ref_init(MachineState *machine)
51
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
47
const CPUArchIdList *possible_cpus;
52
index XXXXXXX..XXXXXXX 100644
48
int n, sbsa_max_cpus;
53
--- a/target/arm/translate-neon.c.inc
49
54
+++ b/target/arm/translate-neon.c.inc
50
- if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a57"))) {
55
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
51
- error_report("sbsa-ref: CPU type other than the built-in "
56
int size = a->size;
52
- "cortex-a57 not supported");
57
int nregs = a->n + 1;
53
+ if (!cpu_type_valid(machine->cpu_type)) {
58
TCGv_i32 addr, tmp;
54
+ error_report("mach-virt: CPU type %s not supported", machine->cpu_type);
59
+ MemOp mop, align;
55
exit(1);
60
61
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
62
return false;
63
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
64
return false;
56
}
65
}
57
66
67
+ align = 0;
68
if (size == 3) {
69
if (nregs != 4 || a->a == 0) {
70
return false;
71
}
72
/* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
73
- size = 2;
74
- }
75
- if (nregs == 1 && a->a == 1 && size == 0) {
76
- return false;
77
- }
78
- if (nregs == 3 && a->a == 1) {
79
- return false;
80
+ size = MO_32;
81
+ align = MO_ALIGN_16;
82
+ } else if (a->a) {
83
+ switch (nregs) {
84
+ case 1:
85
+ if (size == 0) {
86
+ return false;
87
+ }
88
+ align = MO_ALIGN;
89
+ break;
90
+ case 2:
91
+ align = pow2_align(size + 1);
92
+ break;
93
+ case 3:
94
+ return false;
95
+ case 4:
96
+ align = pow2_align(size + 2);
97
+ break;
98
+ default:
99
+ g_assert_not_reached();
100
+ }
101
}
102
103
if (!vfp_access_check(s)) {
104
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
105
*/
106
stride = a->t ? 2 : 1;
107
vec_size = nregs == 1 ? stride * 8 : 8;
108
-
109
+ mop = size | align;
110
tmp = tcg_temp_new_i32();
111
addr = tcg_temp_new_i32();
112
load_reg_var(s, addr, a->rn);
113
for (reg = 0; reg < nregs; reg++) {
114
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
115
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
116
if ((vd & 1) && vec_size == 16) {
117
/*
118
* We cannot write 16 bytes at once because the
119
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
120
}
121
tcg_gen_addi_i32(addr, addr, 1 << size);
122
vd += stride;
123
+
124
+ /* Subsequent memory operations inherit alignment */
125
+ mop &= ~MO_AMASK;
126
}
127
tcg_temp_free_i32(tmp);
128
tcg_temp_free_i32(addr);
58
--
129
--
59
2.20.1
130
2.20.1
60
131
61
132
1
v8.1M adds new encodings of VLLDM and VLSTM (where bit 7 is set).
1
From: Richard Henderson <richard.henderson@linaro.org>
2
The only difference is that:
3
* the old T1 encodings UNDEF if the implementation implements 32
4
Dregs (this is currently architecturally impossible for M-profile)
5
* the new T2 encodings have the implementation-defined option to
6
read from memory (discarding the data) or write UNKNOWN values to
7
memory for the stack slots that would be D16-D31
8
2
9
We choose not to make those accesses, so for us the two
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
instructions behave identically assuming they don't UNDEF.
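A sketch of how the distinction falls out of the encoding (the 'op'
field name matches the decode change below):

    /* Sketch: bit 7 ('op') selects the encoding:
     *   op == 0: T1 (original); UNDEFs if 32 Dregs are implemented
     *   op == 1: T2 (new in v8.1M); requires ARM_FEATURE_V8_1M
     */
    bool is_t2 = extract32(insn, 7, 1);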
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-24-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/translate-neon.c.inc | 27 ++++++++++++++++++++++-----
9
1 file changed, 22 insertions(+), 5 deletions(-)
11
10
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20201119215617.29887-21-peter.maydell@linaro.org
15
---
16
target/arm/m-nocp.decode | 2 +-
17
target/arm/translate-vfp.c.inc | 25 +++++++++++++++++++++++++
18
2 files changed, 26 insertions(+), 1 deletion(-)
19
20
diff --git a/target/arm/m-nocp.decode b/target/arm/m-nocp.decode
21
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/m-nocp.decode
13
--- a/target/arm/translate-neon.c.inc
23
+++ b/target/arm/m-nocp.decode
14
+++ b/target/arm/translate-neon.c.inc
24
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
25
26
{
16
{
27
# Special cases which do not take an early NOCP: VLLDM and VLSTM
17
/* Neon load/store multiple structures */
28
- VLLDM_VLSTM 1110 1100 001 l:1 rn:4 0000 1010 0000 0000
18
int nregs, interleave, spacing, reg, n;
29
+ VLLDM_VLSTM 1110 1100 001 l:1 rn:4 0000 1010 op:1 000 0000
19
- MemOp endian = s->be_data;
30
# VSCCLRM (new in v8.1M) is similar:
20
+ MemOp mop, align, endian;
31
VSCCLRM 1110 1100 1.01 1111 .... 1011 imm:7 0 vd=%vd_dp size=3
21
int mmu_idx = get_mem_index(s);
32
VSCCLRM 1110 1100 1.01 1111 .... 1010 imm:8 vd=%vd_sp size=2
22
int size = a->size;
33
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
23
TCGv_i64 tmp64;
34
index XXXXXXX..XXXXXXX 100644
24
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
35
--- a/target/arm/translate-vfp.c.inc
25
}
36
+++ b/target/arm/translate-vfp.c.inc
26
37
@@ -XXX,XX +XXX,XX @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
27
/* For our purposes, bytes are always little-endian. */
38
!arm_dc_feature(s, ARM_FEATURE_V8)) {
28
+ endian = s->be_data;
39
return false;
29
if (size == 0) {
30
endian = MO_LE;
40
}
31
}
41
+
32
+
42
+ if (a->op) {
33
+ /* Enforce alignment requested by the instruction */
43
+ /*
34
+ if (a->align) {
44
+ * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
35
+ align = pow2_align(a->align + 2); /* 4 ** a->align */
45
+ * to take the IMPDEF option to make memory accesses to the stack
46
+ * slots that correspond to the D16-D31 registers (discarding
47
+ * read data and writing UNKNOWN values), so for us the T2
48
+ * encoding behaves identically to the T1 encoding.
49
+ */
50
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
51
+ return false;
52
+ }
53
+ } else {
36
+ } else {
54
+ /*
37
+ align = s->align_mem ? MO_ALIGN : 0;
55
+ * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
56
+ * This is currently architecturally impossible, but we add the
57
+ * check to stay in line with the pseudocode. Note that we must
58
+ * emit code for the UNDEF so it takes precedence over the NOCP.
59
+ */
60
+ if (dc_isar_feature(aa32_simd_r32, s)) {
61
+ unallocated_encoding(s);
62
+ return true;
63
+ }
64
+ }
38
+ }
65
+
39
+
66
/*
40
/*
67
* If not secure, UNDEF. We must emit code for this
41
* Consecutive little-endian elements from a single register
68
* rather than returning false so that this takes
42
* can be promoted to a larger little-endian operation.
43
*/
44
if (interleave == 1 && endian == MO_LE) {
45
+ /* Retain any natural alignment. */
46
+ if (align == MO_ALIGN) {
47
+ align = pow2_align(size);
48
+ }
49
size = 3;
50
}
51
+
52
tmp64 = tcg_temp_new_i64();
53
addr = tcg_temp_new_i32();
54
tmp = tcg_const_i32(1 << size);
55
load_reg_var(s, addr, a->rn);
56
+
57
+ mop = endian | size | align;
58
for (reg = 0; reg < nregs; reg++) {
59
for (n = 0; n < 8 >> size; n++) {
60
int xs;
61
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
62
int tt = a->vd + reg + spacing * xs;
63
64
if (a->l) {
65
- gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
66
- endian | size);
67
+ gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, mop);
68
neon_store_element64(tt, n, size, tmp64);
69
} else {
70
neon_load_element64(tmp64, tt, n, size);
71
- gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
72
- endian | size);
73
+ gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
74
}
75
tcg_gen_add_i32(addr, addr, tmp);
76
+
77
+ /* Subsequent memory operations inherit alignment */
78
+ mop &= ~MO_AMASK;
79
}
80
}
81
}
69
--
82
--
70
2.20.1
83
2.20.1
71
84
72
85
1
In v8.1M, vector table fetch failures don't set HFSR.FORCED (see rule
1
From: Richard Henderson <richard.henderson@linaro.org>
2
R_LLRP). (In previous versions of the architecture this was either
3
required or IMPDEF.)
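The fix reduces to conditionalising one bit; a sketch mirroring the
m_helper.c hunk below:

    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        /* pre-v8.1M: vector table fetch HardFaults also set FORCED */
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }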
4
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-25-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20201119215617.29887-18-peter.maydell@linaro.org
8
---
7
---
9
target/arm/m_helper.c | 6 +++++-
8
target/arm/translate-neon.c.inc | 48 ++++++++++++++++++++++++++++-----
10
1 file changed, 5 insertions(+), 1 deletion(-)
9
1 file changed, 42 insertions(+), 6 deletions(-)
11
10
12
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
11
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/m_helper.c
13
--- a/target/arm/translate-neon.c.inc
15
+++ b/target/arm/m_helper.c
14
+++ b/target/arm/translate-neon.c.inc
16
@@ -XXX,XX +XXX,XX @@ load_fail:
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
17
* The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
16
int nregs = a->n + 1;
18
* secure); otherwise it targets the same security state as the
17
int vd = a->vd;
19
* underlying exception.
18
TCGv_i32 addr, tmp;
20
+ * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
19
+ MemOp mop;
21
*/
20
22
if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
21
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
23
exc_secure = true;
22
return false;
23
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
24
return true;
24
}
25
}
25
- env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
26
26
+ env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
27
+ /* Pick up SCTLR settings */
27
+ if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
28
+ mop = finalize_memop(s, a->size);
28
+ env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
29
+
30
+ if (a->align) {
31
+ MemOp align_op;
32
+
33
+ switch (nregs) {
34
+ case 1:
35
+ /* For VLD1, use natural alignment. */
36
+ align_op = MO_ALIGN;
37
+ break;
38
+ case 2:
39
+ /* For VLD2, use double alignment. */
40
+ align_op = pow2_align(a->size + 1);
41
+ break;
42
+ case 4:
43
+ if (a->size == MO_32) {
44
+ /*
45
+ * For VLD4.32, align = 1 is double alignment, align = 2 is
46
+ * quad alignment; align = 3 is rejected above.
47
+ */
48
+ align_op = pow2_align(a->size + a->align);
49
+ } else {
50
+ /* For VLD4.8 and VLD.16, we want quad alignment. */
51
+ align_op = pow2_align(a->size + 2);
52
+ }
53
+ break;
54
+ default:
55
+ /* For VLD3, the alignment field is zero and rejected above. */
56
+ g_assert_not_reached();
57
+ }
58
+
59
+ mop = (mop & ~MO_AMASK) | align_op;
29
+ }
60
+ }
30
armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
61
+
31
return false;
62
tmp = tcg_temp_new_i32();
32
}
63
addr = tcg_temp_new_i32();
64
load_reg_var(s, addr, a->rn);
65
- /*
66
- * TODO: if we implemented alignment exceptions, we should check
67
- * addr against the alignment encoded in a->align here.
68
- */
69
+
70
for (reg = 0; reg < nregs; reg++) {
71
if (a->l) {
72
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
73
+ gen_aa32_ld_internal_i32(s, tmp, addr, get_mem_index(s), mop);
74
neon_store_element(vd, a->reg_idx, a->size, tmp);
75
} else { /* Store */
76
neon_load_element(tmp, vd, a->reg_idx, a->size);
77
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
78
+ gen_aa32_st_internal_i32(s, tmp, addr, get_mem_index(s), mop);
79
}
80
vd += a->stride;
81
tcg_gen_addi_i32(addr, addr, 1 << a->size);
82
+
83
+ /* Subsequent memory operations inherit alignment */
84
+ mop &= ~MO_AMASK;
85
}
86
tcg_temp_free_i32(addr);
87
tcg_temp_free_i32(tmp);
33
--
88
--
34
2.20.1
89
2.20.1
35
90
36
91
1
From: Alex Chen <alex.chen@huawei.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
We should use the printf format specifier "%u" instead of "%d" for
3
In the case of a GPR load, merge the size and is_signed arguments;
4
arguments of type "unsigned int".
4
otherwise, simply convert size to memop.
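The folding is mechanical; a sketch of the merged calling convention
(grounded in the hunks below):

    /* Sketch: callers fold signedness into the MemOp they pass down */
    do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
              is_extended, true, rt, iss_sf, false);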
5
5
6
Reported-by: Euler Robot <euler.robot@huawei.com>
7
Signed-off-by: Alex Chen <alex.chen@huawei.com>
8
Message-id: 20201126111109.112238-4-alex.chen@huawei.com
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210419202257.161730-26-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
hw/misc/imx6_ccm.c | 20 ++++++++++----------
11
target/arm/translate-a64.c | 78 ++++++++++++++++----------------------
13
hw/misc/imx6_src.c | 2 +-
12
1 file changed, 33 insertions(+), 45 deletions(-)
14
2 files changed, 11 insertions(+), 11 deletions(-)
13
15
14
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
16
diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c
17
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/misc/imx6_ccm.c
16
--- a/target/arm/translate-a64.c
19
+++ b/hw/misc/imx6_ccm.c
17
+++ b/target/arm/translate-a64.c
20
@@ -XXX,XX +XXX,XX @@ static const char *imx6_ccm_reg_name(uint32_t reg)
18
@@ -XXX,XX +XXX,XX @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
21
case CCM_CMEOR:
19
* Store from GPR register to memory.
22
return "CMEOR";
20
*/
23
default:
21
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
24
- sprintf(unknown, "%d ?", reg);
22
- TCGv_i64 tcg_addr, int size, int memidx,
25
+ sprintf(unknown, "%u ?", reg);
23
+ TCGv_i64 tcg_addr, MemOp memop, int memidx,
26
return unknown;
24
bool iss_valid,
27
}
25
unsigned int iss_srt,
28
}
26
bool iss_sf, bool iss_ar)
29
@@ -XXX,XX +XXX,XX @@ static const char *imx6_analog_reg_name(uint32_t reg)
27
{
30
case USB_ANALOG_DIGPROG:
28
- g_assert(size <= 3);
31
return "USB_ANALOG_DIGPROG";
29
- tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
32
default:
30
+ memop = finalize_memop(s, memop);
33
- sprintf(unknown, "%d ?", reg);
31
+ tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
34
+ sprintf(unknown, "%u ?", reg);
32
35
return unknown;
33
if (iss_valid) {
36
}
34
uint32_t syn;
37
}
35
38
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_analog_get_pll2_clk(IMX6CCMState *dev)
36
syn = syn_data_abort_with_iss(0,
39
freq *= 20;
37
- size,
40
}
38
+ (memop & MO_SIZE),
41
39
false,
42
- DPRINTF("freq = %d\n", (uint32_t)freq);
40
iss_srt,
43
+ DPRINTF("freq = %u\n", (uint32_t)freq);
41
iss_sf,
44
42
@@ -XXX,XX +XXX,XX @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
45
return freq;
43
}
46
}
44
47
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_analog_get_pll2_pfd0_clk(IMX6CCMState *dev)
45
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
48
freq = imx6_analog_get_pll2_clk(dev) * 18
46
- TCGv_i64 tcg_addr, int size,
49
/ EXTRACT(dev->analog[CCM_ANALOG_PFD_528], PFD0_FRAC);
47
+ TCGv_i64 tcg_addr, MemOp memop,
50
48
bool iss_valid,
51
- DPRINTF("freq = %d\n", (uint32_t)freq);
49
unsigned int iss_srt,
52
+ DPRINTF("freq = %u\n", (uint32_t)freq);
50
bool iss_sf, bool iss_ar)
53
51
{
54
return freq;
52
- do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
55
}
53
+ do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
56
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_analog_get_pll2_pfd2_clk(IMX6CCMState *dev)
54
iss_valid, iss_srt, iss_sf, iss_ar);
57
freq = imx6_analog_get_pll2_clk(dev) * 18
55
}
58
/ EXTRACT(dev->analog[CCM_ANALOG_PFD_528], PFD2_FRAC);
56
59
57
/*
60
- DPRINTF("freq = %d\n", (uint32_t)freq);
58
* Load from memory to GPR register
61
+ DPRINTF("freq = %u\n", (uint32_t)freq);
59
*/
62
60
-static void do_gpr_ld_memidx(DisasContext *s,
63
return freq;
61
- TCGv_i64 dest, TCGv_i64 tcg_addr,
64
}
62
- int size, bool is_signed,
65
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_analog_get_periph_clk(IMX6CCMState *dev)
63
- bool extend, int memidx,
66
break;
64
+static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
67
}
65
+ MemOp memop, bool extend, int memidx,
68
66
bool iss_valid, unsigned int iss_srt,
69
- DPRINTF("freq = %d\n", (uint32_t)freq);
67
bool iss_sf, bool iss_ar)
70
+ DPRINTF("freq = %u\n", (uint32_t)freq);
68
{
71
69
- MemOp memop = s->be_data + size;
72
return freq;
70
-
73
}
71
- g_assert(size <= 3);
74
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_ccm_get_ahb_clk(IMX6CCMState *dev)
72
-
75
freq = imx6_analog_get_periph_clk(dev)
73
- if (is_signed) {
76
/ (1 + EXTRACT(dev->ccm[CCM_CBCDR], AHB_PODF));
74
- memop += MO_SIGN;
77
75
- }
78
- DPRINTF("freq = %d\n", (uint32_t)freq);
76
-
79
+ DPRINTF("freq = %u\n", (uint32_t)freq);
77
+ memop = finalize_memop(s, memop);
80
78
tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
81
return freq;
79
82
}
80
- if (extend && is_signed) {
83
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_ccm_get_ipg_clk(IMX6CCMState *dev)
81
- g_assert(size < 3);
84
freq = imx6_ccm_get_ahb_clk(dev)
82
+ if (extend && (memop & MO_SIGN)) {
85
/ (1 + EXTRACT(dev->ccm[CCM_CBCDR], IPG_PODF));
83
+ g_assert((memop & MO_SIZE) <= MO_32);
86
84
tcg_gen_ext32u_i64(dest, dest);
87
- DPRINTF("freq = %d\n", (uint32_t)freq);
85
}
88
+ DPRINTF("freq = %u\n", (uint32_t)freq);
86
89
87
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s,
90
return freq;
88
uint32_t syn;
91
}
89
92
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_ccm_get_per_clk(IMX6CCMState *dev)
90
syn = syn_data_abort_with_iss(0,
93
freq = imx6_ccm_get_ipg_clk(dev)
91
- size,
94
/ (1 + EXTRACT(dev->ccm[CCM_CSCMR1], PERCLK_PODF));
92
- is_signed,
95
93
+ (memop & MO_SIZE),
96
- DPRINTF("freq = %d\n", (uint32_t)freq);
94
+ (memop & MO_SIGN) != 0,
97
+ DPRINTF("freq = %u\n", (uint32_t)freq);
95
iss_srt,
98
96
iss_sf,
99
return freq;
97
iss_ar,
100
}
98
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s,
101
@@ -XXX,XX +XXX,XX @@ static uint32_t imx6_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
99
}
102
break;
100
}
103
}
101
104
102
-static void do_gpr_ld(DisasContext *s,
105
- DPRINTF("Clock = %d) = %d\n", clock, freq);
103
- TCGv_i64 dest, TCGv_i64 tcg_addr,
106
+ DPRINTF("Clock = %d) = %u\n", clock, freq);
104
- int size, bool is_signed, bool extend,
107
105
+static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
108
return freq;
106
+ MemOp memop, bool extend,
109
}
107
bool iss_valid, unsigned int iss_srt,
110
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
108
bool iss_sf, bool iss_ar)
111
index XXXXXXX..XXXXXXX 100644
109
{
112
--- a/hw/misc/imx6_src.c
110
- do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
113
+++ b/hw/misc/imx6_src.c
111
- get_mem_index(s),
114
@@ -XXX,XX +XXX,XX @@ static const char *imx6_src_reg_name(uint32_t reg)
112
+ do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
115
case SRC_GPR10:
113
iss_valid, iss_srt, iss_sf, iss_ar);
116
return "SRC_GPR10";
114
}
117
default:
115
118
- sprintf(unknown, "%d ?", reg);
116
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
119
+ sprintf(unknown, "%u ?", reg);
117
}
120
return unknown;
118
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
119
false, rn != 31, size);
120
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
121
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
122
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
123
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
124
return;
125
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
126
/* Only unsigned 32bit loads target 32bit registers. */
127
bool iss_sf = opc != 0;
128
129
- do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
130
- true, rt, iss_sf, false);
131
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
132
+ false, true, rt, iss_sf, false);
133
}
134
tcg_temp_free_i64(clean_addr);
135
}
136
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
137
/* Do not modify tcg_rt before recognizing any exception
138
* from the second load.
139
*/
140
- do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
141
- false, 0, false, false);
142
+ do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
143
+ false, false, 0, false, false);
144
tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
145
- do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
146
- false, 0, false, false);
147
+ do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
148
+ false, false, 0, false, false);
149
150
tcg_gen_mov_i64(tcg_rt, tmp);
151
tcg_temp_free_i64(tmp);
152
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
153
do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
154
iss_valid, rt, iss_sf, false);
155
} else {
156
- do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
157
- is_signed, is_extended, memidx,
158
+ do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
159
+ is_extended, memidx,
160
iss_valid, rt, iss_sf, false);
161
}
162
}
163
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
164
do_gpr_st(s, tcg_rt, clean_addr, size,
165
true, rt, iss_sf, false);
166
} else {
167
- do_gpr_ld(s, tcg_rt, clean_addr, size,
168
- is_signed, is_extended,
169
- true, rt, iss_sf, false);
170
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
171
+ is_extended, true, rt, iss_sf, false);
172
}
173
}
174
}
175
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
176
do_gpr_st(s, tcg_rt, clean_addr, size,
177
true, rt, iss_sf, false);
178
} else {
179
- do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
180
- true, rt, iss_sf, false);
181
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
182
+ is_extended, true, rt, iss_sf, false);
183
}
184
}
185
}
186
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
187
* full load-acquire (we only need "load-acquire processor consistent"),
188
* but we choose to implement them as full LDAQ.
189
*/
190
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
191
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
192
true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
193
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
194
return;
195
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
196
is_wback || rn != 31, size);
197
198
tcg_rt = cpu_reg(s, rt);
199
- do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
200
+ do_gpr_ld(s, tcg_rt, clean_addr, size,
201
/* extend */ false, /* iss_valid */ !is_wback,
202
/* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
203
204
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
205
* Load-AcquirePC semantics; we implement as the slightly more
206
* restrictive Load-Acquire.
207
*/
208
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend,
209
- true, rt, iss_sf, true);
210
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
211
+ extend, true, rt, iss_sf, true);
212
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
121
}
213
}
122
}
214
}
123
--
215
--
124
2.20.1
216
2.20.1
125
217
126
218
1
From: Havard Skinnemoen <hskinnemoen@google.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Dump the collected random data after a randomness test failure.
3
For 128-bit load/store, use 16-byte alignment. This
4
requires that we perform the two operations in the
5
correct order so that we generate the alignment fault
6
before modifying memory.
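Concretely, only the first 8-byte access of the pair carries the
alignment check, so any fault is raised before memory is touched; a
sketch (variable names as in the do_fp_st() hunk below):

    /* Sketch: MO_ALIGN_16 rides on the first half only */
    tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                        mop | (s->align_mem ? MO_ALIGN_16 : 0));
    tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
    tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr, get_mem_index(s), mop);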
4
7
5
Note that this relies on the test having called
6
g_test_set_nonfatal_assertions() so we don't abort immediately on the
7
assertion failure.
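A sketch of the resulting pattern (qemu_hexdump() and g_test_failed()
are the APIs used in the hunk below):

    g_test_set_nonfatal_assertions();   /* earlier, in main() */
    ...
    g_assert_cmpfloat(calc_monobit_p(buf, sizeof(buf)), >, 0.01);
    if (g_test_failed()) {
        qemu_hexdump(stderr, "", buf, sizeof(buf));
    }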
8
9
Signed-off-by: Havard Skinnemoen <hskinnemoen@google.com>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
[PMM: minor commit message tweak]
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210419202257.161730-27-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
12
---
14
tests/qtest/npcm7xx_rng-test.c | 12 ++++++++++++
13
target/arm/translate-a64.c | 42 +++++++++++++++++++++++---------------
15
1 file changed, 12 insertions(+)
14
1 file changed, 26 insertions(+), 16 deletions(-)
16
15
17
diff --git a/tests/qtest/npcm7xx_rng-test.c b/tests/qtest/npcm7xx_rng-test.c
16
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
18
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
19
--- a/tests/qtest/npcm7xx_rng-test.c
18
--- a/target/arm/translate-a64.c
20
+++ b/tests/qtest/npcm7xx_rng-test.c
19
+++ b/target/arm/translate-a64.c
21
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
22
21
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
23
#include "libqtest-single.h"
22
{
24
#include "qemu/bitops.h"
23
/* This writes the bottom N bits of a 128 bit wide vector to memory */
25
+#include "qemu-common.h"
24
- TCGv_i64 tmp = tcg_temp_new_i64();
26
25
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
27
#define RNG_BASE_ADDR 0xf000b000
26
+ TCGv_i64 tmplo = tcg_temp_new_i64();
28
27
+ MemOp mop;
29
@@ -XXX,XX +XXX,XX @@
30
/* Number of bits to collect for randomness tests. */
31
#define TEST_INPUT_BITS (128)
32
33
+static void dump_buf_if_failed(const uint8_t *buf, size_t size)
34
+{
35
+ if (g_test_failed()) {
36
+ qemu_hexdump(stderr, "", buf, size);
37
+ }
38
+}
39
+
28
+
40
static void rng_writeb(unsigned int offset, uint8_t value)
29
+ tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
41
{
30
+
42
writeb(RNG_BASE_ADDR + offset, value);
31
if (size < 4) {
43
@@ -XXX,XX +XXX,XX @@ static void test_continuous_monobit(void)
32
- tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
33
- s->be_data + size);
34
+ mop = finalize_memop(s, size);
35
+ tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
36
} else {
37
bool be = s->be_data == MO_BE;
38
TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
39
+ TCGv_i64 tmphi = tcg_temp_new_i64();
40
41
+ tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
42
+
43
+ mop = s->be_data | MO_Q;
44
+ tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
45
+ mop | (s->align_mem ? MO_ALIGN_16 : 0));
46
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
47
- tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
48
- s->be_data | MO_Q);
49
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
50
- tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
51
- s->be_data | MO_Q);
52
+ tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
53
+ get_mem_index(s), mop);
54
+
55
tcg_temp_free_i64(tcg_hiaddr);
56
+ tcg_temp_free_i64(tmphi);
44
}
57
}
45
58
46
g_assert_cmpfloat(calc_monobit_p(buf, sizeof(buf)), >, 0.01);
59
- tcg_temp_free_i64(tmp);
47
+ dump_buf_if_failed(buf, sizeof(buf));
60
+ tcg_temp_free_i64(tmplo);
48
}
61
}
49
62
50
/*
63
/*
51
@@ -XXX,XX +XXX,XX @@ static void test_continuous_runs(void)
64
@@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
65
/* This always zero-extends and writes to a full 128 bit wide vector */
66
TCGv_i64 tmplo = tcg_temp_new_i64();
67
TCGv_i64 tmphi = NULL;
68
+ MemOp mop;
69
70
if (size < 4) {
71
- MemOp memop = s->be_data + size;
72
- tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
73
+ mop = finalize_memop(s, size);
74
+ tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
75
} else {
76
bool be = s->be_data == MO_BE;
77
TCGv_i64 tcg_hiaddr;
78
@@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
79
tmphi = tcg_temp_new_i64();
80
tcg_hiaddr = tcg_temp_new_i64();
81
82
+ mop = s->be_data | MO_Q;
83
+ tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
84
+ mop | (s->align_mem ? MO_ALIGN_16 : 0));
85
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
86
- tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
87
- s->be_data | MO_Q);
88
- tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
89
- s->be_data | MO_Q);
90
+ tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
91
+ get_mem_index(s), mop);
92
tcg_temp_free_i64(tcg_hiaddr);
52
}
93
}
53
94
54
g_assert_cmpfloat(calc_runs_p(buf.l, sizeof(buf) * BITS_PER_BYTE), >, 0.01);
55
+ dump_buf_if_failed(buf.c, sizeof(buf));
56
}
57
58
/*
59
@@ -XXX,XX +XXX,XX @@ static void test_first_byte_monobit(void)
60
}
61
62
g_assert_cmpfloat(calc_monobit_p(buf, sizeof(buf)), >, 0.01);
63
+ dump_buf_if_failed(buf, sizeof(buf));
64
}
65
66
/*
67
@@ -XXX,XX +XXX,XX @@ static void test_first_byte_runs(void)
68
}
69
70
g_assert_cmpfloat(calc_runs_p(buf.l, sizeof(buf) * BITS_PER_BYTE), >, 0.01);
71
+ dump_buf_if_failed(buf.c, sizeof(buf));
72
}
73
74
int main(int argc, char **argv)
75
--
95
--
76
2.20.1
96
2.20.1
77
97
78
98
1
Implement the new-in-v8.1M FPCXT_S floating-point system register.
1
From: Richard Henderson <richard.henderson@linaro.org>
2
This is for saving and restoring the secure floating-point context,
3
and it reads and writes bits [27:0] from the FPSCR and the
4
CONTROL.SFPA bit in bit [31].
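In other words, a read packs two pieces of state into one word; a
sketch (since FPCR_NZCV_MASK is bits [31:28], ~FPCR_NZCV_MASK covers
[27:0]):

    /* Sketch: FPCXT_S read = CONTROL_S.SFPA in bit 31, FPSCR[27:0] below */
    uint32_t val = (sfpa ? 1u << 31 : 0) | (fpscr & ~FPCR_NZCV_MASK);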
5
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210419202257.161730-28-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20201119215617.29887-14-peter.maydell@linaro.org
9
---
7
---
10
target/arm/translate-vfp.c.inc | 58 ++++++++++++++++++++++++++++++++++
8
target/arm/translate-a64.c | 23 ++++++++++++++---------
11
1 file changed, 58 insertions(+)
9
1 file changed, 14 insertions(+), 9 deletions(-)
12
10
13
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
11
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-vfp.c.inc
13
--- a/target/arm/translate-a64.c
16
+++ b/target/arm/translate-vfp.c.inc
14
+++ b/target/arm/translate-a64.c
17
@@ -XXX,XX +XXX,XX @@ static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
15
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
18
return false;
16
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
17
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
18
true, rn != 31, size);
19
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
20
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
21
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
22
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
23
return;
24
25
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
19
}
26
}
27
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
28
false, rn != 31, size);
29
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
30
- disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
31
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
32
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
33
+ rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
34
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
35
return;
36
37
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
38
int size = extract32(insn, 30, 2);
39
TCGv_i64 clean_addr, dirty_addr;
40
bool is_store = false;
41
- bool is_signed = false;
42
bool extend = false;
43
bool iss_sf;
44
+ MemOp mop;
45
46
if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
47
unallocated_encoding(s);
48
return;
49
}
50
51
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
52
+ mop = size | MO_ALIGN;
53
+
54
switch (opc) {
55
case 0: /* STLURB */
56
is_store = true;
57
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
58
unallocated_encoding(s);
59
return;
60
}
61
- is_signed = true;
62
+ mop |= MO_SIGN;
20
break;
63
break;
21
+ case ARM_VFP_FPCXT_S:
64
case 3: /* LDAPURS* 32-bit variant */
22
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
65
if (size > 1) {
23
+ return false;
66
unallocated_encoding(s);
24
+ }
67
return;
25
+ if (!s->v8m_secure) {
68
}
26
+ return false;
69
- is_signed = true;
27
+ }
70
+ mop |= MO_SIGN;
28
+ break;
71
extend = true; /* zero-extend 32->64 after signed load */
29
default:
30
return FPSysRegCheckFailed;
31
}
32
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
33
tcg_temp_free_i32(tmp);
34
break;
72
break;
35
}
36
+ case ARM_VFP_FPCXT_S:
37
+ {
38
+ TCGv_i32 sfpa, control, fpscr;
39
+ /* Set FPSCR[27:0] and CONTROL.SFPA from value */
40
+ tmp = loadfn(s, opaque);
41
+ sfpa = tcg_temp_new_i32();
42
+ tcg_gen_shri_i32(sfpa, tmp, 31);
43
+ control = load_cpu_field(v7m.control[M_REG_S]);
44
+ tcg_gen_deposit_i32(control, control, sfpa,
45
+ R_V7M_CONTROL_SFPA_SHIFT, 1);
46
+ store_cpu_field(control, v7m.control[M_REG_S]);
47
+ fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
48
+ tcg_gen_andi_i32(fpscr, fpscr, FPCR_NZCV_MASK);
49
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
50
+ tcg_gen_or_i32(fpscr, fpscr, tmp);
51
+ store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
52
+ tcg_temp_free_i32(tmp);
53
+ tcg_temp_free_i32(sfpa);
54
+ break;
55
+ }
56
default:
73
default:
57
g_assert_not_reached();
74
g_assert_not_reached();
58
}
75
}
59
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
76
60
tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
77
- iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
61
storefn(s, opaque, tmp);
78
+ iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
62
break;
79
63
+ case ARM_VFP_FPCXT_S:
80
if (rn == 31) {
64
+ {
81
gen_check_sp_alignment(s);
65
+ TCGv_i32 control, sfpa, fpscr;
82
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
66
+ /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
83
if (is_store) {
67
+ tmp = tcg_temp_new_i32();
84
/* Store-Release semantics */
68
+ sfpa = tcg_temp_new_i32();
85
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
69
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
86
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
70
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
87
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
71
+ control = load_cpu_field(v7m.control[M_REG_S]);
88
} else {
72
+ tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
89
/*
73
+ tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
90
* Load-AcquirePC semantics; we implement as the slightly more
74
+ tcg_gen_or_i32(tmp, tmp, sfpa);
91
* restrictive Load-Acquire.
75
+ tcg_temp_free_i32(sfpa);
92
*/
76
+ /*
93
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
77
+ * Store result before updating FPSCR etc, in case
94
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
78
+ * it is a memory write which causes an exception.
95
extend, true, rt, iss_sf, true);
79
+ */
96
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
80
+ storefn(s, opaque, tmp);
81
+ /*
82
+ * Now we must reset FPSCR from FPDSCR_NS, and clear
83
+ * CONTROL.SFPA; so we'll end the TB here.
84
+ */
85
+ tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
86
+ store_cpu_field(control, v7m.control[M_REG_S]);
87
+ fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
88
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
89
+ tcg_temp_free_i32(fpscr);
90
+ gen_lookup_tb(s);
91
+ break;
92
+ }
93
default:
94
g_assert_not_reached();
95
}
97
}
96
--
98
--
97
2.20.1
99
2.20.1
98
100
99
101
1
In v8.0M, on exception entry the registers R0-R3, R12, APSR and EPSR
1
From: Richard Henderson <richard.henderson@linaro.org>
2
are zeroed for an exception taken to Non-secure state; for an
3
exception taken to Secure state they become UNKNOWN, and we chose to
4
leave them at their previous values.
5
2
6
In v8.1M the behaviour is specified more tightly and these registers
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
are always zeroed regardless of the security state that the exception
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
targets (see rule R_KPZV). Implement this.
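The change reduces to widening one condition; a sketch mirroring the
m_helper.c hunk below:

    /* Sketch: v8.1M always clears the caller-saved registers */
    if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
        /* zero r0-r3, r12, APSR, EPSR */
    }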
5
Message-id: 20210419202257.161730-29-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/translate-a64.c | 20 ++++++++++----------
9
1 file changed, 10 insertions(+), 10 deletions(-)
9
10
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20201119215617.29887-17-peter.maydell@linaro.org
13
---
14
target/arm/m_helper.c | 16 ++++++++++++----
15
1 file changed, 12 insertions(+), 4 deletions(-)
16
17
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
18
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/m_helper.c
13
--- a/target/arm/translate-a64.c
20
+++ b/target/arm/m_helper.c
14
+++ b/target/arm/translate-a64.c
21
@@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
15
@@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
22
* Clear registers if necessary to prevent non-secure exception
16
23
* code being able to see register values from secure code.
17
/* Store from vector register to memory */
24
* Where register values become architecturally UNKNOWN we leave
18
static void do_vec_st(DisasContext *s, int srcidx, int element,
25
- * them with their previous values.
19
- TCGv_i64 tcg_addr, int size, MemOp endian)
26
+ * them with their previous values. v8.1M is tighter than v8.0M
20
+ TCGv_i64 tcg_addr, MemOp mop)
27
+ * here and always zeroes the caller-saved registers regardless
21
{
28
+ * of the security state the exception is targeting.
22
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
29
*/
23
30
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
24
- read_vec_element(s, tcg_tmp, srcidx, element, size);
31
- if (!targets_secure) {
25
- tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
32
+ if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
26
+ read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
33
/*
27
+ tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
34
* Always clear the caller-saved registers (they have been
28
35
* pushed to the stack earlier in v7m_push_stack()).
29
tcg_temp_free_i64(tcg_tmp);
36
@@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
30
}
37
* v7m_push_callee_stack()).
31
38
*/
32
/* Load from memory to vector register */
39
int i;
33
static void do_vec_ld(DisasContext *s, int destidx, int element,
40
+ /*
34
- TCGv_i64 tcg_addr, int size, MemOp endian)
41
+ * r4..r11 are callee-saves, zero only if background
35
+ TCGv_i64 tcg_addr, MemOp mop)
42
+ * state was Secure (EXCRET.S == 1) and exception
36
{
43
+ * targets Non-secure state
37
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
44
+ */
38
45
+ bool zero_callee_saves = !targets_secure &&
39
- tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
46
+ (lr & R_V7M_EXCRET_S_MASK);
40
- write_vec_element(s, tcg_tmp, destidx, element, size);
47
41
+ tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
48
for (i = 0; i < 13; i++) {
42
+ write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
49
- /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
43
50
- if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
44
tcg_temp_free_i64(tcg_tmp);
51
+ if (i < 4 || i > 11 || zero_callee_saves) {
45
}
52
env->regs[i] = 0;
46
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
53
}
47
for (xs = 0; xs < selem; xs++) {
48
int tt = (rt + r + xs) % 32;
49
if (is_store) {
50
- do_vec_st(s, tt, e, clean_addr, size, endian);
51
+ do_vec_st(s, tt, e, clean_addr, size | endian);
52
} else {
53
- do_vec_ld(s, tt, e, clean_addr, size, endian);
54
+ do_vec_ld(s, tt, e, clean_addr, size | endian);
54
}
55
}
56
tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
57
}
58
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
59
} else {
60
/* Load/store one element per register */
61
if (is_load) {
62
- do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
63
+ do_vec_ld(s, rt, index, clean_addr, scale | s->be_data);
64
} else {
65
- do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
66
+ do_vec_st(s, rt, index, clean_addr, scale | s->be_data);
67
}
68
}
69
tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
55
--
70
--
56
2.20.1
71
2.20.1
57
72
58
73
1
In arm_cpu_realizefn() we check whether the board code disabled EL3
1
From: Richard Henderson <richard.henderson@linaro.org>
2
via the has_el3 CPU object property, which we create if the CPU
3
starts with the ARM_FEATURE_EL3 feature bit. If it is disabled, then
4
we turn off ARM_FEATURE_EL3 and also zero out the relevant fields in
5
the ID_PFR1 and ID_AA64PFR0 registers.
6
2
7
This codepath was incorrectly being taken for M-profile CPUs, which
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
do not have an EL3 and don't set ARM_FEATURE_EL3, but which may have
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
the M-profile Security extension and so should have non-zero values
5
Message-id: 20210419202257.161730-30-richard.henderson@linaro.org
10
in the ID_PFR1.Security field.
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/translate-a64.c | 15 +++++++++++----
9
1 file changed, 11 insertions(+), 4 deletions(-)
11
10
12
Restrict the handling of the feature flag to A/R-profile cores.
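The fix is a one-line guard on the profile; a sketch matching the
cpu.c hunk below:

    /* Sketch: the EL3-disable handling applies to A/R-profile only */
    if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
        /* zero the ID_PFR1.Security / ID_AA64PFR0.EL3 fields etc */
    }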
11
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
13
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20201119215617.29887-4-peter.maydell@linaro.org
17
---
18
target/arm/cpu.c | 2 +-
19
1 file changed, 1 insertion(+), 1 deletion(-)
20
21
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
22
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/cpu.c
13
--- a/target/arm/translate-a64.c
24
+++ b/target/arm/cpu.c
14
+++ b/target/arm/translate-a64.c
25
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
15
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
26
}
16
bool is_postidx = extract32(insn, 23, 1);
17
bool is_q = extract32(insn, 30, 1);
18
TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
19
- MemOp endian = s->be_data;
20
+ MemOp endian, align, mop;
21
22
int total; /* total bytes */
23
int elements; /* elements per vector */
24
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
27
}
25
}
28
26
29
- if (!cpu->has_el3) {
27
/* For our purposes, bytes are always little-endian. */
30
+ if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
28
+ endian = s->be_data;
31
/* If the has_el3 CPU property is disabled then we need to disable the
29
if (size == 0) {
32
* feature.
30
endian = MO_LE;
33
*/
31
}
32
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
33
* Consecutive little-endian elements from a single register
34
* can be promoted to a larger little-endian operation.
35
*/
36
+ align = MO_ALIGN;
37
if (selem == 1 && endian == MO_LE) {
38
+ align = pow2_align(size);
39
size = 3;
40
}
41
- elements = (is_q ? 16 : 8) >> size;
42
+ if (!s->align_mem) {
43
+ align = 0;
44
+ }
45
+ mop = endian | size | align;
46
47
+ elements = (is_q ? 16 : 8) >> size;
48
tcg_ebytes = tcg_const_i64(1 << size);
49
for (r = 0; r < rpt; r++) {
50
int e;
51
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
52
for (xs = 0; xs < selem; xs++) {
53
int tt = (rt + r + xs) % 32;
54
if (is_store) {
55
- do_vec_st(s, tt, e, clean_addr, size | endian);
56
+ do_vec_st(s, tt, e, clean_addr, mop);
57
} else {
58
- do_vec_ld(s, tt, e, clean_addr, size | endian);
59
+ do_vec_ld(s, tt, e, clean_addr, mop);
60
}
61
tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
62
}
34
--
63
--
35
2.20.1
64
2.20.1
36
65
37
66
From: Vikram Garhwal <fnu.vikram@xilinx.com>

Reviewed-by: Francisco Iglesias <francisco.iglesias@xilinx.com>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Vikram Garhwal <fnu.vikram@xilinx.com>
Message-id: 1605728926-352690-5-git-send-email-fnu.vikram@xilinx.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 MAINTAINERS | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: hw/net/opencores_eth.c
 
 Devices
 -------
+Xilinx CAN
+M: Vikram Garhwal <fnu.vikram@xilinx.com>
+M: Francisco Iglesias <francisco.iglesias@xilinx.com>
+S: Maintained
+F: hw/net/can/xlnx-*
+F: include/hw/net/xlnx-*
+F: tests/qtest/xlnx-can-test*
+
 EDU
 M: Jiri Slaby <jslaby@suse.cz>
 S: Maintained
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-31-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     int index = is_q << 3 | S << 2 | size;
     int xs, total;
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
+    MemOp mop;
 
     if (extract32(insn, 31, 1)) {
         unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
 
     clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                 total);
+    mop = finalize_memop(s, scale);
 
     tcg_ebytes = tcg_const_i64(1 << scale);
     for (xs = 0; xs < selem; xs++) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
             /* Load and replicate to all elements */
             TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
-            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr,
-                                get_mem_index(s), s->be_data + scale);
+            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
             tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                  (is_q + 1) * 8, vec_full_reg_size(s),
                                  tcg_tmp);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         } else {
             /* Load/store one element per register */
             if (is_load) {
-                do_vec_ld(s, rt, index, clean_addr, scale | s->be_data);
+                do_vec_ld(s, rt, index, clean_addr, mop);
             } else {
-                do_vec_st(s, rt, index, clean_addr, scale | s->be_data);
+                do_vec_st(s, rt, index, clean_addr, mop);
             }
         }
         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
--
2.20.1

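Several patches in this part of the series funnel loads and stores through
finalize_memop(), introduced earlier in the same series. Roughly, it folds
the CPU's data endianness into the MemOp and, when strict alignment is in
force (SCTLR.A on A-profile, CCR.UNALIGN_TRP on M-profile), the MO_ALIGN
flag as well. A sketch, modelled on target/arm/translate.h (treat it as an
illustration rather than the exact definition):

    static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
    {
        /* Require natural alignment if the CPU demands it and the
         * caller did not already specify an alignment. */
        if (s->align_mem && !(opc & MO_AMASK)) {
            opc |= MO_ALIGN;
        }
        /* Fold in big/little-endian data access. */
        return opc | s->be_data;
    }
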
Correct a typo in the name we give the NVIC object.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-28-peter.maydell@linaro.org
---
 hw/arm/armv7m.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -XXX,XX +XXX,XX @@ static void armv7m_instance_init(Object *obj)
 
     memory_region_init(&s->container, obj, "armv7m-container", UINT64_MAX);
 
-    object_initialize_child(obj, "nvnic", &s->nvic, TYPE_NVIC);
+    object_initialize_child(obj, "nvic", &s->nvic, TYPE_NVIC);
     object_property_add_alias(obj, "num-irq",
                               OBJECT(&s->nvic), "num-irq");
 
--
2.20.1

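The name is not purely cosmetic: the second argument of
object_initialize_child() becomes the child property name, and hence the
component that appears in QOM paths and in "info qom-tree" output. An
illustrative annotation (the parent path depends on the board model):

    /* The property name chosen here, "nvic", is what shows up as the
     * QOM path component for the child, e.g. .../armv7m/nvic, so the
     * old "nvnic" typo was visible to anyone walking the QOM tree. */
    object_initialize_child(obj, "nvic", &s->nvic, TYPE_NVIC);
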
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-32-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-sve.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
     clean_addr = gen_mte_check1(s, temp, false, true, msz);
 
     tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
-                        s->be_data | dtype_mop[a->dtype]);
+                        finalize_memop(s, dtype_mop[a->dtype]));
 
     /* Broadcast to *all* elements. */
     tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
--
2.20.1

From: Alex Chen <alex.chen@huawei.com>

We should use the printf format specifier "%u" instead of "%d" for
arguments of type "unsigned int".

Reported-by: Euler Robot <euler.robot@huawei.com>
Signed-off-by: Alex Chen <alex.chen@huawei.com>
Message-id: 20201126111109.112238-2-alex.chen@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/imx25_ccm.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/hw/misc/imx25_ccm.c b/hw/misc/imx25_ccm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx25_ccm.c
+++ b/hw/misc/imx25_ccm.c
@@ -XXX,XX +XXX,XX @@ static const char *imx25_ccm_reg_name(uint32_t reg)
     case IMX25_CCM_LPIMR1_REG:
         return "lpimr1";
     default:
-        sprintf(unknown, "[%d ?]", reg);
+        sprintf(unknown, "[%u ?]", reg);
         return unknown;
     }
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_mpll_clk(IMXCCMState *dev)
         freq = imx_ccm_calc_pll(s->reg[IMX25_CCM_MPCTL_REG], CKIH_FREQ);
     }
 
-    DPRINTF("freq = %d\n", freq);
+    DPRINTF("freq = %u\n", freq);
 
     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_mcu_clk(IMXCCMState *dev)
 
     freq = freq / (1 + EXTRACT(s->reg[IMX25_CCM_CCTL_REG], ARM_CLK_DIV));
 
-    DPRINTF("freq = %d\n", freq);
+    DPRINTF("freq = %u\n", freq);
 
     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_ahb_clk(IMXCCMState *dev)
     freq = imx25_ccm_get_mcu_clk(dev)
            / (1 + EXTRACT(s->reg[IMX25_CCM_CCTL_REG], AHB_CLK_DIV));
 
-    DPRINTF("freq = %d\n", freq);
+    DPRINTF("freq = %u\n", freq);
 
     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_ipg_clk(IMXCCMState *dev)
 
     freq = imx25_ccm_get_ahb_clk(dev) / 2;
 
-    DPRINTF("freq = %d\n", freq);
+    DPRINTF("freq = %u\n", freq);
 
     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
         break;
     }
 
-    DPRINTF("Clock = %d) = %d\n", clock, freq);
+    DPRINTF("Clock = %d) = %u\n", clock, freq);
 
     return freq;
 }
--
2.20.1

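The distinction matters once a value exceeds INT_MAX: the standard gives no
guarantee for printing such an unsigned int with "%d", and on common ABIs
the bits are simply reinterpreted as a negative number. A standalone
illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg = 0x80000000u;  /* greater than INT_MAX */

        printf("[%d ?]\n", reg);   /* wrong: typically prints -2147483648 */
        printf("[%u ?]\n", reg);   /* right: prints 2147483648 */
        return 0;
    }
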
From: Cornelia Huck <cohuck@redhat.com>

Add 6.1 machine types for arm/i440fx/q35/s390x/spapr.

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Acked-by: Greg Kurz <groug@kaod.org>
Message-id: 20210331111900.118274-1-cohuck@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/boards.h        |  3 +++
 include/hw/i386/pc.h       |  3 +++
 hw/arm/virt.c              |  7 ++++++-
 hw/core/machine.c          |  3 +++
 hw/i386/pc.c               |  3 +++
 hw/i386/pc_piix.c          | 14 +++++++++++++-
 hw/i386/pc_q35.c           | 13 ++++++++++++-
 hw/ppc/spapr.c             | 17 ++++++++++++++---
 hw/s390x/s390-virtio-ccw.c | 14 +++++++++++++-
 9 files changed, 70 insertions(+), 7 deletions(-)

diff --git a/include/hw/boards.h b/include/hw/boards.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -XXX,XX +XXX,XX @@ struct MachineState {
     } \
     type_init(machine_initfn##_register_types)
 
+extern GlobalProperty hw_compat_6_0[];
+extern const size_t hw_compat_6_0_len;
+
 extern GlobalProperty hw_compat_5_2[];
 extern const size_t hw_compat_5_2_len;
 
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -XXX,XX +XXX,XX @@ bool pc_system_ovmf_table_find(const char *entry, uint8_t **data,
 void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
                        const CPUArchIdList *apic_ids, GArray *entry);
 
+extern GlobalProperty pc_compat_6_0[];
+extern const size_t pc_compat_6_0_len;
+
 extern GlobalProperty pc_compat_5_2[];
 extern const size_t pc_compat_5_2_len;
 
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@ static void machvirt_machine_init(void)
 }
 type_init(machvirt_machine_init);
 
+static void virt_machine_6_1_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(6, 1)
+
 static void virt_machine_6_0_options(MachineClass *mc)
 {
 }
-DEFINE_VIRT_MACHINE_AS_LATEST(6, 0)
+DEFINE_VIRT_MACHINE(6, 0)
 
 static void virt_machine_5_2_options(MachineClass *mc)
 {
diff --git a/hw/core/machine.c b/hw/core/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/virtio/virtio.h"
 #include "hw/virtio/virtio-pci.h"
 
+GlobalProperty hw_compat_6_0[] = {};
+const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);
+
 GlobalProperty hw_compat_5_2[] = {
     { "ICH9-LPC", "smm-compat", "on"},
     { "PIIX4_PM", "smm-compat", "on"},
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -XXX,XX +XXX,XX @@
 #include "trace.h"
 #include CONFIG_DEVICES
 
+GlobalProperty pc_compat_6_0[] = {};
+const size_t pc_compat_6_0_len = G_N_ELEMENTS(pc_compat_6_0);
+
 GlobalProperty pc_compat_5_2[] = {
     { "ICH9-LPC", "x-smi-cpu-hotunplug", "off" },
 };
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -XXX,XX +XXX,XX @@ static void pc_i440fx_machine_options(MachineClass *m)
     machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
 }
 
-static void pc_i440fx_6_0_machine_options(MachineClass *m)
+static void pc_i440fx_6_1_machine_options(MachineClass *m)
 {
     PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
     pc_i440fx_machine_options(m);
@@ -XXX,XX +XXX,XX @@ static void pc_i440fx_6_0_machine_options(MachineClass *m)
     pcmc->default_cpu_version = 1;
 }
 
+DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL,
+                      pc_i440fx_6_1_machine_options);
+
+static void pc_i440fx_6_0_machine_options(MachineClass *m)
+{
+    pc_i440fx_6_1_machine_options(m);
+    m->alias = NULL;
+    m->is_default = false;
+    compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+    compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
+}
+
 DEFINE_I440FX_MACHINE(v6_0, "pc-i440fx-6.0", NULL,
                       pc_i440fx_6_0_machine_options);
 
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -XXX,XX +XXX,XX @@ static void pc_q35_machine_options(MachineClass *m)
     m->max_cpus = 288;
 }
 
-static void pc_q35_6_0_machine_options(MachineClass *m)
+static void pc_q35_6_1_machine_options(MachineClass *m)
 {
     PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
     pc_q35_machine_options(m);
@@ -XXX,XX +XXX,XX @@ static void pc_q35_6_0_machine_options(MachineClass *m)
     pcmc->default_cpu_version = 1;
 }
 
+DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL,
+                   pc_q35_6_1_machine_options);
+
+static void pc_q35_6_0_machine_options(MachineClass *m)
+{
+    pc_q35_6_1_machine_options(m);
+    m->alias = NULL;
+    compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+    compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
+}
+
 DEFINE_Q35_MACHINE(v6_0, "pc-q35-6.0", NULL,
                    pc_q35_6_0_machine_options);
 
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -XXX,XX +XXX,XX @@ static void spapr_machine_latest_class_options(MachineClass *mc)
     type_init(spapr_machine_register_##suffix)
 
 /*
- * pseries-6.0
+ * pseries-6.1
 */
-static void spapr_machine_6_0_class_options(MachineClass *mc)
+static void spapr_machine_6_1_class_options(MachineClass *mc)
 {
     /* Defaults for the latest behaviour inherited from the base class */
 }
 
-DEFINE_SPAPR_MACHINE(6_0, "6.0", true);
+DEFINE_SPAPR_MACHINE(6_1, "6.1", true);
+
+/*
+ * pseries-6.0
+ */
+static void spapr_machine_6_0_class_options(MachineClass *mc)
+{
+    spapr_machine_6_1_class_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
+}
+
+DEFINE_SPAPR_MACHINE(6_0, "6.0", false);
 
 /*
  * pseries-5.2
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -XXX,XX +XXX,XX @@ bool css_migration_enabled(void)
     } \
     type_init(ccw_machine_register_##suffix)
 
+static void ccw_machine_6_1_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_6_1_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(6_1, "6.1", true);
+
 static void ccw_machine_6_0_instance_options(MachineState *machine)
 {
+    ccw_machine_6_1_instance_options(machine);
 }
 
 static void ccw_machine_6_0_class_options(MachineClass *mc)
 {
+    ccw_machine_6_1_class_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
 }
-DEFINE_CCW_MACHINE(6_0, "6.0", true);
+DEFINE_CCW_MACHINE(6_0, "6.0", false);
 
 static void ccw_machine_5_2_instance_options(MachineState *machine)
 {
--
2.20.1

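The mechanical pattern above is worth spelling out: the newest version's
options function carries no compat properties, and each older version calls
the next-newer one and then layers its own GlobalProperty lists on top, so
for example "-machine virt-6.0" on a 6.1 binary keeps 6.0 guest-visible
behaviour. A hypothetical sketch of the next round of this dance for
hw/arm/virt.c (the 6.2 functions and hw_compat_6_1 are invented here purely
for illustration):

    static void virt_machine_6_2_options(MachineClass *mc)
    {
        /* newest version: no compat properties */
    }
    DEFINE_VIRT_MACHINE_AS_LATEST(6, 2)

    static void virt_machine_6_1_options(MachineClass *mc)
    {
        virt_machine_6_2_options(mc);
        /* freeze 6.1 defaults for guests using -machine virt-6.1 */
        compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
    }
    DEFINE_VIRT_MACHINE(6, 1)
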
The RAS feature has a block of memory-mapped registers at offset
0x5000 within the PPB. For a "minimal RAS" implementation we provide
no error records and so the only registers that exist in the block
are ERRIIDR and ERRDEVID.

The "RAZ/WI for privileged, BusFault for nonprivileged" behaviour
of the "nvic-default" region is actually valid for minimal-RAS,
so the main benefit of providing an explicit implementation of
the register block is more accurate LOG_UNIMP messages, and a
framework for where we could add a real RAS implementation later
if necessary.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-27-peter.maydell@linaro.org
---
 include/hw/intc/armv7m_nvic.h |  1 +
 hw/intc/armv7m_nvic.c         | 56 +++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -XXX,XX +XXX,XX @@ struct NVICState {
     MemoryRegion sysreg_ns_mem;
     MemoryRegion systickmem;
     MemoryRegion systick_ns_mem;
+    MemoryRegion ras_mem;
     MemoryRegion container;
     MemoryRegion defaultmem;
 
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps nvic_systick_ops = {
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
 
+
+static MemTxResult ras_read(void *opaque, hwaddr addr,
+                            uint64_t *data, unsigned size,
+                            MemTxAttrs attrs)
+{
+    if (attrs.user) {
+        return MEMTX_ERROR;
+    }
+
+    switch (addr) {
+    case 0xe10: /* ERRIIDR */
+        /* architect field = Arm; product/variant/revision 0 */
+        *data = 0x43b;
+        break;
+    case 0xfc8: /* ERRDEVID */
+        /* Minimal RAS: we implement 0 error record indexes */
+        *data = 0;
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n",
+                      (uint32_t)addr);
+        *data = 0;
+        break;
+    }
+    return MEMTX_OK;
+}
+
+static MemTxResult ras_write(void *opaque, hwaddr addr,
+                             uint64_t value, unsigned size,
+                             MemTxAttrs attrs)
+{
+    if (attrs.user) {
+        return MEMTX_ERROR;
+    }
+
+    switch (addr) {
+    default:
+        qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n",
+                      (uint32_t)addr);
+        break;
+    }
+    return MEMTX_OK;
+}
+
+static const MemoryRegionOps ras_ops = {
+    .read_with_attrs = ras_read,
+    .write_with_attrs = ras_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
 /*
  * Unassigned portions of the PPB space are RAZ/WI for privileged
  * accesses, and fault for non-privileged accesses.
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
                           &s->systick_ns_mem, 1);
     }
 
+    if (cpu_isar_feature(aa32_ras, s->cpu)) {
+        memory_region_init_io(&s->ras_mem, OBJECT(s),
+                              &ras_ops, s, "nvic_ras", 0x1000);
+        memory_region_add_subregion(&s->container, 0x5000, &s->ras_mem);
+    }
+
     sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
 }
 
--
2.20.1

Currently the gpex PCI controller implements no special behaviour for
guest accesses to areas of the PIO and MMIO where it has not mapped
any PCI devices, which means that for Arm you end up with a CPU
exception due to a data abort.

Most guest OSes expect "like an x86 PC" behaviour, where bad accesses
like this return -1 for reads and ignore writes. In the interests of
not being surprising, make guest CPU accesses to these windows behave
as -1/discard where there's no mapped PCI device.

The old behaviour generally didn't cause any problems, because
almost always the guest OS will map the PCI devices and then only
access where it has mapped them. One corner case where you will see
this kind of access is if Linux attempts to probe legacy ISA
devices via a PIO window access. So far the only case where we've
seen this has been via the syzkaller fuzzer.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Message-id: 20210325163315.27724-1-peter.maydell@linaro.org
Fixes: https://bugs.launchpad.net/qemu/+bug/1918917
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/pci-host/gpex.h |  4 +++
 hw/core/machine.c          |  4 ++-
 hw/pci-host/gpex.c         | 56 ++++++++++++++++++++++++++++++++++++--
 3 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/pci-host/gpex.h
+++ b/include/hw/pci-host/gpex.h
@@ -XXX,XX +XXX,XX @@ struct GPEXHost {
 
     MemoryRegion io_ioport;
     MemoryRegion io_mmio;
+    MemoryRegion io_ioport_window;
+    MemoryRegion io_mmio_window;
     qemu_irq irq[GPEX_NUM_IRQS];
     int irq_num[GPEX_NUM_IRQS];
+
+    bool allow_unmapped_accesses;
 };
 
 struct GPEXConfig {
diff --git a/hw/core/machine.c b/hw/core/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/virtio/virtio.h"
 #include "hw/virtio/virtio-pci.h"
 
-GlobalProperty hw_compat_6_0[] = {};
+GlobalProperty hw_compat_6_0[] = {
+    { "gpex-pcihost", "allow-unmapped-accesses", "false" },
+};
 const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0);
 
 GlobalProperty hw_compat_5_2[] = {
diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/pci-host/gpex.c
+++ b/hw/pci-host/gpex.c
@@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp)
     int i;
 
     pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX);
+    sysbus_init_mmio(sbd, &pex->mmio);
+
+    /*
+     * Note that the MemoryRegions io_mmio and io_ioport that we pass
+     * to pci_register_root_bus() are not the same as the
+     * MemoryRegions io_mmio_window and io_ioport_window that we
+     * expose as SysBus MRs. The difference is in the behaviour of
+     * accesses to addresses where no PCI device has been mapped.
+     *
+     * io_mmio and io_ioport are the underlying PCI view of the PCI
+     * address space, and when a PCI device does a bus master access
+     * to a bad address this is reported back to it as a transaction
+     * failure.
+     *
+     * io_mmio_window and io_ioport_window implement "unmapped
+     * addresses read as -1 and ignore writes"; this is traditional
+     * x86 PC behaviour, which is not mandated by the PCI spec proper
+     * but expected by much PCI-using guest software, including Linux.
+     *
+     * In the interests of not being unnecessarily surprising, we
+     * implement it in the gpex PCI host controller, by providing the
+     * _window MRs, which are containers with io ops that implement
+     * the 'background' behaviour and which hold the real PCI MRs as
+     * subregions.
+     */
     memory_region_init(&s->io_mmio, OBJECT(s), "gpex_mmio", UINT64_MAX);
     memory_region_init(&s->io_ioport, OBJECT(s), "gpex_ioport", 64 * 1024);
 
-    sysbus_init_mmio(sbd, &pex->mmio);
-    sysbus_init_mmio(sbd, &s->io_mmio);
-    sysbus_init_mmio(sbd, &s->io_ioport);
+    if (s->allow_unmapped_accesses) {
+        memory_region_init_io(&s->io_mmio_window, OBJECT(s),
+                              &unassigned_io_ops, OBJECT(s),
+                              "gpex_mmio_window", UINT64_MAX);
+        memory_region_init_io(&s->io_ioport_window, OBJECT(s),
+                              &unassigned_io_ops, OBJECT(s),
+                              "gpex_ioport_window", 64 * 1024);
+
+        memory_region_add_subregion(&s->io_mmio_window, 0, &s->io_mmio);
+        memory_region_add_subregion(&s->io_ioport_window, 0, &s->io_ioport);
+        sysbus_init_mmio(sbd, &s->io_mmio_window);
+        sysbus_init_mmio(sbd, &s->io_ioport_window);
+    } else {
+        sysbus_init_mmio(sbd, &s->io_mmio);
+        sysbus_init_mmio(sbd, &s->io_ioport);
+    }
+
     for (i = 0; i < GPEX_NUM_IRQS; i++) {
         sysbus_init_irq(sbd, &s->irq[i]);
         s->irq_num[i] = -1;
@@ -XXX,XX +XXX,XX @@ static const char *gpex_host_root_bus_path(PCIHostState *host_bridge,
     return "0000:00";
 }
 
+static Property gpex_host_properties[] = {
+    /*
+     * Permit CPU accesses to unmapped areas of the PIO and MMIO windows
+     * (discarding writes and returning -1 for reads) rather than aborting.
+     */
+    DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost,
+                     allow_unmapped_accesses, true),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void gpex_host_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void gpex_host_class_init(ObjectClass *klass, void *data)
     dc->realize = gpex_host_realize;
     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
     dc->fw_name = "pci";
+    device_class_set_props(dc, gpex_host_properties);
 }
 
 static void gpex_host_initfn(Object *obj)
--
2.20.1

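Two notes on the patches above. First, the minimal RAS block is guest-visible
at PPB offset 0x5000, i.e. 0xE0005000 on M-profile, so a privileged bare-metal
guest could sanity-check it roughly like this (illustrative code, not from
the patch):

    #include <stdint.h>

    #define RAS_BASE  0xE0005000UL   /* PPB base 0xE0000000 + 0x5000 */
    #define ERRIIDR   0xe10
    #define ERRDEVID  0xfc8

    static inline uint32_t reg_read(uintptr_t addr)
    {
        return *(volatile uint32_t *)addr;
    }

    int ras_present(void)
    {
        /* 0x43b: Arm as architect; zero error record indexes */
        return reg_read(RAS_BASE + ERRIIDR) == 0x43b &&
               reg_read(RAS_BASE + ERRDEVID) == 0;
    }

Second, for the gpex change: new machine types get the -1/discard windows by
default, while 6.0 and older machine types keep the old trapping behaviour
through the hw_compat_6_0 entry shown above; the same knob should also be
reachable directly as the gpex-pcihost "allow-unmapped-accesses" property.
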