target-arm queue: the big stuff here is the final part of
rth's patches for Cortex-A76 and Neoverse-N1 support;
also present are Gavin's NUMA series and a few other things.

thanks
-- PMM

The following changes since commit 554623226f800acf48a2ed568900c1c968ec9a8b:

  Merge tag 'qemu-sparc-20220508' of https://github.com/mcayland/qemu into staging (2022-05-08 17:03:26 -0500)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220509

for you to fetch changes up to ae9141d4a3265553503bf07d3574b40f84615a34:

  hw/acpi/aml-build: Use existing CPU topology to build PPTT table (2022-05-09 11:47:55 +0100)

----------------------------------------------------------------
target-arm queue:
 * MAINTAINERS/.mailmap: update email for Leif Lindholm
 * hw/arm: add version information to sbsa-ref machine DT
 * Enable new features for -cpu max:
   FEAT_Debugv8p2, FEAT_Debugv8p4, FEAT_RAS (minimal version only),
   FEAT_IESB, FEAT_CSV2, FEAT_CSV2_2, FEAT_CSV3, FEAT_DGH
 * Emulate Cortex-A76
 * Emulate Neoverse-N1
 * Fix the virt board default NUMA topology

----------------------------------------------------------------
Gavin Shan (6):
      qapi/machine.json: Add cluster-id
      qtest/numa-test: Specify CPU topology in aarch64_numa_cpu()
      hw/arm/virt: Consider SMP configuration in CPU topology
      qtest/numa-test: Correct CPU and NUMA association in aarch64_numa_cpu()
      hw/arm/virt: Fix CPU's default NUMA node ID
      hw/acpi/aml-build: Use existing CPU topology to build PPTT table

Leif Lindholm (2):
      MAINTAINERS/.mailmap: update email for Leif Lindholm
      hw/arm: add versioning to sbsa-ref machine DT

Richard Henderson (24):
      target/arm: Handle cpreg registration for missing EL
      target/arm: Drop EL3 no EL2 fallbacks
      target/arm: Merge zcr reginfo
      target/arm: Adjust definition of CONTEXTIDR_EL2
      target/arm: Move cortex impdef sysregs to cpu_tcg.c
      target/arm: Update qemu-system-arm -cpu max to cortex-a57
      target/arm: Set ID_DFR0.PerfMon for qemu-system-arm -cpu max
      target/arm: Split out aa32_max_features
      target/arm: Annotate arm_max_initfn with FEAT identifiers
      target/arm: Use field names for manipulating EL2 and EL3 modes
      target/arm: Enable FEAT_Debugv8p2 for -cpu max
      target/arm: Enable FEAT_Debugv8p4 for -cpu max
      target/arm: Add minimal RAS registers
      target/arm: Enable SCR and HCR bits for RAS
      target/arm: Implement virtual SError exceptions
      target/arm: Implement ESB instruction
      target/arm: Enable FEAT_RAS for -cpu max
      target/arm: Enable FEAT_IESB for -cpu max
      target/arm: Enable FEAT_CSV2 for -cpu max
      target/arm: Enable FEAT_CSV2_2 for -cpu max
      target/arm: Enable FEAT_CSV3 for -cpu max
      target/arm: Enable FEAT_DGH for -cpu max
      target/arm: Define cortex-a76
      target/arm: Define neoverse-n1

 docs/system/arm/emulation.rst |  10 +
 docs/system/arm/virt.rst      |   2 +
 qapi/machine.json             |   6 +-
 target/arm/cpregs.h           |  11 +
 target/arm/cpu.h              |  23 ++
 target/arm/helper.h           |   1 +
 target/arm/internals.h        |  16 ++
 target/arm/syndrome.h         |   5 +
 target/arm/a32.decode         |  16 +-
 target/arm/t32.decode         |  18 +-
 hw/acpi/aml-build.c           | 111 ++++----
 hw/arm/sbsa-ref.c             |  16 ++
 hw/arm/virt.c                 |  21 +-
 hw/core/machine-hmp-cmds.c    |   4 +
 hw/core/machine.c             |  16 ++
 target/arm/cpu.c              |  66 ++++-
 target/arm/cpu64.c            | 353 ++++++++++++++-----------
 target/arm/cpu_tcg.c          | 227 +++++++++++-----
 target/arm/helper.c           | 600 +++++++++++++++++++++++++-----------------
 target/arm/op_helper.c        |  43 +++
 target/arm/translate-a64.c    |  18 ++
 target/arm/translate.c        |  23 ++
 tests/qtest/numa-test.c       |  19 +-
 .mailmap                      |   3 +-
 MAINTAINERS                   |   2 +-
 25 files changed, 1068 insertions(+), 562 deletions(-)
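
For reference (not part of the original message), a series posted this way
is normally applied by pulling the tag named above with standard git:

    git pull https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220509
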
From: Leif Lindholm <quic_llindhol@quicinc.com>

NUVIA was acquired by Qualcomm in March 2021, but kept functioning on
separate infrastructure for a transitional period. We've now switched
over to contributing as Qualcomm Innovation Center (quicinc), so update
my email address to reflect this.

Signed-off-by: Leif Lindholm <quic_llindhol@quicinc.com>
Message-id: 20220505113740.75565-1-quic_llindhol@quicinc.com
Cc: Leif Lindholm <leif@nuviainc.com>
Cc: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
[Fixed commit message typo]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 .mailmap    | 3 ++-
 MAINTAINERS | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.mailmap b/.mailmap
index XXXXXXX..XXXXXXX 100644
--- a/.mailmap
+++ b/.mailmap
@@ -XXX,XX +XXX,XX @@ Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
 Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
 James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
-Leif Lindholm <leif@nuviainc.com> <leif.lindholm@linaro.org>
+Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
+Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
 Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
 Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
 Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: include/hw/ssi/imx_spi.h
 SBSA-REF
 M: Radoslaw Biernacki <rad@semihalf.com>
 M: Peter Maydell <peter.maydell@linaro.org>
-R: Leif Lindholm <leif@nuviainc.com>
+R: Leif Lindholm <quic_llindhol@quicinc.com>
 L: qemu-arm@nongnu.org
 S: Maintained
 F: hw/arm/sbsa-ref.c
--
2.25.1
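
As an aside (not a further change), the two .mailmap lines added above use
git's standard mapping syntax, where the first address is the canonical
identity and the second is the address as it appears in existing commits;
the names and addresses below are only a hypothetical format reminder:

    Canonical Name <canonical@example.org> <as-committed@example.org>
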
From: Richard Henderson <richard.henderson@linaro.org>

More gracefully handle cpregs when EL2 and/or EL3 are missing.
If the reg is entirely inaccessible, do not register it at all.
If the reg is for EL2, and EL3 is present but EL2 is not,
either discard, squash to res0, const, or keep unchanged.

Per rule RJFFP, mark the 4 aarch32 hypervisor access registers
with ARM_CP_EL3_NO_EL2_KEEP, and mark all of the EL2 address
translation and tlb invalidation "regs" ARM_CP_EL3_NO_EL2_UNDEF.
Mark the 2 virtualization processor id regs ARM_CP_EL3_NO_EL2_C_NZ.

This will simplify cpreg registration for conditional arm features.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220506180242.216785-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpregs.h |  11 +++
 target/arm/helper.c | 178 ++++++++++++++++++++++++++++++--------------
 2 files changed, 133 insertions(+), 56 deletions(-)

diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -XXX,XX +XXX,XX @@ enum {
     ARM_CP_SVE = 1 << 14,
     /* Flag: Do not expose in gdb sysreg xml. */
     ARM_CP_NO_GDB = 1 << 15,
+    /*
+     * Flags: If EL3 but not EL2...
+     *  - UNDEF: discard the cpreg,
+     *  - KEEP: retain the cpreg as is,
+     *  - C_NZ: set const on the cpreg, but retain resetvalue,
+     *  - else: set const on the cpreg, zero resetvalue, aka RES0.
+     * See rule RJFFP in section D1.1.3 of DDI0487H.a.
+     */
+    ARM_CP_EL3_NO_EL2_UNDEF = 1 << 16,
+    ARM_CP_EL3_NO_EL2_KEEP = 1 << 17,
+    ARM_CP_EL3_NO_EL2_C_NZ = 1 << 18,
 };

 /*
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
33
VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
51
.access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
34
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
52
{ .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
35
53
.opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
36
+# The U bit (28) is don't-care because it does not affect the result
54
- .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_FPU,
37
+VMLAS 111- 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
55
+ .access = PL2_RW,
38
+
56
+ .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
39
# Vector add across vector
57
.fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
58
{ .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
59
.opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
60
- .access = PL2_RW, .resetvalue = 0,
61
+ .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
62
.writefn = dacr_write, .raw_writefn = raw_write,
63
.fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
64
{ .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
65
.opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
66
- .access = PL2_RW, .resetvalue = 0,
67
+ .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
68
.fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
69
{ .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
70
.type = ARM_CP_ALIAS,
71
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
72
.writefn = tlbimva_hyp_is_write },
73
{ .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
74
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
75
- .type = ARM_CP_NO_RAW, .access = PL2_W,
76
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
77
.writefn = tlbi_aa64_alle2_write },
78
{ .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
79
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
80
- .type = ARM_CP_NO_RAW, .access = PL2_W,
81
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
82
.writefn = tlbi_aa64_vae2_write },
83
{ .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
84
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
85
- .access = PL2_W, .type = ARM_CP_NO_RAW,
86
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
87
.writefn = tlbi_aa64_vae2_write },
88
{ .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
89
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
90
- .access = PL2_W, .type = ARM_CP_NO_RAW,
91
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
92
.writefn = tlbi_aa64_alle2is_write },
93
{ .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
94
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
95
- .type = ARM_CP_NO_RAW, .access = PL2_W,
96
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
97
.writefn = tlbi_aa64_vae2is_write },
98
{ .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
99
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
100
- .access = PL2_W, .type = ARM_CP_NO_RAW,
101
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
102
.writefn = tlbi_aa64_vae2is_write },
103
#ifndef CONFIG_USER_ONLY
104
/* Unlike the other EL2-related AT operations, these must
105
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
106
{ .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
107
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
108
.access = PL2_W, .accessfn = at_s1e2_access,
109
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
110
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
111
+ .writefn = ats_write64 },
112
{ .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
113
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
114
.access = PL2_W, .accessfn = at_s1e2_access,
115
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
116
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
117
+ .writefn = ats_write64 },
118
/* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
119
* if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
120
* with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
121
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
122
{ .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
123
.opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
124
.access = PL2_RW, .accessfn = access_tda,
125
- .type = ARM_CP_NOP },
126
+ .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
127
/* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
128
* Channel but Linux may try to access this register. The 32-bit
129
* alias is DBGDCCINT.
130
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
131
.access = PL2_W, .type = ARM_CP_NOP },
132
{ .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
133
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
134
- .access = PL2_W, .type = ARM_CP_NO_RAW,
135
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
136
.writefn = tlbi_aa64_rvae2is_write },
137
{ .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
138
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
139
- .access = PL2_W, .type = ARM_CP_NO_RAW,
140
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
141
.writefn = tlbi_aa64_rvae2is_write },
142
{ .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
143
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
144
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
145
.access = PL2_W, .type = ARM_CP_NOP },
146
{ .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
147
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
148
- .access = PL2_W, .type = ARM_CP_NO_RAW,
149
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
150
.writefn = tlbi_aa64_rvae2is_write },
151
{ .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
152
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
153
- .access = PL2_W, .type = ARM_CP_NO_RAW,
154
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
155
.writefn = tlbi_aa64_rvae2is_write },
156
{ .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
157
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
158
- .access = PL2_W, .type = ARM_CP_NO_RAW,
159
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
160
.writefn = tlbi_aa64_rvae2_write },
161
{ .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
162
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
163
- .access = PL2_W, .type = ARM_CP_NO_RAW,
164
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
165
.writefn = tlbi_aa64_rvae2_write },
166
{ .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
167
.opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
168
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbios_reginfo[] = {
169
.writefn = tlbi_aa64_vae1is_write },
170
{ .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
171
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
172
- .access = PL2_W, .type = ARM_CP_NO_RAW,
173
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
174
.writefn = tlbi_aa64_alle2is_write },
175
{ .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
176
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
177
- .access = PL2_W, .type = ARM_CP_NO_RAW,
178
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
179
.writefn = tlbi_aa64_vae2is_write },
180
{ .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
181
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
182
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbios_reginfo[] = {
183
.writefn = tlbi_aa64_alle1is_write },
184
{ .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
185
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
186
- .access = PL2_W, .type = ARM_CP_NO_RAW,
187
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
188
.writefn = tlbi_aa64_vae2is_write },
189
{ .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
190
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
191
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
192
{ .name = "VPIDR", .state = ARM_CP_STATE_AA32,
193
.cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
194
.access = PL2_RW, .accessfn = access_el3_aa32ns,
195
- .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
196
+ .resetvalue = cpu->midr,
197
+ .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
198
.fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
199
{ .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
200
.opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
201
.access = PL2_RW, .resetvalue = cpu->midr,
202
+ .type = ARM_CP_EL3_NO_EL2_C_NZ,
203
.fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
204
{ .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
205
.cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
206
.access = PL2_RW, .accessfn = access_el3_aa32ns,
207
- .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
208
+ .resetvalue = vmpidr_def,
209
+ .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
210
.fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
211
{ .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
212
.opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
213
- .access = PL2_RW,
214
- .resetvalue = vmpidr_def,
215
+ .access = PL2_RW, .resetvalue = vmpidr_def,
216
+ .type = ARM_CP_EL3_NO_EL2_C_NZ,
217
.fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
218
};
219
define_arm_cp_regs(cpu, vpidr_regs);
220
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
221
int crm, int opc1, int opc2,
222
const char *name)
40
{
223
{
41
VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
224
+ CPUARMState *env = &cpu->env;
42
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
225
uint32_t key;
43
index XXXXXXX..XXXXXXX 100644
226
ARMCPRegInfo *r2;
44
--- a/target/arm/mve_helper.c
227
bool is64 = r->type & ARM_CP_64BIT;
45
+++ b/target/arm/mve_helper.c
228
bool ns = secstate & ARM_CP_SECSTATE_NS;
46
@@ -XXX,XX +XXX,XX @@ DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
229
int cp = r->cp;
47
mve_advance_vpt(env); \
230
- bool isbanked;
231
size_t name_len;
232
+ bool make_const;
233
234
switch (state) {
235
case ARM_CP_STATE_AA32:
236
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
237
}
48
}
238
}
49
239
50
+/* "accumulating" version where FN takes d as well as n and m */
240
+ /*
51
+#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
241
+ * Eliminate registers that are not present because the EL is missing.
52
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
242
+ * Doing this here makes it easier to put all registers for a given
53
+ uint32_t rm) \
243
+ * feature into the same ARMCPRegInfo array and define them all at once.
54
+ { \
244
+ */
55
+ TYPE *d = vd, *n = vn; \
245
+ make_const = false;
56
+ TYPE m = rm; \
246
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
57
+ uint16_t mask = mve_element_mask(env); \
247
+ /*
58
+ unsigned e; \
248
+ * An EL2 register without EL2 but with EL3 is (usually) RES0.
59
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
249
+ * See rule RJFFP in section D1.1.3 of DDI0487H.a.
60
+ mergemask(&d[H##ESIZE(e)], \
250
+ */
61
+ FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \
251
+ int min_el = ctz32(r->access) / 2;
62
+ } \
252
+ if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
63
+ mve_advance_vpt(env); \
253
+ if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
254
+ return;
255
+ }
256
+ make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
257
+ }
258
+ } else {
259
+ CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
260
+ ? PL2_RW : PL1_RW);
261
+ if ((r->access & max_el) == 0) {
262
+ return;
263
+ }
64
+ }
264
+ }
65
+
265
+
66
/* provide unsigned 2-op scalar helpers for all sizes */
266
/* Combine cpreg and name into one allocation. */
67
#define DO_2OP_SCALAR_U(OP, FN) \
267
name_len = strlen(name) + 1;
68
DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
268
r2 = g_malloc(sizeof(*r2) + name_len);
69
@@ -XXX,XX +XXX,XX @@ DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
269
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
70
DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
270
r2->opaque = opaque;
71
DO_2OP_SCALAR(OP##w, 4, int32_t, FN)
271
}
72
272
73
+#define DO_2OP_ACC_SCALAR_U(OP, FN) \
273
- isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
74
+ DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN) \
274
- if (isbanked) {
75
+ DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN) \
275
+ if (make_const) {
76
+ DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)
276
+ /* This should not have been a very special register to begin. */
77
+
277
+ int old_special = r2->type & ARM_CP_SPECIAL_MASK;
78
DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
278
+ assert(old_special == 0 || old_special == ARM_CP_NOP);
79
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
279
/*
80
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
280
- * Register is banked (using both entries in array).
81
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
281
- * Overwriting fieldoffset as the array is only used to define
82
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
282
- * banked registers but later only fieldoffset is used.
83
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
283
+ * Set the special function to CONST, retaining the other flags.
84
284
+ * This is important for e.g. ARM_CP_SVE so that we still
85
+/* Vector by vector plus scalar */
285
+ * take the SVE trap if CPTR_EL3.EZ == 0.
86
+#define DO_VMLAS(D, N, M) ((N) * (D) + (M))
286
*/
87
+
287
- r2->fieldoffset = r->bank_fieldoffsets[ns];
88
+DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)
288
- }
89
+
289
+ r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
90
/*
290
+ /*
91
* Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
291
+ * Usually, these registers become RES0, but there are a few
92
* input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
292
+ * special cases like VPIDR_EL2 which have a constant non-zero
93
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
293
+ * value with writes ignored.
94
index XXXXXXX..XXXXXXX 100644
294
+ */
95
--- a/target/arm/translate-mve.c
295
+ if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
96
+++ b/target/arm/translate-mve.c
296
+ r2->resetvalue = 0;
97
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar)
297
+ }
98
DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
298
+ /*
99
DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
299
+ * ARM_CP_CONST has precedence, so removing the callbacks and
100
DO_2OP_SCALAR(VBRSR, vbrsr)
300
+ * offsets are not strictly necessary, but it is potentially
101
+DO_2OP_SCALAR(VMLAS, vmlas)
301
+ * less confusing to debug later.
102
302
+ */
103
static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
303
+ r2->readfn = NULL;
104
{
304
+ r2->writefn = NULL;
305
+ r2->raw_readfn = NULL;
306
+ r2->raw_writefn = NULL;
307
+ r2->resetfn = NULL;
308
+ r2->fieldoffset = 0;
309
+ r2->bank_fieldoffsets[0] = 0;
310
+ r2->bank_fieldoffsets[1] = 0;
311
+ } else {
312
+ bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
313
314
- if (state == ARM_CP_STATE_AA32) {
315
if (isbanked) {
316
/*
317
- * If the register is banked then we don't need to migrate or
318
- * reset the 32-bit instance in certain cases:
319
- *
320
- * 1) If the register has both 32-bit and 64-bit instances then we
321
- * can count on the 64-bit instance taking care of the
322
- * non-secure bank.
323
- * 2) If ARMv8 is enabled then we can count on a 64-bit version
324
- * taking care of the secure bank. This requires that separate
325
- * 32 and 64-bit definitions are provided.
326
+ * Register is banked (using both entries in array).
327
+ * Overwriting fieldoffset as the array is only used to define
328
+ * banked registers but later only fieldoffset is used.
329
*/
330
- if ((r->state == ARM_CP_STATE_BOTH && ns) ||
331
- (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
332
+ r2->fieldoffset = r->bank_fieldoffsets[ns];
333
+ }
334
+ if (state == ARM_CP_STATE_AA32) {
335
+ if (isbanked) {
336
+ /*
337
+ * If the register is banked then we don't need to migrate or
338
+ * reset the 32-bit instance in certain cases:
339
+ *
340
+ * 1) If the register has both 32-bit and 64-bit instances
341
+ * then we can count on the 64-bit instance taking care
342
+ * of the non-secure bank.
343
+ * 2) If ARMv8 is enabled then we can count on a 64-bit
344
+ * version taking care of the secure bank. This requires
345
+ * that separate 32 and 64-bit definitions are provided.
346
+ */
347
+ if ((r->state == ARM_CP_STATE_BOTH && ns) ||
348
+ (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
349
+ r2->type |= ARM_CP_ALIAS;
350
+ }
351
+ } else if ((secstate != r->secure) && !ns) {
352
+ /*
353
+ * The register is not banked so we only want to allow
354
+ * migration of the non-secure instance.
355
+ */
356
r2->type |= ARM_CP_ALIAS;
357
}
358
- } else if ((secstate != r->secure) && !ns) {
359
- /*
360
- * The register is not banked so we only want to allow migration
361
- * of the non-secure instance.
362
- */
363
- r2->type |= ARM_CP_ALIAS;
364
- }
365
366
- if (HOST_BIG_ENDIAN &&
367
- r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
368
- r2->fieldoffset += sizeof(uint32_t);
369
+ if (HOST_BIG_ENDIAN &&
370
+ r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
371
+ r2->fieldoffset += sizeof(uint32_t);
372
+ }
373
}
374
}
375
376
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
377
* multiple times. Special registers (ie NOP/WFI) are
378
* never migratable and not even raw-accessible.
379
*/
380
- if (r->type & ARM_CP_SPECIAL_MASK) {
381
+ if (r2->type & ARM_CP_SPECIAL_MASK) {
382
r2->type |= ARM_CP_NO_RAW;
383
}
384
if (((r->crm == CP_ANY) && crm != 0) ||
105
--
2.25.1
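
For readability, this is roughly how one of the v8_cp_reginfo entries
touched above reads once the patch is applied (reassembled from the hunks
for illustration; it is not an additional change):

    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },

With EL3 present but EL2 absent, the ARM_CP_EL3_NO_EL2_KEEP type flag makes
add_cpreg_to_hashtable() register the entry unchanged, instead of squashing
it to an ARM_CP_CONST RES0 register as happens for unmarked EL2 cpregs.
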
From: Richard Henderson <richard.henderson@linaro.org>

Drop el3_no_el2_cp_reginfo, el3_no_el2_v8_cp_reginfo, and the local
vpidr_regs definition, and rely on the squashing to ARM_CP_CONST
while registering for v8.

This is a behavior change for v7 cpus with Security Extensions and
without Virtualization Extensions, in that the virtualization cpregs
are now correctly not present. This would be a migration compatibility
break, except that we have an existing bug in which migration of 32-bit
cpus with Security Extensions enabled does not work.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220506180242.216785-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 158 ++++----------------------------------------
 1 file changed, 13 insertions(+), 145 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
+++ b/target/arm/helper-mve.h
24
+++ b/target/arm/helper.c
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)
25
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
23
26
.fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
24
DEF_HELPER_FLAGS_3(mve_vdup, TCG_CALL_NO_WG, void, env, ptr, i32)
27
};
25
28
26
+DEF_HELPER_FLAGS_4(mve_vidupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
29
-/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
27
+DEF_HELPER_FLAGS_4(mve_viduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
30
-static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
28
+DEF_HELPER_FLAGS_4(mve_vidupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
31
- { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
29
+
32
- .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
30
+DEF_HELPER_FLAGS_5(mve_viwdupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
33
- .access = PL2_RW,
31
+DEF_HELPER_FLAGS_5(mve_viwduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
34
- .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
32
+DEF_HELPER_FLAGS_5(mve_viwdupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
35
- { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
33
+
36
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
34
+DEF_HELPER_FLAGS_5(mve_vdwdupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
37
- .access = PL2_RW,
35
+DEF_HELPER_FLAGS_5(mve_vdwduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
38
- .type = ARM_CP_CONST, .resetvalue = 0 },
36
+DEF_HELPER_FLAGS_5(mve_vdwdupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
39
- { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
37
+
40
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
38
DEF_HELPER_FLAGS_3(mve_vclsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
41
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
39
DEF_HELPER_FLAGS_3(mve_vclsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
42
- { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
40
DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
43
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
41
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
44
- .access = PL2_RW,
42
index XXXXXXX..XXXXXXX 100644
45
- .type = ARM_CP_CONST, .resetvalue = 0 },
43
--- a/target/arm/mve.decode
46
- { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
44
+++ b/target/arm/mve.decode
47
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
45
@@ -XXX,XX +XXX,XX @@
48
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
46
&2scalar qd qn rm size
49
- { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
47
&1imm qd imm cmode op
50
- .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
48
&2shift qd qm shift size
51
- .access = PL2_RW, .type = ARM_CP_CONST,
49
+&vidup qd rn size imm
52
- .resetvalue = 0 },
50
+&viwdup qd rn rm size imm
53
- { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
51
54
- .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
52
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
55
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
53
# Note that both Rn and Qd are 3 bits only (no D bit)
56
- { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
54
@@ -XXX,XX +XXX,XX @@ VDUP 1110 1110 1 1 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=0
57
- .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
55
VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 1 1 0000 @vdup size=1
58
- .access = PL2_RW, .type = ARM_CP_CONST,
56
VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
59
- .resetvalue = 0 },
57
60
- { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
58
+# Incrementing and decrementing dup
61
- .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
59
+
62
- .access = PL2_RW, .type = ARM_CP_CONST,
60
+# VIDUP, VDDUP format immediate: 1 << (immh:imml)
63
- .resetvalue = 0 },
61
+%imm_vidup 7:1 0:1 !function=vidup_imm
64
- { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
62
+
65
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
63
+# VIDUP, VDDUP registers: Rm bits [3:1] from insn, bit 0 is 1;
66
- .access = PL2_RW, .type = ARM_CP_CONST,
64
+# Rn bits [3:1] from insn, bit 0 is 0
67
- .resetvalue = 0 },
65
+%vidup_rm 1:3 !function=times_2_plus_1
68
- { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
66
+%vidup_rn 17:3 !function=times_2
69
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
67
+
70
- .access = PL2_RW, .type = ARM_CP_CONST,
68
+@vidup .... .... . . size:2 .... .... .... .... .... \
71
- .resetvalue = 0 },
69
+ qd=%qd imm=%imm_vidup rn=%vidup_rn &vidup
72
- { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
70
+@viwdup .... .... . . size:2 .... .... .... .... .... \
73
- .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
71
+ qd=%qd imm=%imm_vidup rm=%vidup_rm rn=%vidup_rn &viwdup
74
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
72
+{
75
- { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
73
+ VIDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 111 . @vidup
76
- .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
74
+ VIWDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 ... . @viwdup
77
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
75
+}
78
- .type = ARM_CP_CONST, .resetvalue = 0 },
76
+{
79
- { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
77
+ VDDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 111 . @vidup
80
- .cp = 15, .opc1 = 6, .crm = 2,
78
+ VDWDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 ... . @viwdup
81
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
79
+}
82
- .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
80
+
83
- { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
81
# multiply-add long dual accumulate
84
- .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
82
# rdahi: bits [3:1] from insn, bit 0 is 1
85
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
83
# rdalo: bits [3:1] from insn, bit 0 is 0
86
- { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
84
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
87
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
85
index XXXXXXX..XXXXXXX 100644
88
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
86
--- a/target/arm/mve_helper.c
89
- { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
87
+++ b/target/arm/mve_helper.c
90
- .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
88
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
91
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
92
- { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
93
- .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
94
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
95
- { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
96
- .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
97
- .resetvalue = 0 },
98
- { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
99
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
100
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
101
- { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
102
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
103
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
104
- { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
105
- .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
106
- .resetvalue = 0 },
107
- { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
108
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
109
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
110
- { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
111
- .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
112
- .resetvalue = 0 },
113
- { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
114
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
115
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
116
- { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
117
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
118
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
119
- { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
120
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
121
- .access = PL2_RW, .accessfn = access_tda,
122
- .type = ARM_CP_CONST, .resetvalue = 0 },
123
- { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
124
- .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
125
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
126
- .type = ARM_CP_CONST, .resetvalue = 0 },
127
- { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
128
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
129
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
130
- { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
131
- .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
132
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
133
- { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
134
- .type = ARM_CP_CONST,
135
- .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
136
- .access = PL2_RW, .resetvalue = 0 },
137
-};
138
-
139
-/* Ditto, but for registers which exist in ARMv8 but not v7 */
140
-static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
141
- { .name = "HCR2", .state = ARM_CP_STATE_AA32,
142
- .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
143
- .access = PL2_RW,
144
- .type = ARM_CP_CONST, .resetvalue = 0 },
145
-};
146
-
147
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
89
{
148
{
90
return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
149
ARMCPU *cpu = env_archcpu(env);
91
}
150
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
92
+
151
define_arm_cp_regs(cpu, v8_idregs);
93
+#define DO_VIDUP(OP, ESIZE, TYPE, FN) \
152
define_arm_cp_regs(cpu, v8_cp_reginfo);
94
+ uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
153
}
95
+ uint32_t offset, uint32_t imm) \
154
- if (arm_feature(env, ARM_FEATURE_EL2)) {
96
+ { \
97
+ TYPE *d = vd; \
98
+ uint16_t mask = mve_element_mask(env); \
99
+ unsigned e; \
100
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
101
+ mergemask(&d[H##ESIZE(e)], offset, mask); \
102
+ offset = FN(offset, imm); \
103
+ } \
104
+ mve_advance_vpt(env); \
105
+ return offset; \
106
+ }
107
+
108
+#define DO_VIWDUP(OP, ESIZE, TYPE, FN) \
109
+ uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
110
+ uint32_t offset, uint32_t wrap, \
111
+ uint32_t imm) \
112
+ { \
113
+ TYPE *d = vd; \
114
+ uint16_t mask = mve_element_mask(env); \
115
+ unsigned e; \
116
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
117
+ mergemask(&d[H##ESIZE(e)], offset, mask); \
118
+ offset = FN(offset, wrap, imm); \
119
+ } \
120
+ mve_advance_vpt(env); \
121
+ return offset; \
122
+ }
123
+
124
+#define DO_VIDUP_ALL(OP, FN) \
125
+ DO_VIDUP(OP##b, 1, int8_t, FN) \
126
+ DO_VIDUP(OP##h, 2, int16_t, FN) \
127
+ DO_VIDUP(OP##w, 4, int32_t, FN)
128
+
129
+#define DO_VIWDUP_ALL(OP, FN) \
130
+ DO_VIWDUP(OP##b, 1, int8_t, FN) \
131
+ DO_VIWDUP(OP##h, 2, int16_t, FN) \
132
+ DO_VIWDUP(OP##w, 4, int32_t, FN)
133
+
134
+static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
135
+{
136
+ offset += imm;
137
+ if (offset == wrap) {
138
+ offset = 0;
139
+ }
140
+ return offset;
141
+}
142
+
143
+static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
144
+{
145
+ if (offset == 0) {
146
+ offset = wrap;
147
+ }
148
+ offset -= imm;
149
+ return offset;
150
+}
151
+
152
+DO_VIDUP_ALL(vidup, DO_ADD)
153
+DO_VIWDUP_ALL(viwdup, do_add_wrap)
154
+DO_VIWDUP_ALL(vdwdup, do_sub_wrap)
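[Not part of the patch: a minimal standalone sketch of the wrap behaviour that
do_add_wrap() above implements. With a starting offset of 6, a wrap point of 8
and an increment of 2, the values written to successive elements are
6, 0, 2, 4, 6, 0, and the final offset is what gets written back to Rn.]

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: mirrors do_add_wrap() from the hunk above. */
    int main(void)
    {
        uint32_t offset = 6, wrap = 8, imm = 2;
        for (int i = 0; i < 6; i++) {
            printf("%u ", offset);        /* value written to element i */
            offset += imm;
            if (offset == wrap) {
                offset = 0;
            }
        }
        printf("\n");                     /* prints: 6 0 2 4 6 0 */
        return 0;
    }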
155
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
156
index XXXXXXX..XXXXXXX 100644
157
--- a/target/arm/translate-mve.c
158
+++ b/target/arm/translate-mve.c
159
@@ -XXX,XX +XXX,XX @@
160
#include "translate.h"
161
#include "translate-a32.h"
162
163
+static inline int vidup_imm(DisasContext *s, int x)
164
+{
165
+ return 1 << x;
166
+}
167
+
168
/* Include the generated decoder */
169
#include "decode-mve.c.inc"
170
171
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
172
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
173
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
174
typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
175
+typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
176
+typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
177
178
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
179
static inline long mve_qreg_offset(unsigned reg)
180
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
181
mve_update_eci(s);
182
return true;
183
}
184
+
185
+static bool do_vidup(DisasContext *s, arg_vidup *a, MVEGenVIDUPFn *fn)
186
+{
187
+ TCGv_ptr qd;
188
+ TCGv_i32 rn;
189
+
155
+
190
+ /*
156
+ /*
191
+ * Vector increment/decrement with wrap and duplicate (VIDUP, VDDUP).
157
+ * Register the base EL2 cpregs.
192
+ * This fills the vector with elements of successively increasing
158
+ * Pre v8, these registers are implemented only as part of the
193
+ * or decreasing values, starting from Rn.
159
+ * Virtualization Extensions (EL2 present). Beginning with v8,
160
+ * if EL2 is missing but EL3 is enabled, mostly these become
161
+ * RES0 from EL3, with some specific exceptions.
194
+ */
162
+ */
195
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
163
+ if (arm_feature(env, ARM_FEATURE_EL2)
196
+ return false;
164
+ || (arm_feature(env, ARM_FEATURE_EL3)
197
+ }
165
+ && arm_feature(env, ARM_FEATURE_V8))) {
198
+ if (a->size == MO_64) {
166
uint64_t vmpidr_def = mpidr_read_val(env);
199
+ /* size 0b11 is another encoding */
167
ARMCPRegInfo vpidr_regs[] = {
200
+ return false;
168
{ .name = "VPIDR", .state = ARM_CP_STATE_AA32,
201
+ }
169
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
202
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
170
};
203
+ return true;
171
define_one_arm_cp_reg(cpu, &rvbar);
204
+ }
172
}
173
- } else {
174
- /* If EL2 is missing but higher ELs are enabled, we need to
175
- * register the no_el2 reginfos.
176
- */
177
- if (arm_feature(env, ARM_FEATURE_EL3)) {
178
- /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
179
- * of MIDR_EL1 and MPIDR_EL1.
180
- */
181
- ARMCPRegInfo vpidr_regs[] = {
182
- { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
183
- .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
184
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
185
- .type = ARM_CP_CONST, .resetvalue = cpu->midr,
186
- .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
187
- { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
188
- .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
189
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
190
- .type = ARM_CP_NO_RAW,
191
- .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
192
- };
193
- define_arm_cp_regs(cpu, vpidr_regs);
194
- define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
195
- if (arm_feature(env, ARM_FEATURE_V8)) {
196
- define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
197
- }
198
- }
199
}
205
+
200
+
206
+ qd = mve_qreg_ptr(a->qd);
201
+ /* Register the base EL3 cpregs. */
207
+ rn = load_reg(s, a->rn);
202
if (arm_feature(env, ARM_FEATURE_EL3)) {
208
+ fn(rn, cpu_env, qd, rn, tcg_constant_i32(a->imm));
203
define_arm_cp_regs(cpu, el3_cp_reginfo);
209
+ store_reg(s, a->rn, rn);
204
ARMCPRegInfo el3_regs[] = {
210
+ tcg_temp_free_ptr(qd);
211
+ mve_update_eci(s);
212
+ return true;
213
+}
214
+
215
+static bool do_viwdup(DisasContext *s, arg_viwdup *a, MVEGenVIWDUPFn *fn)
216
+{
217
+ TCGv_ptr qd;
218
+ TCGv_i32 rn, rm;
219
+
220
+ /*
221
+ * Vector increment/decrement with wrap and duplicate (VIWDUP, VDWDUP)
222
+ * This fills the vector with elements of successively increasing
223
+ * or decreasing values, starting from Rn. Rm specifies a point where
224
+ * the count wraps back around to 0. The updated offset is written back
225
+ * to Rn.
226
+ */
227
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
228
+ return false;
229
+ }
230
+ if (!fn || a->rm == 13 || a->rm == 15) {
231
+ /*
232
+ * size 0b11 is another encoding; Rm == 13 is UNPREDICTABLE;
+ * Rm == 15 is VIDUP, VDDUP.
234
+ */
235
+ return false;
236
+ }
237
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
238
+ return true;
239
+ }
240
+
241
+ qd = mve_qreg_ptr(a->qd);
242
+ rn = load_reg(s, a->rn);
243
+ rm = load_reg(s, a->rm);
244
+ fn(rn, cpu_env, qd, rn, rm, tcg_constant_i32(a->imm));
245
+ store_reg(s, a->rn, rn);
246
+ tcg_temp_free_ptr(qd);
247
+ tcg_temp_free_i32(rm);
248
+ mve_update_eci(s);
249
+ return true;
250
+}
251
+
252
+static bool trans_VIDUP(DisasContext *s, arg_vidup *a)
253
+{
254
+ static MVEGenVIDUPFn * const fns[] = {
255
+ gen_helper_mve_vidupb,
256
+ gen_helper_mve_viduph,
257
+ gen_helper_mve_vidupw,
258
+ NULL,
259
+ };
260
+ return do_vidup(s, a, fns[a->size]);
261
+}
262
+
263
+static bool trans_VDDUP(DisasContext *s, arg_vidup *a)
264
+{
265
+ static MVEGenVIDUPFn * const fns[] = {
266
+ gen_helper_mve_vidupb,
267
+ gen_helper_mve_viduph,
268
+ gen_helper_mve_vidupw,
269
+ NULL,
270
+ };
271
+ /* VDDUP is just like VIDUP but with a negative immediate */
272
+ a->imm = -a->imm;
273
+ return do_vidup(s, a, fns[a->size]);
274
+}
275
+
276
+static bool trans_VIWDUP(DisasContext *s, arg_viwdup *a)
277
+{
278
+ static MVEGenVIWDUPFn * const fns[] = {
279
+ gen_helper_mve_viwdupb,
280
+ gen_helper_mve_viwduph,
281
+ gen_helper_mve_viwdupw,
282
+ NULL,
283
+ };
284
+ return do_viwdup(s, a, fns[a->size]);
285
+}
286
+
287
+static bool trans_VDWDUP(DisasContext *s, arg_viwdup *a)
288
+{
289
+ static MVEGenVIWDUPFn * const fns[] = {
290
+ gen_helper_mve_vdwdupb,
291
+ gen_helper_mve_vdwduph,
292
+ gen_helper_mve_vdwdupw,
293
+ NULL,
294
+ };
295
+ return do_viwdup(s, a, fns[a->size]);
296
+}
297
--
205
--
298
2.20.1
206
2.25.1
299
300
diff view generated by jsdifflib
1
Implement the MVE integer vector comparison instructions that compare
1
From: Richard Henderson <richard.henderson@linaro.org>
2
each element against a scalar from a general purpose register. These
3
are "VCMP (vector)" encodings T4, T5 and T6 and "VPT (vector)"
4
encodings T4, T5 and T6.
5
2
6
We have to move the decodetree pattern for VPST, because it
3
Drop zcr_no_el2_reginfo and merge the 3 registers into one array,
7
overlaps with VCMP T4 with size = 0b11.
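
[Not part of the patch, values chosen arbitrarily: one way to picture the
scalar-compare result. A VCMPEQ.S32 against a scalar compares every 32-bit
element of Qn with the same general-purpose register value and, assuming no
predication or ECI is in effect, sets four predicate bits (one per byte) per
element in VPR.P0. A minimal standalone sketch:]

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: elements {1, 2, 3, 2} compared against rm = 2
     * give P0 = 0xf0f0 (elements 1 and 3 match). */
    int main(void)
    {
        uint32_t q[4] = { 1, 2, 3, 2 };   /* vector elements */
        uint32_t rm = 2;                  /* scalar operand */
        uint16_t p0 = 0;
        for (int e = 0; e < 4; e++) {
            if (q[e] == rm) {
                p0 |= 0xf << (e * 4);     /* 4 bytes per 32-bit element */
            }
        }
        printf("P0 = 0x%04x\n", p0);      /* prints: P0 = 0xf0f0 */
        return 0;
    }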
4
now that ZCR_EL2 can be squashed to RES0 and ZCR_EL3 dropped
5
while registering.
8
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220506180242.216785-4-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
---
11
---
12
target/arm/helper-mve.h | 32 +++++++++++++++++++++++++++
12
target/arm/helper.c | 55 ++++++++++++++-------------------------------
13
target/arm/mve.decode | 18 +++++++++++++---
13
1 file changed, 17 insertions(+), 38 deletions(-)
14
target/arm/mve_helper.c | 44 +++++++++++++++++++++++++++++++-------
15
target/arm/translate-mve.c | 43 +++++++++++++++++++++++++++++++++++++
16
4 files changed, 126 insertions(+), 11 deletions(-)
17
14
18
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
19
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-mve.h
17
--- a/target/arm/helper.c
21
+++ b/target/arm/helper-mve.h
18
+++ b/target/arm/helper.c
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vcmpgtw, TCG_CALL_NO_WG, void, env, ptr, ptr)
19
@@ -XXX,XX +XXX,XX @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
23
DEF_HELPER_FLAGS_3(mve_vcmpleb, TCG_CALL_NO_WG, void, env, ptr, ptr)
20
}
24
DEF_HELPER_FLAGS_3(mve_vcmpleh, TCG_CALL_NO_WG, void, env, ptr, ptr)
25
DEF_HELPER_FLAGS_3(mve_vcmplew, TCG_CALL_NO_WG, void, env, ptr, ptr)
26
+
27
+DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
28
+DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
29
+DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
30
+
31
+DEF_HELPER_FLAGS_3(mve_vcmpne_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
32
+DEF_HELPER_FLAGS_3(mve_vcmpne_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
33
+DEF_HELPER_FLAGS_3(mve_vcmpne_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
34
+
35
+DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
36
+DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
37
+DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
38
+
39
+DEF_HELPER_FLAGS_3(mve_vcmphi_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
40
+DEF_HELPER_FLAGS_3(mve_vcmphi_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
41
+DEF_HELPER_FLAGS_3(mve_vcmphi_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
42
+
43
+DEF_HELPER_FLAGS_3(mve_vcmpge_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
44
+DEF_HELPER_FLAGS_3(mve_vcmpge_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
45
+DEF_HELPER_FLAGS_3(mve_vcmpge_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
46
+
47
+DEF_HELPER_FLAGS_3(mve_vcmplt_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
48
+DEF_HELPER_FLAGS_3(mve_vcmplt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
49
+DEF_HELPER_FLAGS_3(mve_vcmplt_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
50
+
51
+DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
52
+DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
53
+DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
54
+
55
+DEF_HELPER_FLAGS_3(mve_vcmple_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
56
+DEF_HELPER_FLAGS_3(mve_vcmple_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
57
+DEF_HELPER_FLAGS_3(mve_vcmple_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
58
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/mve.decode
61
+++ b/target/arm/mve.decode
62
@@ -XXX,XX +XXX,XX @@
63
&vidup qd rn size imm
64
&viwdup qd rn rm size imm
65
&vcmp qm qn size mask
66
+&vcmp_scalar qn rm size mask
67
68
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
69
# Note that both Rn and Qd are 3 bits only (no D bit)
70
@@ -XXX,XX +XXX,XX @@
71
# Vector comparison; 4-bit Qm but 3-bit Qn
72
%mask_22_13 22:1 13:3
73
@vcmp .... .... .. size:2 qn:3 . .... .... .... .... &vcmp qm=%qm mask=%mask_22_13
74
+@vcmp_scalar .... .... .. size:2 qn:3 . .... .... .... rm:4 &vcmp_scalar \
75
+ mask=%mask_22_13
76
77
# Vector loads and stores
78
79
@@ -XXX,XX +XXX,XX @@ VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
80
rdahi=%rdahi rdalo=%rdalo
81
}
21
}
82
22
83
-# Predicate operations
23
-static const ARMCPRegInfo zcr_el1_reginfo = {
84
-VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
24
- .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
25
- .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
26
- .access = PL1_RW, .type = ARM_CP_SVE,
27
- .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
28
- .writefn = zcr_write, .raw_writefn = raw_write
29
-};
85
-
30
-
86
# Logical immediate operations (1 reg and modified-immediate)
31
-static const ARMCPRegInfo zcr_el2_reginfo = {
87
32
- .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
88
# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
33
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
89
@@ -XXX,XX +XXX,XX @@ VCMPGE 1111 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp
34
- .access = PL2_RW, .type = ARM_CP_SVE,
90
VCMPLT 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp
35
- .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
91
VCMPGT 1111 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp
36
- .writefn = zcr_write, .raw_writefn = raw_write
92
VCMPLE 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp
37
-};
93
+
38
-
94
+{
39
-static const ARMCPRegInfo zcr_no_el2_reginfo = {
95
+ VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
40
- .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
96
+ VCMPEQ_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0 1 0 0 .... @vcmp_scalar
41
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
97
+}
42
- .access = PL2_RW, .type = ARM_CP_SVE,
98
+VCMPNE_scalar 1111 1110 0 . .. ... 1 ... 0 1111 1 1 0 0 .... @vcmp_scalar
43
- .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
99
+VCMPCS_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0 1 1 0 .... @vcmp_scalar
44
-};
100
+VCMPHI_scalar 1111 1110 0 . .. ... 1 ... 0 1111 1 1 1 0 .... @vcmp_scalar
45
-
101
+VCMPGE_scalar 1111 1110 0 . .. ... 1 ... 1 1111 0 1 0 0 .... @vcmp_scalar
46
-static const ARMCPRegInfo zcr_el3_reginfo = {
102
+VCMPLT_scalar 1111 1110 0 . .. ... 1 ... 1 1111 1 1 0 0 .... @vcmp_scalar
47
- .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
103
+VCMPGT_scalar 1111 1110 0 . .. ... 1 ... 1 1111 0 1 1 0 .... @vcmp_scalar
48
- .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
104
+VCMPLE_scalar 1111 1110 0 . .. ... 1 ... 1 1111 1 1 1 0 .... @vcmp_scalar
49
- .access = PL3_RW, .type = ARM_CP_SVE,
105
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
50
- .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
106
index XXXXXXX..XXXXXXX 100644
51
- .writefn = zcr_write, .raw_writefn = raw_write
107
--- a/target/arm/mve_helper.c
52
+static const ARMCPRegInfo zcr_reginfo[] = {
108
+++ b/target/arm/mve_helper.c
53
+ { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
109
@@ -XXX,XX +XXX,XX @@ DO_VIWDUP_ALL(vdwdup, do_sub_wrap)
54
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
110
mve_advance_vpt(env); \
55
+ .access = PL1_RW, .type = ARM_CP_SVE,
56
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
57
+ .writefn = zcr_write, .raw_writefn = raw_write },
58
+ { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
59
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
60
+ .access = PL2_RW, .type = ARM_CP_SVE,
61
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
62
+ .writefn = zcr_write, .raw_writefn = raw_write },
63
+ { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
64
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
65
+ .access = PL3_RW, .type = ARM_CP_SVE,
66
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
67
+ .writefn = zcr_write, .raw_writefn = raw_write },
68
};
69
70
void hw_watchpoint_update(ARMCPU *cpu, int n)
71
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
111
}
72
}
112
73
113
-#define DO_VCMP_S(OP, FN) \
74
if (cpu_isar_feature(aa64_sve, cpu)) {
114
- DO_VCMP(OP##b, 1, int8_t, FN) \
75
- define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
115
- DO_VCMP(OP##h, 2, int16_t, FN) \
76
- if (arm_feature(env, ARM_FEATURE_EL2)) {
116
- DO_VCMP(OP##w, 4, int32_t, FN)
77
- define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
117
+#define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \
78
- } else {
118
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
79
- define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
119
+ uint32_t rm) \
80
- }
120
+ { \
81
- if (arm_feature(env, ARM_FEATURE_EL3)) {
121
+ TYPE *n = vn; \
82
- define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
122
+ uint16_t mask = mve_element_mask(env); \
83
- }
123
+ uint16_t eci_mask = mve_eci_mask(env); \
84
+ define_arm_cp_regs(cpu, zcr_reginfo);
124
+ uint16_t beatpred = 0; \
125
+ uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
126
+ unsigned e; \
127
+ for (e = 0; e < 16 / ESIZE; e++) { \
128
+ bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \
129
+ /* Comparison sets 0/1 bits for each byte in the element */ \
130
+ beatpred |= r * emask; \
131
+ emask <<= ESIZE; \
132
+ } \
133
+ beatpred &= mask; \
134
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
135
+ (beatpred & eci_mask); \
136
+ mve_advance_vpt(env); \
137
+ }
138
139
-#define DO_VCMP_U(OP, FN) \
140
- DO_VCMP(OP##b, 1, uint8_t, FN) \
141
- DO_VCMP(OP##h, 2, uint16_t, FN) \
142
- DO_VCMP(OP##w, 4, uint32_t, FN)
143
+#define DO_VCMP_S(OP, FN) \
144
+ DO_VCMP(OP##b, 1, int8_t, FN) \
145
+ DO_VCMP(OP##h, 2, int16_t, FN) \
146
+ DO_VCMP(OP##w, 4, int32_t, FN) \
147
+ DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \
148
+ DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \
149
+ DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN)
150
+
151
+#define DO_VCMP_U(OP, FN) \
152
+ DO_VCMP(OP##b, 1, uint8_t, FN) \
153
+ DO_VCMP(OP##h, 2, uint16_t, FN) \
154
+ DO_VCMP(OP##w, 4, uint32_t, FN) \
155
+ DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \
156
+ DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \
157
+ DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN)
158
159
#define DO_EQ(N, M) ((N) == (M))
160
#define DO_NE(N, M) ((N) != (M))
161
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
162
index XXXXXXX..XXXXXXX 100644
163
--- a/target/arm/translate-mve.c
164
+++ b/target/arm/translate-mve.c
165
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
166
typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
167
typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
168
typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
169
+typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
170
171
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
172
static inline long mve_qreg_offset(unsigned reg)
173
@@ -XXX,XX +XXX,XX @@ static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn)
174
return true;
175
}
176
177
+static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a,
178
+ MVEGenScalarCmpFn *fn)
179
+{
180
+ TCGv_ptr qn;
181
+ TCGv_i32 rm;
182
+
183
+ if (!dc_isar_feature(aa32_mve, s) || !fn || a->rm == 13) {
184
+ return false;
185
+ }
186
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
187
+ return true;
188
+ }
189
+
190
+ qn = mve_qreg_ptr(a->qn);
191
+ if (a->rm == 15) {
192
+ /* Encoding Rm=0b1111 means "constant zero" */
193
+ rm = tcg_constant_i32(0);
194
+ } else {
195
+ rm = load_reg(s, a->rm);
196
+ }
197
+ fn(cpu_env, qn, rm);
198
+ tcg_temp_free_ptr(qn);
199
+ tcg_temp_free_i32(rm);
200
+ if (a->mask) {
201
+ /* VPT */
202
+ gen_vpst(s, a->mask);
203
+ }
204
+ mve_update_eci(s);
205
+ return true;
206
+}
207
+
208
#define DO_VCMP(INSN, FN) \
209
static bool trans_##INSN(DisasContext *s, arg_vcmp *a) \
210
{ \
211
@@ -XXX,XX +XXX,XX @@ static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn)
212
NULL, \
213
}; \
214
return do_vcmp(s, a, fns[a->size]); \
215
+ } \
216
+ static bool trans_##INSN##_scalar(DisasContext *s, \
217
+ arg_vcmp_scalar *a) \
218
+ { \
219
+ static MVEGenScalarCmpFn * const fns[] = { \
220
+ gen_helper_mve_##FN##_scalarb, \
221
+ gen_helper_mve_##FN##_scalarh, \
222
+ gen_helper_mve_##FN##_scalarw, \
223
+ NULL, \
224
+ }; \
225
+ return do_vcmp_scalar(s, a, fns[a->size]); \
226
}
85
}
227
86
228
DO_VCMP(VCMPEQ, vcmpeq)
87
#ifdef TARGET_AARCH64
229
--
2.25.1

1
In do_sqrshl48_d() and do_uqrshl48_d() we got some of the edge
1
From: Richard Henderson <richard.henderson@linaro.org>
2
cases wrong and failed to saturate correctly:
3
2
4
(1) In do_sqrshl48_d() we used the same code that do_shrshl_bhs()
3
This register is present for either VHE or Debugv8p2.
5
does to obtain the saturated most-negative and most-positive 48-bit
6
signed values for the large-shift-left case. This gives (1 << 47)
7
for saturate-to-most-negative, but we weren't sign-extending this
8
value to the 64-bit output as the pseudocode requires.
9
4
10
(2) For left shifts by less than 48, we copied the "8/16 bit" code
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
from do_sqrshl_bhs() and do_uqrshl_bhs(). This doesn't do the right
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
thing because it assumes the C type we're working with is at least
7
Message-id: 20220506180242.216785-5-richard.henderson@linaro.org
13
twice the number of bits we're saturating to (so that a shift left by
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
bits-1 can't shift anything off the top of the value). This isn't
9
---
15
true for bits == 48, so we would incorrectly return 0 rather than the
10
target/arm/helper.c | 15 +++++++++++----
16
most-positive value for situations like "shift (1 << 44) right by
11
1 file changed, 11 insertions(+), 4 deletions(-)
17
20". Instead check for saturation by doing the shift and signextend
18
and then testing whether shifting back left again gives the original
19
value.
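
[Not part of the patch: a minimal standalone sketch of that example, with the
48-bit sign extension written out by hand in place of QEMU's sextract64().
With src = 1 << 44 and shift = 20, every set bit moves above bit 47, so the
48-bit extraction is 0; the old "val == extval" test compares 0 with 0 and
misses the overflow, while the new "src == (extval >> shift)" test compares
1 << 44 with 0 and correctly saturates.]

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Illustrative only: the (1 << 44) case from the commit message. */
    int main(void)
    {
        int64_t src = (int64_t)1 << 44;
        int shift = 20;
        uint64_t val = (uint64_t)src << shift;        /* wraps to 0 */
        int64_t extval = (int64_t)(val << 16) >> 16;  /* 48-bit sign-extend: 0 */
        int old_check = (val == (uint64_t)extval);    /* 1: old test sees no overflow */
        int new_check = (src == (extval >> shift));   /* 0: new test saturates */
        printf("extval=%" PRId64 " old=%d new=%d\n", extval, old_check, new_check);
        return 0;
    }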
20
12
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
diff --git a/target/arm/helper.c b/target/arm/helper.c
22
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
23
---
24
target/arm/mve_helper.c | 12 +++++-------
25
1 file changed, 5 insertions(+), 7 deletions(-)
26
27
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
28
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/mve_helper.c
15
--- a/target/arm/helper.c
30
+++ b/target/arm/mve_helper.c
16
+++ b/target/arm/helper.c
31
@@ -XXX,XX +XXX,XX @@ static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
17
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo jazelle_regs[] = {
32
}
18
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
33
return src >> -shift;
19
};
34
} else if (shift < 48) {
20
35
- int64_t val = src << shift;
21
+static const ARMCPRegInfo contextidr_el2 = {
36
- int64_t extval = sextract64(val, 0, 48);
22
+ .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
37
- if (!sat || val == extval) {
23
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
38
+ int64_t extval = sextract64(src << shift, 0, 48);
24
+ .access = PL2_RW,
39
+ if (!sat || src == (extval >> shift)) {
25
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
40
return extval;
26
+};
41
}
27
+
42
} else if (!sat || src == 0) {
28
static const ARMCPRegInfo vhe_reginfo[] = {
43
@@ -XXX,XX +XXX,XX @@ static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
29
- { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
30
- .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
31
- .access = PL2_RW,
32
- .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
33
{ .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
34
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
35
.access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
36
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
37
define_one_arm_cp_reg(cpu, &ssbs_reginfo);
44
}
38
}
45
39
46
*sat = 1;
40
+ if (cpu_isar_feature(aa64_vh, cpu) ||
47
- return (1ULL << 47) - (src >= 0);
41
+ cpu_isar_feature(aa64_debugv8p2, cpu)) {
48
+ return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17);
42
+ define_one_arm_cp_reg(cpu, &contextidr_el2);
49
}
43
+ }
50
44
if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
51
/* Operate on 64-bit values, but saturate at 48 bits */
45
define_arm_cp_regs(cpu, vhe_reginfo);
52
@@ -XXX,XX +XXX,XX @@ static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
46
}
53
return extval;
54
}
55
} else if (shift < 48) {
56
- uint64_t val = src << shift;
57
- uint64_t extval = extract64(val, 0, 48);
58
- if (!sat || val == extval) {
59
+ uint64_t extval = extract64(src << shift, 0, 48);
60
+ if (!sat || src == (extval >> shift)) {
61
return extval;
62
}
63
} else if (!sat || src == 0) {
64
--
2.25.1

1
Implement the MVE narrowing move insns VMOVN, VQMOVN and VQMOVUN.
1
From: Richard Henderson <richard.henderson@linaro.org>
2
These take a double-width input, narrow it (possibly saturating) and
2
3
store the result to either the top or bottom half of the output
3
Previously we were defining some of these in user-only mode,
4
element.
4
but none of them are accessible from user-only, therefore
5
5
define them only in system mode.
6
7
This will shortly be used from cpu_tcg.c also.
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20220506180242.216785-6-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
---
13
---
9
target/arm/helper-mve.h | 20 ++++++++++
14
target/arm/internals.h | 6 ++++
10
target/arm/mve.decode | 12 ++++++
15
target/arm/cpu64.c | 64 +++---------------------------------------
11
target/arm/mve_helper.c | 78 ++++++++++++++++++++++++++++++++++++++
16
target/arm/cpu_tcg.c | 59 ++++++++++++++++++++++++++++++++++++++
12
target/arm/translate-mve.c | 22 +++++++++++
17
3 files changed, 69 insertions(+), 60 deletions(-)
13
4 files changed, 132 insertions(+)
18
14
19
diff --git a/target/arm/internals.h b/target/arm/internals.h
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
21
--- a/target/arm/internals.h
18
+++ b/target/arm/helper-mve.h
22
+++ b/target/arm/internals.h
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
23
@@ -XXX,XX +XXX,XX @@ int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
20
DEF_HELPER_FLAGS_3(mve_vfnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
24
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
21
DEF_HELPER_FLAGS_3(mve_vfnegs, TCG_CALL_NO_WG, void, env, ptr, ptr)
25
#endif
22
26
23
+DEF_HELPER_FLAGS_3(mve_vmovnbb, TCG_CALL_NO_WG, void, env, ptr, ptr)
27
+#ifdef CONFIG_USER_ONLY
24
+DEF_HELPER_FLAGS_3(mve_vmovnbh, TCG_CALL_NO_WG, void, env, ptr, ptr)
28
+static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
25
+DEF_HELPER_FLAGS_3(mve_vmovntb, TCG_CALL_NO_WG, void, env, ptr, ptr)
29
+#else
26
+DEF_HELPER_FLAGS_3(mve_vmovnth, TCG_CALL_NO_WG, void, env, ptr, ptr)
30
+void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
27
+
31
+#endif
28
+DEF_HELPER_FLAGS_3(mve_vqmovunbb, TCG_CALL_NO_WG, void, env, ptr, ptr)
32
+
29
+DEF_HELPER_FLAGS_3(mve_vqmovunbh, TCG_CALL_NO_WG, void, env, ptr, ptr)
33
#endif
30
+DEF_HELPER_FLAGS_3(mve_vqmovuntb, TCG_CALL_NO_WG, void, env, ptr, ptr)
34
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
31
+DEF_HELPER_FLAGS_3(mve_vqmovunth, TCG_CALL_NO_WG, void, env, ptr, ptr)
32
+
33
+DEF_HELPER_FLAGS_3(mve_vqmovnbsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
34
+DEF_HELPER_FLAGS_3(mve_vqmovnbsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
35
+DEF_HELPER_FLAGS_3(mve_vqmovntsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
36
+DEF_HELPER_FLAGS_3(mve_vqmovntsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
37
+
38
+DEF_HELPER_FLAGS_3(mve_vqmovnbub, TCG_CALL_NO_WG, void, env, ptr, ptr)
39
+DEF_HELPER_FLAGS_3(mve_vqmovnbuh, TCG_CALL_NO_WG, void, env, ptr, ptr)
40
+DEF_HELPER_FLAGS_3(mve_vqmovntub, TCG_CALL_NO_WG, void, env, ptr, ptr)
41
+DEF_HELPER_FLAGS_3(mve_vqmovntuh, TCG_CALL_NO_WG, void, env, ptr, ptr)
42
+
43
DEF_HELPER_FLAGS_4(mve_vand, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
44
DEF_HELPER_FLAGS_4(mve_vbic, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
45
DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
46
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
47
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/mve.decode
36
--- a/target/arm/cpu64.c
49
+++ b/target/arm/mve.decode
37
+++ b/target/arm/cpu64.c
50
@@ -XXX,XX +XXX,XX @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
38
@@ -XXX,XX +XXX,XX @@
51
VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
39
#include "hvf_arm.h"
52
VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
40
#include "qapi/visitor.h"
53
41
#include "hw/qdev-properties.h"
54
+ VQMOVUNB 111 0 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op
42
-#include "cpregs.h"
55
+ VQMOVN_BS 111 0 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op
43
+#include "internals.h"
56
+
44
57
VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
45
46
-#ifndef CONFIG_USER_ONLY
47
-static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
48
-{
49
- ARMCPU *cpu = env_archcpu(env);
50
-
51
- /* Number of cores is in [25:24]; otherwise we RAZ */
52
- return (cpu->core_count - 1) << 24;
53
-}
54
-#endif
55
-
56
-static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
57
-#ifndef CONFIG_USER_ONLY
58
- { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
59
- .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
60
- .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
61
- .writefn = arm_cp_write_ignore },
62
- { .name = "L2CTLR",
63
- .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
64
- .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
65
- .writefn = arm_cp_write_ignore },
66
-#endif
67
- { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
68
- .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
69
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
70
- { .name = "L2ECTLR",
71
- .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
72
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
73
- { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
74
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
75
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
76
- { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
77
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
78
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
79
- { .name = "CPUACTLR",
80
- .cp = 15, .opc1 = 0, .crm = 15,
81
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
82
- { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
83
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
84
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
85
- { .name = "CPUECTLR",
86
- .cp = 15, .opc1 = 1, .crm = 15,
87
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
88
- { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
89
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
90
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
91
- { .name = "CPUMERRSR",
92
- .cp = 15, .opc1 = 2, .crm = 15,
93
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
94
- { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
95
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
96
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
97
- { .name = "L2MERRSR",
98
- .cp = 15, .opc1 = 3, .crm = 15,
99
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
100
-};
101
-
102
static void aarch64_a57_initfn(Object *obj)
103
{
104
ARMCPU *cpu = ARM_CPU(obj);
105
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
106
cpu->gic_num_lrs = 4;
107
cpu->gic_vpribits = 5;
108
cpu->gic_vprebits = 5;
109
- define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
110
+ define_cortex_a72_a57_a53_cp_reginfo(cpu);
58
}
111
}
59
112
60
@@ -XXX,XX +XXX,XX @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
113
static void aarch64_a53_initfn(Object *obj)
61
VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
114
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
62
VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
115
cpu->gic_num_lrs = 4;
63
116
cpu->gic_vpribits = 5;
64
+ VMOVNB 111 1 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op
117
cpu->gic_vprebits = 5;
65
+ VQMOVN_BU 111 1 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op
118
- define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
66
+
119
+ define_cortex_a72_a57_a53_cp_reginfo(cpu);
67
VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
68
}
120
}
69
121
70
@@ -XXX,XX +XXX,XX @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
122
static void aarch64_a72_initfn(Object *obj)
71
VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
123
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
72
VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
124
cpu->gic_num_lrs = 4;
73
125
cpu->gic_vpribits = 5;
74
+ VQMOVUNT 111 0 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op
126
cpu->gic_vprebits = 5;
75
+ VQMOVN_TS 111 0 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op
127
- define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
76
+
128
+ define_cortex_a72_a57_a53_cp_reginfo(cpu);
77
VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
78
}
129
}
79
130
80
@@ -XXX,XX +XXX,XX @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
131
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
81
VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
132
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
82
VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
83
84
+ VMOVNT 111 1 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op
85
+ VQMOVN_TU 111 1 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op
86
+
87
VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
88
}
89
90
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
91
index XXXXXXX..XXXXXXX 100644
133
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/mve_helper.c
134
--- a/target/arm/cpu_tcg.c
93
+++ b/target/arm/mve_helper.c
135
+++ b/target/arm/cpu_tcg.c
94
@@ -XXX,XX +XXX,XX @@ DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
136
@@ -XXX,XX +XXX,XX @@
95
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
137
#endif
96
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
138
#include "cpregs.h"
97
139
98
+#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
140
+#ifndef CONFIG_USER_ONLY
99
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
141
+static uint64_t l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
100
+ { \
142
+{
101
+ LTYPE *m = vm; \
143
+ ARMCPU *cpu = env_archcpu(env);
102
+ TYPE *d = vd; \
144
+
103
+ uint16_t mask = mve_element_mask(env); \
145
+ /* Number of cores is in [25:24]; otherwise we RAZ */
104
+ unsigned le; \
146
+ return (cpu->core_count - 1) << 24;
105
+ mask >>= ESIZE * TOP; \
147
+}
106
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
148
+
107
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], \
149
+static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
108
+ m[H##LESIZE(le)], mask); \
150
+ { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
109
+ } \
151
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
110
+ mve_advance_vpt(env); \
152
+ .access = PL1_RW, .readfn = l2ctlr_read,
111
+ }
153
+ .writefn = arm_cp_write_ignore },
112
+
154
+ { .name = "L2CTLR",
113
+DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t)
155
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
114
+DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t)
156
+ .access = PL1_RW, .readfn = l2ctlr_read,
115
+DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t)
157
+ .writefn = arm_cp_write_ignore },
116
+DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t)
158
+ { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
117
+
159
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
118
+#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
160
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
119
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
161
+ { .name = "L2ECTLR",
120
+ { \
162
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
121
+ LTYPE *m = vm; \
163
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
122
+ TYPE *d = vd; \
164
+ { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
123
+ uint16_t mask = mve_element_mask(env); \
165
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
124
+ bool qc = false; \
166
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
125
+ unsigned le; \
167
+ { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
126
+ mask >>= ESIZE * TOP; \
168
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
127
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
169
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
128
+ bool sat = false; \
170
+ { .name = "CPUACTLR",
129
+ TYPE r = FN(m[H##LESIZE(le)], &sat); \
171
+ .cp = 15, .opc1 = 0, .crm = 15,
130
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
172
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
131
+ qc |= sat & mask & 1; \
173
+ { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
132
+ } \
174
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
133
+ if (qc) { \
175
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
134
+ env->vfp.qc[0] = qc; \
176
+ { .name = "CPUECTLR",
135
+ } \
177
+ .cp = 15, .opc1 = 1, .crm = 15,
136
+ mve_advance_vpt(env); \
178
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
137
+ }
179
+ { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
138
+
180
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
139
+#define DO_VMOVN_SAT_UB(BOP, TOP, FN) \
181
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
140
+ DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
182
+ { .name = "CPUMERRSR",
141
+ DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
183
+ .cp = 15, .opc1 = 2, .crm = 15,
142
+
184
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
143
+#define DO_VMOVN_SAT_UH(BOP, TOP, FN) \
185
+ { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
144
+ DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
186
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
145
+ DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
187
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
146
+
188
+ { .name = "L2MERRSR",
147
+#define DO_VMOVN_SAT_SB(BOP, TOP, FN) \
189
+ .cp = 15, .opc1 = 3, .crm = 15,
148
+ DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
190
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
149
+ DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
191
+};
150
+
192
+
151
+#define DO_VMOVN_SAT_SH(BOP, TOP, FN) \
193
+void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu)
152
+ DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
194
+{
153
+ DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
195
+ define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
154
+
196
+}
155
+#define DO_VQMOVN_SB(N, SATP) \
197
+#endif /* !CONFIG_USER_ONLY */
156
+ do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP)
198
+
157
+#define DO_VQMOVN_UB(N, SATP) \
199
/* CPU models. These are not needed for the AArch64 linux-user build. */
158
+ do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP)
200
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
159
+#define DO_VQMOVUN_B(N, SATP) \
201
160
+ do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP)
161
+
162
+#define DO_VQMOVN_SH(N, SATP) \
163
+ do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP)
164
+#define DO_VQMOVN_UH(N, SATP) \
165
+ do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP)
166
+#define DO_VQMOVUN_H(N, SATP) \
167
+ do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP)
168
+
169
+DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB)
170
+DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH)
171
+DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB)
172
+DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH)
173
+DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B)
174
+DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H)
175
+
176
uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
177
uint32_t shift)
178
{
179
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
180
index XXXXXXX..XXXXXXX 100644
181
--- a/target/arm/translate-mve.c
182
+++ b/target/arm/translate-mve.c
183
@@ -XXX,XX +XXX,XX @@ DO_1OP(VCLS, vcls)
184
DO_1OP(VABS, vabs)
185
DO_1OP(VNEG, vneg)
186
187
+/* Narrowing moves: only size 0 and 1 are valid */
188
+#define DO_VMOVN(INSN, FN) \
189
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
190
+ { \
191
+ static MVEGenOneOpFn * const fns[] = { \
192
+ gen_helper_mve_##FN##b, \
193
+ gen_helper_mve_##FN##h, \
194
+ NULL, \
195
+ NULL, \
196
+ }; \
197
+ return do_1op(s, a, fns[a->size]); \
198
+ }
199
+
200
+DO_VMOVN(VMOVNB, vmovnb)
201
+DO_VMOVN(VMOVNT, vmovnt)
202
+DO_VMOVN(VQMOVUNB, vqmovunb)
203
+DO_VMOVN(VQMOVUNT, vqmovunt)
204
+DO_VMOVN(VQMOVN_BS, vqmovnbs)
205
+DO_VMOVN(VQMOVN_TS, vqmovnts)
206
+DO_VMOVN(VQMOVN_BU, vqmovnbu)
207
+DO_VMOVN(VQMOVN_TU, vqmovntu)
208
+
209
static bool trans_VREV16(DisasContext *s, arg_1op *a)
210
{
211
static MVEGenOneOpFn * const fns[] = {
212
--
2.25.1

1
All the users of the vmlaldav formats have an 'x' bit in bit 12 and an
1
From: Richard Henderson <richard.henderson@linaro.org>
2
'a' bit in bit 5; move these to the format rather than specifying them
3
in each insn pattern.
4
2
3
Instead of starting with cortex-a15 and adding v8 features to
4
a v7 cpu, begin with a v8 cpu stripped of its aarch64 features.
5
This fixes the long-standing to-do where we only enabled v8
6
features for user-only.
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220506180242.216785-7-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
---
12
---
8
target/arm/mve.decode | 16 ++++++++--------
13
target/arm/cpu_tcg.c | 151 ++++++++++++++++++++++++++-----------------
9
1 file changed, 8 insertions(+), 8 deletions(-)
14
1 file changed, 92 insertions(+), 59 deletions(-)
10
15
11
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
16
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
12
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/mve.decode
18
--- a/target/arm/cpu_tcg.c
14
+++ b/target/arm/mve.decode
19
+++ b/target/arm/cpu_tcg.c
15
@@ -XXX,XX +XXX,XX @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
20
@@ -XXX,XX +XXX,XX @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
16
21
static void arm_max_initfn(Object *obj)
17
&vmlaldav rdahi rdalo size qn qm x a
22
{
18
23
ARMCPU *cpu = ARM_CPU(obj);
19
-@vmlaldav .... .... . ... ... . ... . .... .... qm:3 . \
24
+ uint32_t t;
20
+@vmlaldav .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
25
21
qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
26
- cortex_a15_initfn(obj);
22
-@vmlaldav_nosz .... .... . ... ... . ... . .... .... qm:3 . \
27
+ /* aarch64_a57_initfn, advertising none of the aarch64 features */
23
+@vmlaldav_nosz .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
28
+ cpu->dtb_compatible = "arm,cortex-a57";
24
qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
29
+ set_feature(&cpu->env, ARM_FEATURE_V8);
25
-VMLALDAV_S 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
30
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
26
-VMLALDAV_U 1111 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
31
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
27
+VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
32
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
28
+VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
33
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
29
34
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
30
-VMLSLDAV 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav
35
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
31
+VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
36
+ cpu->midr = 0x411fd070;
32
37
+ cpu->revidr = 0x00000000;
33
-VRMLALDAVH_S 1110 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz
38
+ cpu->reset_fpsid = 0x41034070;
34
-VRMLALDAVH_U 1111 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz
39
+ cpu->isar.mvfr0 = 0x10110222;
35
+VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
40
+ cpu->isar.mvfr1 = 0x12111111;
36
+VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
41
+ cpu->isar.mvfr2 = 0x00000043;
37
42
+ cpu->ctr = 0x8444c004;
38
-VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_nosz
43
+ cpu->reset_sctlr = 0x00c50838;
39
+VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
44
+ cpu->isar.id_pfr0 = 0x00000131;
40
45
+ cpu->isar.id_pfr1 = 0x00011011;
41
# Scalar operations
46
+ cpu->isar.id_dfr0 = 0x03010066;
47
+ cpu->id_afr0 = 0x00000000;
48
+ cpu->isar.id_mmfr0 = 0x10101105;
49
+ cpu->isar.id_mmfr1 = 0x40000000;
50
+ cpu->isar.id_mmfr2 = 0x01260000;
51
+ cpu->isar.id_mmfr3 = 0x02102211;
52
+ cpu->isar.id_isar0 = 0x02101110;
53
+ cpu->isar.id_isar1 = 0x13112111;
54
+ cpu->isar.id_isar2 = 0x21232042;
55
+ cpu->isar.id_isar3 = 0x01112131;
56
+ cpu->isar.id_isar4 = 0x00011142;
57
+ cpu->isar.id_isar5 = 0x00011121;
58
+ cpu->isar.id_isar6 = 0;
59
+ cpu->isar.dbgdidr = 0x3516d000;
60
+ cpu->clidr = 0x0a200023;
61
+ cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
62
+ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
63
+ cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
64
+ define_cortex_a72_a57_a53_cp_reginfo(cpu);
65
66
- /* old-style VFP short-vector support */
67
- cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
68
+ /* Add additional features supported by QEMU */
69
+ t = cpu->isar.id_isar5;
70
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2);
71
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
72
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
73
+ t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
74
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
75
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
76
+ cpu->isar.id_isar5 = t;
77
+
78
+ t = cpu->isar.id_isar6;
79
+ t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
80
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1);
81
+ t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
82
+ t = FIELD_DP32(t, ID_ISAR6, SB, 1);
83
+ t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
84
+ t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
85
+ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
86
+ cpu->isar.id_isar6 = t;
87
+
88
+ t = cpu->isar.mvfr1;
89
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
90
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
91
+ cpu->isar.mvfr1 = t;
92
+
93
+ t = cpu->isar.mvfr2;
94
+ t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
95
+ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
96
+ cpu->isar.mvfr2 = t;
97
+
98
+ t = cpu->isar.id_mmfr3;
99
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
100
+ cpu->isar.id_mmfr3 = t;
101
+
102
+ t = cpu->isar.id_mmfr4;
103
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
104
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
105
+ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
106
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
107
+ cpu->isar.id_mmfr4 = t;
108
+
109
+ t = cpu->isar.id_pfr0;
110
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1);
111
+ cpu->isar.id_pfr0 = t;
112
+
113
+ t = cpu->isar.id_pfr2;
114
+ t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
115
+ cpu->isar.id_pfr2 = t;
116
117
#ifdef CONFIG_USER_ONLY
118
/*
119
- * We don't set these in system emulation mode for the moment,
120
- * since we don't correctly set (all of) the ID registers to
121
- * advertise them.
122
+ * Break with true ARMv8 and add back old-style VFP short-vector support.
123
+ * Only do this for user-mode, where -cpu max is the default, so that
124
+ * older v6 and v7 programs are more likely to work without adjustment.
125
*/
126
- set_feature(&cpu->env, ARM_FEATURE_V8);
127
- {
128
- uint32_t t;
129
-
130
- t = cpu->isar.id_isar5;
131
- t = FIELD_DP32(t, ID_ISAR5, AES, 2);
132
- t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
133
- t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
134
- t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
135
- t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
136
- t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
137
- cpu->isar.id_isar5 = t;
138
-
139
- t = cpu->isar.id_isar6;
140
- t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
141
- t = FIELD_DP32(t, ID_ISAR6, DP, 1);
142
- t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
143
- t = FIELD_DP32(t, ID_ISAR6, SB, 1);
144
- t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
145
- t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
146
- t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
147
- cpu->isar.id_isar6 = t;
148
-
149
- t = cpu->isar.mvfr1;
150
- t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
151
- t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
152
- cpu->isar.mvfr1 = t;
153
-
154
- t = cpu->isar.mvfr2;
155
- t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
156
- t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
157
- cpu->isar.mvfr2 = t;
158
-
159
- t = cpu->isar.id_mmfr3;
160
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
161
- cpu->isar.id_mmfr3 = t;
162
-
163
- t = cpu->isar.id_mmfr4;
164
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
165
- t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
166
- t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
167
- t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
168
- cpu->isar.id_mmfr4 = t;
169
-
170
- t = cpu->isar.id_pfr0;
171
- t = FIELD_DP32(t, ID_PFR0, DIT, 1);
172
- cpu->isar.id_pfr0 = t;
173
-
174
- t = cpu->isar.id_pfr2;
175
- t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
176
- cpu->isar.id_pfr2 = t;
177
- }
178
-#endif /* CONFIG_USER_ONLY */
179
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
180
+#endif
181
}
182
#endif /* !TARGET_AARCH64 */
42
183
43
--
2.25.1

1
From: Sebastian Meyer <meyer@absint.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
With gdb 9.0 and better it is possible to connect to a gdbstub
3
We set this for qemu-system-aarch64, but failed to do so
4
over unix sockets, which is better than a TCP socket connection
4
for the strictly 32-bit emulation.
5
in some situations. The QEMU command line to set this up is
6
non-obvious; document it.
7
5
8
Signed-off-by: Sebastian Meyer <meyer@absint.com>
6
Fixes: 3bec78447a9 ("target/arm: Provide ARMv8.4-PMU in '-cpu max'")
9
Message-id: 162867284829.27377.4784930719350564918-0@git.sr.ht
10
[PMM: Tweaked commit message; adjusted wording in a couple of
11
places; fixed rST formatting issue; moved section up out of
12
the 'advanced debugging options' subsection]
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220506180242.216785-8-richard.henderson@linaro.org
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
11
---
17
docs/system/gdb.rst | 26 +++++++++++++++++++++++++-
12
target/arm/cpu_tcg.c | 4 ++++
18
1 file changed, 25 insertions(+), 1 deletion(-)
13
1 file changed, 4 insertions(+)
19
14
20
diff --git a/docs/system/gdb.rst b/docs/system/gdb.rst
15
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
21
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
22
--- a/docs/system/gdb.rst
17
--- a/target/arm/cpu_tcg.c
23
+++ b/docs/system/gdb.rst
18
+++ b/target/arm/cpu_tcg.c
24
@@ -XXX,XX +XXX,XX @@ The ``-s`` option will make QEMU listen for an incoming connection
19
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
25
from gdb on TCP port 1234, and ``-S`` will make QEMU not start the
20
t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
26
guest until you tell it to from gdb. (If you want to specify which
21
cpu->isar.id_pfr2 = t;
27
TCP port to use or to use something other than TCP for the gdbstub
22
28
-connection, use the ``-gdb dev`` option instead of ``-s``.)
23
+ t = cpu->isar.id_dfr0;
29
+connection, use the ``-gdb dev`` option instead of ``-s``. See
24
+ t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
30
+`Using unix sockets`_ for an example.)
25
+ cpu->isar.id_dfr0 = t;
31
32
.. parsed-literal::
33
34
@@ -XXX,XX +XXX,XX @@ not just those in the cluster you are currently working on::
35
36
(gdb) set schedule-multiple on
37
38
+Using unix sockets
39
+==================
40
+
26
+
41
+An alternate method for connecting gdb to the QEMU gdbstub is to use
27
#ifdef CONFIG_USER_ONLY
42
+a unix socket (if supported by your operating system). This is useful when
28
/*
43
+running several tests in parallel, or if you do not have a known free TCP
29
* Break with true ARMv8 and add back old-style VFP short-vector support.
44
+port (e.g. when running automated tests).
45
+
46
+First create a chardev with the appropriate options, then
47
+instruct the gdbserver to use that device:
48
+
49
+.. parsed-literal::
50
+
51
+ |qemu_system| -chardev socket,path=/tmp/gdb-socket,server=on,wait=off,id=gdb0 -gdb chardev:gdb0 -S ...
52
+
53
+Start gdb as before, but this time connect using the path to
54
+the socket::
55
+
56
+ (gdb) target remote /tmp/gdb-socket
57
+
58
+Note that to use a unix socket for the connection you will need
59
+gdb version 9.0 or newer.
60
+
61
Advanced debugging options
62
==========================
63
64
--
2.25.1
1
Implement the MVE VPNOT insn, which inverts the bits in VPR.P0
1
From: Richard Henderson <richard.henderson@linaro.org>
2
(subject to both predication and to beatwise execution).
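A rough C model of that P0 update (a sketch only, not the QEMU helper; the mask parameter names are invented for the example):

    #include <stdint.h>

    /* Beats outside eci_mask are left untouched, predicated-off lanes go
     * to 0, and everything else is inverted.  Illustrative only.
     */
    static uint16_t vpnot_p0_model(uint16_t p0, uint16_t pred_mask,
                                   uint16_t eci_mask)
    {
        uint16_t beatpred = ~p0 & pred_mask;
        return (p0 & ~eci_mask) | (beatpred & eci_mask);
    }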
3
2
3
Share the code to set AArch32 max features so that we no
4
longer have code drift between qemu{-system,}-{arm,aarch64}.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220506180242.216785-9-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
---
10
---
7
target/arm/helper-mve.h | 1 +
11
target/arm/internals.h | 2 +
8
target/arm/mve.decode | 1 +
12
target/arm/cpu64.c | 50 +-----------------
9
target/arm/mve_helper.c | 17 +++++++++++++++++
13
target/arm/cpu_tcg.c | 114 ++++++++++++++++++++++-------------------
10
target/arm/translate-mve.c | 19 +++++++++++++++++++
14
3 files changed, 65 insertions(+), 101 deletions(-)
11
4 files changed, 38 insertions(+)
12
15
13
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
16
diff --git a/target/arm/internals.h b/target/arm/internals.h
14
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-mve.h
18
--- a/target/arm/internals.h
16
+++ b/target/arm/helper-mve.h
19
+++ b/target/arm/internals.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
20
@@ -XXX,XX +XXX,XX @@ static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
18
DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
21
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
19
22
#endif
20
DEF_HELPER_FLAGS_4(mve_vpsel, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
23
21
+DEF_HELPER_FLAGS_1(mve_vpnot, TCG_CALL_NO_WG, void, env)
24
+void aa32_max_features(ARMCPU *cpu);
22
25
+
23
DEF_HELPER_FLAGS_4(mve_vaddb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
26
#endif
24
DEF_HELPER_FLAGS_4(mve_vaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
27
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
25
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
26
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/mve.decode
29
--- a/target/arm/cpu64.c
28
+++ b/target/arm/mve.decode
30
+++ b/target/arm/cpu64.c
29
@@ -XXX,XX +XXX,XX @@ VCMPGT 1111 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp
31
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
30
VCMPLE 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp
31
32
{
32
{
33
+ VPNOT 1111 1110 0 0 11 000 1 000 0 1111 0100 1101
33
ARMCPU *cpu = ARM_CPU(obj);
34
VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
34
uint64_t t;
35
VCMPEQ_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0 1 0 0 .... @vcmp_scalar
35
- uint32_t u;
36
}
36
37
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
37
if (kvm_enabled() || hvf_enabled()) {
38
/* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
39
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
40
t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
41
cpu->isar.id_aa64zfr0 = t;
42
43
- /* Replicate the same data to the 32-bit id registers. */
44
- u = cpu->isar.id_isar5;
45
- u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
46
- u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
47
- u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
48
- u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
49
- u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
50
- u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
51
- cpu->isar.id_isar5 = u;
52
-
53
- u = cpu->isar.id_isar6;
54
- u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1);
55
- u = FIELD_DP32(u, ID_ISAR6, DP, 1);
56
- u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
57
- u = FIELD_DP32(u, ID_ISAR6, SB, 1);
58
- u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
59
- u = FIELD_DP32(u, ID_ISAR6, BF16, 1);
60
- u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
61
- cpu->isar.id_isar6 = u;
62
-
63
- u = cpu->isar.id_pfr0;
64
- u = FIELD_DP32(u, ID_PFR0, DIT, 1);
65
- cpu->isar.id_pfr0 = u;
66
-
67
- u = cpu->isar.id_pfr2;
68
- u = FIELD_DP32(u, ID_PFR2, SSBS, 1);
69
- cpu->isar.id_pfr2 = u;
70
-
71
- u = cpu->isar.id_mmfr3;
72
- u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
73
- cpu->isar.id_mmfr3 = u;
74
-
75
- u = cpu->isar.id_mmfr4;
76
- u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
77
- u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
78
- u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
79
- u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
80
- cpu->isar.id_mmfr4 = u;
81
-
82
t = cpu->isar.id_aa64dfr0;
83
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
84
cpu->isar.id_aa64dfr0 = t;
85
86
- u = cpu->isar.id_dfr0;
87
- u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
88
- cpu->isar.id_dfr0 = u;
89
-
90
- u = cpu->isar.mvfr1;
91
- u = FIELD_DP32(u, MVFR1, FPHP, 3); /* v8.2-FP16 */
92
- u = FIELD_DP32(u, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
93
- cpu->isar.mvfr1 = u;
94
+ /* Replicate the same data to the 32-bit id registers. */
95
+ aa32_max_features(cpu);
96
97
#ifdef CONFIG_USER_ONLY
98
/*
99
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
38
index XXXXXXX..XXXXXXX 100644
100
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/mve_helper.c
101
--- a/target/arm/cpu_tcg.c
40
+++ b/target/arm/mve_helper.c
102
+++ b/target/arm/cpu_tcg.c
41
@@ -XXX,XX +XXX,XX @@ void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
103
@@ -XXX,XX +XXX,XX @@
42
mve_advance_vpt(env);
104
#endif
43
}
105
#include "cpregs.h"
44
106
45
+void HELPER(mve_vpnot)(CPUARMState *env)
107
+
108
+/* Share AArch32 -cpu max features with AArch64. */
109
+void aa32_max_features(ARMCPU *cpu)
46
+{
110
+{
47
+ /*
111
+ uint32_t t;
48
+ * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged.
112
+
49
+ * P0 bits for predicated lanes in executed bits (where mask is 0) are 0.
113
+ /* Add additional features supported by QEMU */
50
+ * P0 bits otherwise are inverted.
114
+ t = cpu->isar.id_isar5;
51
+ * (This is the same logic as VCMP.)
115
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2);
52
+ * This insn is itself subject to predication and to beat-wise execution,
116
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
53
+ * and after it executes VPT state advances in the usual way.
117
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
54
+ */
118
+ t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
55
+ uint16_t mask = mve_element_mask(env);
119
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
56
+ uint16_t eci_mask = mve_eci_mask(env);
120
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
57
+ uint16_t beatpred = ~env->v7m.vpr & mask;
121
+ cpu->isar.id_isar5 = t;
58
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask);
122
+
59
+ mve_advance_vpt(env);
123
+ t = cpu->isar.id_isar6;
124
+ t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
125
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1);
126
+ t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
127
+ t = FIELD_DP32(t, ID_ISAR6, SB, 1);
128
+ t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
129
+ t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
130
+ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
131
+ cpu->isar.id_isar6 = t;
132
+
133
+ t = cpu->isar.mvfr1;
134
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
135
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
136
+ cpu->isar.mvfr1 = t;
137
+
138
+ t = cpu->isar.mvfr2;
139
+ t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
140
+ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
141
+ cpu->isar.mvfr2 = t;
142
+
143
+ t = cpu->isar.id_mmfr3;
144
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
145
+ cpu->isar.id_mmfr3 = t;
146
+
147
+ t = cpu->isar.id_mmfr4;
148
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
149
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
150
+ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
151
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
152
+ cpu->isar.id_mmfr4 = t;
153
+
154
+ t = cpu->isar.id_pfr0;
155
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1);
156
+ cpu->isar.id_pfr0 = t;
157
+
158
+ t = cpu->isar.id_pfr2;
159
+ t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
160
+ cpu->isar.id_pfr2 = t;
161
+
162
+ t = cpu->isar.id_dfr0;
163
+ t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
164
+ cpu->isar.id_dfr0 = t;
60
+}
165
+}
61
+
166
+
62
#define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \
167
#ifndef CONFIG_USER_ONLY
63
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
168
static uint64_t l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
64
{ \
65
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/target/arm/translate-mve.c
68
+++ b/target/arm/translate-mve.c
69
@@ -XXX,XX +XXX,XX @@ static bool trans_VPST(DisasContext *s, arg_VPST *a)
70
return true;
71
}
72
73
+static bool trans_VPNOT(DisasContext *s, arg_VPNOT *a)
74
+{
75
+ /*
76
+ * Invert the predicate in VPR.P0. We have call out to
77
+ * a helper because this insn itself is beatwise and can
78
+ * be predicated.
79
+ */
80
+ if (!dc_isar_feature(aa32_mve, s)) {
81
+ return false;
82
+ }
83
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
84
+ return true;
85
+ }
86
+
87
+ gen_helper_mve_vpnot(cpu_env);
88
+ mve_update_eci(s);
89
+ return true;
90
+}
91
+
92
static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
93
{
169
{
94
/* VADDV: vector add across vector */
170
@@ -XXX,XX +XXX,XX @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
171
static void arm_max_initfn(Object *obj)
172
{
173
ARMCPU *cpu = ARM_CPU(obj);
174
- uint32_t t;
175
176
/* aarch64_a57_initfn, advertising none of the aarch64 features */
177
cpu->dtb_compatible = "arm,cortex-a57";
178
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
179
cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
180
define_cortex_a72_a57_a53_cp_reginfo(cpu);
181
182
- /* Add additional features supported by QEMU */
183
- t = cpu->isar.id_isar5;
184
- t = FIELD_DP32(t, ID_ISAR5, AES, 2);
185
- t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
186
- t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
187
- t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
188
- t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
189
- t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
190
- cpu->isar.id_isar5 = t;
191
-
192
- t = cpu->isar.id_isar6;
193
- t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
194
- t = FIELD_DP32(t, ID_ISAR6, DP, 1);
195
- t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
196
- t = FIELD_DP32(t, ID_ISAR6, SB, 1);
197
- t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
198
- t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
199
- t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
200
- cpu->isar.id_isar6 = t;
201
-
202
- t = cpu->isar.mvfr1;
203
- t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
204
- t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
205
- cpu->isar.mvfr1 = t;
206
-
207
- t = cpu->isar.mvfr2;
208
- t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
209
- t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
210
- cpu->isar.mvfr2 = t;
211
-
212
- t = cpu->isar.id_mmfr3;
213
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
214
- cpu->isar.id_mmfr3 = t;
215
-
216
- t = cpu->isar.id_mmfr4;
217
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
218
- t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
219
- t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
220
- t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
221
- cpu->isar.id_mmfr4 = t;
222
-
223
- t = cpu->isar.id_pfr0;
224
- t = FIELD_DP32(t, ID_PFR0, DIT, 1);
225
- cpu->isar.id_pfr0 = t;
226
-
227
- t = cpu->isar.id_pfr2;
228
- t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
229
- cpu->isar.id_pfr2 = t;
230
-
231
- t = cpu->isar.id_dfr0;
232
- t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
233
- cpu->isar.id_dfr0 = t;
234
+ aa32_max_features(cpu);
235
236
#ifdef CONFIG_USER_ONLY
237
/*
95
--
238
--
96
2.20.1
239
2.25.1
97
98
1
Implement the MVE VMAXA and VMINA insns, which take the absolute
1
From: Richard Henderson <richard.henderson@linaro.org>
2
value of the signed elements in the input vector and then accumulate
3
the unsigned max or min into the destination vector.
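As a per-lane sketch (illustrative only, predication ignored; the function name is invented), for one byte element VMAXA does:

    #include <stdint.h>

    /* |m| is taken as an unsigned value (|-128| = 128 still fits in
     * uint8_t) and the unsigned maximum with the existing destination
     * element is kept; VMINA is the same with the minimum.
     */
    static uint8_t vmaxa_lane(uint8_t d, int8_t m)
    {
        uint8_t am = (uint8_t)(m < 0 ? -m : m);
        return d > am ? d : am;
    }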
4
2
3
Update the legacy feature names to the current names.
4
Provide feature names for id changes that were not marked.
5
Sort the field updates into increasing bitfield order.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220506180242.216785-10-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
---
11
---
8
target/arm/helper-mve.h | 8 ++++++++
12
target/arm/cpu64.c | 100 +++++++++++++++++++++----------------------
9
target/arm/mve.decode | 4 ++++
13
target/arm/cpu_tcg.c | 48 ++++++++++-----------
10
target/arm/mve_helper.c | 26 ++++++++++++++++++++++++++
14
2 files changed, 74 insertions(+), 74 deletions(-)
11
target/arm/translate-mve.c | 2 ++
12
4 files changed, 40 insertions(+)
13
15
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
16
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
18
--- a/target/arm/cpu64.c
17
+++ b/target/arm/helper-mve.h
19
+++ b/target/arm/cpu64.c
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vqnegb, TCG_CALL_NO_WG, void, env, ptr, ptr)
20
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
19
DEF_HELPER_FLAGS_3(mve_vqnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
21
cpu->midr = t;
20
DEF_HELPER_FLAGS_3(mve_vqnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
22
21
23
t = cpu->isar.id_aa64isar0;
22
+DEF_HELPER_FLAGS_3(mve_vmaxab, TCG_CALL_NO_WG, void, env, ptr, ptr)
24
- t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
23
+DEF_HELPER_FLAGS_3(mve_vmaxah, TCG_CALL_NO_WG, void, env, ptr, ptr)
25
- t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
24
+DEF_HELPER_FLAGS_3(mve_vmaxaw, TCG_CALL_NO_WG, void, env, ptr, ptr)
26
- t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
25
+
27
+ t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */
26
+DEF_HELPER_FLAGS_3(mve_vminab, TCG_CALL_NO_WG, void, env, ptr, ptr)
28
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */
27
+DEF_HELPER_FLAGS_3(mve_vminah, TCG_CALL_NO_WG, void, env, ptr, ptr)
29
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */
28
+DEF_HELPER_FLAGS_3(mve_vminaw, TCG_CALL_NO_WG, void, env, ptr, ptr)
30
t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
29
+
31
- t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
30
DEF_HELPER_FLAGS_3(mve_vmovnbb, TCG_CALL_NO_WG, void, env, ptr, ptr)
32
- t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
31
DEF_HELPER_FLAGS_3(mve_vmovnbh, TCG_CALL_NO_WG, void, env, ptr, ptr)
33
- t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
32
DEF_HELPER_FLAGS_3(mve_vmovntb, TCG_CALL_NO_WG, void, env, ptr, ptr)
34
- t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
33
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
35
- t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
36
- t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
37
- t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
38
- t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
39
- t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
40
- t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
41
+ t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */
42
+ t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */
43
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */
44
+ t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); /* FEAT_SM3 */
45
+ t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1); /* FEAT_SM4 */
46
+ t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); /* FEAT_DotProd */
47
+ t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1); /* FEAT_FHM */
48
+ t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* FEAT_FlagM2 */
49
+ t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
50
+ t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); /* FEAT_RNG */
51
cpu->isar.id_aa64isar0 = t;
52
53
t = cpu->isar.id_aa64isar1;
54
- t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
55
- t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
56
- t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
57
- t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
58
- t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
59
- t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);
60
- t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
61
- t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
62
- t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
63
+ t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
64
+ t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */
65
+ t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */
66
+ t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* FEAT_LRCPC2 */
67
+ t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); /* FEAT_FRINTTS */
68
+ t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */
69
+ t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */
70
+ t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */
71
+ t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */
72
cpu->isar.id_aa64isar1 = t;
73
74
t = cpu->isar.id_aa64pfr0;
75
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */
76
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */
77
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
78
- t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
79
- t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
80
- t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);
81
- t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);
82
+ t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
83
+ t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
84
cpu->isar.id_aa64pfr0 = t;
85
86
t = cpu->isar.id_aa64pfr1;
87
- t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
88
- t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);
89
+ t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); /* FEAT_BTI */
90
+ t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* FEAT_SSBS2 */
91
/*
92
* Begin with full support for MTE. This will be downgraded to MTE=0
93
* during realize if the board provides no tag memory, much like
94
* we do for EL2 with the virtualization=on property.
95
*/
96
- t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
97
+ t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */
98
cpu->isar.id_aa64pfr1 = t;
99
100
t = cpu->isar.id_aa64mmfr0;
101
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
102
cpu->isar.id_aa64mmfr0 = t;
103
104
t = cpu->isar.id_aa64mmfr1;
105
- t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
106
- t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
107
- t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
108
- t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
109
- t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
110
- t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
111
+ t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
112
+ t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */
113
+ t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */
114
+ t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */
115
+ t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */
116
+ t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
117
cpu->isar.id_aa64mmfr1 = t;
118
119
t = cpu->isar.id_aa64mmfr2;
120
- t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);
121
- t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */
122
- t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */
123
- t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
124
- t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
125
- t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
126
+ t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */
127
+ t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */
128
+ t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
129
+ t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */
130
+ t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
131
+ t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
132
cpu->isar.id_aa64mmfr2 = t;
133
134
t = cpu->isar.id_aa64zfr0;
135
t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
136
- t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* PMULL */
137
- t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
138
- t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);
139
- t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
140
- t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
141
- t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
142
- t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);
143
- t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
144
+ t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */
145
+ t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */
146
+ t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1); /* FEAT_BF16 */
147
+ t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); /* FEAT_SVE_SHA3 */
148
+ t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); /* FEAT_SVE_SM4 */
149
+ t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */
150
+ t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); /* FEAT_F32MM */
151
+ t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); /* FEAT_F64MM */
152
cpu->isar.id_aa64zfr0 = t;
153
154
t = cpu->isar.id_aa64dfr0;
155
- t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
156
+ t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
157
cpu->isar.id_aa64dfr0 = t;
158
159
/* Replicate the same data to the 32-bit id registers. */
160
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
34
index XXXXXXX..XXXXXXX 100644
161
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/mve.decode
162
--- a/target/arm/cpu_tcg.c
36
+++ b/target/arm/mve.decode
163
+++ b/target/arm/cpu_tcg.c
37
@@ -XXX,XX +XXX,XX @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
164
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
38
VQMOVUNB 111 0 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op
165
39
VQMOVN_BS 111 0 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op
166
/* Add additional features supported by QEMU */
40
167
t = cpu->isar.id_isar5;
41
+ VMAXA 111 0 1110 0 . 11 .. 11 ... 0 1110 1 0 . 0 ... 1 @1op
168
- t = FIELD_DP32(t, ID_ISAR5, AES, 2);
42
+
169
- t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
43
VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
170
- t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
171
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2); /* FEAT_PMULL */
172
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); /* FEAT_SHA1 */
173
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); /* FEAT_SHA256 */
174
t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
175
- t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
176
- t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
177
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1); /* FEAT_RDM */
178
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); /* FEAT_FCMA */
179
cpu->isar.id_isar5 = t;
180
181
t = cpu->isar.id_isar6;
182
- t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
183
- t = FIELD_DP32(t, ID_ISAR6, DP, 1);
184
- t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
185
- t = FIELD_DP32(t, ID_ISAR6, SB, 1);
186
- t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
187
- t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
188
- t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
189
+ t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); /* FEAT_JSCVT */
190
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1); /* Feat_DotProd */
191
+ t = FIELD_DP32(t, ID_ISAR6, FHM, 1); /* FEAT_FHM */
192
+ t = FIELD_DP32(t, ID_ISAR6, SB, 1); /* FEAT_SB */
193
+ t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); /* FEAT_SPECRES */
194
+ t = FIELD_DP32(t, ID_ISAR6, BF16, 1); /* FEAT_AA32BF16 */
195
+ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1); /* FEAT_AA32I8MM */
196
cpu->isar.id_isar6 = t;
197
198
t = cpu->isar.mvfr1;
199
- t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
200
- t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
201
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* FEAT_FP16 */
202
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* FEAT_FP16 */
203
cpu->isar.mvfr1 = t;
204
205
t = cpu->isar.mvfr2;
206
- t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
207
- t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
208
+ t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
209
+ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
210
cpu->isar.mvfr2 = t;
211
212
t = cpu->isar.id_mmfr3;
213
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
214
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */
215
cpu->isar.id_mmfr3 = t;
216
217
t = cpu->isar.id_mmfr4;
218
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
219
- t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
220
- t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
221
- t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
222
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* FEAT_AA32HPD */
223
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
224
+ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */
225
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX*/
226
cpu->isar.id_mmfr4 = t;
227
228
t = cpu->isar.id_pfr0;
229
- t = FIELD_DP32(t, ID_PFR0, DIT, 1);
230
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
231
cpu->isar.id_pfr0 = t;
232
233
t = cpu->isar.id_pfr2;
234
- t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
235
+ t = FIELD_DP32(t, ID_PFR2, SSBS, 1); /* FEAT_SSBS */
236
cpu->isar.id_pfr2 = t;
237
238
t = cpu->isar.id_dfr0;
239
- t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
240
+ t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* FEAT_PMUv3p4 */
241
cpu->isar.id_dfr0 = t;
44
}
242
}
45
243
46
@@ -XXX,XX +XXX,XX @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
47
VQMOVUNT 111 0 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op
48
VQMOVN_TS 111 0 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op
49
50
+ VMINA 111 0 1110 0 . 11 .. 11 ... 1 1110 1 0 . 0 ... 1 @1op
51
+
52
VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
53
}
54
55
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/mve_helper.c
58
+++ b/target/arm/mve_helper.c
59
@@ -XXX,XX +XXX,XX @@ DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)
60
DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
61
DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
62
DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)
63
+
64
+/*
65
+ * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its
66
+ * absolute value; we then do an unsigned comparison.
67
+ */
68
+#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \
69
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
70
+ { \
71
+ UTYPE *d = vd; \
72
+ STYPE *m = vm; \
73
+ uint16_t mask = mve_element_mask(env); \
74
+ unsigned e; \
75
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
76
+ UTYPE r = DO_ABS(m[H##ESIZE(e)]); \
77
+ r = FN(d[H##ESIZE(e)], r); \
78
+ mergemask(&d[H##ESIZE(e)], r, mask); \
79
+ } \
80
+ mve_advance_vpt(env); \
81
+ }
82
+
83
+DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX)
84
+DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX)
85
+DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX)
86
+DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN)
87
+DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN)
88
+DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)
89
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/arm/translate-mve.c
92
+++ b/target/arm/translate-mve.c
93
@@ -XXX,XX +XXX,XX @@ DO_1OP(VABS, vabs)
94
DO_1OP(VNEG, vneg)
95
DO_1OP(VQABS, vqabs)
96
DO_1OP(VQNEG, vqneg)
97
+DO_1OP(VMAXA, vmaxa)
98
+DO_1OP(VMINA, vmina)
99
100
/* Narrowing moves: only size 0 and 1 are valid */
101
#define DO_VMOVN(INSN, FN) \
102
--
244
--
103
2.20.1
245
2.25.1
104
105
1
Implement the MVE VLDR/VSTR insns which do scatter-gather using base
1
From: Richard Henderson <richard.henderson@linaro.org>
2
addresses from Qm plus or minus an immediate offset (possibly with
3
writeback). Note that writeback is not predicated but it does have
4
to honour ECI state, so we have to add an eci_mask check to the
5
VSTR_SG macros (the VLDR_SG macros already needed this to be able
6
to distinguish "skip beat" from "set predicated element to 0").
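A behavioural sketch of one such 32-bit store with writeback (illustrative only, not the QEMU macros; the flat memory array and the parameter names are assumptions made for this example):

    #include <stdint.h>

    static void vstrw_sg_wb_model(uint32_t *mem, uint32_t base,
                                  const uint32_t *qd, uint32_t *qm,
                                  uint16_t pred, uint16_t eci)
    {
        for (int e = 0; e < 4; e++, pred >>= 4, eci >>= 4) {
            if (!(eci & 1)) {
                continue;              /* beat not executed: Qm lane untouched */
            }
            uint32_t addr = base + qm[e];
            if (pred & 1) {
                mem[addr / 4] = qd[e]; /* store only for predicated-in lanes */
            }
            qm[e] = addr;              /* writeback is not predicated */
        }
    }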
7
2
3
Use FIELD_DP{32,64} to manipulate id_pfr1 and id_aa64pfr0
4
during arm_cpu_realizefn.
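For example (a sketch only, assuming QEMU's hw/registerfields.h macros and the ID_PFR1 field definitions from target/arm/cpu.h), the named-field form replaces the open-coded mask like this:

    static uint32_t pfr1_without_security(uint32_t id_pfr1)
    {
        /* open-coded form this patch replaces: id_pfr1 & ~0xf0 */
        return FIELD_DP32(id_pfr1, ID_PFR1, SECURITY, 0);
    }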
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220506180242.216785-11-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
---
10
---
11
target/arm/helper-mve.h | 5 +++
11
target/arm/cpu.c | 22 +++++++++++++---------
12
target/arm/mve.decode | 10 +++++
12
1 file changed, 13 insertions(+), 9 deletions(-)
13
target/arm/mve_helper.c | 91 ++++++++++++++++++++++++--------------
14
target/arm/translate-mve.c | 72 ++++++++++++++++++++++++++++++
15
4 files changed, 146 insertions(+), 32 deletions(-)
16
13
17
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
18
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-mve.h
16
--- a/target/arm/cpu.c
20
+++ b/target/arm/helper-mve.h
17
+++ b/target/arm/cpu.c
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vstrh_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
22
DEF_HELPER_FLAGS_4(mve_vstrw_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
*/
23
DEF_HELPER_FLAGS_4(mve_vstrd_sg_os_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
unset_feature(env, ARM_FEATURE_EL3);
24
21
25
+DEF_HELPER_FLAGS_4(mve_vldrw_sg_wb_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
- /* Disable the security extension feature bits in the processor feature
26
+DEF_HELPER_FLAGS_4(mve_vldrd_sg_wb_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
- * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
27
+DEF_HELPER_FLAGS_4(mve_vstrw_sg_wb_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
+ /*
28
+DEF_HELPER_FLAGS_4(mve_vstrd_sg_wb_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
+ * Disable the security extension feature bits in the processor
29
+
26
+ * feature registers as well.
30
DEF_HELPER_FLAGS_3(mve_vdup, TCG_CALL_NO_WG, void, env, ptr, i32)
27
*/
31
28
- cpu->isar.id_pfr1 &= ~0xf0;
32
DEF_HELPER_FLAGS_4(mve_vidupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
29
- cpu->isar.id_aa64pfr0 &= ~0xf000;
33
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
30
+ cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
34
index XXXXXXX..XXXXXXX 100644
31
+ cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
35
--- a/target/arm/mve.decode
32
+ ID_AA64PFR0, EL3, 0);
36
+++ b/target/arm/mve.decode
37
@@ -XXX,XX +XXX,XX @@
38
&vmaxv qm rda size
39
&vabav qn qm rda size
40
&vldst_sg qd qm rn size msize os
41
+&vldst_sg_imm qd qm a w imm
42
43
# scatter-gather memory size is in bits 6:4
44
%sg_msize 6:1 4:1
45
@@ -XXX,XX +XXX,XX @@
46
@vldst_sg .... .... .... rn:4 .... ... size:2 ... ... os:1 &vldst_sg \
47
qd=%qd qm=%qm msize=%sg_msize
48
49
+# Qm is in the fields usually labeled Qn
50
+@vldst_sg_imm .... .... a:1 . w:1 . .... .... .... . imm:7 &vldst_sg_imm \
51
+ qd=%qd qm=%qn
52
+
53
@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
54
@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
55
@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
56
@@ -XXX,XX +XXX,XX @@ VLDR_S_sg 111 0 1100 1 . 01 .... ... 0 111 . .... .... @vldst_sg
57
VLDR_U_sg 111 1 1100 1 . 01 .... ... 0 111 . .... .... @vldst_sg
58
VSTR_sg 111 0 1100 1 . 00 .... ... 0 111 . .... .... @vldst_sg
59
60
+VLDRW_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1110 .... .... @vldst_sg_imm
61
+VLDRD_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1111 .... .... @vldst_sg_imm
62
+VSTRW_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1110 .... .... @vldst_sg_imm
63
+VSTRD_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1111 .... .... @vldst_sg_imm
64
+
65
# Moves between 2 32-bit vector lanes and 2 general purpose registers
66
VMOV_to_2gp 1110 1100 0 . 00 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
67
VMOV_from_2gp 1110 1100 0 . 01 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
68
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/target/arm/mve_helper.c
71
+++ b/target/arm/mve_helper.c
72
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
73
* For loads, predicated lanes are zeroed instead of retaining
74
* their previous values.
75
*/
76
-#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN) \
77
+#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \
78
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
79
uint32_t base) \
80
{ \
81
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
82
addr = ADDRFN(base, m[H##ESIZE(e)]); \
83
d[H##ESIZE(e)] = (mask & 1) ? \
84
cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
85
+ if (WB) { \
86
+ m[H##ESIZE(e)] = addr; \
87
+ } \
88
} \
89
mve_advance_vpt(env); \
90
}
33
}
91
34
92
/* We know here TYPE is unsigned so always the same as the offset type */
35
if (!cpu->has_el2) {
93
-#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN) \
36
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
94
+#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \
95
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
96
uint32_t base) \
97
{ \
98
TYPE *d = vd; \
99
TYPE *m = vm; \
100
uint16_t mask = mve_element_mask(env); \
101
+ uint16_t eci_mask = mve_eci_mask(env); \
102
unsigned e; \
103
uint32_t addr; \
104
- for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
105
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
106
+ if (!(eci_mask & 1)) { \
107
+ continue; \
108
+ } \
109
addr = ADDRFN(base, m[H##ESIZE(e)]); \
110
if (mask & 1) { \
111
cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
112
} \
113
+ if (WB) { \
114
+ m[H##ESIZE(e)] = addr; \
115
+ } \
116
} \
117
mve_advance_vpt(env); \
118
}
37
}
119
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
38
120
* accesses, controlled by the predicate mask for the relevant beat,
39
if (!arm_feature(env, ARM_FEATURE_EL2)) {
121
* and with a single 32-bit offset in the first of the two Qm elements.
40
- /* Disable the hypervisor feature bits in the processor feature
122
* Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little).
41
- * registers if we don't have EL2. These are id_pfr1[15:12] and
123
+ * Address writeback happens on the odd beats and updates the address
42
- * id_aa64pfr0_el1[11:8].
124
+ * stored in the even-beat element.
43
+ /*
125
*/
44
+ * Disable the hypervisor feature bits in the processor feature
126
-#define DO_VLDR64_SG(OP, ADDRFN) \
45
+ * registers if we don't have EL2.
127
+#define DO_VLDR64_SG(OP, ADDRFN, WB) \
46
*/
128
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
47
- cpu->isar.id_aa64pfr0 &= ~0xf00;
129
uint32_t base) \
48
- cpu->isar.id_pfr1 &= ~0xf000;
130
{ \
49
+ cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
131
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
50
+ ID_AA64PFR0, EL2, 0);
132
addr = ADDRFN(base, m[H4(e & ~1)]); \
51
+ cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
133
addr += 4 * (e & 1); \
52
+ ID_PFR1, VIRTUALIZATION, 0);
134
d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \
135
+ if (WB && (e & 1)) { \
136
+ m[H4(e & ~1)] = addr - 4; \
137
+ } \
138
} \
139
mve_advance_vpt(env); \
140
}
53
}
141
54
142
-#define DO_VSTR64_SG(OP, ADDRFN) \
55
#ifndef CONFIG_USER_ONLY
143
+#define DO_VSTR64_SG(OP, ADDRFN, WB) \
144
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
145
uint32_t base) \
146
{ \
147
uint32_t *d = vd; \
148
uint32_t *m = vm; \
149
uint16_t mask = mve_element_mask(env); \
150
+ uint16_t eci_mask = mve_eci_mask(env); \
151
unsigned e; \
152
uint32_t addr; \
153
- for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
154
+ for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
155
+ if (!(eci_mask & 1)) { \
156
+ continue; \
157
+ } \
158
addr = ADDRFN(base, m[H4(e & ~1)]); \
159
addr += 4 * (e & 1); \
160
if (mask & 1) { \
161
cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \
162
} \
163
+ if (WB && (e & 1)) { \
164
+ m[H4(e & ~1)] = addr - 4; \
165
+ } \
166
} \
167
mve_advance_vpt(env); \
168
}
169
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
170
#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2))
171
#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3))
172
173
-DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD)
174
-DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD)
175
-DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD)
176
+DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false)
177
+DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false)
178
+DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false)
179
180
-DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD)
181
-DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD)
182
-DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD)
183
-DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD)
184
-DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD)
185
-DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD)
186
-DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD)
187
+DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false)
188
+DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false)
189
+DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false)
190
+DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false)
191
+DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false)
192
+DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
193
+DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false)
194
195
-DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH)
196
-DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH)
197
-DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH)
198
-DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW)
199
-DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD)
200
+DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false)
201
+DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false)
202
+DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false)
203
+DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false)
204
+DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false)
205
206
-DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD)
207
-DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD)
208
-DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD)
209
-DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD)
210
-DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD)
211
-DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD)
212
-DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD)
213
+DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false)
214
+DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false)
215
+DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false)
216
+DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false)
217
+DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false)
218
+DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false)
219
+DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false)
220
221
-DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH)
222
-DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH)
223
-DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW)
224
-DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD)
225
+DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false)
226
+DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false)
227
+DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false)
228
+DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false)
229
+
230
+DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
231
+DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
232
+DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
233
+DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
234
235
/*
236
* The mergemask(D, R, M) macro performs the operation "*D = R" but
237
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/target/arm/translate-mve.c
240
+++ b/target/arm/translate-mve.c
241
@@ -XXX,XX +XXX,XX @@ static bool trans_VSTR_sg(DisasContext *s, arg_vldst_sg *a)
242
243
#undef F
244
245
+static bool do_ldst_sg_imm(DisasContext *s, arg_vldst_sg_imm *a,
246
+ MVEGenLdStSGFn *fn, unsigned msize)
247
+{
248
+ uint32_t offset;
249
+ TCGv_ptr qd, qm;
250
+
251
+ if (!dc_isar_feature(aa32_mve, s) ||
252
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
253
+ !fn) {
254
+ return false;
255
+ }
256
+
257
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
258
+ return true;
259
+ }
260
+
261
+ offset = a->imm << msize;
262
+ if (!a->a) {
263
+ offset = -offset;
264
+ }
265
+
266
+ qd = mve_qreg_ptr(a->qd);
267
+ qm = mve_qreg_ptr(a->qm);
268
+ fn(cpu_env, qd, qm, tcg_constant_i32(offset));
269
+ tcg_temp_free_ptr(qd);
270
+ tcg_temp_free_ptr(qm);
271
+ mve_update_eci(s);
272
+ return true;
273
+}
274
+
275
+static bool trans_VLDRW_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
276
+{
277
+ static MVEGenLdStSGFn * const fns[] = {
278
+ gen_helper_mve_vldrw_sg_uw,
279
+ gen_helper_mve_vldrw_sg_wb_uw,
280
+ };
281
+ if (a->qd == a->qm) {
282
+ return false; /* UNPREDICTABLE */
283
+ }
284
+ return do_ldst_sg_imm(s, a, fns[a->w], MO_32);
285
+}
286
+
287
+static bool trans_VLDRD_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
288
+{
289
+ static MVEGenLdStSGFn * const fns[] = {
290
+ gen_helper_mve_vldrd_sg_ud,
291
+ gen_helper_mve_vldrd_sg_wb_ud,
292
+ };
293
+ if (a->qd == a->qm) {
294
+ return false; /* UNPREDICTABLE */
295
+ }
296
+ return do_ldst_sg_imm(s, a, fns[a->w], MO_64);
297
+}
298
+
299
+static bool trans_VSTRW_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
300
+{
301
+ static MVEGenLdStSGFn * const fns[] = {
302
+ gen_helper_mve_vstrw_sg_uw,
303
+ gen_helper_mve_vstrw_sg_wb_uw,
304
+ };
305
+ return do_ldst_sg_imm(s, a, fns[a->w], MO_32);
306
+}
307
+
308
+static bool trans_VSTRD_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
309
+{
310
+ static MVEGenLdStSGFn * const fns[] = {
311
+ gen_helper_mve_vstrd_sg_ud,
312
+ gen_helper_mve_vstrd_sg_wb_ud,
313
+ };
314
+ return do_ldst_sg_imm(s, a, fns[a->w], MO_64);
315
+}
316
+
317
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
318
{
319
TCGv_ptr qd;
320
--
2.25.1
322
323
1
Include the MVE VPR register value in the CPU dumps produced by
1
From: Richard Henderson <richard.henderson@linaro.org>
2
arm_cpu_dump_state() if we are printing FPU information. This
3
makes it easier to interpret debug logs when predication is
4
active.
5
2
3
The only portion of FEAT_Debugv8p2 that is relevant to QEMU
4
is CONTEXTIDR_EL2, which is also conditionally implemented
5
with FEAT_VHE. The rest of the debug extension concerns the
6
External debug interface, which is outside the scope of QEMU.
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220506180242.216785-12-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
---
12
---
9
target/arm/cpu.c | 3 +++
13
docs/system/arm/emulation.rst | 1 +
10
1 file changed, 3 insertions(+)
14
target/arm/cpu.c | 1 +
15
target/arm/cpu64.c | 1 +
16
target/arm/cpu_tcg.c | 2 ++
17
4 files changed, 5 insertions(+)
11
18
19
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
20
index XXXXXXX..XXXXXXX 100644
21
--- a/docs/system/arm/emulation.rst
22
+++ b/docs/system/arm/emulation.rst
23
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
24
- FEAT_BTI (Branch Target Identification)
25
- FEAT_DIT (Data Independent Timing instructions)
26
- FEAT_DPB (DC CVAP instruction)
27
+- FEAT_Debugv8p2 (Debug changes for v8.2)
28
- FEAT_DotProd (Advanced SIMD dot product instructions)
29
- FEAT_FCMA (Floating-point complex number instructions)
30
- FEAT_FHM (Floating-point half-precision multiplication instructions)
12
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
31
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
13
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/cpu.c
33
--- a/target/arm/cpu.c
15
+++ b/target/arm/cpu.c
34
+++ b/target/arm/cpu.c
16
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
35
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
17
i, v);
36
* feature registers as well.
18
}
37
*/
19
qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
38
cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
20
+ if (cpu_isar_feature(aa32_mve, cpu)) {
39
+ cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
21
+ qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
40
cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
22
+ }
41
ID_AA64PFR0, EL3, 0);
23
}
42
}
43
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/cpu64.c
46
+++ b/target/arm/cpu64.c
47
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
48
cpu->isar.id_aa64zfr0 = t;
49
50
t = cpu->isar.id_aa64dfr0;
51
+ t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 8); /* FEAT_Debugv8p2 */
52
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
53
cpu->isar.id_aa64dfr0 = t;
54
55
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/cpu_tcg.c
58
+++ b/target/arm/cpu_tcg.c
59
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
60
cpu->isar.id_pfr2 = t;
61
62
t = cpu->isar.id_dfr0;
63
+ t = FIELD_DP32(t, ID_DFR0, COPDBG, 8); /* FEAT_Debugv8p2 */
64
+ t = FIELD_DP32(t, ID_DFR0, COPSDBG, 8); /* FEAT_Debugv8p2 */
65
t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* FEAT_PMUv3p4 */
66
cpu->isar.id_dfr0 = t;
24
}
67
}
25
26
--
2.25.1
28
29
Deleted patch
1
In the MVE shift-and-insert insns, we special case VSLI by 0
2
and VSRI by <dt>. VSRI by <dt> means "don't update the destination",
3
which is what we've implemented. However VSLI by 0 is "set
4
destination to the input", so we don't want to use the same
5
special-casing that we do for VSRI by <dt>.
6
1
7
Since the generic logic gives the right answer for a shift
8
by 0, just use that.
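A per-lane sketch of why that works (illustrative only; QEMU's macro builds a wider replicated mask and cannot take shift == <dt>, hence the remaining special case):

    #include <stdint.h>

    /* VSLI on one byte element: insert m << shift under a mask.  With
     * shift == 0 the mask is all ones, so the destination is simply
     * replaced by the input, which is the required behaviour.
     */
    static uint8_t vsli_lane_model(uint8_t d, uint8_t m, unsigned shift)
    {
        uint8_t mask = (uint8_t)(0xffu << shift);
        return (uint8_t)((d & ~mask) | ((uint8_t)(m << shift) & mask));
    }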
9
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
target/arm/mve_helper.c | 9 +++++----
14
1 file changed, 5 insertions(+), 4 deletions(-)
15
16
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/mve_helper.c
19
+++ b/target/arm/mve_helper.c
20
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
21
uint16_t mask; \
22
uint64_t shiftmask; \
23
unsigned e; \
24
- if (shift == 0 || shift == ESIZE * 8) { \
25
+ if (shift == ESIZE * 8) { \
26
/* \
27
- * Only VSLI can shift by 0; only VSRI can shift by <dt>. \
28
- * The generic logic would give the right answer for 0 but \
29
- * fails for <dt>. \
30
+ * Only VSRI can shift by <dt>; it should mean "don't \
31
+ * update the destination". The generic logic can't handle \
32
+ * this because it would try to shift by an out-of-range \
33
+ * amount, so special case it here. \
34
*/ \
35
goto done; \
36
} \
37
--
38
2.20.1
39
40
Deleted patch
1
A cut-and-paste error meant we handled signed VADDV like
2
unsigned VADDV; fix the type used.
3
1
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/arm/mve_helper.c | 6 +++---
8
1 file changed, 3 insertions(+), 3 deletions(-)
9
10
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/arm/mve_helper.c
13
+++ b/target/arm/mve_helper.c
14
@@ -XXX,XX +XXX,XX @@ DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)
15
return ra; \
16
} \
17
18
-DO_VADDV(vaddvsb, 1, uint8_t)
19
-DO_VADDV(vaddvsh, 2, uint16_t)
20
-DO_VADDV(vaddvsw, 4, uint32_t)
21
+DO_VADDV(vaddvsb, 1, int8_t)
22
+DO_VADDV(vaddvsh, 2, int16_t)
23
+DO_VADDV(vaddvsw, 4, int32_t)
24
DO_VADDV(vaddvub, 1, uint8_t)
25
DO_VADDV(vaddvuh, 2, uint16_t)
26
DO_VADDV(vaddvuw, 4, uint32_t)
27
--
28
2.20.1
29
30
Deleted patch
1
In the MVE helpers for the narrowing operations (DO_VSHRN and
2
DO_VSHRN_SAT) we were using the wrong bits of the predicate mask for
3
the 'top' versions of the insn. This is because the loop works over
4
the double-sized input elements and shifts the predicate mask by that
5
many bits each time, but when we write out the half-sized output we
6
must look at the mask bits for whichever half of the element we are
7
writing to.
8
1
9
Correct this by shifting the whole mask right by ESIZE bits for the
10
'top' insns. This allows us also to simplify the saturation bit
11
checking (where we had noticed that we needed to look at a different
12
mask bit for the 'top' insn.)
13
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
---
17
target/arm/mve_helper.c | 4 +++-
18
1 file changed, 3 insertions(+), 1 deletion(-)
19
20
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/mve_helper.c
23
+++ b/target/arm/mve_helper.c
24
@@ -XXX,XX +XXX,XX @@ DO_VSHLL_ALL(vshllt, true)
25
TYPE *d = vd; \
26
uint16_t mask = mve_element_mask(env); \
27
unsigned le; \
28
+ mask >>= ESIZE * TOP; \
29
for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
30
TYPE r = FN(m[H##LESIZE(le)], shift); \
31
mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
32
@@ -XXX,XX +XXX,XX @@ static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
33
uint16_t mask = mve_element_mask(env); \
34
bool qc = false; \
35
unsigned le; \
36
+ mask >>= ESIZE * TOP; \
37
for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
38
bool sat = false; \
39
TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
40
mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
41
- qc |= sat && (mask & 1 << (TOP * ESIZE)); \
42
+ qc |= sat & mask & 1; \
43
} \
44
if (qc) { \
45
env->vfp.qc[0] = qc; \
46
--
47
2.20.1
48
49
Deleted patch
1
We got an edge case wrong in the 48-bit SQRSHRL implementation: if
2
the shift is to the right, although it always makes the result
3
smaller than the input value it might not be within the 48-bit range
4
the result is supposed to be if the input had some bits in [63..48]
5
set and the shift didn't bring all of those within the [47..0] range.
6
1
7
Handle this similarly to the way we already do for this case in
8
do_uqrshl48_d(): extend the calculated result from 48 bits,
9
and return that if not saturating or if it doesn't change the
10
result; otherwise fall through to return a saturated value.
11
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
---
15
target/arm/mve_helper.c | 11 +++++++++--
16
1 file changed, 9 insertions(+), 2 deletions(-)
17
18
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/mve_helper.c
21
+++ b/target/arm/mve_helper.c
22
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
23
static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
24
bool round, uint32_t *sat)
25
{
26
+ int64_t val, extval;
27
+
28
if (shift <= -48) {
29
/* Rounding the sign bit always produces 0. */
30
if (round) {
31
@@ -XXX,XX +XXX,XX @@ static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
32
} else if (shift < 0) {
33
if (round) {
34
src >>= -shift - 1;
35
- return (src >> 1) + (src & 1);
36
+ val = (src >> 1) + (src & 1);
37
+ } else {
38
+ val = src >> -shift;
39
+ }
40
+ extval = sextract64(val, 0, 48);
41
+ if (!sat || val == extval) {
42
+ return extval;
43
}
44
- return src >> -shift;
45
} else if (shift < 48) {
46
int64_t extval = sextract64(src << shift, 0, 48);
47
if (!sat || src == (extval >> shift)) {
48
--
49
2.20.1
50
51
1
We were not paying attention to the ECI state when advancing the VPT
1
From: Richard Henderson <richard.henderson@linaro.org>
2
state. Architecturally, VPT state advance happens for every beat
3
(see the pseudocode VPTAdvance()), so on every beat the 4 bits of
4
VPR.P0 corresponding to the current beat are inverted if required,
5
and at the end of beats 1 and 3 the VPR MASK fields are updated.
6
This means that if the ECI state says we should not be executing all
7
4 beats then we need to skip some of the updating of the VPR that we
8
currently do in mve_advance_vpt().
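Sketch of the masking this implies (illustrative only, not the QEMU code; parameter names are invented):

    #include <stdint.h>

    /* Only P0 bits of beats that actually executed (eci_mask) may be
     * inverted, and a MASK field value of 0b1000 or below means "don't
     * invert that half of P0".
     */
    static uint16_t p0_invert_model(uint16_t p0, uint16_t eci_mask,
                                    unsigned mask01, unsigned mask23)
    {
        uint16_t inv = eci_mask;
        if (mask01 <= 8) {
            inv &= ~0x00ff;
        }
        if (mask23 <= 8) {
            inv &= ~0xff00;
        }
        return p0 ^ inv;
    }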
9
2
3
This extension concerns changes to the External Debug interface,
4
with Secure and Non-secure access to the debug registers, and all
5
of it is outside the scope of QEMU. Indicating support for this
6
is mandatory with FEAT_SEL2, which we do implement.
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220506180242.216785-13-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
---
12
---
13
target/arm/mve_helper.c | 24 +++++++++++++++++-------
13
docs/system/arm/emulation.rst | 1 +
14
1 file changed, 17 insertions(+), 7 deletions(-)
14
target/arm/cpu64.c | 2 +-
15
target/arm/cpu_tcg.c | 4 ++--
16
3 files changed, 4 insertions(+), 3 deletions(-)
15
17
16
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
18
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/mve_helper.c
20
--- a/docs/system/arm/emulation.rst
19
+++ b/target/arm/mve_helper.c
21
+++ b/docs/system/arm/emulation.rst
20
@@ -XXX,XX +XXX,XX @@ static void mve_advance_vpt(CPUARMState *env)
22
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
21
/* Advance the VPT and ECI state if necessary */
23
- FEAT_DIT (Data Independent Timing instructions)
22
uint32_t vpr = env->v7m.vpr;
24
- FEAT_DPB (DC CVAP instruction)
23
unsigned mask01, mask23;
25
- FEAT_Debugv8p2 (Debug changes for v8.2)
24
+ uint16_t inv_mask;
26
+- FEAT_Debugv8p4 (Debug changes for v8.4)
25
+ uint16_t eci_mask = mve_eci_mask(env);
27
- FEAT_DotProd (Advanced SIMD dot product instructions)
26
28
- FEAT_FCMA (Floating-point complex number instructions)
27
if ((env->condexec_bits & 0xf) == 0) {
29
- FEAT_FHM (Floating-point half-precision multiplication instructions)
28
env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
30
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
29
@@ -XXX,XX +XXX,XX @@ static void mve_advance_vpt(CPUARMState *env)
31
index XXXXXXX..XXXXXXX 100644
30
return;
32
--- a/target/arm/cpu64.c
31
}
33
+++ b/target/arm/cpu64.c
32
34
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
33
+ /* Invert P0 bits if needed, but only for beats we actually executed */
35
cpu->isar.id_aa64zfr0 = t;
34
mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
36
35
mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
37
t = cpu->isar.id_aa64dfr0;
36
- if (mask01 > 8) {
38
- t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 8); /* FEAT_Debugv8p2 */
37
- /* high bit set, but not 0b1000: invert the relevant half of P0 */
39
+ t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */
38
- vpr ^= 0xff;
40
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
39
+ /* Start by assuming we invert all bits corresponding to executed beats */
41
cpu->isar.id_aa64dfr0 = t;
40
+ inv_mask = eci_mask;
42
41
+ if (mask01 <= 8) {
43
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
42
+ /* MASK01 says don't invert low half of P0 */
44
index XXXXXXX..XXXXXXX 100644
43
+ inv_mask &= ~0xff;
45
--- a/target/arm/cpu_tcg.c
44
}
46
+++ b/target/arm/cpu_tcg.c
45
- if (mask23 > 8) {
47
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
46
- /* high bit set, but not 0b1000: invert the relevant half of P0 */
48
cpu->isar.id_pfr2 = t;
47
- vpr ^= 0xff00;
49
48
+ if (mask23 <= 8) {
50
t = cpu->isar.id_dfr0;
49
+ /* MASK23 says don't invert high half of P0 */
51
- t = FIELD_DP32(t, ID_DFR0, COPDBG, 8); /* FEAT_Debugv8p2 */
50
+ inv_mask &= ~0xff00;
52
- t = FIELD_DP32(t, ID_DFR0, COPSDBG, 8); /* FEAT_Debugv8p2 */
51
}
53
+ t = FIELD_DP32(t, ID_DFR0, COPDBG, 9); /* FEAT_Debugv8p4 */
52
- vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
54
+ t = FIELD_DP32(t, ID_DFR0, COPSDBG, 9); /* FEAT_Debugv8p4 */
53
+ vpr ^= inv_mask;
55
t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* FEAT_PMUv3p4 */
54
+ /* Only update MASK01 if beat 1 executed */
56
cpu->isar.id_dfr0 = t;
55
+ if (eci_mask & 0xf0) {
56
+ vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
57
+ }
58
+ /* Beat 3 always executes, so update MASK23 */
59
vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
60
env->v7m.vpr = vpr;
61
}
57
}
62
--
58
--
63
2.20.1
59
2.25.1
64
65
1
Implement the MVE saturating doubling multiply accumulate insns
1
From: Richard Henderson <richard.henderson@linaro.org>
2
VQDMLAH, VQRDMLAH, VQDMLASH and VQRDMLASH. These perform a multiply,
3
double, add the accumulator shifted by the element size, possibly
4
round, saturate to twice the element size, then take the high half of
5
the result. The *MLAH insns do vector * scalar + vector, and the
6
*MLASH insns do vector * vector + scalar.
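For the 16-bit lane size, the per-element operation being described can be
sketched in isolation as follows (a standalone model of one lane, not the QEMU
helper; the rounding constant shown corresponds to the VQRDMLAH variant):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /*
     * One 16-bit lane of VQRDMLAH: multiply, double, add the accumulator
     * shifted by the element size, round, saturate to 32 bits, then take
     * the high half of the result.
     */
    static int16_t vqrdmlah_h(int16_t d, int16_t n, int16_t m, bool *sat)
    {
        int64_t r = (int64_t)n * m * 2 + ((int64_t)d << 16) + (1 << 15);

        if (r > INT32_MAX) {
            *sat = true;
            r = INT32_MAX;
        } else if (r < INT32_MIN) {
            *sat = true;
            r = INT32_MIN;
        }
        return r >> 16;    /* high half of the saturated 32-bit result */
    }

    int main(void)
    {
        bool sat = false;
        printf("%d (sat=%d)\n", vqrdmlah_h(100, 30000, 30000, &sat), sat);
        return 0;
    }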
7
2
3
Add only the system registers required to implement zero error
4
records. This means that all values for ERRSELR are out of range,
5
which means that it and all of the indexed error record registers
6
need not be implemented.
7
8
Add the EL2 registers required for injecting virtual SError.
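From the guest's point of view the minimal implementation looks like a RAS
node with no error records: ERRIDR_EL1 reads as zero, so there is nothing to
select with ERRSELR and the ERX* registers simply UNDEF.  A bare-metal probe
of that is sketched below (illustrative only, not QEMU code; whether the
assembler accepts the symbolic register name depends on its age, otherwise the
S3_0_C5_C3_0 encoding from the reginfo above can be used):

    #include <stdint.h>

    /* Read ERRIDR_EL1; with the minimal RAS model its NUM field is 0,
     * so a RAS driver concludes there are no error records to probe. */
    static inline uint64_t read_erridr_el1(void)
    {
        uint64_t v;
        __asm__ volatile("mrs %0, erridr_el1" : "=r"(v));
        return v;
    }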
9
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20220506180242.216785-14-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
---
14
---
11
target/arm/helper-mve.h | 16 +++++++
15
target/arm/cpu.h | 5 +++
12
target/arm/mve.decode | 5 ++
16
target/arm/helper.c | 84 +++++++++++++++++++++++++++++++++++++++++++++
13
target/arm/mve_helper.c | 95 ++++++++++++++++++++++++++++++++++++++
17
2 files changed, 89 insertions(+)
14
target/arm/translate-mve.c | 4 ++
15
4 files changed, 120 insertions(+)
16
18
17
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-mve.h
21
--- a/target/arm/cpu.h
20
+++ b/target/arm/helper-mve.h
22
+++ b/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmlasb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
22
DEF_HELPER_FLAGS_4(mve_vmlash, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
uint64_t tfsr_el[4]; /* tfsre0_el1 is index 0. */
23
DEF_HELPER_FLAGS_4(mve_vmlasw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
uint64_t gcr_el1;
24
26
uint64_t rgsr_el1;
25
+DEF_HELPER_FLAGS_4(mve_vqdmlahb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
26
+DEF_HELPER_FLAGS_4(mve_vqdmlahh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
+DEF_HELPER_FLAGS_4(mve_vqdmlahw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
+
27
+
29
+DEF_HELPER_FLAGS_4(mve_vqrdmlahb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
+ /* Minimal RAS registers */
30
+DEF_HELPER_FLAGS_4(mve_vqrdmlahh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
+ uint64_t disr_el1;
31
+DEF_HELPER_FLAGS_4(mve_vqrdmlahw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
30
+ uint64_t vdisr_el2;
31
+ uint64_t vsesr_el2;
32
} cp15;
33
34
struct {
35
diff --git a/target/arm/helper.c b/target/arm/helper.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/helper.c
38
+++ b/target/arm/helper.c
39
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
40
.access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
41
};
42
43
+/*
44
+ * Check for traps to RAS registers, which are controlled
45
+ * by HCR_EL2.TERR and SCR_EL3.TERR.
46
+ */
47
+static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
48
+ bool isread)
49
+{
50
+ int el = arm_current_el(env);
32
+
51
+
33
+DEF_HELPER_FLAGS_4(mve_vqdmlashb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
52
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
34
+DEF_HELPER_FLAGS_4(mve_vqdmlashh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
53
+ return CP_ACCESS_TRAP_EL2;
35
+DEF_HELPER_FLAGS_4(mve_vqdmlashw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
36
+
37
+DEF_HELPER_FLAGS_4(mve_vqrdmlashb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_4(mve_vqrdmlashh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_4(mve_vqrdmlashw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
40
+
41
DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
42
DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
43
DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
44
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
45
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/mve.decode
47
+++ b/target/arm/mve.decode
48
@@ -XXX,XX +XXX,XX @@ VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
49
VMLA 111- 1110 0 . .. ... 1 ... 0 1110 . 100 .... @2scalar
50
VMLAS 111- 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
51
52
+VQRDMLAH 1110 1110 0 . .. ... 0 ... 0 1110 . 100 .... @2scalar
53
+VQRDMLASH 1110 1110 0 . .. ... 0 ... 1 1110 . 100 .... @2scalar
54
+VQDMLAH 1110 1110 0 . .. ... 0 ... 0 1110 . 110 .... @2scalar
55
+VQDMLASH 1110 1110 0 . .. ... 0 ... 1 1110 . 110 .... @2scalar
56
+
57
# Vector add across vector
58
{
59
VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
60
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/mve_helper.c
63
+++ b/target/arm/mve_helper.c
64
@@ -XXX,XX +XXX,XX @@ DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
65
mve_advance_vpt(env); \
66
}
67
68
+#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
69
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
70
+ uint32_t rm) \
71
+ { \
72
+ TYPE *d = vd, *n = vn; \
73
+ TYPE m = rm; \
74
+ uint16_t mask = mve_element_mask(env); \
75
+ unsigned e; \
76
+ bool qc = false; \
77
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
78
+ bool sat = false; \
79
+ mergemask(&d[H##ESIZE(e)], \
80
+ FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat), \
81
+ mask); \
82
+ qc |= sat & mask & 1; \
83
+ } \
84
+ if (qc) { \
85
+ env->vfp.qc[0] = qc; \
86
+ } \
87
+ mve_advance_vpt(env); \
88
+ }
54
+ }
89
+
55
+ if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
90
/* provide unsigned 2-op scalar helpers for all sizes */
56
+ return CP_ACCESS_TRAP_EL3;
91
#define DO_2OP_SCALAR_U(OP, FN) \
57
+ }
92
DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
58
+ return CP_ACCESS_OK;
93
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
94
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
95
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
96
97
+static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
98
+{
99
+ int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
100
+ return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
101
+}
59
+}
102
+
60
+
103
+static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
61
+static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
104
+ int round, bool *sat)
105
+{
62
+{
106
+ int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
63
+ int el = arm_current_el(env);
107
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
64
+
65
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
66
+ return env->cp15.vdisr_el2;
67
+ }
68
+ if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
69
+ return 0; /* RAZ/WI */
70
+ }
71
+ return env->cp15.disr_el1;
108
+}
72
+}
109
+
73
+
110
+static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
74
+static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
111
+ int round, bool *sat)
112
+{
75
+{
113
+ /*
76
+ int el = arm_current_el(env);
114
+ * Architecturally we should do the entire add, double, round
77
+
115
+ * and then check for saturation. We do three saturating adds,
78
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
116
+ * but we need to be careful about the order. If the first
79
+ env->cp15.vdisr_el2 = val;
117
+ * m1 + m2 saturates then it's impossible for the *2+rc to
80
+ return;
118
+ * bring it back into the non-saturated range. However, if
119
+ * m1 + m2 is negative then it's possible that doing the doubling
120
+ * would take the intermediate result below INT64_MAX and the
121
+ * addition of the rounding constant then brings it back in range.
122
+ * So we add half the rounding constant and half the "c << esize"
123
+ * before doubling rather than adding the rounding constant after
124
+ * the doubling.
125
+ */
126
+ int64_t m1 = (int64_t)a * b;
127
+ int64_t m2 = (int64_t)c << 31;
128
+ int64_t r;
129
+ if (sadd64_overflow(m1, m2, &r) ||
130
+ sadd64_overflow(r, (round << 30), &r) ||
131
+ sadd64_overflow(r, r, &r)) {
132
+ *sat = true;
133
+ return r < 0 ? INT32_MAX : INT32_MIN;
134
+ }
81
+ }
135
+ return r >> 32;
82
+ if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
83
+ return; /* RAZ/WI */
84
+ }
85
+ env->cp15.disr_el1 = val;
136
+}
86
+}
137
+
87
+
138
+/*
88
+/*
139
+ * The *MLAH insns are vector * scalar + vector;
89
+ * Minimal RAS implementation with no Error Records.
140
+ * the *MLASH insns are vector * vector + scalar
90
+ * Which means that all of the Error Record registers:
91
+ * ERXADDR_EL1
92
+ * ERXCTLR_EL1
93
+ * ERXFR_EL1
94
+ * ERXMISC0_EL1
95
+ * ERXMISC1_EL1
96
+ * ERXMISC2_EL1
97
+ * ERXMISC3_EL1
98
+ * ERXPFGCDN_EL1 (RASv1p1)
99
+ * ERXPFGCTL_EL1 (RASv1p1)
100
+ * ERXPFGF_EL1 (RASv1p1)
101
+ * ERXSTATUS_EL1
102
+ * and
103
+ * ERRSELR_EL1
104
+ * may generate UNDEFINED, which is the effect we get by not
105
+ * listing them at all.
141
+ */
106
+ */
142
+#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
107
+static const ARMCPRegInfo minimal_ras_reginfo[] = {
143
+#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
108
+ { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
144
+#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
109
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
145
+#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
110
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
146
+#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
111
+ .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
147
+#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)
112
+ { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
113
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
114
+ .access = PL1_R, .accessfn = access_terr,
115
+ .type = ARM_CP_CONST, .resetvalue = 0 },
116
+ { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
117
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
118
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
119
+ { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
120
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
121
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
122
+};
148
+
123
+
149
+#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
124
/* Return the exception level to which exceptions should be taken
150
+#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
125
* via SVEAccessTrap. If an exception should be routed through
151
+#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
126
* AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
152
+#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
127
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
153
+#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
128
if (cpu_isar_feature(aa64_ssbs, cpu)) {
154
+#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)
129
define_one_arm_cp_reg(cpu, &ssbs_reginfo);
155
+
130
}
156
+DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
131
+ if (cpu_isar_feature(any_ras, cpu)) {
157
+DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
132
+ define_arm_cp_regs(cpu, minimal_ras_reginfo);
158
+DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
133
+ }
159
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
134
160
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
135
if (cpu_isar_feature(aa64_vh, cpu) ||
161
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)
136
cpu_isar_feature(aa64_debugv8p2, cpu)) {
162
+
163
+DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
164
+DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
165
+DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
166
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
167
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
168
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)
169
+
170
/* Vector by scalar plus vector */
171
#define DO_VMLA(D, N, M) ((N) * (M) + (D))
172
173
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/arm/translate-mve.c
176
+++ b/target/arm/translate-mve.c
177
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
178
DO_2OP_SCALAR(VBRSR, vbrsr)
179
DO_2OP_SCALAR(VMLA, vmla)
180
DO_2OP_SCALAR(VMLAS, vmlas)
181
+DO_2OP_SCALAR(VQDMLAH, vqdmlah)
182
+DO_2OP_SCALAR(VQRDMLAH, vqrdmlah)
183
+DO_2OP_SCALAR(VQDMLASH, vqdmlash)
184
+DO_2OP_SCALAR(VQRDMLASH, vqrdmlash)
185
186
static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
187
{
188
--
137
--
189
2.20.1
138
2.25.1
190
191
1
We're about to make a code change to the sdiv and udiv helper
1
From: Richard Henderson <richard.henderson@linaro.org>
2
functions, so first fix their indentation and coding style.
3
2
3
Enable writes to the TERR and TEA bits when RAS is enabled.
4
These bits are otherwise RES0.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220506180242.216785-15-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210730151636.17254-2-peter.maydell@linaro.org
7
---
10
---
8
target/arm/helper.c | 15 +++++++++------
11
target/arm/helper.c | 9 +++++++++
9
1 file changed, 9 insertions(+), 6 deletions(-)
12
1 file changed, 9 insertions(+)
10
13
11
diff --git a/target/arm/helper.c b/target/arm/helper.c
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
12
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/helper.c
16
--- a/target/arm/helper.c
14
+++ b/target/arm/helper.c
17
+++ b/target/arm/helper.c
15
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(uxtb16)(uint32_t x)
18
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
16
19
}
17
int32_t HELPER(sdiv)(int32_t num, int32_t den)
20
valid_mask &= ~SCR_NET;
18
{
21
19
- if (den == 0)
22
+ if (cpu_isar_feature(aa64_ras, cpu)) {
20
- return 0;
23
+ valid_mask |= SCR_TERR;
21
- if (num == INT_MIN && den == -1)
24
+ }
22
- return INT_MIN;
25
if (cpu_isar_feature(aa64_lor, cpu)) {
23
+ if (den == 0) {
26
valid_mask |= SCR_TLOR;
24
+ return 0;
27
}
25
+ }
28
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
26
+ if (num == INT_MIN && den == -1) {
29
}
27
+ return INT_MIN;
30
} else {
28
+ }
31
valid_mask &= ~(SCR_RW | SCR_ST);
29
return num / den;
32
+ if (cpu_isar_feature(aa32_ras, cpu)) {
30
}
33
+ valid_mask |= SCR_TERR;
31
34
+ }
32
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
35
}
33
{
36
34
- if (den == 0)
37
if (!arm_feature(env, ARM_FEATURE_EL2)) {
35
- return 0;
38
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
36
+ if (den == 0) {
39
if (cpu_isar_feature(aa64_vh, cpu)) {
37
+ return 0;
40
valid_mask |= HCR_E2H;
38
+ }
41
}
39
return num / den;
42
+ if (cpu_isar_feature(aa64_ras, cpu)) {
40
}
43
+ valid_mask |= HCR_TERR | HCR_TEA;
41
44
+ }
45
if (cpu_isar_feature(aa64_lor, cpu)) {
46
valid_mask |= HCR_TLOR;
47
}
42
--
48
--
43
2.20.1
49
2.25.1
44
45
1
Unlike A-profile, for M-profile the UDIV and SDIV insns can be
1
From: Richard Henderson <richard.henderson@linaro.org>
2
configured to raise an exception on division by zero, using the CCR
3
DIV_0_TRP bit.
4
2
5
Implement support for setting this bit by making the helper functions
3
Virtual SError exceptions are raised by setting HCR_EL2.VSE,
6
raise the appropriate exception.
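A standalone model of the resulting behaviour (a sketch only, not the QEMU
helper itself: the real code raises a UsageFault with CFSR.DIVBYZERO set
rather than returning a flag, and reads CCR.DIV_0_TRP from the banked CCR):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * On M-profile, a divide by zero either traps (CCR.DIV_0_TRP set)
     * or quietly returns 0.
     */
    static int32_t m_profile_sdiv(int32_t num, int32_t den,
                                  bool ccr_div_0_trp, bool *trapped)
    {
        *trapped = false;
        if (den == 0) {
            if (ccr_div_0_trp) {
                *trapped = true;   /* real hardware takes a UsageFault here */
            }
            return 0;
        }
        if (num == INT32_MIN && den == -1) {
            return INT32_MIN;      /* overflow case: the result wraps */
        }
        return num / den;
    }

    int main(void)
    {
        bool trapped;
        printf("%d\n", m_profile_sdiv(7, 0, true, &trapped));
        printf("trapped=%d\n", trapped);
        return 0;
    }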
4
and are routed to EL1 just like other virtual exceptions.
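For reference, this is the interface being emulated, seen from a hypervisor
running at EL2 (a bare-metal AArch64 sketch, not QEMU code; it assumes
HCR_EL2.AMO is already set, HCR_EL2.VSE is bit 8, and older assemblers may
need the S3_4_C5_C2_3 encoding for VSESR_EL2):

    #include <stdint.h>

    #define HCR_VSE (1ULL << 8)   /* HCR_EL2.VSE: virtual SError pending */

    /* Inject a virtual SError into the EL1 guest: program the syndrome the
     * guest will see via VSESR_EL2, then set HCR_EL2.VSE.  The guest takes
     * the exception once it runs with the SError mask (PSTATE.A) clear. */
    static inline void inject_vserror(uint64_t vsesr)
    {
        uint64_t hcr;

        __asm__ volatile("msr vsesr_el2, %0" : : "r"(vsesr));
        __asm__ volatile("mrs %0, hcr_el2" : "=r"(hcr));
        __asm__ volatile("msr hcr_el2, %0" : : "r"(hcr | HCR_VSE));
        __asm__ volatile("isb");
    }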
7
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220506180242.216785-16-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210730151636.17254-3-peter.maydell@linaro.org
11
---
10
---
12
target/arm/cpu.h | 1 +
11
target/arm/cpu.h | 2 ++
13
target/arm/helper.h | 4 ++--
12
target/arm/internals.h | 8 ++++++++
14
target/arm/helper.c | 19 +++++++++++++++++--
13
target/arm/syndrome.h | 5 +++++
15
target/arm/m_helper.c | 4 ++++
14
target/arm/cpu.c | 38 +++++++++++++++++++++++++++++++++++++-
16
target/arm/translate.c | 4 ++--
15
target/arm/helper.c | 40 +++++++++++++++++++++++++++++++++++++++-
17
5 files changed, 26 insertions(+), 6 deletions(-)
16
5 files changed, 91 insertions(+), 2 deletions(-)
18
17
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
20
--- a/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
21
+++ b/target/arm/cpu.h
23
@@ -XXX,XX +XXX,XX @@
22
@@ -XXX,XX +XXX,XX @@
24
#define EXCP_LAZYFP 20 /* v7M fault during lazy FP stacking */
25
#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
23
#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
26
#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
24
#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
27
+#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
25
#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
26
+#define EXCP_VSERR 24
28
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
27
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
29
28
30
#define ARMV7M_EXCP_RESET 1
29
#define ARMV7M_EXCP_RESET 1
31
diff --git a/target/arm/helper.h b/target/arm/helper.h
30
@@ -XXX,XX +XXX,XX @@ enum {
32
index XXXXXXX..XXXXXXX 100644
31
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
33
--- a/target/arm/helper.h
32
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
34
+++ b/target/arm/helper.h
33
#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
35
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(add_saturate, i32, env, i32, i32)
34
+#define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0
36
DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
35
37
DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
36
/* The usual mapping for an AArch64 system register to its AArch32
38
DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
37
* counterpart is for the 32 bit world to have access to the lower
39
-DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32)
38
diff --git a/target/arm/internals.h b/target/arm/internals.h
40
-DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32)
39
index XXXXXXX..XXXXXXX 100644
41
+DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
40
--- a/target/arm/internals.h
42
+DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
41
+++ b/target/arm/internals.h
43
DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
42
@@ -XXX,XX +XXX,XX @@ void arm_cpu_update_virq(ARMCPU *cpu);
44
43
*/
45
#define PAS_OP(pfx) \
44
void arm_cpu_update_vfiq(ARMCPU *cpu);
45
46
+/**
47
+ * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
48
+ *
49
+ * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
50
+ * following a change to the HCR_EL2.VSE bit.
51
+ */
52
+void arm_cpu_update_vserr(ARMCPU *cpu);
53
+
54
/**
55
* arm_mmu_idx_el:
56
* @env: The cpu environment
57
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/syndrome.h
60
+++ b/target/arm/syndrome.h
61
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_pcalignment(void)
62
return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
63
}
64
65
+static inline uint32_t syn_serror(uint32_t extra)
66
+{
67
+ return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra;
68
+}
69
+
70
#endif /* TARGET_ARM_SYNDROME_H */
71
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
72
index XXXXXXX..XXXXXXX 100644
73
--- a/target/arm/cpu.c
74
+++ b/target/arm/cpu.c
75
@@ -XXX,XX +XXX,XX @@ static bool arm_cpu_has_work(CPUState *cs)
76
return (cpu->power_state != PSCI_OFF)
77
&& cs->interrupt_request &
78
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
79
- | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
80
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
81
| CPU_INTERRUPT_EXITTB);
82
}
83
84
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
85
return false;
86
}
87
return !(env->daif & PSTATE_I);
88
+ case EXCP_VSERR:
89
+ if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
90
+ /* VIRQs are only taken when hypervized. */
91
+ return false;
92
+ }
93
+ return !(env->daif & PSTATE_A);
94
default:
95
g_assert_not_reached();
96
}
97
@@ -XXX,XX +XXX,XX @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
98
goto found;
99
}
100
}
101
+ if (interrupt_request & CPU_INTERRUPT_VSERR) {
102
+ excp_idx = EXCP_VSERR;
103
+ target_el = 1;
104
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
105
+ cur_el, secure, hcr_el2)) {
106
+ /* Taking a virtual abort clears HCR_EL2.VSE */
107
+ env->cp15.hcr_el2 &= ~HCR_VSE;
108
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
109
+ goto found;
110
+ }
111
+ }
112
return false;
113
114
found:
115
@@ -XXX,XX +XXX,XX @@ void arm_cpu_update_vfiq(ARMCPU *cpu)
116
}
117
}
118
119
+void arm_cpu_update_vserr(ARMCPU *cpu)
120
+{
121
+ /*
122
+ * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
123
+ */
124
+ CPUARMState *env = &cpu->env;
125
+ CPUState *cs = CPU(cpu);
126
+
127
+ bool new_state = env->cp15.hcr_el2 & HCR_VSE;
128
+
129
+ if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
130
+ if (new_state) {
131
+ cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
132
+ } else {
133
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
134
+ }
135
+ }
136
+}
137
+
138
#ifndef CONFIG_USER_ONLY
139
static void arm_cpu_set_irq(void *opaque, int irq, int level)
140
{
46
diff --git a/target/arm/helper.c b/target/arm/helper.c
141
diff --git a/target/arm/helper.c b/target/arm/helper.c
47
index XXXXXXX..XXXXXXX 100644
142
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/helper.c
143
--- a/target/arm/helper.c
49
+++ b/target/arm/helper.c
144
+++ b/target/arm/helper.c
50
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(sxtb16)(uint32_t x)
145
@@ -XXX,XX +XXX,XX @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
51
return res;
146
}
52
}
147
}
53
148
54
+static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
149
- /* External aborts are not possible in QEMU so A bit is always clear */
55
+{
150
+ if (hcr_el2 & HCR_AMO) {
56
+ /*
151
+ if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
57
+ * Take a division-by-zero exception if necessary; otherwise return
152
+ ret |= CPSR_A;
58
+ * to get the usual non-trapping division behaviour (result of 0)
153
+ }
59
+ */
60
+ if (arm_feature(env, ARM_FEATURE_M)
61
+ && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
62
+ raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
63
+ }
154
+ }
64
+}
155
+
65
+
156
return ret;
66
uint32_t HELPER(uxtb16)(uint32_t x)
157
}
67
{
158
68
uint32_t res;
159
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
69
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(uxtb16)(uint32_t x)
160
g_assert(qemu_mutex_iothread_locked());
70
return res;
161
arm_cpu_update_virq(cpu);
71
}
162
arm_cpu_update_vfiq(cpu);
72
163
+ arm_cpu_update_vserr(cpu);
73
-int32_t HELPER(sdiv)(int32_t num, int32_t den)
164
}
74
+int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
165
75
{
166
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
76
if (den == 0) {
167
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(CPUState *cs)
77
+ handle_possible_div0_trap(env, GETPC());
78
return 0;
79
}
80
if (num == INT_MIN && den == -1) {
81
@@ -XXX,XX +XXX,XX @@ int32_t HELPER(sdiv)(int32_t num, int32_t den)
82
return num / den;
83
}
84
85
-uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
86
+uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
87
{
88
if (den == 0) {
89
+ handle_possible_div0_trap(env, GETPC());
90
return 0;
91
}
92
return num / den;
93
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(int idx)
94
[EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
95
[EXCP_LSERR] = "v8M LSERR UsageFault",
168
[EXCP_LSERR] = "v8M LSERR UsageFault",
96
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
169
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
97
+ [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
170
[EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
171
+ [EXCP_VSERR] = "Virtual SERR",
98
};
172
};
99
173
100
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
174
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
101
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
175
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
102
index XXXXXXX..XXXXXXX 100644
176
mask = CPSR_A | CPSR_I | CPSR_F;
103
--- a/target/arm/m_helper.c
177
offset = 4;
104
+++ b/target/arm/m_helper.c
105
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
106
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
107
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
108
break;
178
break;
109
+ case EXCP_DIVBYZERO:
179
+ case EXCP_VSERR:
110
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
180
+ {
111
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
181
+ /*
182
+ * Note that this is reported as a data abort, but the DFAR
183
+ * has an UNKNOWN value. Construct the SError syndrome from
184
+ * AET and ExT fields.
185
+ */
186
+ ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
187
+
188
+ if (extended_addresses_enabled(env)) {
189
+ env->exception.fsr = arm_fi_to_lfsc(&fi);
190
+ } else {
191
+ env->exception.fsr = arm_fi_to_sfsc(&fi);
192
+ }
193
+ env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
194
+ A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
195
+ qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n",
196
+ env->exception.fsr);
197
+
198
+ new_mode = ARM_CPU_MODE_ABT;
199
+ addr = 0x10;
200
+ mask = CPSR_A | CPSR_I;
201
+ offset = 8;
202
+ }
112
+ break;
203
+ break;
113
case EXCP_SWI:
204
case EXCP_SMC:
114
/* The PC already points to the next instruction. */
205
new_mode = ARM_CPU_MODE_MON;
115
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
206
addr = 0x08;
116
diff --git a/target/arm/translate.c b/target/arm/translate.c
207
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
117
index XXXXXXX..XXXXXXX 100644
208
case EXCP_VFIQ:
118
--- a/target/arm/translate.c
209
addr += 0x100;
119
+++ b/target/arm/translate.c
210
break;
120
@@ -XXX,XX +XXX,XX @@ static bool op_div(DisasContext *s, arg_rrr *a, bool u)
211
+ case EXCP_VSERR:
121
t1 = load_reg(s, a->rn);
212
+ addr += 0x180;
122
t2 = load_reg(s, a->rm);
213
+ /* Construct the SError syndrome from IDS and ISS fields. */
123
if (u) {
214
+ env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
124
- gen_helper_udiv(t1, t1, t2);
215
+ env->cp15.esr_el[new_el] = env->exception.syndrome;
125
+ gen_helper_udiv(t1, cpu_env, t1, t2);
216
+ break;
126
} else {
217
default:
127
- gen_helper_sdiv(t1, t1, t2);
218
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
128
+ gen_helper_sdiv(t1, cpu_env, t1, t2);
219
}
129
}
130
tcg_temp_free_i32(t2);
131
store_reg(s, a->rd, t1);
132
--
220
--
133
2.20.1
221
2.25.1
134
135
1
Implement the MVE VCTP insn, which sets the VPR.P0 predicate bits so
1
From: Richard Henderson <richard.henderson@linaro.org>
2
that any element at index Rn or greater is predicated. As
3
with VPNOT, this insn itself is predicable and subject to beatwise
4
execution.
5
2
6
The calculation of the mask is the same as is used to determine
3
Check for and defer any pending virtual SError.
7
ltpmask in mve_element_mask(), but we precalculate masklen in
8
generated code to avoid having to have 4 helpers specialized by size.
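The masklen/mask computation being described boils down to the following
(a standalone sketch in plain C, since MAKE_64BIT_MASK and mve_element_mask()
are QEMU internals):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the VCTP P0 mask: elements with index < Rn stay
     * unpredicated, everything at index Rn or above is predicated.
     * "size" is log2 of the element size in bytes (0..2), as in the
     * decode; P0 has one bit per byte of the 16-byte vector.
     */
    static uint16_t vctp_p0_mask(uint32_t rn, unsigned size)
    {
        unsigned masklen = (rn <= (16u >> size)) ? rn << size : 16;

        return masklen ? (uint16_t)((1u << masklen) - 1) : 0;
    }

    int main(void)
    {
        /* 32-bit elements (size 2), Rn = 3: the low 3 elements stay active */
        printf("0x%04x\n", vctp_p0_mask(3, 2));   /* prints 0x0fff */
        return 0;
    }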
9
4
10
We put the decode line in with the low-overhead-loop insns in
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
t32.decode because it's logically part of that collection of insn
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
patterns, even though it is an MVE only insn.
7
Message-id: 20220506180242.216785-17-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
target/arm/helper.h | 1 +
11
target/arm/a32.decode | 16 ++++++++------
12
target/arm/t32.decode | 18 ++++++++--------
13
target/arm/op_helper.c | 43 ++++++++++++++++++++++++++++++++++++++
14
target/arm/translate-a64.c | 17 +++++++++++++++
15
target/arm/translate.c | 23 ++++++++++++++++++++
16
6 files changed, 103 insertions(+), 15 deletions(-)
13
17
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
diff --git a/target/arm/helper.h b/target/arm/helper.h
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
index XXXXXXX..XXXXXXX 100644
16
---
20
--- a/target/arm/helper.h
17
target/arm/helper-mve.h | 2 ++
21
+++ b/target/arm/helper.h
18
target/arm/translate-a32.h | 1 +
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(wfe, void, env)
19
target/arm/t32.decode | 1 +
23
DEF_HELPER_1(yield, void, env)
20
target/arm/mve_helper.c | 20 ++++++++++++++++++++
24
DEF_HELPER_1(pre_hvc, void, env)
21
target/arm/translate-mve.c | 2 +-
25
DEF_HELPER_2(pre_smc, void, env, i32)
22
target/arm/translate.c | 33 +++++++++++++++++++++++++++++++++
26
+DEF_HELPER_1(vesb, void, env)
23
6 files changed, 58 insertions(+), 1 deletion(-)
27
24
28
DEF_HELPER_3(cpsr_write, void, env, i32, i32)
25
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
29
DEF_HELPER_2(cpsr_write_eret, void, env, i32)
26
index XXXXXXX..XXXXXXX 100644
30
diff --git a/target/arm/a32.decode b/target/arm/a32.decode
27
--- a/target/arm/helper-mve.h
31
index XXXXXXX..XXXXXXX 100644
28
+++ b/target/arm/helper-mve.h
32
--- a/target/arm/a32.decode
29
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
33
+++ b/target/arm/a32.decode
30
DEF_HELPER_FLAGS_4(mve_vpsel, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
34
@@ -XXX,XX +XXX,XX @@ SMULTT .... 0001 0110 .... 0000 .... 1110 .... @rd0mn
31
DEF_HELPER_FLAGS_1(mve_vpnot, TCG_CALL_NO_WG, void, env)
35
32
36
{
33
+DEF_HELPER_FLAGS_2(mve_vctp, TCG_CALL_NO_WG, void, env, i32)
37
{
34
+
38
- YIELD ---- 0011 0010 0000 1111 ---- 0000 0001
35
DEF_HELPER_FLAGS_4(mve_vaddb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
39
- WFE ---- 0011 0010 0000 1111 ---- 0000 0010
36
DEF_HELPER_FLAGS_4(mve_vaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
40
- WFI ---- 0011 0010 0000 1111 ---- 0000 0011
37
DEF_HELPER_FLAGS_4(mve_vaddw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
41
+ [
38
diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
42
+ YIELD ---- 0011 0010 0000 1111 ---- 0000 0001
39
index XXXXXXX..XXXXXXX 100644
43
+ WFE ---- 0011 0010 0000 1111 ---- 0000 0010
40
--- a/target/arm/translate-a32.h
44
+ WFI ---- 0011 0010 0000 1111 ---- 0000 0011
41
+++ b/target/arm/translate-a32.h
45
42
@@ -XXX,XX +XXX,XX @@ long neon_element_offset(int reg, int element, MemOp memop);
46
- # TODO: Implement SEV, SEVL; may help SMP performance.
43
void gen_rev16(TCGv_i32 dest, TCGv_i32 var);
47
- # SEV ---- 0011 0010 0000 1111 ---- 0000 0100
44
void clear_eci_state(DisasContext *s);
48
- # SEVL ---- 0011 0010 0000 1111 ---- 0000 0101
45
bool mve_eci_check(DisasContext *s);
49
+ # TODO: Implement SEV, SEVL; may help SMP performance.
46
+void mve_update_eci(DisasContext *s);
50
+ # SEV ---- 0011 0010 0000 1111 ---- 0000 0100
47
void mve_update_and_store_eci(DisasContext *s);
51
+ # SEVL ---- 0011 0010 0000 1111 ---- 0000 0101
48
bool mve_skip_vmov(DisasContext *s, int vn, int index, int size);
52
+
49
53
+ ESB ---- 0011 0010 0000 1111 ---- 0001 0000
54
+ ]
55
56
# The canonical nop ends in 00000000, but the whole of the
57
# rest of the space executes as nop if otherwise unsupported.
50
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
58
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
51
index XXXXXXX..XXXXXXX 100644
59
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/t32.decode
60
--- a/target/arm/t32.decode
53
+++ b/target/arm/t32.decode
61
+++ b/target/arm/t32.decode
54
@@ -XXX,XX +XXX,XX @@ BL 1111 0. .......... 11.1 ............ @branch24
62
@@ -XXX,XX +XXX,XX @@ CLZ 1111 1010 1011 ---- 1111 .... 1000 .... @rdm
55
# This is DLSTP
63
[
56
DLS 1111 0 0000 0 size:2 rn:4 1110 0000 0000 0001
64
# Hints, and CPS
57
}
65
{
58
+ VCTP 1111 0 0000 0 size:2 rn:4 1110 1000 0000 0001
66
- YIELD 1111 0011 1010 1111 1000 0000 0000 0001
59
]
67
- WFE 1111 0011 1010 1111 1000 0000 0000 0010
60
}
68
- WFI 1111 0011 1010 1111 1000 0000 0000 0011
61
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
69
+ [
62
index XXXXXXX..XXXXXXX 100644
70
+ YIELD 1111 0011 1010 1111 1000 0000 0000 0001
63
--- a/target/arm/mve_helper.c
71
+ WFE 1111 0011 1010 1111 1000 0000 0000 0010
64
+++ b/target/arm/mve_helper.c
72
+ WFI 1111 0011 1010 1111 1000 0000 0000 0011
65
@@ -XXX,XX +XXX,XX @@ void HELPER(mve_vpnot)(CPUARMState *env)
73
66
mve_advance_vpt(env);
74
- # TODO: Implement SEV, SEVL; may help SMP performance.
67
}
75
- # SEV 1111 0011 1010 1111 1000 0000 0000 0100
68
76
- # SEVL 1111 0011 1010 1111 1000 0000 0000 0101
69
+/*
77
+ # TODO: Implement SEV, SEVL; may help SMP performance.
70
+ * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed,
78
+ # SEV 1111 0011 1010 1111 1000 0000 0000 0100
71
+ * otherwise set according to value of Rn. The calculation of
79
+ # SEVL 1111 0011 1010 1111 1000 0000 0000 0101
72
+ * newmask here works in the same way as the calculation of the
80
73
+ * ltpmask in mve_element_mask(), but we have pre-calculated
81
- # For M-profile minimal-RAS ESB can be a NOP, which is the
74
+ * the masklen in the generated code.
82
- # default behaviour since it is in the hint space.
75
+ */
83
- # ESB 1111 0011 1010 1111 1000 0000 0001 0000
76
+void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen)
84
+ ESB 1111 0011 1010 1111 1000 0000 0001 0000
77
+{
85
+ ]
78
+ uint16_t mask = mve_element_mask(env);
86
79
+ uint16_t eci_mask = mve_eci_mask(env);
87
# The canonical nop ends in 0000 0000, but the whole rest
80
+ uint16_t newmask;
88
# of the space is "reserved hint, behaves as nop".
81
+
89
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
82
+ assert(masklen <= 16);
90
index XXXXXXX..XXXXXXX 100644
83
+ newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
91
--- a/target/arm/op_helper.c
84
+ newmask &= mask;
92
+++ b/target/arm/op_helper.c
85
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask);
93
@@ -XXX,XX +XXX,XX @@ void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
86
+ mve_advance_vpt(env);
94
access_type, mmu_idx, ra);
87
+}
88
+
89
#define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \
90
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
91
{ \
92
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/target/arm/translate-mve.c
95
+++ b/target/arm/translate-mve.c
96
@@ -XXX,XX +XXX,XX @@ bool mve_eci_check(DisasContext *s)
97
}
95
}
98
}
96
}
99
97
+
100
-static void mve_update_eci(DisasContext *s)
98
+/*
101
+void mve_update_eci(DisasContext *s)
99
+ * This function corresponds to AArch64.vESBOperation().
102
{
100
+ * Note that the AArch32 version is not functionally different.
103
/*
101
+ */
104
* The helper function will always update the CPUState field,
102
+void HELPER(vesb)(CPUARMState *env)
103
+{
104
+ /*
105
+ * The EL2Enabled() check is done inside arm_hcr_el2_eff,
106
+ * and will return HCR_EL2.VSE == 0, so nothing happens.
107
+ */
108
+ uint64_t hcr = arm_hcr_el2_eff(env);
109
+ bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
110
+ bool pending = enabled && (hcr & HCR_VSE);
111
+ bool masked = (env->daif & PSTATE_A);
112
+
113
+ /* If VSE pending and masked, defer the exception. */
114
+ if (pending && masked) {
115
+ uint32_t syndrome;
116
+
117
+ if (arm_el_is_aa64(env, 1)) {
118
+ /* Copy across IDS and ISS from VSESR. */
119
+ syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
120
+ } else {
121
+ ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };
122
+
123
+ if (extended_addresses_enabled(env)) {
124
+ syndrome = arm_fi_to_lfsc(&fi);
125
+ } else {
126
+ syndrome = arm_fi_to_sfsc(&fi);
127
+ }
128
+ /* Copy across AET and ExT from VSESR. */
129
+ syndrome |= env->cp15.vsesr_el2 & 0xd000;
130
+ }
131
+
132
+ /* Set VDISR_EL2.A along with the syndrome. */
133
+ env->cp15.vdisr_el2 = syndrome | (1u << 31);
134
+
135
+ /* Clear pending virtual SError */
136
+ env->cp15.hcr_el2 &= ~HCR_VSE;
137
+ cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
138
+ }
139
+}
140
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
141
index XXXXXXX..XXXXXXX 100644
142
--- a/target/arm/translate-a64.c
143
+++ b/target/arm/translate-a64.c
144
@@ -XXX,XX +XXX,XX @@ static void handle_hint(DisasContext *s, uint32_t insn,
145
gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
146
}
147
break;
148
+ case 0b10000: /* ESB */
149
+ /* Without RAS, we must implement this as NOP. */
150
+ if (dc_isar_feature(aa64_ras, s)) {
151
+ /*
152
+ * QEMU does not have a source of physical SErrors,
153
+ * so we are only concerned with virtual SErrors.
154
+ * The pseudocode in the ARM for this case is
155
+ * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
156
+ * AArch64.vESBOperation();
157
+ * Most of the condition can be evaluated at translation time.
158
+ * Test for EL2 present, and defer test for SEL2 to runtime.
159
+ */
160
+ if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
161
+ gen_helper_vesb(cpu_env);
162
+ }
163
+ }
164
+ break;
165
case 0b11000: /* PACIAZ */
166
if (s->pauth_active) {
167
gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
105
diff --git a/target/arm/translate.c b/target/arm/translate.c
168
diff --git a/target/arm/translate.c b/target/arm/translate.c
106
index XXXXXXX..XXXXXXX 100644
169
index XXXXXXX..XXXXXXX 100644
107
--- a/target/arm/translate.c
170
--- a/target/arm/translate.c
108
+++ b/target/arm/translate.c
171
+++ b/target/arm/translate.c
109
@@ -XXX,XX +XXX,XX @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
172
@@ -XXX,XX +XXX,XX @@ static bool trans_WFI(DisasContext *s, arg_WFI *a)
110
return true;
173
return true;
111
}
174
}
112
175
113
+static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
176
+static bool trans_ESB(DisasContext *s, arg_ESB *a)
114
+{
177
+{
115
+ /*
178
+ /*
116
+ * M-profile Create Vector Tail Predicate. This insn is itself
179
+ * For M-profile, minimal-RAS ESB can be a NOP.
117
+ * predicated and is subject to beatwise execution.
180
+ * Without RAS, we must implement this as NOP.
118
+ */
181
+ */
119
+ TCGv_i32 rn_shifted, masklen;
182
+ if (!arm_dc_feature(s, ARM_FEATURE_M) && dc_isar_feature(aa32_ras, s)) {
120
+
183
+ /*
121
+ if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) {
184
+ * QEMU does not have a source of physical SErrors,
122
+ return false;
185
+ * so we are only concerned with virtual SErrors.
186
+ * The pseudocode in the ARM for this case is
187
+ * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
188
+ * AArch32.vESBOperation();
189
+ * Most of the condition can be evaluated at translation time.
190
+ * Test for EL2 present, and defer test for SEL2 to runtime.
191
+ */
192
+ if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
193
+ gen_helper_vesb(cpu_env);
194
+ }
123
+ }
195
+ }
124
+
125
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
126
+ return true;
127
+ }
128
+
129
+ /*
130
+ * We pre-calculate the mask length here to avoid having
131
+ * to have multiple helpers specialized for size.
132
+ * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16".
133
+ */
134
+ rn_shifted = tcg_temp_new_i32();
135
+ masklen = load_reg(s, a->rn);
136
+ tcg_gen_shli_i32(rn_shifted, masklen, a->size);
137
+ tcg_gen_movcond_i32(TCG_COND_LEU, masklen,
138
+ masklen, tcg_constant_i32(1 << (4 - a->size)),
139
+ rn_shifted, tcg_constant_i32(16));
140
+ gen_helper_mve_vctp(cpu_env, masklen);
141
+ tcg_temp_free_i32(masklen);
142
+ tcg_temp_free_i32(rn_shifted);
143
+ mve_update_eci(s);
144
+ return true;
196
+ return true;
145
+}
197
+}
146
198
+
147
static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
199
static bool trans_NOP(DisasContext *s, arg_NOP *a)
148
{
200
{
201
return true;
149
--
202
--
150
2.20.1
203
2.25.1
151
152
1
From: Guenter Roeck <linux@roeck-us.net>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Instantiate SAI1/2/3 as unimplemented devices to avoid Linux kernel crashes
4
such as the following.
5
6
Unhandled fault: external abort on non-linefetch (0x808) at 0xd19b0000
7
pgd = (ptrval)
8
[d19b0000] *pgd=82711811, *pte=308a0653, *ppte=308a0453
9
Internal error: : 808 [#1] SMP ARM
10
Modules linked in:
11
CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.14.0-rc5 #1
12
...
13
[<c095e974>] (regmap_mmio_write32le) from [<c095eb48>] (regmap_mmio_write+0x3c/0x54)
14
[<c095eb48>] (regmap_mmio_write) from [<c09580f4>] (_regmap_write+0x4c/0x1f0)
15
[<c09580f4>] (_regmap_write) from [<c0959b28>] (regmap_write+0x3c/0x60)
16
[<c0959b28>] (regmap_write) from [<c0d41130>] (fsl_sai_runtime_resume+0x9c/0x1ec)
17
[<c0d41130>] (fsl_sai_runtime_resume) from [<c0942464>] (__rpm_callback+0x3c/0x108)
18
[<c0942464>] (__rpm_callback) from [<c0942590>] (rpm_callback+0x60/0x64)
19
[<c0942590>] (rpm_callback) from [<c0942b60>] (rpm_resume+0x5cc/0x808)
20
[<c0942b60>] (rpm_resume) from [<c0942dfc>] (__pm_runtime_resume+0x60/0xa0)
21
[<c0942dfc>] (__pm_runtime_resume) from [<c0d4231c>] (fsl_sai_probe+0x2b8/0x65c)
22
[<c0d4231c>] (fsl_sai_probe) from [<c0935b08>] (platform_probe+0x58/0xb8)
23
[<c0935b08>] (platform_probe) from [<c0933264>] (really_probe.part.0+0x9c/0x334)
24
[<c0933264>] (really_probe.part.0) from [<c093359c>] (__driver_probe_device+0xa0/0x138)
25
[<c093359c>] (__driver_probe_device) from [<c0933664>] (driver_probe_device+0x30/0xc8)
26
[<c0933664>] (driver_probe_device) from [<c0933c88>] (__driver_attach+0x90/0x130)
27
[<c0933c88>] (__driver_attach) from [<c0931060>] (bus_for_each_dev+0x78/0xb8)
28
[<c0931060>] (bus_for_each_dev) from [<c093254c>] (bus_add_driver+0xf0/0x1d8)
29
[<c093254c>] (bus_add_driver) from [<c0934a30>] (driver_register+0x88/0x118)
30
[<c0934a30>] (driver_register) from [<c01022c0>] (do_one_initcall+0x7c/0x3a4)
31
[<c01022c0>] (do_one_initcall) from [<c1601204>] (kernel_init_freeable+0x198/0x22c)
32
[<c1601204>] (kernel_init_freeable) from [<c0f5ff2c>] (kernel_init+0x10/0x128)
33
[<c0f5ff2c>] (kernel_init) from [<c010013c>] (ret_from_fork+0x14/0x38)
34
35
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
36
Message-id: 20210810175607.538090-1-linux@roeck-us.net
37
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220506180242.216785-18-richard.henderson@linaro.org
38
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
39
---
7
---
40
include/hw/arm/fsl-imx7.h | 5 +++++
8
docs/system/arm/emulation.rst | 1 +
41
hw/arm/fsl-imx7.c | 7 +++++++
9
target/arm/cpu64.c | 1 +
42
2 files changed, 12 insertions(+)
10
target/arm/cpu_tcg.c | 1 +
11
3 files changed, 3 insertions(+)
43
12
44
diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
13
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
45
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
46
--- a/include/hw/arm/fsl-imx7.h
15
--- a/docs/system/arm/emulation.rst
47
+++ b/include/hw/arm/fsl-imx7.h
16
+++ b/docs/system/arm/emulation.rst
48
@@ -XXX,XX +XXX,XX @@ enum FslIMX7MemoryMap {
17
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
49
FSL_IMX7_UART6_ADDR = 0x30A80000,
18
- FEAT_PMULL (PMULL, PMULL2 instructions)
50
FSL_IMX7_UART7_ADDR = 0x30A90000,
19
- FEAT_PMUv3p1 (PMU Extensions v3.1)
51
20
- FEAT_PMUv3p4 (PMU Extensions v3.4)
52
+ FSL_IMX7_SAI1_ADDR = 0x308A0000,
21
+- FEAT_RAS (Reliability, availability, and serviceability)
53
+ FSL_IMX7_SAI2_ADDR = 0x308B0000,
22
- FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
54
+ FSL_IMX7_SAI3_ADDR = 0x308C0000,
23
- FEAT_RNG (Random number generator)
55
+ FSL_IMX7_SAIn_SIZE = 0x10000,
24
- FEAT_SB (Speculation Barrier)
56
+
25
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
57
FSL_IMX7_ENET1_ADDR = 0x30BE0000,
58
FSL_IMX7_ENET2_ADDR = 0x30BF0000,
59
60
diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c
61
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
62
--- a/hw/arm/fsl-imx7.c
27
--- a/target/arm/cpu64.c
63
+++ b/hw/arm/fsl-imx7.c
28
+++ b/target/arm/cpu64.c
64
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
29
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
65
create_unimplemented_device("can1", FSL_IMX7_CAN1_ADDR, FSL_IMX7_CANn_SIZE);
30
t = cpu->isar.id_aa64pfr0;
66
create_unimplemented_device("can2", FSL_IMX7_CAN2_ADDR, FSL_IMX7_CANn_SIZE);
31
t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */
67
32
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */
68
+ /*
33
+ t = FIELD_DP64(t, ID_AA64PFR0, RAS, 1); /* FEAT_RAS */
69
+ * SAI (Audio SSI (Synchronous Serial Interface))
34
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
70
+ */
35
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
71
+ create_unimplemented_device("sai1", FSL_IMX7_SAI1_ADDR, FSL_IMX7_SAIn_SIZE);
36
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
72
+ create_unimplemented_device("sai2", FSL_IMX7_SAI2_ADDR, FSL_IMX7_SAIn_SIZE);
37
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
73
+ create_unimplemented_device("sai2", FSL_IMX7_SAI3_ADDR, FSL_IMX7_SAIn_SIZE);
38
index XXXXXXX..XXXXXXX 100644
74
+
39
--- a/target/arm/cpu_tcg.c
75
/*
40
+++ b/target/arm/cpu_tcg.c
76
* OCOTP
41
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
77
*/
42
43
t = cpu->isar.id_pfr0;
44
t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
45
+ t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */
46
cpu->isar.id_pfr0 = t;
47
48
t = cpu->isar.id_pfr2;
78
--
49
--
79
2.20.1
50
2.25.1
80
81
1
From: Guenter Roeck <linux@roeck-us.net>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Instantiate SAI1/2/3 and ASRC as unimplemented devices to avoid random
3
This feature is AArch64 only, and applies to physical SErrors,
4
Linux kernel crashes, such as
4
which QEMU does not implement, thus the feature is a nop.
5
5
6
Unhandled fault: external abort on non-linefetch (0x808) at 0xd1580010
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
pgd = (ptrval)
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
[d1580010] *pgd=8231b811, *pte=02034653, *ppte=02034453
8
Message-id: 20220506180242.216785-19-richard.henderson@linaro.org
9
Internal error: : 808 [#1] SMP ARM
10
...
11
[<c095e974>] (regmap_mmio_write32le) from [<c095eb48>] (regmap_mmio_write+0x3c/0x54)
12
[<c095eb48>] (regmap_mmio_write) from [<c09580f4>] (_regmap_write+0x4c/0x1f0)
13
[<c09580f4>] (_regmap_write) from [<c095837c>] (_regmap_update_bits+0xe4/0xec)
14
[<c095837c>] (_regmap_update_bits) from [<c09599b4>] (regmap_update_bits_base+0x50/0x74)
15
[<c09599b4>] (regmap_update_bits_base) from [<c0d3e9e4>] (fsl_asrc_runtime_resume+0x1e4/0x21c)
16
[<c0d3e9e4>] (fsl_asrc_runtime_resume) from [<c0942464>] (__rpm_callback+0x3c/0x108)
17
[<c0942464>] (__rpm_callback) from [<c0942590>] (rpm_callback+0x60/0x64)
18
[<c0942590>] (rpm_callback) from [<c0942b60>] (rpm_resume+0x5cc/0x808)
19
[<c0942b60>] (rpm_resume) from [<c0942dfc>] (__pm_runtime_resume+0x60/0xa0)
20
[<c0942dfc>] (__pm_runtime_resume) from [<c0d3ecc4>] (fsl_asrc_probe+0x2a8/0x708)
21
[<c0d3ecc4>] (fsl_asrc_probe) from [<c0935b08>] (platform_probe+0x58/0xb8)
22
[<c0935b08>] (platform_probe) from [<c0933264>] (really_probe.part.0+0x9c/0x334)
23
[<c0933264>] (really_probe.part.0) from [<c093359c>] (__driver_probe_device+0xa0/0x138)
24
[<c093359c>] (__driver_probe_device) from [<c0933664>] (driver_probe_device+0x30/0xc8)
25
[<c0933664>] (driver_probe_device) from [<c0933c88>] (__driver_attach+0x90/0x130)
26
[<c0933c88>] (__driver_attach) from [<c0931060>] (bus_for_each_dev+0x78/0xb8)
27
[<c0931060>] (bus_for_each_dev) from [<c093254c>] (bus_add_driver+0xf0/0x1d8)
28
[<c093254c>] (bus_add_driver) from [<c0934a30>] (driver_register+0x88/0x118)
29
[<c0934a30>] (driver_register) from [<c01022c0>] (do_one_initcall+0x7c/0x3a4)
30
[<c01022c0>] (do_one_initcall) from [<c1601204>] (kernel_init_freeable+0x198/0x22c)
31
[<c1601204>] (kernel_init_freeable) from [<c0f5ff2c>] (kernel_init+0x10/0x128)
32
[<c0f5ff2c>] (kernel_init) from [<c010013c>] (ret_from_fork+0x14/0x38)
33
34
or
35
36
Unhandled fault: external abort on non-linefetch (0x808) at 0xd19b0000
37
pgd = (ptrval)
38
[d19b0000] *pgd=82711811, *pte=308a0653, *ppte=308a0453
39
Internal error: : 808 [#1] SMP ARM
40
...
41
[<c095e974>] (regmap_mmio_write32le) from [<c095eb48>] (regmap_mmio_write+0x3c/0x54)
42
[<c095eb48>] (regmap_mmio_write) from [<c09580f4>] (_regmap_write+0x4c/0x1f0)
43
[<c09580f4>] (_regmap_write) from [<c0959b28>] (regmap_write+0x3c/0x60)
44
[<c0959b28>] (regmap_write) from [<c0d41130>] (fsl_sai_runtime_resume+0x9c/0x1ec)
45
[<c0d41130>] (fsl_sai_runtime_resume) from [<c0942464>] (__rpm_callback+0x3c/0x108)
46
[<c0942464>] (__rpm_callback) from [<c0942590>] (rpm_callback+0x60/0x64)
47
[<c0942590>] (rpm_callback) from [<c0942b60>] (rpm_resume+0x5cc/0x808)
48
[<c0942b60>] (rpm_resume) from [<c0942dfc>] (__pm_runtime_resume+0x60/0xa0)
49
[<c0942dfc>] (__pm_runtime_resume) from [<c0d4231c>] (fsl_sai_probe+0x2b8/0x65c)
50
[<c0d4231c>] (fsl_sai_probe) from [<c0935b08>] (platform_probe+0x58/0xb8)
51
[<c0935b08>] (platform_probe) from [<c0933264>] (really_probe.part.0+0x9c/0x334)
52
[<c0933264>] (really_probe.part.0) from [<c093359c>] (__driver_probe_device+0xa0/0x138)
53
[<c093359c>] (__driver_probe_device) from [<c0933664>] (driver_probe_device+0x30/0xc8)
54
[<c0933664>] (driver_probe_device) from [<c0933c88>] (__driver_attach+0x90/0x130)
55
[<c0933c88>] (__driver_attach) from [<c0931060>] (bus_for_each_dev+0x78/0xb8)
56
[<c0931060>] (bus_for_each_dev) from [<c093254c>] (bus_add_driver+0xf0/0x1d8)
57
[<c093254c>] (bus_add_driver) from [<c0934a30>] (driver_register+0x88/0x118)
58
[<c0934a30>] (driver_register) from [<c01022c0>] (do_one_initcall+0x7c/0x3a4)
59
[<c01022c0>] (do_one_initcall) from [<c1601204>] (kernel_init_freeable+0x198/0x22c)
60
[<c1601204>] (kernel_init_freeable) from [<c0f5ff2c>] (kernel_init+0x10/0x128)
61
[<c0f5ff2c>] (kernel_init) from [<c010013c>] (ret_from_fork+0x14/0x38)
62
63
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
64
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
65
Message-id: 20210810160318.87376-1-linux@roeck-us.net
66
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
67
---
10
---
68
hw/arm/fsl-imx6ul.c | 12 ++++++++++++
11
docs/system/arm/emulation.rst | 1 +
69
1 file changed, 12 insertions(+)
12
target/arm/cpu64.c | 1 +
13
2 files changed, 2 insertions(+)
70
14
71
diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
15
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
72
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
73
--- a/hw/arm/fsl-imx6ul.c
17
--- a/docs/system/arm/emulation.rst
74
+++ b/hw/arm/fsl-imx6ul.c
18
+++ b/docs/system/arm/emulation.rst
75
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
19
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
76
*/
20
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
77
create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR, 0x4000);
21
- FEAT_HPDS (Hierarchical permission disables)
78
22
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
79
+ /*
23
+- FEAT_IESB (Implicit error synchronization event)
80
+ * SAI (Audio SSI (Synchronous Serial Interface))
24
- FEAT_JSCVT (JavaScript conversion instructions)
81
+ */
25
- FEAT_LOR (Limited ordering regions)
82
+ create_unimplemented_device("sai1", FSL_IMX6UL_SAI1_ADDR, 0x4000);
26
- FEAT_LPA (Large Physical Address space)
83
+ create_unimplemented_device("sai2", FSL_IMX6UL_SAI2_ADDR, 0x4000);
27
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
84
+ create_unimplemented_device("sai3", FSL_IMX6UL_SAI3_ADDR, 0x4000);
28
index XXXXXXX..XXXXXXX 100644
85
+
29
--- a/target/arm/cpu64.c
86
/*
30
+++ b/target/arm/cpu64.c
87
* PWM
31
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
88
*/
32
t = cpu->isar.id_aa64mmfr2;
89
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
33
t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */
90
create_unimplemented_device("pwm3", FSL_IMX6UL_PWM3_ADDR, 0x4000);
34
t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */
91
create_unimplemented_device("pwm4", FSL_IMX6UL_PWM4_ADDR, 0x4000);
35
+ t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */
92
36
t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
93
+ /*
37
t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */
94
+ * Audio ASRC (asynchronous sample rate converter)
38
t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
95
+ */
96
+ create_unimplemented_device("asrc", FSL_IMX6UL_ASRC_ADDR, 0x4000);
97
+
98
/*
99
* CAN
100
*/
101
--
39
--
102
2.20.1
40
2.25.1
103
104
1
From: Jan Luebbe <jlu@pengutronix.de>

Break events are currently only handled by chardev/char-serial.c, so we
just ignore errors, which results in no behaviour change for other
chardevs.

Signed-off-by: Jan Luebbe <jlu@pengutronix.de>
Message-id: 20210806144700.3751979-1-jlu@pengutronix.de
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

From: Richard Henderson <richard.henderson@linaro.org>

This extension concerns branch speculation, which TCG does
not implement. Thus we can trivially enable this feature.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220506180242.216785-20-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/char/pl011.c | 6 ++++++
1 file changed, 6 insertions(+)

docs/system/arm/emulation.rst | 1 +
target/arm/cpu64.c | 1 +
target/arm/cpu_tcg.c | 1 +
3 files changed, 3 insertions(+)
14
15
15
diff --git a/hw/char/pl011.c b/hw/char/pl011.c
16
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
16
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/char/pl011.c
18
--- a/docs/system/arm/emulation.rst
18
+++ b/hw/char/pl011.c
19
+++ b/docs/system/arm/emulation.rst
19
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
20
#include "hw/qdev-properties-system.h"
21
- FEAT_BBM at level 2 (Translation table break-before-make levels)
21
#include "migration/vmstate.h"
22
- FEAT_BF16 (AArch64 BFloat16 instructions)
22
#include "chardev/char-fe.h"
23
- FEAT_BTI (Branch Target Identification)
23
+#include "chardev/char-serial.h"
24
+- FEAT_CSV2 (Cache speculation variant 2)
24
#include "qemu/log.h"
25
- FEAT_DIT (Data Independent Timing instructions)
25
#include "qemu/module.h"
26
- FEAT_DPB (DC CVAP instruction)
26
#include "trace.h"
27
- FEAT_Debugv8p2 (Debug changes for v8.2)
27
@@ -XXX,XX +XXX,XX @@ static void pl011_write(void *opaque, hwaddr offset,
28
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
28
s->read_count = 0;
29
index XXXXXXX..XXXXXXX 100644
29
s->read_pos = 0;
30
--- a/target/arm/cpu64.c
30
}
31
+++ b/target/arm/cpu64.c
31
+ if ((s->lcr ^ value) & 0x1) {
32
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
32
+ int break_enable = value & 0x1;
33
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
33
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK,
34
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
34
+ &break_enable);
35
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
35
+ }
36
+ t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 1); /* FEAT_CSV2 */
36
s->lcr = value;
37
cpu->isar.id_aa64pfr0 = t;
37
pl011_set_read_trigger(s);
38
38
break;
39
t = cpu->isar.id_aa64pfr1;
40
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/cpu_tcg.c
43
+++ b/target/arm/cpu_tcg.c
44
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
45
cpu->isar.id_mmfr4 = t;
46
47
t = cpu->isar.id_pfr0;
48
+ t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CSV2 */
49
t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
50
t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */
51
cpu->isar.id_pfr0 = t;
39
--
2.25.1

Implement the MVE interleaving load/store functions VLD2, VLD4, VST2
and VST4. VLD2 loads 16 bytes of data from memory and writes to 2
consecutive Qregs; VLD4 loads 16 bytes of data from memory and writes
to 4 consecutive Qregs. The 'pattern' field in the encoding
determines the offset into memory which is accessed and also which
elements in the Qregs are written to. (The intention is that a
sequence of four consecutive VLD4 with different pattern values
performs a complete de-interleaving load of 64 bytes into all
elements of the 4 Qregs.) VST2 and VST4 do the same, but for stores.

From: Richard Henderson <richard.henderson@linaro.org>

There is no branch prediction in TCG, so there is no need to include
the context number in the predictor; all we need to do is add the
state for SCXTNUM_ELx.
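
As an aside on the VLD2/VLD4/VST2/VST4 patch above, here is an
illustrative sketch only (not part of the patch; the function name is
invented for this note): the combined effect of issuing all four VLD4
pattern values with 8-bit elements on the same base address is the
usual de-interleave-by-4 of 64 bytes:

    #include <stdint.h>

    /* net effect of VLD40..VLD43 (size .8) reading 64 bytes at 'mem' */
    void vld4_net_effect(const uint8_t mem[64], uint8_t q[4][16])
    {
        for (int i = 0; i < 16; i++) {      /* element index in each Qreg */
            for (int r = 0; r < 4; r++) {   /* destination Qreg number */
                q[r][i] = mem[4 * i + r];   /* de-interleave by 4 */
            }
        }
    }

Each individual VLD4<pat> instruction only loads 16 of those 64 bytes;
the pattern value selects which offsets are touched, as the offset
tables in the helpers below show.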
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220506180242.216785-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper-mve.h | 48 ++++++
target/arm/mve.decode | 11 ++
target/arm/mve_helper.c | 342 +++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 94 ++++++++++
4 files changed, 495 insertions(+)

docs/system/arm/emulation.rst | 3 ++
target/arm/cpu.h | 16 +++++++++
target/arm/cpu.c | 5 +++
target/arm/cpu64.c | 3 +-
target/arm/helper.c | 61 ++++++++++++++++++++++++++++++++++-
5 files changed, 86 insertions(+), 2 deletions(-)
19
18
20
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
19
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
21
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/helper-mve.h
21
--- a/docs/system/arm/emulation.rst
23
+++ b/target/arm/helper-mve.h
22
+++ b/docs/system/arm/emulation.rst
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vldrd_sg_wb_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
25
DEF_HELPER_FLAGS_4(mve_vstrw_sg_wb_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
- FEAT_BF16 (AArch64 BFloat16 instructions)
26
DEF_HELPER_FLAGS_4(mve_vstrd_sg_wb_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
- FEAT_BTI (Branch Target Identification)
27
26
- FEAT_CSV2 (Cache speculation variant 2)
28
+DEF_HELPER_FLAGS_3(mve_vld20b, TCG_CALL_NO_WG, void, env, i32, i32)
27
+- FEAT_CSV2_1p1 (Cache speculation variant 2, version 1.1)
29
+DEF_HELPER_FLAGS_3(mve_vld20h, TCG_CALL_NO_WG, void, env, i32, i32)
28
+- FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2)
30
+DEF_HELPER_FLAGS_3(mve_vld20w, TCG_CALL_NO_WG, void, env, i32, i32)
29
+- FEAT_CSV2_2 (Cache speculation variant 2, version 2)
31
+
30
- FEAT_DIT (Data Independent Timing instructions)
32
+DEF_HELPER_FLAGS_3(mve_vld21b, TCG_CALL_NO_WG, void, env, i32, i32)
31
- FEAT_DPB (DC CVAP instruction)
33
+DEF_HELPER_FLAGS_3(mve_vld21h, TCG_CALL_NO_WG, void, env, i32, i32)
32
- FEAT_Debugv8p2 (Debug changes for v8.2)
34
+DEF_HELPER_FLAGS_3(mve_vld21w, TCG_CALL_NO_WG, void, env, i32, i32)
33
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
35
+
34
index XXXXXXX..XXXXXXX 100644
36
+DEF_HELPER_FLAGS_3(mve_vld40b, TCG_CALL_NO_WG, void, env, i32, i32)
35
--- a/target/arm/cpu.h
37
+DEF_HELPER_FLAGS_3(mve_vld40h, TCG_CALL_NO_WG, void, env, i32, i32)
36
+++ b/target/arm/cpu.h
38
+DEF_HELPER_FLAGS_3(mve_vld40w, TCG_CALL_NO_WG, void, env, i32, i32)
37
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
39
+
38
ARMPACKey apdb;
40
+DEF_HELPER_FLAGS_3(mve_vld41b, TCG_CALL_NO_WG, void, env, i32, i32)
39
ARMPACKey apga;
41
+DEF_HELPER_FLAGS_3(mve_vld41h, TCG_CALL_NO_WG, void, env, i32, i32)
40
} keys;
42
+DEF_HELPER_FLAGS_3(mve_vld41w, TCG_CALL_NO_WG, void, env, i32, i32)
41
+
43
+
42
+ uint64_t scxtnum_el[4];
44
+DEF_HELPER_FLAGS_3(mve_vld42b, TCG_CALL_NO_WG, void, env, i32, i32)
43
#endif
45
+DEF_HELPER_FLAGS_3(mve_vld42h, TCG_CALL_NO_WG, void, env, i32, i32)
44
46
+DEF_HELPER_FLAGS_3(mve_vld42w, TCG_CALL_NO_WG, void, env, i32, i32)
45
#if defined(CONFIG_USER_ONLY)
47
+
46
@@ -XXX,XX +XXX,XX @@ void pmu_init(ARMCPU *cpu);
48
+DEF_HELPER_FLAGS_3(mve_vld43b, TCG_CALL_NO_WG, void, env, i32, i32)
47
#define SCTLR_WXN (1U << 19)
49
+DEF_HELPER_FLAGS_3(mve_vld43h, TCG_CALL_NO_WG, void, env, i32, i32)
48
#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
50
+DEF_HELPER_FLAGS_3(mve_vld43w, TCG_CALL_NO_WG, void, env, i32, i32)
49
#define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */
51
+
50
+#define SCTLR_TSCXT (1U << 20) /* FEAT_CSV2_1p2, AArch64 only */
52
+DEF_HELPER_FLAGS_3(mve_vst20b, TCG_CALL_NO_WG, void, env, i32, i32)
51
#define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */
53
+DEF_HELPER_FLAGS_3(mve_vst20h, TCG_CALL_NO_WG, void, env, i32, i32)
52
#define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */
54
+DEF_HELPER_FLAGS_3(mve_vst20w, TCG_CALL_NO_WG, void, env, i32, i32)
53
#define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */
55
+
54
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_dit(const ARMISARegisters *id)
56
+DEF_HELPER_FLAGS_3(mve_vst21b, TCG_CALL_NO_WG, void, env, i32, i32)
55
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0;
57
+DEF_HELPER_FLAGS_3(mve_vst21h, TCG_CALL_NO_WG, void, env, i32, i32)
58
+DEF_HELPER_FLAGS_3(mve_vst21w, TCG_CALL_NO_WG, void, env, i32, i32)
59
+
60
+DEF_HELPER_FLAGS_3(mve_vst40b, TCG_CALL_NO_WG, void, env, i32, i32)
61
+DEF_HELPER_FLAGS_3(mve_vst40h, TCG_CALL_NO_WG, void, env, i32, i32)
62
+DEF_HELPER_FLAGS_3(mve_vst40w, TCG_CALL_NO_WG, void, env, i32, i32)
63
+
64
+DEF_HELPER_FLAGS_3(mve_vst41b, TCG_CALL_NO_WG, void, env, i32, i32)
65
+DEF_HELPER_FLAGS_3(mve_vst41h, TCG_CALL_NO_WG, void, env, i32, i32)
66
+DEF_HELPER_FLAGS_3(mve_vst41w, TCG_CALL_NO_WG, void, env, i32, i32)
67
+
68
+DEF_HELPER_FLAGS_3(mve_vst42b, TCG_CALL_NO_WG, void, env, i32, i32)
69
+DEF_HELPER_FLAGS_3(mve_vst42h, TCG_CALL_NO_WG, void, env, i32, i32)
70
+DEF_HELPER_FLAGS_3(mve_vst42w, TCG_CALL_NO_WG, void, env, i32, i32)
71
+
72
+DEF_HELPER_FLAGS_3(mve_vst43b, TCG_CALL_NO_WG, void, env, i32, i32)
73
+DEF_HELPER_FLAGS_3(mve_vst43h, TCG_CALL_NO_WG, void, env, i32, i32)
74
+DEF_HELPER_FLAGS_3(mve_vst43w, TCG_CALL_NO_WG, void, env, i32, i32)
75
+
76
DEF_HELPER_FLAGS_3(mve_vdup, TCG_CALL_NO_WG, void, env, ptr, i32)
77
78
DEF_HELPER_FLAGS_4(mve_vidupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
79
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
80
index XXXXXXX..XXXXXXX 100644
81
--- a/target/arm/mve.decode
82
+++ b/target/arm/mve.decode
83
@@ -XXX,XX +XXX,XX @@
84
&vabav qn qm rda size
85
&vldst_sg qd qm rn size msize os
86
&vldst_sg_imm qd qm a w imm
87
+&vldst_il qd rn size pat w
88
89
# scatter-gather memory size is in bits 6:4
90
%sg_msize 6:1 4:1
91
@@ -XXX,XX +XXX,XX @@
92
@vldst_sg_imm .... .... a:1 . w:1 . .... .... .... . imm:7 &vldst_sg_imm \
93
qd=%qd qm=%qn
94
95
+# Deinterleaving load/interleaving store
96
+@vldst_il .... .... .. w:1 . rn:4 .... ... size:2 pat:2 ..... &vldst_il \
97
+ qd=%qd
98
+
99
@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
100
@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
101
@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
102
@@ -XXX,XX +XXX,XX @@ VLDRD_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1111 .... .... @vldst_sg_imm
103
VSTRW_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1110 .... .... @vldst_sg_imm
104
VSTRD_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1111 .... .... @vldst_sg_imm
105
106
+# deinterleaving loads/interleaving stores
107
+VLD2 1111 1100 1 .. 1 .... ... 1 111 .. .. 00000 @vldst_il
108
+VLD4 1111 1100 1 .. 1 .... ... 1 111 .. .. 00001 @vldst_il
109
+VST2 1111 1100 1 .. 0 .... ... 1 111 .. .. 00000 @vldst_il
110
+VST4 1111 1100 1 .. 0 .... ... 1 111 .. .. 00001 @vldst_il
111
+
112
# Moves between 2 32-bit vector lanes and 2 general purpose registers
113
VMOV_to_2gp 1110 1100 0 . 00 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
114
VMOV_from_2gp 1110 1100 0 . 01 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
115
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
116
index XXXXXXX..XXXXXXX 100644
117
--- a/target/arm/mve_helper.c
118
+++ b/target/arm/mve_helper.c
119
@@ -XXX,XX +XXX,XX @@ DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
120
DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
121
DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
122
123
+/*
124
+ * Deinterleaving loads/interleaving stores.
125
+ *
126
+ * For these helpers we are passed the index of the first Qreg
127
+ * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3)
128
+ * and the value of the base address register Rn.
129
+ * The helpers are specialized for pattern and element size, so
130
+ * for instance vld42h is VLD4 with pattern 2, element size MO_16.
131
+ *
132
+ * These insns are beatwise but not predicated, so we must honour ECI,
133
+ * but need not look at mve_element_mask().
134
+ *
135
+ * The pseudocode implements these insns with multiple memory accesses
136
+ * of the element size, but rules R_VVVG and R_FXDM permit us to make
137
+ * one 32-bit memory access per beat.
138
+ */
139
+#define DO_VLD4B(OP, O1, O2, O3, O4) \
140
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
141
+ uint32_t base) \
142
+ { \
143
+ int beat, e; \
144
+ uint16_t mask = mve_eci_mask(env); \
145
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
146
+ uint32_t addr, data; \
147
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
148
+ if ((mask & 1) == 0) { \
149
+ /* ECI says skip this beat */ \
150
+ continue; \
151
+ } \
152
+ addr = base + off[beat] * 4; \
153
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
154
+ for (e = 0; e < 4; e++, data >>= 8) { \
155
+ uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
156
+ qd[H1(off[beat])] = data; \
157
+ } \
158
+ } \
159
+ }
160
+
161
+#define DO_VLD4H(OP, O1, O2) \
162
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
163
+ uint32_t base) \
164
+ { \
165
+ int beat; \
166
+ uint16_t mask = mve_eci_mask(env); \
167
+ static const uint8_t off[4] = { O1, O1, O2, O2 }; \
168
+ uint32_t addr, data; \
169
+ int y; /* y counts 0 2 0 2 */ \
170
+ uint16_t *qd; \
171
+ for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \
172
+ if ((mask & 1) == 0) { \
173
+ /* ECI says skip this beat */ \
174
+ continue; \
175
+ } \
176
+ addr = base + off[beat] * 8 + (beat & 1) * 4; \
177
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
178
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \
179
+ qd[H2(off[beat])] = data; \
180
+ data >>= 16; \
181
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \
182
+ qd[H2(off[beat])] = data; \
183
+ } \
184
+ }
185
+
186
+#define DO_VLD4W(OP, O1, O2, O3, O4) \
187
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
188
+ uint32_t base) \
189
+ { \
190
+ int beat; \
191
+ uint16_t mask = mve_eci_mask(env); \
192
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
193
+ uint32_t addr, data; \
194
+ uint32_t *qd; \
195
+ int y; \
196
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
197
+ if ((mask & 1) == 0) { \
198
+ /* ECI says skip this beat */ \
199
+ continue; \
200
+ } \
201
+ addr = base + off[beat] * 4; \
202
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
203
+ y = (beat + (O1 & 2)) & 3; \
204
+ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \
205
+ qd[H4(off[beat] >> 2)] = data; \
206
+ } \
207
+ }
208
+
209
+DO_VLD4B(vld40b, 0, 1, 10, 11)
210
+DO_VLD4B(vld41b, 2, 3, 12, 13)
211
+DO_VLD4B(vld42b, 4, 5, 14, 15)
212
+DO_VLD4B(vld43b, 6, 7, 8, 9)
213
+
214
+DO_VLD4H(vld40h, 0, 5)
215
+DO_VLD4H(vld41h, 1, 6)
216
+DO_VLD4H(vld42h, 2, 7)
217
+DO_VLD4H(vld43h, 3, 4)
218
+
219
+DO_VLD4W(vld40w, 0, 1, 10, 11)
220
+DO_VLD4W(vld41w, 2, 3, 12, 13)
221
+DO_VLD4W(vld42w, 4, 5, 14, 15)
222
+DO_VLD4W(vld43w, 6, 7, 8, 9)
223
+
224
+#define DO_VLD2B(OP, O1, O2, O3, O4) \
225
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
226
+ uint32_t base) \
227
+ { \
228
+ int beat, e; \
229
+ uint16_t mask = mve_eci_mask(env); \
230
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
231
+ uint32_t addr, data; \
232
+ uint8_t *qd; \
233
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
234
+ if ((mask & 1) == 0) { \
235
+ /* ECI says skip this beat */ \
236
+ continue; \
237
+ } \
238
+ addr = base + off[beat] * 2; \
239
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
240
+ for (e = 0; e < 4; e++, data >>= 8) { \
241
+ qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \
242
+ qd[H1(off[beat] + (e >> 1))] = data; \
243
+ } \
244
+ } \
245
+ }
246
+
247
+#define DO_VLD2H(OP, O1, O2, O3, O4) \
248
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
249
+ uint32_t base) \
250
+ { \
251
+ int beat; \
252
+ uint16_t mask = mve_eci_mask(env); \
253
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
254
+ uint32_t addr, data; \
255
+ int e; \
256
+ uint16_t *qd; \
257
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
258
+ if ((mask & 1) == 0) { \
259
+ /* ECI says skip this beat */ \
260
+ continue; \
261
+ } \
262
+ addr = base + off[beat] * 4; \
263
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
264
+ for (e = 0; e < 2; e++, data >>= 16) { \
265
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \
266
+ qd[H2(off[beat])] = data; \
267
+ } \
268
+ } \
269
+ }
270
+
271
+#define DO_VLD2W(OP, O1, O2, O3, O4) \
272
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
273
+ uint32_t base) \
274
+ { \
275
+ int beat; \
276
+ uint16_t mask = mve_eci_mask(env); \
277
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
278
+ uint32_t addr, data; \
279
+ uint32_t *qd; \
280
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
281
+ if ((mask & 1) == 0) { \
282
+ /* ECI says skip this beat */ \
283
+ continue; \
284
+ } \
285
+ addr = base + off[beat]; \
286
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
287
+ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \
288
+ qd[H4(off[beat] >> 3)] = data; \
289
+ } \
290
+ }
291
+
292
+DO_VLD2B(vld20b, 0, 2, 12, 14)
293
+DO_VLD2B(vld21b, 4, 6, 8, 10)
294
+
295
+DO_VLD2H(vld20h, 0, 1, 6, 7)
296
+DO_VLD2H(vld21h, 2, 3, 4, 5)
297
+
298
+DO_VLD2W(vld20w, 0, 4, 24, 28)
299
+DO_VLD2W(vld21w, 8, 12, 16, 20)
300
+
301
+#define DO_VST4B(OP, O1, O2, O3, O4) \
302
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
303
+ uint32_t base) \
304
+ { \
305
+ int beat, e; \
306
+ uint16_t mask = mve_eci_mask(env); \
307
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
308
+ uint32_t addr, data; \
309
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
310
+ if ((mask & 1) == 0) { \
311
+ /* ECI says skip this beat */ \
312
+ continue; \
313
+ } \
314
+ addr = base + off[beat] * 4; \
315
+ data = 0; \
316
+ for (e = 3; e >= 0; e--) { \
317
+ uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
318
+ data = (data << 8) | qd[H1(off[beat])]; \
319
+ } \
320
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
321
+ } \
322
+ }
323
+
324
+#define DO_VST4H(OP, O1, O2) \
325
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
326
+ uint32_t base) \
327
+ { \
328
+ int beat; \
329
+ uint16_t mask = mve_eci_mask(env); \
330
+ static const uint8_t off[4] = { O1, O1, O2, O2 }; \
331
+ uint32_t addr, data; \
332
+ int y; /* y counts 0 2 0 2 */ \
333
+ uint16_t *qd; \
334
+ for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \
335
+ if ((mask & 1) == 0) { \
336
+ /* ECI says skip this beat */ \
337
+ continue; \
338
+ } \
339
+ addr = base + off[beat] * 8 + (beat & 1) * 4; \
340
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \
341
+ data = qd[H2(off[beat])]; \
342
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \
343
+ data |= qd[H2(off[beat])] << 16; \
344
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
345
+ } \
346
+ }
347
+
348
+#define DO_VST4W(OP, O1, O2, O3, O4) \
349
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
350
+ uint32_t base) \
351
+ { \
352
+ int beat; \
353
+ uint16_t mask = mve_eci_mask(env); \
354
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
355
+ uint32_t addr, data; \
356
+ uint32_t *qd; \
357
+ int y; \
358
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
359
+ if ((mask & 1) == 0) { \
360
+ /* ECI says skip this beat */ \
361
+ continue; \
362
+ } \
363
+ addr = base + off[beat] * 4; \
364
+ y = (beat + (O1 & 2)) & 3; \
365
+ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \
366
+ data = qd[H4(off[beat] >> 2)]; \
367
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
368
+ } \
369
+ }
370
+
371
+DO_VST4B(vst40b, 0, 1, 10, 11)
372
+DO_VST4B(vst41b, 2, 3, 12, 13)
373
+DO_VST4B(vst42b, 4, 5, 14, 15)
374
+DO_VST4B(vst43b, 6, 7, 8, 9)
375
+
376
+DO_VST4H(vst40h, 0, 5)
377
+DO_VST4H(vst41h, 1, 6)
378
+DO_VST4H(vst42h, 2, 7)
379
+DO_VST4H(vst43h, 3, 4)
380
+
381
+DO_VST4W(vst40w, 0, 1, 10, 11)
382
+DO_VST4W(vst41w, 2, 3, 12, 13)
383
+DO_VST4W(vst42w, 4, 5, 14, 15)
384
+DO_VST4W(vst43w, 6, 7, 8, 9)
385
+
386
+#define DO_VST2B(OP, O1, O2, O3, O4) \
387
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
388
+ uint32_t base) \
389
+ { \
390
+ int beat, e; \
391
+ uint16_t mask = mve_eci_mask(env); \
392
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
393
+ uint32_t addr, data; \
394
+ uint8_t *qd; \
395
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
396
+ if ((mask & 1) == 0) { \
397
+ /* ECI says skip this beat */ \
398
+ continue; \
399
+ } \
400
+ addr = base + off[beat] * 2; \
401
+ data = 0; \
402
+ for (e = 3; e >= 0; e--) { \
403
+ qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \
404
+ data = (data << 8) | qd[H1(off[beat] + (e >> 1))]; \
405
+ } \
406
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
407
+ } \
408
+ }
409
+
410
+#define DO_VST2H(OP, O1, O2, O3, O4) \
411
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
412
+ uint32_t base) \
413
+ { \
414
+ int beat; \
415
+ uint16_t mask = mve_eci_mask(env); \
416
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
417
+ uint32_t addr, data; \
418
+ int e; \
419
+ uint16_t *qd; \
420
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
421
+ if ((mask & 1) == 0) { \
422
+ /* ECI says skip this beat */ \
423
+ continue; \
424
+ } \
425
+ addr = base + off[beat] * 4; \
426
+ data = 0; \
427
+ for (e = 1; e >= 0; e--) { \
428
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \
429
+ data = (data << 16) | qd[H2(off[beat])]; \
430
+ } \
431
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
432
+ } \
433
+ }
434
+
435
+#define DO_VST2W(OP, O1, O2, O3, O4) \
436
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
437
+ uint32_t base) \
438
+ { \
439
+ int beat; \
440
+ uint16_t mask = mve_eci_mask(env); \
441
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
442
+ uint32_t addr, data; \
443
+ uint32_t *qd; \
444
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
445
+ if ((mask & 1) == 0) { \
446
+ /* ECI says skip this beat */ \
447
+ continue; \
448
+ } \
449
+ addr = base + off[beat]; \
450
+ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \
451
+ data = qd[H4(off[beat] >> 3)]; \
452
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
453
+ } \
454
+ }
455
+
456
+DO_VST2B(vst20b, 0, 2, 12, 14)
457
+DO_VST2B(vst21b, 4, 6, 8, 10)
458
+
459
+DO_VST2H(vst20h, 0, 1, 6, 7)
460
+DO_VST2H(vst21h, 2, 3, 4, 5)
461
+
462
+DO_VST2W(vst20w, 0, 4, 24, 28)
463
+DO_VST2W(vst21w, 8, 12, 16, 20)
464
+
465
/*
466
* The mergemask(D, R, M) macro performs the operation "*D = R" but
467
* storing only the bytes which correspond to 1 bits in M,
468
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
469
index XXXXXXX..XXXXXXX 100644
470
--- a/target/arm/translate-mve.c
471
+++ b/target/arm/translate-mve.c
472
@@ -XXX,XX +XXX,XX @@ static inline int vidup_imm(DisasContext *s, int x)
473
474
typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
475
typedef void MVEGenLdStSGFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
476
+typedef void MVEGenLdStIlFn(TCGv_ptr, TCGv_i32, TCGv_i32);
477
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
478
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
479
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
480
@@ -XXX,XX +XXX,XX @@ static bool trans_VSTRD_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
481
return do_ldst_sg_imm(s, a, fns[a->w], MO_64);
482
}
56
}
483
57
484
+static bool do_vldst_il(DisasContext *s, arg_vldst_il *a, MVEGenLdStIlFn *fn,
58
+static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id)
485
+ int addrinc)
486
+{
59
+{
487
+ TCGv_i32 rn;
60
+ int key = FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, CSV2);
488
+
61
+ if (key >= 2) {
489
+ if (!dc_isar_feature(aa32_mve, s) ||
62
+ return true; /* FEAT_CSV2_2 */
490
+ !mve_check_qreg_bank(s, a->qd) ||
63
+ }
491
+ !fn || (a->rn == 13 && a->w) || a->rn == 15) {
64
+ if (key == 1) {
492
+ /* Variously UNPREDICTABLE or UNDEF or related-encoding */
65
+ key = FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, CSV2_FRAC);
493
+ return false;
66
+ return key >= 2; /* FEAT_CSV2_1p2 */
494
+ }
67
+ }
495
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
68
+ return false;
496
+ return true;
497
+ }
498
+
499
+ rn = load_reg(s, a->rn);
500
+ /*
501
+ * We pass the index of Qd, not a pointer, because the helper must
502
+ * access multiple Q registers starting at Qd and working up.
503
+ */
504
+ fn(cpu_env, tcg_constant_i32(a->qd), rn);
505
+
506
+ if (a->w) {
507
+ tcg_gen_addi_i32(rn, rn, addrinc);
508
+ store_reg(s, a->rn, rn);
509
+ } else {
510
+ tcg_temp_free_i32(rn);
511
+ }
512
+ mve_update_and_store_eci(s);
513
+ return true;
514
+}
69
+}
515
+
70
+
516
+/* This macro is just to make the arrays more compact in these functions */
71
static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
517
+#define F(N) gen_helper_mve_##N
72
{
518
+
73
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
519
+static bool trans_VLD2(DisasContext *s, arg_vldst_il *a)
74
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/cpu.c
77
+++ b/target/arm/cpu.c
78
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
79
*/
80
env->cp15.gcr_el1 = 0x1ffff;
81
}
82
+ /*
83
+ * Disable access to SCXTNUM_EL0 from CSV2_1p2.
84
+ * This is not yet exposed from the Linux kernel in any way.
85
+ */
86
+ env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
87
#else
88
/* Reset into the highest available EL */
89
if (arm_feature(env, ARM_FEATURE_EL3)) {
90
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/cpu64.c
93
+++ b/target/arm/cpu64.c
94
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
95
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
96
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
97
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
98
- t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 1); /* FEAT_CSV2 */
99
+ t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2); /* FEAT_CSV2_2 */
100
cpu->isar.id_aa64pfr0 = t;
101
102
t = cpu->isar.id_aa64pfr1;
103
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
104
* we do for EL2 with the virtualization=on property.
105
*/
106
t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */
107
+ t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
108
cpu->isar.id_aa64pfr1 = t;
109
110
t = cpu->isar.id_aa64mmfr0;
111
diff --git a/target/arm/helper.c b/target/arm/helper.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/target/arm/helper.c
114
+++ b/target/arm/helper.c
115
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
116
if (cpu_isar_feature(aa64_mte, cpu)) {
117
valid_mask |= SCR_ATA;
118
}
119
+ if (cpu_isar_feature(aa64_scxtnum, cpu)) {
120
+ valid_mask |= SCR_ENSCXT;
121
+ }
122
} else {
123
valid_mask &= ~(SCR_RW | SCR_ST);
124
if (cpu_isar_feature(aa32_ras, cpu)) {
125
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
126
if (cpu_isar_feature(aa64_mte, cpu)) {
127
valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
128
}
129
+ if (cpu_isar_feature(aa64_scxtnum, cpu)) {
130
+ valid_mask |= HCR_ENSCXT;
131
+ }
132
}
133
134
/* Clear RES0 bits. */
135
@@ -XXX,XX +XXX,XX @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
136
{ K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
137
"TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
138
139
+ { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
140
+ "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
141
+ isar_feature_aa64_scxtnum },
142
+
143
/* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
144
/* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
145
};
146
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
147
},
148
};
149
150
-#endif
151
+static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
152
+ bool isread)
520
+{
153
+{
521
+ static MVEGenLdStIlFn * const fns[4][4] = {
154
+ uint64_t hcr = arm_hcr_el2_eff(env);
522
+ { F(vld20b), F(vld20h), F(vld20w), NULL, },
155
+ int el = arm_current_el(env);
523
+ { F(vld21b), F(vld21h), F(vld21w), NULL, },
156
+
524
+ { NULL, NULL, NULL, NULL },
157
+ if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
525
+ { NULL, NULL, NULL, NULL },
158
+ if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
526
+ };
159
+ if (hcr & HCR_TGE) {
527
+ if (a->qd > 6) {
160
+ return CP_ACCESS_TRAP_EL2;
528
+ return false;
161
+ }
529
+ }
162
+ return CP_ACCESS_TRAP;
530
+ return do_vldst_il(s, a, fns[a->pat][a->size], 32);
163
+ }
164
+ } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
165
+ return CP_ACCESS_TRAP_EL2;
166
+ }
167
+ if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
168
+ return CP_ACCESS_TRAP_EL2;
169
+ }
170
+ if (el < 3
171
+ && arm_feature(env, ARM_FEATURE_EL3)
172
+ && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
173
+ return CP_ACCESS_TRAP_EL3;
174
+ }
175
+ return CP_ACCESS_OK;
531
+}
176
+}
532
+
177
+
533
+static bool trans_VLD4(DisasContext *s, arg_vldst_il *a)
178
+static const ARMCPRegInfo scxtnum_reginfo[] = {
534
+{
179
+ { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
535
+ static MVEGenLdStIlFn * const fns[4][4] = {
180
+ .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
536
+ { F(vld40b), F(vld40h), F(vld40w), NULL, },
181
+ .access = PL0_RW, .accessfn = access_scxtnum,
537
+ { F(vld41b), F(vld41h), F(vld41w), NULL, },
182
+ .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
538
+ { F(vld42b), F(vld42h), F(vld42w), NULL, },
183
+ { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
539
+ { F(vld43b), F(vld43h), F(vld43w), NULL, },
184
+ .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
540
+ };
185
+ .access = PL1_RW, .accessfn = access_scxtnum,
541
+ if (a->qd > 4) {
186
+ .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
542
+ return false;
187
+ { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
543
+ }
188
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
544
+ return do_vldst_il(s, a, fns[a->pat][a->size], 64);
189
+ .access = PL2_RW, .accessfn = access_scxtnum,
545
+}
190
+ .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
546
+
191
+ { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
547
+static bool trans_VST2(DisasContext *s, arg_vldst_il *a)
192
+ .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
548
+{
193
+ .access = PL3_RW,
549
+ static MVEGenLdStIlFn * const fns[4][4] = {
194
+ .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
550
+ { F(vst20b), F(vst20h), F(vst20w), NULL, },
195
+};
551
+ { F(vst21b), F(vst21h), F(vst21w), NULL, },
196
+#endif /* TARGET_AARCH64 */
552
+ { NULL, NULL, NULL, NULL },
197
553
+ { NULL, NULL, NULL, NULL },
198
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
554
+ };
199
bool isread)
555
+ if (a->qd > 6) {
200
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
556
+ return false;
201
define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
557
+ }
202
define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
558
+ return do_vldst_il(s, a, fns[a->pat][a->size], 32);
203
}
559
+}
204
+
560
+
205
+ if (cpu_isar_feature(aa64_scxtnum, cpu)) {
561
+static bool trans_VST4(DisasContext *s, arg_vldst_il *a)
206
+ define_arm_cp_regs(cpu, scxtnum_reginfo);
562
+{
207
+ }
563
+ static MVEGenLdStIlFn * const fns[4][4] = {
208
#endif
564
+ { F(vst40b), F(vst40h), F(vst40w), NULL, },
209
565
+ { F(vst41b), F(vst41h), F(vst41w), NULL, },
210
if (cpu_isar_feature(any_predinv, cpu)) {
566
+ { F(vst42b), F(vst42h), F(vst42w), NULL, },
567
+ { F(vst43b), F(vst43h), F(vst43w), NULL, },
568
+ };
569
+ if (a->qd > 4) {
570
+ return false;
571
+ }
572
+ return do_vldst_il(s, a, fns[a->pat][a->size], 64);
573
+}
574
+
575
+#undef F
576
+
577
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
578
{
579
TCGv_ptr qd;
580
--
211
--
581
2.20.1
212
2.25.1
582
583
1
From: "Wen, Jianxian" <Jianxian.Wen@verisilicon.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Add a memory region property which can be connected to an IOMMU region to support SMMU translation.
3
This extension concerns cache speculation, which TCG does
4
not implement. Thus we can trivially enable this feature.
4
5
5
Signed-off-by: Jianxian Wen <jianxian.wen@verisilicon.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 4C23C17B8E87E74E906A25A3254A03F4FA1FEC31@SHASXM03.verisilicon.com
8
Message-id: 20220506180242.216785-22-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
---
10
hw/arm/exynos4210.c | 3 +++
11
docs/system/arm/emulation.rst | 1 +
11
hw/arm/xilinx_zynq.c | 3 +++
12
target/arm/cpu64.c | 1 +
12
hw/dma/pl330.c | 26 ++++++++++++++++++++++----
13
target/arm/cpu_tcg.c | 1 +
13
3 files changed, 28 insertions(+), 4 deletions(-)
14
3 files changed, 3 insertions(+)
14
15
15
diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c
16
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
16
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/arm/exynos4210.c
18
--- a/docs/system/arm/emulation.rst
18
+++ b/hw/arm/exynos4210.c
19
+++ b/docs/system/arm/emulation.rst
19
@@ -XXX,XX +XXX,XX @@ static DeviceState *pl330_create(uint32_t base, qemu_or_irq *orgate,
20
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
20
int i;
21
- FEAT_CSV2_1p1 (Cache speculation variant 2, version 1.1)
21
22
- FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2)
22
dev = qdev_new("pl330");
23
- FEAT_CSV2_2 (Cache speculation variant 2, version 2)
23
+ object_property_set_link(OBJECT(dev), "memory",
24
+- FEAT_CSV3 (Cache speculation variant 3)
24
+ OBJECT(get_system_memory()),
25
- FEAT_DIT (Data Independent Timing instructions)
25
+ &error_fatal);
26
- FEAT_DPB (DC CVAP instruction)
26
qdev_prop_set_uint8(dev, "num_events", nevents);
27
- FEAT_Debugv8p2 (Debug changes for v8.2)
27
qdev_prop_set_uint8(dev, "num_chnls", 8);
28
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
28
qdev_prop_set_uint8(dev, "num_periph_req", nreq);
29
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
30
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/arm/xilinx_zynq.c
30
--- a/target/arm/cpu64.c
32
+++ b/hw/arm/xilinx_zynq.c
31
+++ b/target/arm/cpu64.c
33
@@ -XXX,XX +XXX,XX @@ static void zynq_init(MachineState *machine)
32
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
34
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[39-IRQ_OFFSET]);
33
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
35
34
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
36
dev = qdev_new("pl330");
35
t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2); /* FEAT_CSV2_2 */
37
+ object_property_set_link(OBJECT(dev), "memory",
36
+ t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1); /* FEAT_CSV3 */
38
+ OBJECT(address_space_mem),
37
cpu->isar.id_aa64pfr0 = t;
39
+ &error_fatal);
38
40
qdev_prop_set_uint8(dev, "num_chnls", 8);
39
t = cpu->isar.id_aa64pfr1;
41
qdev_prop_set_uint8(dev, "num_periph_req", 4);
40
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
42
qdev_prop_set_uint8(dev, "num_events", 16);
43
diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c
44
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
45
--- a/hw/dma/pl330.c
42
--- a/target/arm/cpu_tcg.c
46
+++ b/hw/dma/pl330.c
43
+++ b/target/arm/cpu_tcg.c
47
@@ -XXX,XX +XXX,XX @@ struct PL330State {
44
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
48
uint8_t num_faulting;
45
cpu->isar.id_pfr0 = t;
49
uint8_t periph_busy[PL330_PERIPH_NUM];
46
50
47
t = cpu->isar.id_pfr2;
51
+ /* Memory region that DMA operation access */
48
+ t = FIELD_DP32(t, ID_PFR2, CSV3, 1); /* FEAT_CSV3 */
52
+ MemoryRegion *mem_mr;
49
t = FIELD_DP32(t, ID_PFR2, SSBS, 1); /* FEAT_SSBS */
53
+ AddressSpace *mem_as;
50
cpu->isar.id_pfr2 = t;
54
};
55
56
#define TYPE_PL330 "pl330"
57
@@ -XXX,XX +XXX,XX @@ static inline const PL330InsnDesc *pl330_fetch_insn(PL330Chan *ch)
58
uint8_t opcode;
59
int i;
60
61
- dma_memory_read(&address_space_memory, ch->pc, &opcode, 1);
62
+ dma_memory_read(ch->parent->mem_as, ch->pc, &opcode, 1);
63
for (i = 0; insn_desc[i].size; i++) {
64
if ((opcode & insn_desc[i].opmask) == insn_desc[i].opcode) {
65
return &insn_desc[i];
66
@@ -XXX,XX +XXX,XX @@ static inline void pl330_exec_insn(PL330Chan *ch, const PL330InsnDesc *insn)
67
uint8_t buf[PL330_INSN_MAXSIZE];
68
69
assert(insn->size <= PL330_INSN_MAXSIZE);
70
- dma_memory_read(&address_space_memory, ch->pc, buf, insn->size);
71
+ dma_memory_read(ch->parent->mem_as, ch->pc, buf, insn->size);
72
insn->exec(ch, buf[0], &buf[1], insn->size - 1);
73
}
74
75
@@ -XXX,XX +XXX,XX @@ static int pl330_exec_cycle(PL330Chan *channel)
76
if (q != NULL && q->len <= pl330_fifo_num_free(&s->fifo)) {
77
int len = q->len - (q->addr & (q->len - 1));
78
79
- dma_memory_read(&address_space_memory, q->addr, buf, len);
80
+ dma_memory_read(s->mem_as, q->addr, buf, len);
81
trace_pl330_exec_cycle(q->addr, len);
82
if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) {
83
pl330_hexdump(buf, len);
84
@@ -XXX,XX +XXX,XX @@ static int pl330_exec_cycle(PL330Chan *channel)
85
fifo_res = pl330_fifo_get(&s->fifo, buf, len, q->tag);
86
}
87
if (fifo_res == PL330_FIFO_OK || q->z) {
88
- dma_memory_write(&address_space_memory, q->addr, buf, len);
89
+ dma_memory_write(s->mem_as, q->addr, buf, len);
90
trace_pl330_exec_cycle(q->addr, len);
91
if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) {
92
pl330_hexdump(buf, len);
93
@@ -XXX,XX +XXX,XX @@ static void pl330_realize(DeviceState *dev, Error **errp)
94
"dma", PL330_IOMEM_SIZE);
95
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
96
97
+ if (!s->mem_mr) {
98
+ error_setg(errp, "'memory' link is not set");
99
+ return;
100
+ } else if (s->mem_mr == get_system_memory()) {
101
+ /* Avoid creating new AS for system memory. */
102
+ s->mem_as = &address_space_memory;
103
+ } else {
104
+ s->mem_as = g_new0(AddressSpace, 1);
105
+ address_space_init(s->mem_as, s->mem_mr,
106
+ memory_region_name(s->mem_mr));
107
+ }
108
+
109
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pl330_exec_cycle_timer, s);
110
111
s->cfg[0] = (s->mgr_ns_at_rst ? 0x4 : 0) |
112
@@ -XXX,XX +XXX,XX @@ static Property pl330_properties[] = {
113
DEFINE_PROP_UINT8("rd_q_dep", PL330State, rd_q_dep, 16),
114
DEFINE_PROP_UINT16("data_buffer_dep", PL330State, data_buffer_dep, 256),
115
116
+ DEFINE_PROP_LINK("memory", PL330State, mem_mr,
117
+ TYPE_MEMORY_REGION, MemoryRegion *),
118
+
119
DEFINE_PROP_END_OF_LIST(),
120
};
121
51
122
--
52
--
123
2.20.1
53
2.25.1
124
125
1
From: Hamza Mahfooz <someguy@effective-light.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
As per commit 5626f8c6d468 ("rcu: Add automatically released rcu_read_lock
3
This extension concerns not merging memory access, which TCG does
4
variants"), RCU_READ_LOCK_GUARD() should be used instead of
4
not implement. Thus we can trivially enable this feature.
5
rcu_read_{un}lock().
5
Add a comment to handle_hint for the DGH instruction, but no code.
6
6
7
Signed-off-by: Hamza Mahfooz <someguy@effective-light.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210727235201.11491-1-someguy@effective-light.com
9
Message-id: 20220506180242.216785-23-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
11
---
12
target/arm/kvm.c | 17 ++++++++---------
12
docs/system/arm/emulation.rst | 1 +
13
1 file changed, 8 insertions(+), 9 deletions(-)
13
target/arm/cpu64.c | 1 +
14
target/arm/translate-a64.c | 1 +
15
3 files changed, 3 insertions(+)
14
16
15
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
17
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/kvm.c
19
--- a/docs/system/arm/emulation.rst
18
+++ b/target/arm/kvm.c
20
+++ b/docs/system/arm/emulation.rst
19
@@ -XXX,XX +XXX,XX @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
21
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
20
hwaddr xlat, len, doorbell_gpa;
22
- FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2)
21
MemoryRegionSection mrs;
23
- FEAT_CSV2_2 (Cache speculation variant 2, version 2)
22
MemoryRegion *mr;
24
- FEAT_CSV3 (Cache speculation variant 3)
23
- int ret = 1;
25
+- FEAT_DGH (Data gathering hint)
24
26
- FEAT_DIT (Data Independent Timing instructions)
25
if (as == &address_space_memory) {
27
- FEAT_DPB (DC CVAP instruction)
26
return 0;
28
- FEAT_Debugv8p2 (Debug changes for v8.2)
27
@@ -XXX,XX +XXX,XX @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
29
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
28
30
index XXXXXXX..XXXXXXX 100644
29
/* MSI doorbell address is translated by an IOMMU */
31
--- a/target/arm/cpu64.c
30
32
+++ b/target/arm/cpu64.c
31
- rcu_read_lock();
33
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
32
+ RCU_READ_LOCK_GUARD();
34
t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */
33
+
35
t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */
34
mr = address_space_translate(as, address, &xlat, &len, true,
36
t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */
35
MEMTXATTRS_UNSPECIFIED);
37
+ t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */
36
+
38
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */
37
if (!mr) {
39
cpu->isar.id_aa64isar1 = t;
38
- goto unlock;
40
39
+ return 1;
41
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
40
}
42
index XXXXXXX..XXXXXXX 100644
41
+
43
--- a/target/arm/translate-a64.c
42
mrs = memory_region_find(mr, xlat, 1);
44
+++ b/target/arm/translate-a64.c
43
+
45
@@ -XXX,XX +XXX,XX @@ static void handle_hint(DisasContext *s, uint32_t insn,
44
if (!mrs.mr) {
46
break;
45
- goto unlock;
47
case 0b00100: /* SEV */
46
+ return 1;
48
case 0b00101: /* SEVL */
47
}
49
+ case 0b00110: /* DGH */
48
50
/* we treat all as NOP at least for now */
49
doorbell_gpa = mrs.offset_within_address_space;
51
break;
50
@@ -XXX,XX +XXX,XX @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
52
case 0b00111: /* XPACLRI */
51
52
trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);
53
54
- ret = 0;
55
-
56
-unlock:
57
- rcu_read_unlock();
58
- return ret;
59
+ return 0;
60
}
61
62
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
63
--
53
--
64
2.20.1
54
2.25.1
65
66
1
Implement the MVE VMOV forms that move data between 2 general-purpose
1
From: Richard Henderson <richard.henderson@linaro.org>
2
registers and 2 32-bit lanes in a vector register.
3
2
3
Enable the a76 for virt and sbsa board use.
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220506180242.216785-24-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
---
9
---
7
target/arm/translate-a32.h | 1 +
10
docs/system/arm/virt.rst | 1 +
8
target/arm/mve.decode | 4 ++
11
hw/arm/sbsa-ref.c | 1 +
9
target/arm/translate-mve.c | 85 ++++++++++++++++++++++++++++++++++++++
12
hw/arm/virt.c | 1 +
10
target/arm/translate-vfp.c | 2 +-
13
target/arm/cpu64.c | 66 ++++++++++++++++++++++++++++++++++++++++
11
4 files changed, 91 insertions(+), 1 deletion(-)
14
4 files changed, 69 insertions(+)
12
15
13
diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
16
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
14
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-a32.h
18
--- a/docs/system/arm/virt.rst
16
+++ b/target/arm/translate-a32.h
19
+++ b/docs/system/arm/virt.rst
17
@@ -XXX,XX +XXX,XX @@ void gen_rev16(TCGv_i32 dest, TCGv_i32 var);
20
@@ -XXX,XX +XXX,XX @@ Supported guest CPU types:
18
void clear_eci_state(DisasContext *s);
21
- ``cortex-a53`` (64-bit)
19
bool mve_eci_check(DisasContext *s);
22
- ``cortex-a57`` (64-bit)
20
void mve_update_and_store_eci(DisasContext *s);
23
- ``cortex-a72`` (64-bit)
21
+bool mve_skip_vmov(DisasContext *s, int vn, int index, int size);
24
+- ``cortex-a76`` (64-bit)
22
25
- ``a64fx`` (64-bit)
23
static inline TCGv_i32 load_cpu_offset(int offset)
26
- ``host`` (with KVM only)
24
{
27
- ``max`` (same as ``host`` for KVM; best possible emulation with TCG)
25
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
28
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
26
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/mve.decode
30
--- a/hw/arm/sbsa-ref.c
28
+++ b/target/arm/mve.decode
31
+++ b/hw/arm/sbsa-ref.c
29
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
32
@@ -XXX,XX +XXX,XX @@ static const int sbsa_ref_irqmap[] = {
30
VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
33
static const char * const valid_cpus[] = {
31
size=2 p=1
34
ARM_CPU_TYPE_NAME("cortex-a57"),
32
35
ARM_CPU_TYPE_NAME("cortex-a72"),
33
+# Moves between 2 32-bit vector lanes and 2 general purpose registers
36
+ ARM_CPU_TYPE_NAME("cortex-a76"),
34
+VMOV_to_2gp 1110 1100 0 . 00 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
37
ARM_CPU_TYPE_NAME("max"),
35
+VMOV_from_2gp 1110 1100 0 . 01 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
38
};
39
40
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/hw/arm/virt.c
43
+++ b/hw/arm/virt.c
44
@@ -XXX,XX +XXX,XX @@ static const char *valid_cpus[] = {
45
ARM_CPU_TYPE_NAME("cortex-a53"),
46
ARM_CPU_TYPE_NAME("cortex-a57"),
47
ARM_CPU_TYPE_NAME("cortex-a72"),
48
+ ARM_CPU_TYPE_NAME("cortex-a76"),
49
ARM_CPU_TYPE_NAME("a64fx"),
50
ARM_CPU_TYPE_NAME("host"),
51
ARM_CPU_TYPE_NAME("max"),
52
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/cpu64.c
55
+++ b/target/arm/cpu64.c
56
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
57
define_cortex_a72_a57_a53_cp_reginfo(cpu);
58
}
59
60
+static void aarch64_a76_initfn(Object *obj)
61
+{
62
+ ARMCPU *cpu = ARM_CPU(obj);
36
+
63
+
37
# Vector 2-op
64
+ cpu->dtb_compatible = "arm,cortex-a76";
38
VAND 1110 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
65
+ set_feature(&cpu->env, ARM_FEATURE_V8);
39
VBIC 1110 1111 0 . 01 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
66
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
40
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
67
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
41
index XXXXXXX..XXXXXXX 100644
68
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
42
--- a/target/arm/translate-mve.c
69
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
43
+++ b/target/arm/translate-mve.c
70
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
44
@@ -XXX,XX +XXX,XX @@ static bool do_vabav(DisasContext *s, arg_vabav *a, MVEGenVABAVFn *fn)
71
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
45
72
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
46
DO_VABAV(VABAV_S, vabavs)
47
DO_VABAV(VABAV_U, vabavu)
48
+
73
+
49
+static bool trans_VMOV_to_2gp(DisasContext *s, arg_VMOV_to_2gp *a)
74
+ /* Ordered by B2.4 AArch64 registers by functional group */
50
+{
75
+ cpu->clidr = 0x82000023;
51
+ /*
76
+ cpu->ctr = 0x8444C004;
52
+ * VMOV two 32-bit vector lanes to two general-purpose registers.
77
+ cpu->dcz_blocksize = 4;
53
+ * This insn is not predicated but it is subject to beat-wise
78
+ cpu->isar.id_aa64dfr0 = 0x0000000010305408ull;
54
+ * execution if it is not in an IT block. For us this means
79
+ cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
55
+ * only that if PSR.ECI says we should not be executing the beat
80
+ cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
56
+ * corresponding to the lane of the vector register being accessed
81
+ cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
57
+ * then we should skip performing the move, and that we need to do
82
+ cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
58
+ * the usual check for bad ECI state and advance of ECI state.
83
+ cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
59
+ * (If PSR.ECI is non-zero then we cannot be in an IT block.)
84
+ cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */
60
+ */
85
+ cpu->isar.id_aa64pfr1 = 0x0000000000000010ull;
61
+ TCGv_i32 tmp;
86
+ cpu->id_afr0 = 0x00000000;
62
+ int vd;
87
+ cpu->isar.id_dfr0 = 0x04010088;
88
+ cpu->isar.id_isar0 = 0x02101110;
89
+ cpu->isar.id_isar1 = 0x13112111;
90
+ cpu->isar.id_isar2 = 0x21232042;
91
+ cpu->isar.id_isar3 = 0x01112131;
92
+ cpu->isar.id_isar4 = 0x00010142;
93
+ cpu->isar.id_isar5 = 0x01011121;
94
+ cpu->isar.id_isar6 = 0x00000010;
95
+ cpu->isar.id_mmfr0 = 0x10201105;
96
+ cpu->isar.id_mmfr1 = 0x40000000;
97
+ cpu->isar.id_mmfr2 = 0x01260000;
98
+ cpu->isar.id_mmfr3 = 0x02122211;
99
+ cpu->isar.id_mmfr4 = 0x00021110;
100
+ cpu->isar.id_pfr0 = 0x10010131;
101
+ cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
102
+ cpu->isar.id_pfr2 = 0x00000011;
103
+ cpu->midr = 0x414fd0b1; /* r4p1 */
104
+ cpu->revidr = 0;
63
+
105
+
64
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd) ||
106
+ /* From B2.18 CCSIDR_EL1 */
65
+ a->rt == 13 || a->rt == 15 || a->rt2 == 13 || a->rt2 == 15 ||
107
+ cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
66
+ a->rt == a->rt2) {
108
+ cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
67
+ /* Rt/Rt2 cases are UNPREDICTABLE */
109
+ cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */
68
+ return false;
69
+ }
70
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
71
+ return true;
72
+ }
73
+
110
+
74
+ /* Convert Qreg index to Dreg for read_neon_element32() etc */
111
+ /* From B2.93 SCTLR_EL3 */
75
+ vd = a->qd * 2;
112
+ cpu->reset_sctlr = 0x30c50838;
76
+
113
+
77
+ if (!mve_skip_vmov(s, vd, a->idx, MO_32)) {
114
+ /* From B4.23 ICH_VTR_EL2 */
78
+ tmp = tcg_temp_new_i32();
115
+ cpu->gic_num_lrs = 4;
79
+ read_neon_element32(tmp, vd, a->idx, MO_32);
116
+ cpu->gic_vpribits = 5;
80
+ store_reg(s, a->rt, tmp);
117
+ cpu->gic_vprebits = 5;
81
+ }
82
+ if (!mve_skip_vmov(s, vd + 1, a->idx, MO_32)) {
83
+ tmp = tcg_temp_new_i32();
84
+ read_neon_element32(tmp, vd + 1, a->idx, MO_32);
85
+ store_reg(s, a->rt2, tmp);
86
+ }
87
+
118
+
88
+ mve_update_and_store_eci(s);
119
+ /* From B5.1 AdvSIMD AArch64 register summary */
89
+ return true;
120
+ cpu->isar.mvfr0 = 0x10110222;
121
+ cpu->isar.mvfr1 = 0x13211111;
122
+ cpu->isar.mvfr2 = 0x00000043;
90
+}
123
+}
91
+
124
+
92
+static bool trans_VMOV_from_2gp(DisasContext *s, arg_VMOV_to_2gp *a)
125
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
93
+{
94
+ /*
95
+ * VMOV two general-purpose registers to two 32-bit vector lanes.
96
+ * This insn is not predicated but it is subject to beat-wise
97
+ * execution if it is not in an IT block. For us this means
98
+ * only that if PSR.ECI says we should not be executing the beat
99
+ * corresponding to the lane of the vector register being accessed
100
+ * then we should skip performing the move, and that we need to do
101
+ * the usual check for bad ECI state and advance of ECI state.
102
+ * (If PSR.ECI is non-zero then we cannot be in an IT block.)
103
+ */
104
+ TCGv_i32 tmp;
105
+ int vd;
106
+
107
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd) ||
108
+ a->rt == 13 || a->rt == 15 || a->rt2 == 13 || a->rt2 == 15) {
109
+ /* Rt/Rt2 cases are UNPREDICTABLE */
110
+ return false;
111
+ }
112
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
113
+ return true;
114
+ }
115
+
116
+ /* Convert Qreg idx to Dreg for read_neon_element32() etc */
117
+ vd = a->qd * 2;
118
+
119
+ if (!mve_skip_vmov(s, vd, a->idx, MO_32)) {
120
+ tmp = load_reg(s, a->rt);
121
+ write_neon_element32(tmp, vd, a->idx, MO_32);
122
+ tcg_temp_free_i32(tmp);
123
+ }
124
+ if (!mve_skip_vmov(s, vd + 1, a->idx, MO_32)) {
125
+ tmp = load_reg(s, a->rt2);
126
+ write_neon_element32(tmp, vd + 1, a->idx, MO_32);
127
+ tcg_temp_free_i32(tmp);
128
+ }
129
+
130
+ mve_update_and_store_eci(s);
131
+ return true;
132
+}
133
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
134
index XXXXXXX..XXXXXXX 100644
135
--- a/target/arm/translate-vfp.c
136
+++ b/target/arm/translate-vfp.c
137
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
138
return true;
139
}
140
141
-static bool mve_skip_vmov(DisasContext *s, int vn, int index, int size)
142
+bool mve_skip_vmov(DisasContext *s, int vn, int index, int size)
143
{
126
{
144
/*
127
/*
145
* In a CPU with MVE, the VMOV (vector lane to general-purpose register)
128
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo aarch64_cpus[] = {
129
{ .name = "cortex-a57", .initfn = aarch64_a57_initfn },
130
{ .name = "cortex-a53", .initfn = aarch64_a53_initfn },
131
{ .name = "cortex-a72", .initfn = aarch64_a72_initfn },
132
+ { .name = "cortex-a76", .initfn = aarch64_a76_initfn },
133
{ .name = "a64fx", .initfn = aarch64_a64fx_initfn },
134
{ .name = "max", .initfn = aarch64_max_initfn },
135
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
146
--
136
--
147
2.20.1
137
2.25.1
148
149
1
Implement the MVE VMLADAV and VMLSDAV insns. Like the VMLALDAV and
1
From: Richard Henderson <richard.henderson@linaro.org>
2
VMLSLDAV insns already implemented, these accumulate multiplied
3
vector elements; but they accumulate a 32-bit result rather than a
4
64-bit one.
5
2
6
Note that these encodings overlap with what would be RdaHi=0b111 for
3
Enable the n1 for virt and sbsa board use.
7
VMLALDAV, VMLSLDAV, VRMLALDAVH and VRMLSLDAVH.
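
As a rough illustrative sketch only (not taken from the patch; the
function name is invented), the accumulating signed-byte VMLADAV form
computes a 32-bit dot product across the vector:

    #include <stdint.h>

    /* roughly VMLADAVA.S8 Rda, Qn, Qm */
    int32_t vmladava_s8(int32_t rda, const int8_t qn[16], const int8_t qm[16])
    {
        for (int e = 0; e < 16; e++) {
            rda += (int32_t)qn[e] * qm[e];  /* accumulate into 32 bits */
        }
        return rda;
    }

The VMLSDAV forms are analogous but subtract the products from
odd-numbered elements instead of adding them.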
8
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220506180242.216785-25-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
---
9
---
12
target/arm/helper-mve.h | 17 ++++++++++
10
docs/system/arm/virt.rst | 1 +
13
target/arm/mve.decode | 33 +++++++++++++++++---
11
hw/arm/sbsa-ref.c | 1 +
14
target/arm/mve_helper.c | 41 ++++++++++++++++++++++++
12
hw/arm/virt.c | 1 +
15
target/arm/translate-mve.c | 64 ++++++++++++++++++++++++++++++++++++++
13
target/arm/cpu64.c | 66 ++++++++++++++++++++++++++++++++++++++++
16
4 files changed, 150 insertions(+), 5 deletions(-)
14
4 files changed, 69 insertions(+)
17
15
18
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
16
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-mve.h
18
--- a/docs/system/arm/virt.rst
21
+++ b/target/arm/helper-mve.h
19
+++ b/docs/system/arm/virt.rst
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
20
@@ -XXX,XX +XXX,XX @@ Supported guest CPU types:
23
DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
21
- ``cortex-a76`` (64-bit)
24
DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
22
- ``a64fx`` (64-bit)
25
23
- ``host`` (with KVM only)
26
+DEF_HELPER_FLAGS_4(mve_vmladavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
24
+- ``neoverse-n1`` (64-bit)
27
+DEF_HELPER_FLAGS_4(mve_vmladavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
25
- ``max`` (same as ``host`` for KVM; best possible emulation with TCG)
28
+DEF_HELPER_FLAGS_4(mve_vmladavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
26
29
+DEF_HELPER_FLAGS_4(mve_vmladavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
27
Note that the default is ``cortex-a15``, so for an AArch64 guest you must
30
+DEF_HELPER_FLAGS_4(mve_vmladavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
28
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
31
+DEF_HELPER_FLAGS_4(mve_vmladavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
29
index XXXXXXX..XXXXXXX 100644
32
+DEF_HELPER_FLAGS_4(mve_vmlsdavb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
30
--- a/hw/arm/sbsa-ref.c
33
+DEF_HELPER_FLAGS_4(mve_vmlsdavh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
31
+++ b/hw/arm/sbsa-ref.c
34
+DEF_HELPER_FLAGS_4(mve_vmlsdavw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
32
@@ -XXX,XX +XXX,XX @@ static const char * const valid_cpus[] = {
33
ARM_CPU_TYPE_NAME("cortex-a57"),
34
ARM_CPU_TYPE_NAME("cortex-a72"),
35
ARM_CPU_TYPE_NAME("cortex-a76"),
36
+ ARM_CPU_TYPE_NAME("neoverse-n1"),
37
ARM_CPU_TYPE_NAME("max"),
38
};
39
40
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/hw/arm/virt.c
43
+++ b/hw/arm/virt.c
44
@@ -XXX,XX +XXX,XX @@ static const char *valid_cpus[] = {
45
ARM_CPU_TYPE_NAME("cortex-a72"),
46
ARM_CPU_TYPE_NAME("cortex-a76"),
47
ARM_CPU_TYPE_NAME("a64fx"),
48
+ ARM_CPU_TYPE_NAME("neoverse-n1"),
49
ARM_CPU_TYPE_NAME("host"),
50
ARM_CPU_TYPE_NAME("max"),
51
};
52
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/cpu64.c
55
+++ b/target/arm/cpu64.c
56
@@ -XXX,XX +XXX,XX @@ static void aarch64_a76_initfn(Object *obj)
57
cpu->isar.mvfr2 = 0x00000043;
58
}
59
60
+static void aarch64_neoverse_n1_initfn(Object *obj)
61
+{
62
+ ARMCPU *cpu = ARM_CPU(obj);
35
+
63
+
36
+DEF_HELPER_FLAGS_4(mve_vmladavsxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
64
+ cpu->dtb_compatible = "arm,neoverse-n1";
37
+DEF_HELPER_FLAGS_4(mve_vmladavsxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
65
+ set_feature(&cpu->env, ARM_FEATURE_V8);
38
+DEF_HELPER_FLAGS_4(mve_vmladavsxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
66
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
39
+DEF_HELPER_FLAGS_4(mve_vmlsdavxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
67
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
40
+DEF_HELPER_FLAGS_4(mve_vmlsdavxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
68
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
41
+DEF_HELPER_FLAGS_4(mve_vmlsdavxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
69
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
70
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
71
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
72
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
42
+
73
+
43
DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
74
+ /* Ordered by B2.4 AArch64 registers by functional group */
44
DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
75
+ cpu->clidr = 0x82000023;
45
DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
76
+ cpu->ctr = 0x8444c004;
46
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
77
+ cpu->dcz_blocksize = 4;
47
index XXXXXXX..XXXXXXX 100644
78
+ cpu->isar.id_aa64dfr0 = 0x0000000110305408ull;
48
--- a/target/arm/mve.decode
79
+ cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
49
+++ b/target/arm/mve.decode
80
+ cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
50
@@ -XXX,XX +XXX,XX @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
81
+ cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
51
%size_16 16:1 !function=plus_1
82
+ cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
52
83
+ cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
53
&vmlaldav rdahi rdalo size qn qm x a
84
+ cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */
54
+&vmladav rda size qn qm x a
85
+ cpu->isar.id_aa64pfr1 = 0x0000000000000020ull;
55
86
+ cpu->id_afr0 = 0x00000000;
56
@vmlaldav .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
87
+ cpu->isar.id_dfr0 = 0x04010088;
57
qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
88
+ cpu->isar.id_isar0 = 0x02101110;
58
@vmlaldav_nosz .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
89
+ cpu->isar.id_isar1 = 0x13112111;
59
qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
90
+ cpu->isar.id_isar2 = 0x21232042;
60
-VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
91
+ cpu->isar.id_isar3 = 0x01112131;
61
-VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
92
+ cpu->isar.id_isar4 = 0x00010142;
62
+@vmladav .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
93
+ cpu->isar.id_isar5 = 0x01011121;
63
+ qn=%qn rda=%rdalo size=%size_16 &vmladav
94
+ cpu->isar.id_isar6 = 0x00000010;
64
+@vmladav_nosz .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
95
+ cpu->isar.id_mmfr0 = 0x10201105;
65
+ qn=%qn rda=%rdalo size=0 &vmladav
96
+ cpu->isar.id_mmfr1 = 0x40000000;
66
97
+ cpu->isar.id_mmfr2 = 0x01260000;
67
-VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
98
+ cpu->isar.id_mmfr3 = 0x02122211;
68
+{
99
+ cpu->isar.id_mmfr4 = 0x00021110;
69
+ VMLADAV_S 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
100
+ cpu->isar.id_pfr0 = 0x10010131;
70
+ VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
101
+ cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
71
+}
102
+ cpu->isar.id_pfr2 = 0x00000011;
72
+{
103
+ cpu->midr = 0x414fd0c1; /* r4p1 */
73
+ VMLADAV_U 1111 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
104
+ cpu->revidr = 0;
74
+ VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
105
+
106
+ /* From B2.23 CCSIDR_EL1 */
107
+ cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
108
+ cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
109
+ cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */
110
+
111
+ /* From B2.98 SCTLR_EL3 */
112
+ cpu->reset_sctlr = 0x30c50838;
113
+
114
+ /* From B4.23 ICH_VTR_EL2 */
115
+ cpu->gic_num_lrs = 4;
116
+ cpu->gic_vpribits = 5;
117
+ cpu->gic_vprebits = 5;
118
+
119
+ /* From B5.1 AdvSIMD AArch64 register summary */
120
+ cpu->isar.mvfr0 = 0x10110222;
121
+ cpu->isar.mvfr1 = 0x13211111;
122
+ cpu->isar.mvfr2 = 0x00000043;
75
+}
123
+}
76
+
124
+
77
+{
125
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
78
+ VMLSDAV 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 1 @vmladav
79
+ VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
80
+}
81
+
82
+{
83
+ VMLSDAV 1111 1110 1111 ... 0 ... . 1110 . 0 . 0 ... 1 @vmladav_nosz
84
+ VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
85
+}
86
+
87
+VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
88
+VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
89
90
{
91
VMAXV_S 1110 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
92
VMINV_S 1110 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
93
VMAXAV 1110 1110 1110 .. 00 .... 1111 0 0 . 0 ... 0 @vmaxv
94
VMINAV 1110 1110 1110 .. 00 .... 1111 1 0 . 0 ... 0 @vmaxv
95
+ VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
96
VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
97
}
98
99
{
100
VMAXV_U 1111 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
101
VMINV_U 1111 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
102
+ VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
103
VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
104
}
105
106
-VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
107
-
108
# Scalar operations
109
110
VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
111
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/target/arm/mve_helper.c
114
+++ b/target/arm/mve_helper.c
115
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
116
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
117
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
118
119
+/*
120
+ * Multiply add dual accumulate ops
121
+ */
122
+#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
123
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
124
+ void *vm, uint32_t a) \
125
+ { \
126
+ uint16_t mask = mve_element_mask(env); \
127
+ unsigned e; \
128
+ TYPE *n = vn, *m = vm; \
129
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
130
+ if (mask & 1) { \
131
+ if (e & 1) { \
132
+ a ODDACC \
133
+ n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
134
+ } else { \
135
+ a EVENACC \
136
+ n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
137
+ } \
138
+ } \
139
+ } \
140
+ mve_advance_vpt(env); \
141
+ return a; \
142
+ }
143
+
144
+#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \
145
+ DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \
146
+ DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \
147
+ DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)
148
+
149
+#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \
150
+ DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \
151
+ DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
152
+ DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)
153
+
154
+DO_DAV_S(vmladavs, false, +=, +=)
155
+DO_DAV_U(vmladavu, false, +=, +=)
156
+DO_DAV_S(vmlsdav, false, +=, -=)
157
+DO_DAV_S(vmladavsx, true, +=, +=)
158
+DO_DAV_S(vmlsdavx, true, +=, -=)
159
+
160
/*
161
* Rounding multiply add long dual accumulate high. In the pseudocode
162
* this is implemented with a 72-bit internal accumulator value of which
163
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
164
index XXXXXXX..XXXXXXX 100644
165
--- a/target/arm/translate-mve.c
166
+++ b/target/arm/translate-mve.c
167
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TC
168
typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
169
typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
170
typedef void MVEGenVABAVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
171
+typedef void MVEGenDualAccOpFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
172
173
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
174
static inline long mve_qreg_offset(unsigned reg)
175
@@ -XXX,XX +XXX,XX @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
176
return do_long_dual_acc(s, a, fns[a->x]);
177
}
178
179
+static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn)
180
+{
181
+ TCGv_ptr qn, qm;
182
+ TCGv_i32 rda;
183
+
184
+ if (!dc_isar_feature(aa32_mve, s) ||
185
+ !mve_check_qreg_bank(s, a->qn) ||
186
+ !fn) {
187
+ return false;
188
+ }
189
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
190
+ return true;
191
+ }
192
+
193
+ qn = mve_qreg_ptr(a->qn);
194
+ qm = mve_qreg_ptr(a->qm);
195
+
196
+ /*
197
+ * This insn is subject to beat-wise execution. Partial execution
198
+ * of an A=0 (no-accumulate) insn which does not execute the first
199
+ * beat must start with the current rda value, not 0.
200
+ */
201
+ if (a->a || mve_skip_first_beat(s)) {
202
+ rda = load_reg(s, a->rda);
203
+ } else {
204
+ rda = tcg_const_i32(0);
205
+ }
206
+
207
+ fn(rda, cpu_env, qn, qm, rda);
208
+ store_reg(s, a->rda, rda);
209
+ tcg_temp_free_ptr(qn);
210
+ tcg_temp_free_ptr(qm);
211
+
212
+ mve_update_eci(s);
213
+ return true;
214
+}
215
+
216
+#define DO_DUAL_ACC(INSN, FN) \
217
+ static bool trans_##INSN(DisasContext *s, arg_vmladav *a) \
218
+ { \
219
+ static MVEGenDualAccOpFn * const fns[4][2] = { \
220
+ { gen_helper_mve_##FN##b, gen_helper_mve_##FN##xb }, \
221
+ { gen_helper_mve_##FN##h, gen_helper_mve_##FN##xh }, \
222
+ { gen_helper_mve_##FN##w, gen_helper_mve_##FN##xw }, \
223
+ { NULL, NULL }, \
224
+ }; \
225
+ return do_dual_acc(s, a, fns[a->size][a->x]); \
226
+ }
227
+
228
+DO_DUAL_ACC(VMLADAV_S, vmladavs)
229
+DO_DUAL_ACC(VMLSDAV, vmlsdav)
230
+
231
+static bool trans_VMLADAV_U(DisasContext *s, arg_vmladav *a)
232
+{
233
+ static MVEGenDualAccOpFn * const fns[4][2] = {
234
+ { gen_helper_mve_vmladavub, NULL },
235
+ { gen_helper_mve_vmladavuh, NULL },
236
+ { gen_helper_mve_vmladavuw, NULL },
237
+ { NULL, NULL },
238
+ };
239
+ return do_dual_acc(s, a, fns[a->size][a->x]);
240
+}
241
+
242
static void gen_vpst(DisasContext *s, uint32_t mask)
243
{
126
{
244
/*
127
/*
128
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo aarch64_cpus[] = {
129
{ .name = "cortex-a72", .initfn = aarch64_a72_initfn },
130
{ .name = "cortex-a76", .initfn = aarch64_a76_initfn },
131
{ .name = "a64fx", .initfn = aarch64_a64fx_initfn },
132
+ { .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn },
133
{ .name = "max", .initfn = aarch64_max_initfn },
134
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
135
{ .name = "host", .initfn = aarch64_host_initfn },
245
--
136
--
246
2.20.1
137
2.25.1
247
248
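As a stand-alone illustration of the 32-bit dual-accumulate semantics the VMLADAV/VMLSDAV patch above implements (predication and beat-wise execution ignored; lane count, element type and function names here are illustrative, not QEMU's):

#include <stdint.h>
#include <stdio.h>

#define LANES 8   /* eight 16-bit lanes of a 128-bit Q register */

/* VMLADAV-style: accumulate n[e] * m[e] across all lanes. */
static int32_t vmladav_ref(const int16_t *n, const int16_t *m, int32_t acc)
{
    for (int e = 0; e < LANES; e++) {
        acc += (int32_t)n[e] * m[e];
    }
    return acc;
}

/* VMLSDAV-style: even lanes add the product, odd lanes subtract it. */
static int32_t vmlsdav_ref(const int16_t *n, const int16_t *m, int32_t acc)
{
    for (int e = 0; e < LANES; e++) {
        int32_t prod = (int32_t)n[e] * m[e];
        acc = (e & 1) ? acc - prod : acc + prod;
    }
    return acc;
}

int main(void)
{
    int16_t n[LANES] = {1, 2, 3, 4, 5, 6, 7, 8};
    int16_t m[LANES] = {1, 1, 1, 1, 1, 1, 1, 1};

    printf("VMLADAV: %d\n", vmladav_ref(n, m, 0));  /* 36 */
    printf("VMLSDAV: %d\n", vmlsdav_ref(n, m, 0));  /* 1-2+3-4+5-6+7-8 = -4 */
    return 0;
}
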
1
From: Eduardo Habkost <ehabkost@redhat.com>
1
From: Leif Lindholm <quic_llindhol@quicinc.com>
2
2
3
The SBSA_GWDT enum value conflicts with the SBSA_GWDT() QOM type
3
The sbsa-ref machine is continuously evolving. Some of the changes we
4
checking helper, preventing us from using an OBJECT_DEFINE* or
4
want to make in the near future, to align with real components (e.g.
5
DEFINE_INSTANCE_CHECKER macro for the SBSA_GWDT() wrapper.
5
the GIC-700), will break compatibility for existing firmware.
6
6
7
If I understand the SBSA 6.0 specification correctly, the signal
7
Introduce two new properties to the DT generated on machine generation:
8
being connected to IRQ 16 is the WS0 output signal from the
8
- machine-version-major
9
Generic Watchdog. Rename the enum value to SBSA_GWDT_WS0 to be
9
To be incremented when a platform change makes the machine
10
more explicit and avoid the name conflict.
10
incompatible with existing firmware.
11
- machine-version-minor
12
To be incremented when functionality is added to the machine
13
without causing incompatibility with existing firmware.
14
To be reset to 0 when machine-version-major is incremented.
11
15
12
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
16
This versioning scheme is *neither*:
13
Message-id: 20210806023119.431680-1-ehabkost@redhat.com
17
- A QEMU versioned machine type; a given version of QEMU will emulate
18
a given version of the platform.
19
- A reflection of level of SBSA (now SystemReady SR) support provided.
20
21
The version will increment on guest-visible functional changes only,
22
akin to a revision ID register found on a physical platform.
23
24
These properties are both introduced with the value 0.
25
(Hence, a machine where the DT is lacking these nodes is equivalent
26
to version 0.0.)
27
28
Signed-off-by: Leif Lindholm <quic_llindhol@quicinc.com>
29
Message-id: 20220505113947.75714-1-quic_llindhol@quicinc.com
30
Cc: Peter Maydell <peter.maydell@linaro.org>
31
Cc: Radoslaw Biernacki <rad@semihalf.com>
32
Cc: Cédric Le Goater <clg@kaod.org>
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
33
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
34
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
35
---
17
hw/arm/sbsa-ref.c | 6 +++---
36
hw/arm/sbsa-ref.c | 14 ++++++++++++++
18
1 file changed, 3 insertions(+), 3 deletions(-)
37
1 file changed, 14 insertions(+)
19
38
20
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
39
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
21
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/arm/sbsa-ref.c
41
--- a/hw/arm/sbsa-ref.c
23
+++ b/hw/arm/sbsa-ref.c
42
+++ b/hw/arm/sbsa-ref.c
24
@@ -XXX,XX +XXX,XX @@ enum {
43
@@ -XXX,XX +XXX,XX @@ static void create_fdt(SBSAMachineState *sms)
25
SBSA_GIC_DIST,
44
qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);
26
SBSA_GIC_REDIST,
45
qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2);
27
SBSA_SECURE_EC,
46
28
- SBSA_GWDT,
47
+ /*
29
+ SBSA_GWDT_WS0,
48
+ * This versioning scheme is for informing platform fw only. It is neither:
30
SBSA_GWDT_REFRESH,
49
+ * - A QEMU versioned machine type; a given version of QEMU will emulate
31
SBSA_GWDT_CONTROL,
50
+ * a given version of the platform.
32
SBSA_SMMU,
51
+ * - A reflection of level of SBSA (now SystemReady SR) support provided.
33
@@ -XXX,XX +XXX,XX @@ static const int sbsa_ref_irqmap[] = {
52
+ *
34
[SBSA_AHCI] = 10,
53
+ * machine-version-major: updated when changes breaking fw compatibility
35
[SBSA_EHCI] = 11,
54
+ * are introduced.
36
[SBSA_SMMU] = 12, /* ... to 15 */
55
+ * machine-version-minor: updated when features are added that don't break
37
- [SBSA_GWDT] = 16,
56
+ * fw compatibility.
38
+ [SBSA_GWDT_WS0] = 16,
57
+ */
39
};
58
+ qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0);
40
59
+ qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 0);
41
static const char * const valid_cpus[] = {
60
+
42
@@ -XXX,XX +XXX,XX @@ static void create_wdt(const SBSAMachineState *sms)
61
if (ms->numa_state->have_numa_distance) {
43
hwaddr cbase = sbsa_ref_memmap[SBSA_GWDT_CONTROL].base;
62
int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
44
DeviceState *dev = qdev_new(TYPE_WDT_SBSA);
63
uint32_t *matrix = g_malloc0(size);
45
SysBusDevice *s = SYS_BUS_DEVICE(dev);
46
- int irq = sbsa_ref_irqmap[SBSA_GWDT];
47
+ int irq = sbsa_ref_irqmap[SBSA_GWDT_WS0];
48
49
sysbus_realize_and_unref(s, &error_fatal);
50
sysbus_mmio_map(s, 0, rbase);
51
--
64
--
52
2.20.1
65
2.25.1
53
66
54
67
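To show how guest firmware might consume the two new properties, a rough sketch assuming libfdt is available; the function name is made up, error handling is minimal, and missing properties fall back to version 0.0 as described above:

#include <stdint.h>
#include <libfdt.h>

static void read_sbsa_machine_version(const void *fdt,
                                      uint32_t *major, uint32_t *minor)
{
    int root = fdt_path_offset(fdt, "/");
    const fdt32_t *prop;

    *major = 0;                       /* absent properties mean version 0.0 */
    *minor = 0;
    if (root < 0) {
        return;
    }
    prop = fdt_getprop(fdt, root, "machine-version-major", NULL);
    if (prop) {
        *major = fdt32_to_cpu(*prop);
    }
    prop = fdt_getprop(fdt, root, "machine-version-minor", NULL);
    if (prop) {
        *minor = fdt32_to_cpu(*prop);
    }
}
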
1
Implement the MVE 1-operand saturating operations VQABS and VQNEG.
1
From: Gavin Shan <gshan@redhat.com>
2
2
3
This adds cluster-id to the CPU instance properties, which will be used
4
by the arm/virt machine. Besides, the cluster-id is also verified or
5
dumped in various spots:
6
7
* hw/core/machine.c::machine_set_cpu_numa_node() to associate
8
CPU with its NUMA node.
9
10
* hw/core/machine.c::machine_numa_finish_cpu_init() to record
11
CPU slots with no NUMA mapping set.
12
13
* hw/core/machine-hmp-cmds.c::hmp_hotpluggable_cpus() to dump
14
cluster-id.
15
16
Signed-off-by: Gavin Shan <gshan@redhat.com>
17
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
18
Acked-by: Igor Mammedov <imammedo@redhat.com>
19
Message-id: 20220503140304.855514-2-gshan@redhat.com
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
---
21
---
6
target/arm/helper-mve.h | 8 ++++++++
22
qapi/machine.json | 6 ++++--
7
target/arm/mve.decode | 3 +++
23
hw/core/machine-hmp-cmds.c | 4 ++++
8
target/arm/mve_helper.c | 37 +++++++++++++++++++++++++++++++++++++
24
hw/core/machine.c | 16 ++++++++++++++++
9
target/arm/translate-mve.c | 2 ++
25
3 files changed, 24 insertions(+), 2 deletions(-)
10
4 files changed, 50 insertions(+)
11
26
12
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
27
diff --git a/qapi/machine.json b/qapi/machine.json
13
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/helper-mve.h
29
--- a/qapi/machine.json
15
+++ b/target/arm/helper-mve.h
30
+++ b/qapi/machine.json
16
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
31
@@ -XXX,XX +XXX,XX @@
17
DEF_HELPER_FLAGS_3(mve_vfnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
32
# @node-id: NUMA node ID the CPU belongs to
18
DEF_HELPER_FLAGS_3(mve_vfnegs, TCG_CALL_NO_WG, void, env, ptr, ptr)
33
# @socket-id: socket number within node/board the CPU belongs to
19
34
# @die-id: die number within socket the CPU belongs to (since 4.1)
20
+DEF_HELPER_FLAGS_3(mve_vqabsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
35
-# @core-id: core number within die the CPU belongs to
21
+DEF_HELPER_FLAGS_3(mve_vqabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
36
+# @cluster-id: cluster number within die the CPU belongs to (since 7.1)
22
+DEF_HELPER_FLAGS_3(mve_vqabsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
37
+# @core-id: core number within cluster the CPU belongs to
38
# @thread-id: thread number within core the CPU belongs to
39
#
40
-# Note: currently there are 5 properties that could be present
41
+# Note: currently there are 6 properties that could be present
42
# but management should be prepared to pass through other
43
# properties with device_add command to allow for future
44
# interface extension. This also requires the filed names to be kept in
45
@@ -XXX,XX +XXX,XX @@
46
'data': { '*node-id': 'int',
47
'*socket-id': 'int',
48
'*die-id': 'int',
49
+ '*cluster-id': 'int',
50
'*core-id': 'int',
51
'*thread-id': 'int'
52
}
53
diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/hw/core/machine-hmp-cmds.c
56
+++ b/hw/core/machine-hmp-cmds.c
57
@@ -XXX,XX +XXX,XX @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict)
58
if (c->has_die_id) {
59
monitor_printf(mon, " die-id: \"%" PRIu64 "\"\n", c->die_id);
60
}
61
+ if (c->has_cluster_id) {
62
+ monitor_printf(mon, " cluster-id: \"%" PRIu64 "\"\n",
63
+ c->cluster_id);
64
+ }
65
if (c->has_core_id) {
66
monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id);
67
}
68
diff --git a/hw/core/machine.c b/hw/core/machine.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/hw/core/machine.c
71
+++ b/hw/core/machine.c
72
@@ -XXX,XX +XXX,XX @@ void machine_set_cpu_numa_node(MachineState *machine,
73
return;
74
}
75
76
+ if (props->has_cluster_id && !slot->props.has_cluster_id) {
77
+ error_setg(errp, "cluster-id is not supported");
78
+ return;
79
+ }
23
+
80
+
24
+DEF_HELPER_FLAGS_3(mve_vqnegb, TCG_CALL_NO_WG, void, env, ptr, ptr)
81
if (props->has_socket_id && !slot->props.has_socket_id) {
25
+DEF_HELPER_FLAGS_3(mve_vqnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
82
error_setg(errp, "socket-id is not supported");
26
+DEF_HELPER_FLAGS_3(mve_vqnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
83
return;
84
@@ -XXX,XX +XXX,XX @@ void machine_set_cpu_numa_node(MachineState *machine,
85
continue;
86
}
87
88
+ if (props->has_cluster_id &&
89
+ props->cluster_id != slot->props.cluster_id) {
90
+ continue;
91
+ }
27
+
92
+
28
DEF_HELPER_FLAGS_3(mve_vmovnbb, TCG_CALL_NO_WG, void, env, ptr, ptr)
93
if (props->has_die_id && props->die_id != slot->props.die_id) {
29
DEF_HELPER_FLAGS_3(mve_vmovnbh, TCG_CALL_NO_WG, void, env, ptr, ptr)
94
continue;
30
DEF_HELPER_FLAGS_3(mve_vmovntb, TCG_CALL_NO_WG, void, env, ptr, ptr)
95
}
31
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
96
@@ -XXX,XX +XXX,XX @@ static char *cpu_slot_to_string(const CPUArchId *cpu)
32
index XXXXXXX..XXXXXXX 100644
97
}
33
--- a/target/arm/mve.decode
98
g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id);
34
+++ b/target/arm/mve.decode
35
@@ -XXX,XX +XXX,XX @@ VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op
36
VNEG 1111 1111 1 . 11 .. 01 ... 0 0011 11 . 0 ... 0 @1op
37
VNEG_fp 1111 1111 1 . 11 .. 01 ... 0 0111 11 . 0 ... 0 @1op
38
39
+VQABS 1111 1111 1 . 11 .. 00 ... 0 0111 01 . 0 ... 0 @1op
40
+VQNEG 1111 1111 1 . 11 .. 00 ... 0 0111 11 . 0 ... 0 @1op
41
+
42
&vdup qd rt size
43
# Qd is in the fields usually named Qn
44
@vdup .... .... . . .. ... . rt:4 .... . . . . .... qd=%qn &vdup
45
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/mve_helper.c
48
+++ b/target/arm/mve_helper.c
49
@@ -XXX,XX +XXX,XX @@ void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
50
}
99
}
51
mve_advance_vpt(env);
100
+ if (cpu->props.has_cluster_id) {
52
}
101
+ if (s->len) {
53
+
102
+ g_string_append_printf(s, ", ");
54
+#define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \
103
+ }
55
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
104
+ g_string_append_printf(s, "cluster-id: %"PRId64, cpu->props.cluster_id);
56
+ { \
57
+ TYPE *d = vd, *m = vm; \
58
+ uint16_t mask = mve_element_mask(env); \
59
+ unsigned e; \
60
+ bool qc = false; \
61
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
62
+ bool sat = false; \
63
+ mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \
64
+ qc |= sat & mask & 1; \
65
+ } \
66
+ if (qc) { \
67
+ env->vfp.qc[0] = qc; \
68
+ } \
69
+ mve_advance_vpt(env); \
70
+ }
105
+ }
71
+
106
if (cpu->props.has_core_id) {
72
+#define DO_VQABS_B(N, SATP) \
107
if (s->len) {
73
+ do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP)
108
g_string_append_printf(s, ", ");
74
+#define DO_VQABS_H(N, SATP) \
75
+ do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP)
76
+#define DO_VQABS_W(N, SATP) \
77
+ do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP)
78
+
79
+#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP)
80
+#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP)
81
+#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP)
82
+
83
+DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B)
84
+DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H)
85
+DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)
86
+
87
+DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
88
+DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
89
+DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)
90
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/translate-mve.c
93
+++ b/target/arm/translate-mve.c
94
@@ -XXX,XX +XXX,XX @@ DO_1OP(VCLZ, vclz)
95
DO_1OP(VCLS, vcls)
96
DO_1OP(VABS, vabs)
97
DO_1OP(VNEG, vneg)
98
+DO_1OP(VQABS, vqabs)
99
+DO_1OP(VQNEG, vqneg)
100
101
/* Narrowing moves: only size 0 and 1 are valid */
102
#define DO_VMOVN(INSN, FN) \
103
--
109
--
104
2.20.1
110
2.25.1
105
106
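A stand-alone sketch of the saturating lane operation the VQABS/VQNEG patch above adds, for the 8-bit case only (predication and the QC plumbing are simplified; these are not the QEMU helpers). The only input that saturates is INT8_MIN, whose absolute value or negation does not fit in int8_t:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int8_t sat_to_s8(int64_t v, bool *sat)
{
    if (v > INT8_MAX) { *sat = true; return INT8_MAX; }
    if (v < INT8_MIN) { *sat = true; return INT8_MIN; }
    return (int8_t)v;
}

static int8_t vqabs8_ref(int8_t x, bool *sat)
{
    return sat_to_s8(x < 0 ? -(int64_t)x : (int64_t)x, sat);
}

static int8_t vqneg8_ref(int8_t x, bool *sat)
{
    return sat_to_s8(-(int64_t)x, sat);
}

int main(void)
{
    bool qc = false;
    printf("VQABS(-128) = %d\n", vqabs8_ref(INT8_MIN, &qc));  /* 127 */
    printf("VQNEG(-128) = %d\n", vqneg8_ref(INT8_MIN, &qc));  /* 127 */
    printf("QC = %d\n", qc);                                  /* 1 */
    return 0;
}
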
1
In mve_element_mask(), we calculate a mask for tail predication which
1
From: Gavin Shan <gshan@redhat.com>
2
should have a number of 1 bits based on the value of LR. However,
3
our MAKE_64BIT_MASK() macro has undefined behaviour when passed a
4
zero length. Special case this to give the all-zeroes mask we
5
require.
6
2
3
The CPU topology isn't enabled on arm/virt machine yet, but we're
4
going to do it in the next patch. After the CPU topology is enabled by
5
next patch, "thread-id=1" becomes invalid because the CPU core is
6
preferred on arm/virt machine. It means these two CPUs have 0/1
7
as their core IDs, but their thread IDs are all 0. It will trigger
8
test failure as the following message indicates:
9
10
[14/21 qemu:qtest+qtest-aarch64 / qtest-aarch64/numa-test ERROR
11
1.48s killed by signal 6 SIGABRT
12
>>> G_TEST_DBUS_DAEMON=/home/gavin/sandbox/qemu.main/tests/dbus-vmstate-daemon.sh \
13
QTEST_QEMU_STORAGE_DAEMON_BINARY=./storage-daemon/qemu-storage-daemon \
14
QTEST_QEMU_BINARY=./qemu-system-aarch64 \
15
QTEST_QEMU_IMG=./qemu-img MALLOC_PERTURB_=83 \
16
/home/gavin/sandbox/qemu.main/build/tests/qtest/numa-test --tap -k
17
――――――――――――――――――――――――――――――――――――――――――――――
18
stderr:
19
qemu-system-aarch64: -numa cpu,node-id=0,thread-id=1: no match found
20
21
This fixes the issue by providing comprehensive SMP configurations
22
in aarch64_numa_cpu(). The SMP configurations aren't used before
23
the CPU topology is enabled in the next patch.
24
25
Signed-off-by: Gavin Shan <gshan@redhat.com>
26
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
27
Message-id: 20220503140304.855514-3-gshan@redhat.com
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
---
29
---
10
target/arm/mve_helper.c | 3 ++-
30
tests/qtest/numa-test.c | 3 ++-
11
1 file changed, 2 insertions(+), 1 deletion(-)
31
1 file changed, 2 insertions(+), 1 deletion(-)
12
32
13
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
33
diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c
14
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/mve_helper.c
35
--- a/tests/qtest/numa-test.c
16
+++ b/target/arm/mve_helper.c
36
+++ b/tests/qtest/numa-test.c
17
@@ -XXX,XX +XXX,XX @@ static uint16_t mve_element_mask(CPUARMState *env)
37
@@ -XXX,XX +XXX,XX @@ static void aarch64_numa_cpu(const void *data)
18
*/
38
QTestState *qts;
19
int masklen = env->regs[14] << env->v7m.ltpsize;
39
g_autofree char *cli = NULL;
20
assert(masklen <= 16);
40
21
- mask &= MAKE_64BIT_MASK(0, masklen);
41
- cli = make_cli(data, "-machine smp.cpus=2 "
22
+ uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
42
+ cli = make_cli(data, "-machine "
23
+ mask &= ltpmask;
43
+ "smp.cpus=2,smp.sockets=1,smp.clusters=1,smp.cores=1,smp.threads=2 "
24
}
44
"-numa node,nodeid=0,memdev=ram -numa node,nodeid=1 "
25
45
"-numa cpu,node-id=1,thread-id=0 "
26
if ((env->condexec_bits & 0xf) == 0) {
46
"-numa cpu,node-id=0,thread-id=1");
27
--
47
--
28
2.20.1
48
2.25.1
29
49
30
50
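The undefined behaviour the mve_element_mask() fix above avoids can be reproduced with a mask macro of the usual shape; MAKE_MASK below is a local stand-in with the same zero-length hazard, not QEMU's actual MAKE_64BIT_MASK:

#include <stdint.h>
#include <stdio.h>

/* Shifts by 64 when len == 0, which is undefined behaviour in C. */
#define MAKE_MASK(len) (~0ULL >> (64 - (len)))

/* The safe pattern from the patch: special-case the zero-length mask. */
static uint16_t ltp_mask(int masklen)
{
    return masklen ? (uint16_t)MAKE_MASK(masklen) : 0;
}

int main(void)
{
    printf("ltp_mask(0)  = 0x%04x\n", ltp_mask(0));   /* 0x0000, no UB */
    printf("ltp_mask(4)  = 0x%04x\n", ltp_mask(4));   /* 0x000f */
    printf("ltp_mask(16) = 0x%04x\n", ltp_mask(16));  /* 0xffff */
    return 0;
}
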
1
Factor out the "generate code to update VPR.MASK01/MASK23" part of
1
From: Gavin Shan <gshan@redhat.com>
2
trans_VPST(); we are going to want to reuse it for the VPT insns.
3
2
3
Currently, the SMP configuration isn't considered when the CPU
4
topology is populated. In this case, it's impossible to provide
5
the default CPU-to-NUMA mapping or association based on the socket
6
ID of the given CPU.
7
8
This takes account of SMP configuration when the CPU topology
9
is populated. The die ID for the given CPU isn't assigned since
10
it's not supported on arm/virt machine. Besides, the used SMP
11
configuration in qtest/numa-test/aarch64_numa_cpu() is corrected
12
to avoid a test failure.
13
14
Signed-off-by: Gavin Shan <gshan@redhat.com>
15
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
16
Acked-by: Igor Mammedov <imammedo@redhat.com>
17
Message-id: 20220503140304.855514-4-gshan@redhat.com
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
---
19
---
7
target/arm/translate-mve.c | 31 +++++++++++++++++--------------
20
hw/arm/virt.c | 15 ++++++++++++++-
8
1 file changed, 17 insertions(+), 14 deletions(-)
21
1 file changed, 14 insertions(+), 1 deletion(-)
9
22
10
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
23
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
11
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
12
--- a/target/arm/translate-mve.c
25
--- a/hw/arm/virt.c
13
+++ b/target/arm/translate-mve.c
26
+++ b/hw/arm/virt.c
14
@@ -XXX,XX +XXX,XX @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
27
@@ -XXX,XX +XXX,XX @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
15
return do_long_dual_acc(s, a, fns[a->x]);
28
int n;
16
}
29
unsigned int max_cpus = ms->smp.max_cpus;
17
30
VirtMachineState *vms = VIRT_MACHINE(ms);
18
-static bool trans_VPST(DisasContext *s, arg_VPST *a)
31
+ MachineClass *mc = MACHINE_GET_CLASS(vms);
19
+static void gen_vpst(DisasContext *s, uint32_t mask)
32
20
{
33
if (ms->possible_cpus) {
21
- TCGv_i32 vpr;
34
assert(ms->possible_cpus->len == max_cpus);
22
-
35
@@ -XXX,XX +XXX,XX @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
23
- /* mask == 0 is a "related encoding" */
36
ms->possible_cpus->cpus[n].type = ms->cpu_type;
24
- if (!dc_isar_feature(aa32_mve, s) || !a->mask) {
37
ms->possible_cpus->cpus[n].arch_id =
25
- return false;
38
virt_cpu_mp_affinity(vms, n);
26
- }
39
+
27
- if (!mve_eci_check(s) || !vfp_access_check(s)) {
40
+ assert(!mc->smp_props.dies_supported);
28
- return true;
41
+ ms->possible_cpus->cpus[n].props.has_socket_id = true;
29
- }
42
+ ms->possible_cpus->cpus[n].props.socket_id =
30
/*
43
+ n / (ms->smp.clusters * ms->smp.cores * ms->smp.threads);
31
* Set the VPR mask fields. We take advantage of MASK01 and MASK23
44
+ ms->possible_cpus->cpus[n].props.has_cluster_id = true;
32
* being adjacent fields in the register.
45
+ ms->possible_cpus->cpus[n].props.cluster_id =
33
*
46
+ (n / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters;
34
- * This insn is not predicated, but it is subject to beat-wise
47
+ ms->possible_cpus->cpus[n].props.has_core_id = true;
35
+ * Updating the masks is not predicated, but it is subject to beat-wise
48
+ ms->possible_cpus->cpus[n].props.core_id =
36
* execution, and the mask is updated on the odd-numbered beats.
49
+ (n / ms->smp.threads) % ms->smp.cores;
37
* So if PSR.ECI says we should skip beat 1, we mustn't update the
50
ms->possible_cpus->cpus[n].props.has_thread_id = true;
38
* 01 mask field.
51
- ms->possible_cpus->cpus[n].props.thread_id = n;
39
*/
52
+ ms->possible_cpus->cpus[n].props.thread_id =
40
- vpr = load_cpu_field(v7m.vpr);
53
+ n % ms->smp.threads;
41
+ TCGv_i32 vpr = load_cpu_field(v7m.vpr);
42
switch (s->eci) {
43
case ECI_NONE:
44
case ECI_A0:
45
/* Update both 01 and 23 fields */
46
tcg_gen_deposit_i32(vpr, vpr,
47
- tcg_constant_i32(a->mask | (a->mask << 4)),
48
+ tcg_constant_i32(mask | (mask << 4)),
49
R_V7M_VPR_MASK01_SHIFT,
50
R_V7M_VPR_MASK01_LENGTH + R_V7M_VPR_MASK23_LENGTH);
51
break;
52
@@ -XXX,XX +XXX,XX @@ static bool trans_VPST(DisasContext *s, arg_VPST *a)
53
case ECI_A0A1A2B0:
54
/* Update only the 23 mask field */
55
tcg_gen_deposit_i32(vpr, vpr,
56
- tcg_constant_i32(a->mask),
57
+ tcg_constant_i32(mask),
58
R_V7M_VPR_MASK23_SHIFT, R_V7M_VPR_MASK23_LENGTH);
59
break;
60
default:
61
g_assert_not_reached();
62
}
54
}
63
store_cpu_field(vpr, v7m.vpr);
55
return ms->possible_cpus;
64
+}
65
+
66
+static bool trans_VPST(DisasContext *s, arg_VPST *a)
67
+{
68
+ /* mask == 0 is a "related encoding" */
69
+ if (!dc_isar_feature(aa32_mve, s) || !a->mask) {
70
+ return false;
71
+ }
72
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
73
+ return true;
74
+ }
75
+ gen_vpst(s, a->mask);
76
mve_update_and_store_eci(s);
77
return true;
78
}
56
}
79
--
57
--
80
2.20.1
58
2.25.1
81
82
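The ID derivation the virt patch above adds to virt_possible_cpu_arch_ids() is a plain index decomposition; a stand-alone sketch using the 2-socket, 3-core shape that appears later in this series (the formulas match the patch, the rest is illustrative):

#include <stdio.h>

int main(void)
{
    const int sockets = 2, clusters = 1, cores = 3, threads = 1;
    const int max_cpus = sockets * clusters * cores * threads;

    for (int n = 0; n < max_cpus; n++) {
        int socket_id  = n / (clusters * cores * threads);
        int cluster_id = (n / (cores * threads)) % clusters;
        int core_id    = (n / threads) % cores;
        int thread_id  = n % threads;
        printf("cpu%d: socket %d cluster %d core %d thread %d\n",
               n, socket_id, cluster_id, core_id, thread_id);
    }
    return 0;
}
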
1
Implement the MVE VMLA insn, which multiplies a vector by a scalar
1
From: Gavin Shan <gshan@redhat.com>
2
and accumulates into another vector.
3
2
3
In aarch64_numa_cpu(), the CPU and NUMA association is something
4
like below. Two threads in the same core/cluster/socket are
5
associated with two individual NUMA nodes, which is unreal as
6
Igor Mammedov mentioned. We don't expect the association to break
7
the NUMA-to-socket boundary, which matches the real world.
8
9
NUMA-node socket cluster core thread
10
------------------------------------------
11
0 0 0 0 0
12
1 0 0 0 1
13
14
This corrects the topology for CPUs and their association with
15
NUMA nodes. After this patch is applied, the CPU and NUMA
16
association becomes something like below, which looks real.
17
Besides, socket/cluster/core/thread IDs are all checked when
18
the NUMA node IDs are verified. It helps to check if the CPU
19
topology is properly populated or not.
20
21
NUMA-node socket cluster core thread
22
------------------------------------------
23
0 1 0 0 0
24
1 0 0 0 0
25
26
Suggested-by: Igor Mammedov <imammedo@redhat.com>
27
Signed-off-by: Gavin Shan <gshan@redhat.com>
28
Acked-by: Igor Mammedov <imammedo@redhat.com>
29
Message-id: 20220503140304.855514-5-gshan@redhat.com
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
30
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
---
31
---
7
target/arm/helper-mve.h | 4 ++++
32
tests/qtest/numa-test.c | 18 ++++++++++++------
8
target/arm/mve.decode | 1 +
33
1 file changed, 12 insertions(+), 6 deletions(-)
9
target/arm/mve_helper.c | 5 +++++
10
target/arm/translate-mve.c | 1 +
11
4 files changed, 11 insertions(+)
12
34
13
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
35
diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c
14
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-mve.h
37
--- a/tests/qtest/numa-test.c
16
+++ b/target/arm/helper-mve.h
38
+++ b/tests/qtest/numa-test.c
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i3
39
@@ -XXX,XX +XXX,XX @@ static void aarch64_numa_cpu(const void *data)
18
DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
40
g_autofree char *cli = NULL;
19
DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
41
20
42
cli = make_cli(data, "-machine "
21
+DEF_HELPER_FLAGS_4(mve_vmlab, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
43
- "smp.cpus=2,smp.sockets=1,smp.clusters=1,smp.cores=1,smp.threads=2 "
22
+DEF_HELPER_FLAGS_4(mve_vmlah, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
44
+ "smp.cpus=2,smp.sockets=2,smp.clusters=1,smp.cores=1,smp.threads=1 "
23
+DEF_HELPER_FLAGS_4(mve_vmlaw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
45
"-numa node,nodeid=0,memdev=ram -numa node,nodeid=1 "
24
+
46
- "-numa cpu,node-id=1,thread-id=0 "
25
DEF_HELPER_FLAGS_4(mve_vmlasb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
47
- "-numa cpu,node-id=0,thread-id=1");
26
DEF_HELPER_FLAGS_4(mve_vmlash, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
48
+ "-numa cpu,node-id=0,socket-id=1,cluster-id=0,core-id=0,thread-id=0 "
27
DEF_HELPER_FLAGS_4(mve_vmlasw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
49
+ "-numa cpu,node-id=1,socket-id=0,cluster-id=0,core-id=0,thread-id=0");
28
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
50
qts = qtest_init(cli);
29
index XXXXXXX..XXXXXXX 100644
51
cpus = get_cpus(qts, &resp);
30
--- a/target/arm/mve.decode
52
g_assert(cpus);
31
+++ b/target/arm/mve.decode
53
32
@@ -XXX,XX +XXX,XX @@ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
54
while ((e = qlist_pop(cpus))) {
33
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
55
QDict *cpu, *props;
34
56
- int64_t thread, node;
35
# The U bit (28) is don't-care because it does not affect the result
57
+ int64_t socket, cluster, core, thread, node;
36
+VMLA 111- 1110 0 . .. ... 1 ... 0 1110 . 100 .... @2scalar
58
37
VMLAS 111- 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
59
cpu = qobject_to(QDict, e);
38
60
g_assert(qdict_haskey(cpu, "props"));
39
# Vector add across vector
61
@@ -XXX,XX +XXX,XX @@ static void aarch64_numa_cpu(const void *data)
40
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
62
41
index XXXXXXX..XXXXXXX 100644
63
g_assert(qdict_haskey(props, "node-id"));
42
--- a/target/arm/mve_helper.c
64
node = qdict_get_int(props, "node-id");
43
+++ b/target/arm/mve_helper.c
65
+ g_assert(qdict_haskey(props, "socket-id"));
44
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
66
+ socket = qdict_get_int(props, "socket-id");
45
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
67
+ g_assert(qdict_haskey(props, "cluster-id"));
46
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
68
+ cluster = qdict_get_int(props, "cluster-id");
47
69
+ g_assert(qdict_haskey(props, "core-id"));
48
+/* Vector by scalar plus vector */
70
+ core = qdict_get_int(props, "core-id");
49
+#define DO_VMLA(D, N, M) ((N) * (M) + (D))
71
g_assert(qdict_haskey(props, "thread-id"));
50
+
72
thread = qdict_get_int(props, "thread-id");
51
+DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA)
73
52
+
74
- if (thread == 0) {
53
/* Vector by vector plus scalar */
75
+ if (socket == 0 && cluster == 0 && core == 0 && thread == 0) {
54
#define DO_VMLAS(D, N, M) ((N) * (D) + (M))
76
g_assert_cmpint(node, ==, 1);
55
77
- } else if (thread == 1) {
56
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
78
+ } else if (socket == 1 && cluster == 0 && core == 0 && thread == 0) {
57
index XXXXXXX..XXXXXXX 100644
79
g_assert_cmpint(node, ==, 0);
58
--- a/target/arm/translate-mve.c
80
} else {
59
+++ b/target/arm/translate-mve.c
81
g_assert(false);
60
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar)
61
DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
62
DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
63
DO_2OP_SCALAR(VBRSR, vbrsr)
64
+DO_2OP_SCALAR(VMLA, vmla)
65
DO_2OP_SCALAR(VMLAS, vmlas)
66
67
static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
68
--
82
--
69
2.20.1
83
2.25.1
70
71
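Per lane, the new VMLA differs from the existing VMLAS only in which operand feeds the multiply, as the DO_VMLA/DO_VMLAS definitions above show; a tiny stand-alone sketch with illustrative names and 32-bit lanes:

#include <stdint.h>
#include <stdio.h>

/* VMLA:  dest = n * scalar + dest   (DO_VMLA(D, N, M)  = N*M + D) */
static int32_t vmla_lane(int32_t d, int32_t n, int32_t scalar)
{
    return n * scalar + d;
}

/* VMLAS: dest = n * dest + scalar   (DO_VMLAS(D, N, M) = N*D + M) */
static int32_t vmlas_lane(int32_t d, int32_t n, int32_t scalar)
{
    return n * d + scalar;
}

int main(void)
{
    printf("VMLA:  %d\n", vmla_lane(10, 3, 4));   /* 3*4 + 10 = 22 */
    printf("VMLAS: %d\n", vmlas_lane(10, 3, 4));  /* 3*10 + 4 = 34 */
    return 0;
}
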
1
The MVEGenDualAccOpFn is a bit misnamed, since it is used for
1
From: Gavin Shan <gshan@redhat.com>
2
the "long dual accumulate" operations that use a 64-bit
3
accumulator. Rename it to MVEGenLongDualAccOpFn so we can
4
use the former name for the 32-bit accumulator insns.
5
2
3
When CPU-to-NUMA association isn't explicitly provided by users,
4
the default one is given by mc->get_default_cpu_node_id(). However,
5
the CPU topology isn't fully considered in the default association
6
and this causes CPU topology broken warnings when booting the Linux guest.
7
8
For example, the following warning messages are observed when the
9
Linux guest is booted with the following command lines.
10
11
/home/gavin/sandbox/qemu.main/build/qemu-system-aarch64 \
12
-accel kvm -machine virt,gic-version=host \
13
-cpu host \
14
-smp 6,sockets=2,cores=3,threads=1 \
15
-m 1024M,slots=16,maxmem=64G \
16
-object memory-backend-ram,id=mem0,size=128M \
17
-object memory-backend-ram,id=mem1,size=128M \
18
-object memory-backend-ram,id=mem2,size=128M \
19
-object memory-backend-ram,id=mem3,size=128M \
20
-object memory-backend-ram,id=mem4,size=128M \
21
-object memory-backend-ram,id=mem4,size=384M \
22
-numa node,nodeid=0,memdev=mem0 \
23
-numa node,nodeid=1,memdev=mem1 \
24
-numa node,nodeid=2,memdev=mem2 \
25
-numa node,nodeid=3,memdev=mem3 \
26
-numa node,nodeid=4,memdev=mem4 \
27
-numa node,nodeid=5,memdev=mem5
28
:
29
alternatives: patching kernel code
30
BUG: arch topology borken
31
the CLS domain not a subset of the MC domain
32
<the above error log repeats>
33
BUG: arch topology borken
34
the DIE domain not a subset of the NODE domain
35
36
With the current implementation of mc->get_default_cpu_node_id(),
37
CPU#0 to CPU#5 are associated with NODE#0 to NODE#5 separately.
38
That's incorrect because CPU#0/1/2 should be associated with the same
39
NUMA node because they're seated in the same socket.
40
41
This fixes the issue by considering the socket ID when the default
42
CPU-to-NUMA association is provided in virt_possible_cpu_arch_ids().
43
With this applied, no more CPU topology broken warnings are seen
44
from the Linux guest. The 6 CPUs are associated with NODE#0/1, but
45
there are no CPUs associated with NODE#2/3/4/5.
46
47
Signed-off-by: Gavin Shan <gshan@redhat.com>
48
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
49
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
50
Message-id: 20220503140304.855514-6-gshan@redhat.com
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
51
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
---
52
---
9
target/arm/translate-mve.c | 16 ++++++++--------
53
hw/arm/virt.c | 4 +++-
10
1 file changed, 8 insertions(+), 8 deletions(-)
54
1 file changed, 3 insertions(+), 1 deletion(-)
11
55
12
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
56
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
13
index XXXXXXX..XXXXXXX 100644
57
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/translate-mve.c
58
--- a/hw/arm/virt.c
15
+++ b/target/arm/translate-mve.c
59
+++ b/hw/arm/virt.c
16
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
60
@@ -XXX,XX +XXX,XX @@ virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
17
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
61
18
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
62
static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx)
19
typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
63
{
20
-typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
64
- return idx % ms->numa_state->num_nodes;
21
+typedef void MVEGenLongDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
65
+ int64_t socket_id = ms->possible_cpus->cpus[idx].props.socket_id;
22
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
66
+
23
typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
67
+ return socket_id % ms->numa_state->num_nodes;
24
typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
25
@@ -XXX,XX +XXX,XX @@ static bool trans_VQDMULLT_scalar(DisasContext *s, arg_2scalar *a)
26
}
68
}
27
69
28
static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
70
static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
29
- MVEGenDualAccOpFn *fn)
30
+ MVEGenLongDualAccOpFn *fn)
31
{
32
TCGv_ptr qn, qm;
33
TCGv_i64 rda;
34
@@ -XXX,XX +XXX,XX @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
35
36
static bool trans_VMLALDAV_S(DisasContext *s, arg_vmlaldav *a)
37
{
38
- static MVEGenDualAccOpFn * const fns[4][2] = {
39
+ static MVEGenLongDualAccOpFn * const fns[4][2] = {
40
{ NULL, NULL },
41
{ gen_helper_mve_vmlaldavsh, gen_helper_mve_vmlaldavxsh },
42
{ gen_helper_mve_vmlaldavsw, gen_helper_mve_vmlaldavxsw },
43
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLALDAV_S(DisasContext *s, arg_vmlaldav *a)
44
45
static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a)
46
{
47
- static MVEGenDualAccOpFn * const fns[4][2] = {
48
+ static MVEGenLongDualAccOpFn * const fns[4][2] = {
49
{ NULL, NULL },
50
{ gen_helper_mve_vmlaldavuh, NULL },
51
{ gen_helper_mve_vmlaldavuw, NULL },
52
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a)
53
54
static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a)
55
{
56
- static MVEGenDualAccOpFn * const fns[4][2] = {
57
+ static MVEGenLongDualAccOpFn * const fns[4][2] = {
58
{ NULL, NULL },
59
{ gen_helper_mve_vmlsldavsh, gen_helper_mve_vmlsldavxsh },
60
{ gen_helper_mve_vmlsldavsw, gen_helper_mve_vmlsldavxsw },
61
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a)
62
63
static bool trans_VRMLALDAVH_S(DisasContext *s, arg_vmlaldav *a)
64
{
65
- static MVEGenDualAccOpFn * const fns[] = {
66
+ static MVEGenLongDualAccOpFn * const fns[] = {
67
gen_helper_mve_vrmlaldavhsw, gen_helper_mve_vrmlaldavhxsw,
68
};
69
return do_long_dual_acc(s, a, fns[a->x]);
70
@@ -XXX,XX +XXX,XX @@ static bool trans_VRMLALDAVH_S(DisasContext *s, arg_vmlaldav *a)
71
72
static bool trans_VRMLALDAVH_U(DisasContext *s, arg_vmlaldav *a)
73
{
74
- static MVEGenDualAccOpFn * const fns[] = {
75
+ static MVEGenLongDualAccOpFn * const fns[] = {
76
gen_helper_mve_vrmlaldavhuw, NULL,
77
};
78
return do_long_dual_acc(s, a, fns[a->x]);
79
@@ -XXX,XX +XXX,XX @@ static bool trans_VRMLALDAVH_U(DisasContext *s, arg_vmlaldav *a)
80
81
static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
82
{
83
- static MVEGenDualAccOpFn * const fns[] = {
84
+ static MVEGenLongDualAccOpFn * const fns[] = {
85
gen_helper_mve_vrmlsldavhsw, gen_helper_mve_vrmlsldavhxsw,
86
};
87
return do_long_dual_acc(s, a, fns[a->x]);
88
--
71
--
89
2.20.1
72
2.25.1
90
91
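The effect of the virt_get_default_cpu_node_id() change above, for the -smp 6,sockets=2,cores=3,threads=1 example with six NUMA nodes quoted in the commit message, can be sketched as follows (old mapping: cpu index modulo nodes; new mapping: socket ID modulo nodes):

#include <stdio.h>

int main(void)
{
    const int num_nodes = 6, cores_per_socket = 3, cpus = 6;

    for (int idx = 0; idx < cpus; idx++) {
        int socket_id = idx / cores_per_socket;
        printf("cpu%d: old node %d, new node %d\n",
               idx, idx % num_nodes, socket_id % num_nodes);
    }
    return 0;
}
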
1
In some situations we need a mask telling us which parts of the
1
From: Gavin Shan <gshan@redhat.com>
2
vector correspond to beats that are not being executed because of
3
ECI, separately from the combined "which bytes are predicated away"
4
mask. Factor this mask calculation out of mve_element_mask() into
5
its own function.
6
2
3
When the PPTT table is built, the CPU topology is re-calculated, but
4
it's unnecessary because the CPU topology has been populated in
5
virt_possible_cpu_arch_ids() on arm/virt machine.
6
7
This reworks build_pptt() to avoid that by reusing the existing IDs in
8
ms->possible_cpus. Currently, the only user of build_pptt() is
9
arm/virt machine.
10
11
Signed-off-by: Gavin Shan <gshan@redhat.com>
12
Tested-by: Yanan Wang <wangyanan55@huawei.com>
13
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
14
Acked-by: Igor Mammedov <imammedo@redhat.com>
15
Acked-by: Michael S. Tsirkin <mst@redhat.com>
16
Message-id: 20220503140304.855514-7-gshan@redhat.com
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
---
18
---
10
target/arm/mve_helper.c | 58 ++++++++++++++++++++++++-----------------
19
hw/acpi/aml-build.c | 111 +++++++++++++++++++-------------------------
11
1 file changed, 34 insertions(+), 24 deletions(-)
20
1 file changed, 48 insertions(+), 63 deletions(-)
12
21
13
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
22
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
14
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/mve_helper.c
24
--- a/hw/acpi/aml-build.c
16
+++ b/target/arm/mve_helper.c
25
+++ b/hw/acpi/aml-build.c
17
@@ -XXX,XX +XXX,XX @@
26
@@ -XXX,XX +XXX,XX @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
18
#include "exec/exec-all.h"
27
const char *oem_id, const char *oem_table_id)
19
#include "tcg/tcg.h"
20
21
+static uint16_t mve_eci_mask(CPUARMState *env)
22
+{
23
+ /*
24
+ * Return the mask of which elements in the MVE vector correspond
25
+ * to beats being executed. The mask has 1 bits for executed lanes
26
+ * and 0 bits where ECI says this beat was already executed.
27
+ */
28
+ int eci;
29
+
30
+ if ((env->condexec_bits & 0xf) != 0) {
31
+ return 0xffff;
32
+ }
33
+
34
+ eci = env->condexec_bits >> 4;
35
+ switch (eci) {
36
+ case ECI_NONE:
37
+ return 0xffff;
38
+ case ECI_A0:
39
+ return 0xfff0;
40
+ case ECI_A0A1:
41
+ return 0xff00;
42
+ case ECI_A0A1A2:
43
+ case ECI_A0A1A2B0:
44
+ return 0xf000;
45
+ default:
46
+ g_assert_not_reached();
47
+ }
48
+}
49
+
50
static uint16_t mve_element_mask(CPUARMState *env)
51
{
28
{
52
/*
29
MachineClass *mc = MACHINE_GET_CLASS(ms);
53
@@ -XXX,XX +XXX,XX @@ static uint16_t mve_element_mask(CPUARMState *env)
30
- GQueue *list = g_queue_new();
54
mask &= ltpmask;
31
- guint pptt_start = table_data->len;
55
}
32
- guint parent_offset;
56
33
- guint length, i;
57
- if ((env->condexec_bits & 0xf) == 0) {
34
- int uid = 0;
58
- /*
35
- int socket;
59
- * ECI bits indicate which beats are already executed;
36
+ CPUArchIdList *cpus = ms->possible_cpus;
60
- * we handle this by effectively predicating them out.
37
+ int64_t socket_id = -1, cluster_id = -1, core_id = -1;
61
- */
38
+ uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
62
- int eci = env->condexec_bits >> 4;
39
+ uint32_t pptt_start = table_data->len;
63
- switch (eci) {
40
+ int n;
64
- case ECI_NONE:
41
AcpiTable table = { .sig = "PPTT", .rev = 2,
65
- break;
42
.oem_id = oem_id, .oem_table_id = oem_table_id };
66
- case ECI_A0:
43
67
- mask &= 0xfff0;
44
acpi_table_begin(&table, table_data);
68
- break;
45
69
- case ECI_A0A1:
46
- for (socket = 0; socket < ms->smp.sockets; socket++) {
70
- mask &= 0xff00;
47
- g_queue_push_tail(list,
71
- break;
48
- GUINT_TO_POINTER(table_data->len - pptt_start));
72
- case ECI_A0A1A2:
49
- build_processor_hierarchy_node(
73
- case ECI_A0A1A2B0:
50
- table_data,
74
- mask &= 0xf000;
51
- /*
75
- break;
52
- * Physical package - represents the boundary
76
- default:
53
- * of a physical package
77
- g_assert_not_reached();
54
- */
78
- }
55
- (1 << 0),
56
- 0, socket, NULL, 0);
79
- }
57
- }
80
-
58
-
59
- if (mc->smp_props.clusters_supported) {
60
- length = g_queue_get_length(list);
61
- for (i = 0; i < length; i++) {
62
- int cluster;
63
-
64
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
65
- for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
66
- g_queue_push_tail(list,
67
- GUINT_TO_POINTER(table_data->len - pptt_start));
68
- build_processor_hierarchy_node(
69
- table_data,
70
- (0 << 0), /* not a physical package */
71
- parent_offset, cluster, NULL, 0);
72
- }
81
+ /*
73
+ /*
82
+ * ECI bits indicate which beats are already executed;
74
+ * This works with the assumption that cpus[n].props.*_id has been
83
+ * we handle this by effectively predicating them out.
75
+ * sorted from top to down levels in mc->possible_cpu_arch_ids().
76
+ * Otherwise, the unexpected and duplicated containers will be
77
+ * created.
84
+ */
78
+ */
85
+ mask &= mve_eci_mask(env);
79
+ for (n = 0; n < cpus->len; n++) {
86
return mask;
80
+ if (cpus->cpus[n].props.socket_id != socket_id) {
81
+ assert(cpus->cpus[n].props.socket_id > socket_id);
82
+ socket_id = cpus->cpus[n].props.socket_id;
83
+ cluster_id = -1;
84
+ core_id = -1;
85
+ socket_offset = table_data->len - pptt_start;
86
+ build_processor_hierarchy_node(table_data,
87
+ (1 << 0), /* Physical package */
88
+ 0, socket_id, NULL, 0);
89
}
90
- }
91
92
- length = g_queue_get_length(list);
93
- for (i = 0; i < length; i++) {
94
- int core;
95
-
96
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
97
- for (core = 0; core < ms->smp.cores; core++) {
98
- if (ms->smp.threads > 1) {
99
- g_queue_push_tail(list,
100
- GUINT_TO_POINTER(table_data->len - pptt_start));
101
- build_processor_hierarchy_node(
102
- table_data,
103
- (0 << 0), /* not a physical package */
104
- parent_offset, core, NULL, 0);
105
- } else {
106
- build_processor_hierarchy_node(
107
- table_data,
108
- (1 << 1) | /* ACPI Processor ID valid */
109
- (1 << 3), /* Node is a Leaf */
110
- parent_offset, uid++, NULL, 0);
111
+ if (mc->smp_props.clusters_supported) {
112
+ if (cpus->cpus[n].props.cluster_id != cluster_id) {
113
+ assert(cpus->cpus[n].props.cluster_id > cluster_id);
114
+ cluster_id = cpus->cpus[n].props.cluster_id;
115
+ core_id = -1;
116
+ cluster_offset = table_data->len - pptt_start;
117
+ build_processor_hierarchy_node(table_data,
118
+ (0 << 0), /* Not a physical package */
119
+ socket_offset, cluster_id, NULL, 0);
120
}
121
+ } else {
122
+ cluster_offset = socket_offset;
123
}
124
- }
125
126
- length = g_queue_get_length(list);
127
- for (i = 0; i < length; i++) {
128
- int thread;
129
+ if (ms->smp.threads == 1) {
130
+ build_processor_hierarchy_node(table_data,
131
+ (1 << 1) | /* ACPI Processor ID valid */
132
+ (1 << 3), /* Node is a Leaf */
133
+ cluster_offset, n, NULL, 0);
134
+ } else {
135
+ if (cpus->cpus[n].props.core_id != core_id) {
136
+ assert(cpus->cpus[n].props.core_id > core_id);
137
+ core_id = cpus->cpus[n].props.core_id;
138
+ core_offset = table_data->len - pptt_start;
139
+ build_processor_hierarchy_node(table_data,
140
+ (0 << 0), /* Not a physical package */
141
+ cluster_offset, core_id, NULL, 0);
142
+ }
143
144
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
145
- for (thread = 0; thread < ms->smp.threads; thread++) {
146
- build_processor_hierarchy_node(
147
- table_data,
148
+ build_processor_hierarchy_node(table_data,
149
(1 << 1) | /* ACPI Processor ID valid */
150
(1 << 2) | /* Processor is a Thread */
151
(1 << 3), /* Node is a Leaf */
152
- parent_offset, uid++, NULL, 0);
153
+ core_offset, n, NULL, 0);
154
}
155
}
156
157
- g_queue_free(list);
158
acpi_table_end(linker, &table);
87
}
159
}
88
160
89
--
161
--
90
2.20.1
162
2.25.1
91
92
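The reworked build_pptt() above boils down to walking the pre-sorted possible_cpus list and emitting a new container node only when an ID changes; a stand-alone sketch of that traversal for a 2-socket, 2-core, 2-thread shape (clusters omitted for brevity, no ACPI encoding, names illustrative):

#include <stdio.h>

struct cpu_props {
    int socket_id, core_id, thread_id;
};

int main(void)
{
    struct cpu_props cpus[8];
    int n = 0;

    for (int s = 0; s < 2; s++) {
        for (int c = 0; c < 2; c++) {
            for (int t = 0; t < 2; t++) {
                cpus[n++] = (struct cpu_props){ s, c, t };
            }
        }
    }

    int socket_id = -1, core_id = -1;
    for (int i = 0; i < n; i++) {
        if (cpus[i].socket_id != socket_id) {
            socket_id = cpus[i].socket_id;
            core_id = -1;
            printf("package node %d\n", socket_id);
        }
        if (cpus[i].core_id != core_id) {
            core_id = cpus[i].core_id;
            printf("  core node %d\n", core_id);
        }
        printf("    thread leaf for cpu %d\n", i);
    }
    return 0;
}
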
Deleted patch
1
For vector loads, predicated elements are zeroed, instead of
2
retaining their previous values (as happens for most data
3
processing operations). This means we need to distinguish
4
"beat not executed due to ECI" (don't touch destination
5
element) from "beat executed but predicated out" (zero
6
destination element).
7
1
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
target/arm/mve_helper.c | 8 +++++---
12
1 file changed, 5 insertions(+), 3 deletions(-)
13
14
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/mve_helper.c
17
+++ b/target/arm/mve_helper.c
18
@@ -XXX,XX +XXX,XX @@ static void mve_advance_vpt(CPUARMState *env)
19
env->v7m.vpr = vpr;
20
}
21
22
-
23
+/* For loads, predicated lanes are zeroed instead of keeping their old values */
24
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
25
void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
26
{ \
27
TYPE *d = vd; \
28
uint16_t mask = mve_element_mask(env); \
29
+ uint16_t eci_mask = mve_eci_mask(env); \
30
unsigned b, e; \
31
/* \
32
* R_SXTM allows the dest reg to become UNKNOWN for abandoned \
33
@@ -XXX,XX +XXX,XX @@ static void mve_advance_vpt(CPUARMState *env)
34
* then take an exception. \
35
*/ \
36
for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
37
- if (mask & (1 << b)) { \
38
- d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
39
+ if (eci_mask & (1 << b)) { \
40
+ d[H##ESIZE(e)] = (mask & (1 << b)) ? \
41
+ cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
42
} \
43
addr += MSIZE; \
44
} \
45
--
46
2.20.1
47
48
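The distinction the deleted patch above draws, between lanes skipped because their beat was already executed (ECI) and lanes that execute but are predicated out, can be modelled per byte lane as follows (masks and sizes are illustrative; this is not the QEMU helper):

#include <stdint.h>
#include <stdio.h>

/*
 * ECI-skipped lanes keep their old value, predicated-out lanes are
 * written with zero, active lanes take the loaded value.
 */
static void vldr_ref(uint8_t *dest, const uint8_t *mem,
                     uint16_t pred_mask, uint16_t eci_mask)
{
    for (int b = 0; b < 16; b++) {
        if (eci_mask & (1u << b)) {
            dest[b] = (pred_mask & (1u << b)) ? mem[b] : 0;
        }
        /* else: beat already executed, leave dest[b] untouched */
    }
}

int main(void)
{
    uint8_t dest[16], mem[16];

    for (int i = 0; i < 16; i++) {
        dest[i] = 0xaa;   /* stale destination contents */
        mem[i] = i;
    }
    /* Bytes 0-3 already executed (ECI), byte 4 predicated out. */
    vldr_ref(dest, mem, 0xffef, 0xfff0);
    for (int i = 0; i < 16; i++) {
        printf("%02x ", dest[i]);   /* aa aa aa aa 00 05 06 ... 0f */
    }
    printf("\n");
    return 0;
}
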
Deleted patch
1
Implement the MVE VMULL (polynomial) insn. Unlike Neon, this comes
2
in two flavours: 8x8->16 and a 16x16->32. Also unlike Neon, the
3
inputs are in either the low or the high half of each double-width
4
element.
5
1
6
The assembler for this insn indicates the size with "P8" or "P16",
7
encoded into bit 28 as size = 0 or 1. We choose to follow the
8
same encoding as VQDMULL and decode this into a->size as MO_16
9
or MO_32 indicating the size of the result elements. This then
10
carries through to the helper function names where it then
11
matches up with the existing pmull_h() which does an 8x8->16
12
operation and a new pmull_w() which does the 16x16->32.
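
As a quick worked example of the per-lane operation: the polynomial product of 0x0b and 0x05 is 0x27 rather than the integer product 0x37, because the partial products are combined with XOR. A minimal standalone sketch (not the QEMU helper; the names are invented for the example):

    /* Carry-less (polynomial) multiply of two 8-bit values into a
     * 16-bit result: what each lane of VMULL.P8 computes.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t clmul_8x8(uint8_t a, uint8_t b)
    {
        uint16_t result = 0;
        for (int i = 0; i < 8; i++) {
            if (b & (1 << i)) {
                result ^= (uint16_t)(a << i);   /* XOR in a shifted copy of a */
            }
        }
        return result;
    }

    int main(void)
    {
        printf("0x%04x\n", clmul_8x8(0x0b, 0x05));   /* prints 0x0027 */
        return 0;
    }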
13
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
---
17
target/arm/helper-mve.h | 5 +++++
18
target/arm/vec_internal.h | 11 +++++++++++
19
target/arm/mve.decode | 14 ++++++++++----
20
target/arm/mve_helper.c | 16 ++++++++++++++++
21
target/arm/translate-mve.c | 28 ++++++++++++++++++++++++++++
22
target/arm/vec_helper.c | 14 +++++++++++++-
23
6 files changed, 83 insertions(+), 5 deletions(-)
24
25
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/helper-mve.h
28
+++ b/target/arm/helper-mve.h
29
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
30
DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
31
DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
32
33
+DEF_HELPER_FLAGS_4(mve_vmullpbh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
34
+DEF_HELPER_FLAGS_4(mve_vmullpth, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
35
+DEF_HELPER_FLAGS_4(mve_vmullpbw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
36
+DEF_HELPER_FLAGS_4(mve_vmullptw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
37
+
38
DEF_HELPER_FLAGS_4(mve_vqdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
39
DEF_HELPER_FLAGS_4(mve_vqdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
40
DEF_HELPER_FLAGS_4(mve_vqdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
41
diff --git a/target/arm/vec_internal.h b/target/arm/vec_internal.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/vec_internal.h
44
+++ b/target/arm/vec_internal.h
45
@@ -XXX,XX +XXX,XX @@ int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *);
46
int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *);
47
int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);
48
49
+/*
50
+ * 8 x 8 -> 16 vector polynomial multiply where the inputs are
51
+ * in the low 8 bits of each 16-bit element
52
+*/
53
+uint64_t pmull_h(uint64_t op1, uint64_t op2);
54
+/*
55
+ * 16 x 16 -> 32 vector polynomial multiply where the inputs are
56
+ * in the low 16 bits of each 32-bit element
57
+ */
58
+uint64_t pmull_w(uint64_t op1, uint64_t op2);
59
+
60
#endif /* TARGET_ARM_VEC_INTERNALS_H */
61
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/arm/mve.decode
64
+++ b/target/arm/mve.decode
65
@@ -XXX,XX +XXX,XX @@ VHADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
66
VHSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
67
VHSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
68
69
-VMULL_BS 111 0 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
70
-VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
71
-VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
72
-VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
73
+{
74
+ VMULLP_B 111 . 1110 0 . 11 ... 1 ... 0 1110 . 0 . 0 ... 0 @2op_sz28
75
+ VMULL_BS 111 0 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
76
+ VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
77
+}
78
+{
79
+ VMULLP_T 111 . 1110 0 . 11 ... 1 ... 1 1110 . 0 . 0 ... 0 @2op_sz28
80
+ VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
81
+ VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
82
+}
83
84
VQDMULH 1110 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
85
VQRDMULH 1111 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
86
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/target/arm/mve_helper.c
89
+++ b/target/arm/mve_helper.c
90
@@ -XXX,XX +XXX,XX @@ DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
91
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
92
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)
93
94
+/*
95
+ * Polynomial multiply. We can always do this generating 64 bits
96
+ * of the result at a time, so we don't need to use DO_2OP_L.
97
+ */
98
+#define VMULLPH_MASK 0x00ff00ff00ff00ffULL
99
+#define VMULLPW_MASK 0x0000ffff0000ffffULL
100
+#define DO_VMULLPBH(N, M) pmull_h((N) & VMULLPH_MASK, (M) & VMULLPH_MASK)
101
+#define DO_VMULLPTH(N, M) DO_VMULLPBH((N) >> 8, (M) >> 8)
102
+#define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK)
103
+#define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16)
104
+
105
+DO_2OP(vmullpbh, 8, uint64_t, DO_VMULLPBH)
106
+DO_2OP(vmullpth, 8, uint64_t, DO_VMULLPTH)
107
+DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW)
108
+DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW)
109
+
110
/*
111
* Because the computation type is at least twice as large as required,
112
* these work for both signed and unsigned source types.
113
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
114
index XXXXXXX..XXXXXXX 100644
115
--- a/target/arm/translate-mve.c
116
+++ b/target/arm/translate-mve.c
117
@@ -XXX,XX +XXX,XX @@ static bool trans_VQDMULLT(DisasContext *s, arg_2op *a)
118
return do_2op(s, a, fns[a->size]);
119
}
120
121
+static bool trans_VMULLP_B(DisasContext *s, arg_2op *a)
122
+{
123
+ /*
124
+ * Note that a->size indicates the output size, ie VMULL.P8
125
+ * is the 8x8->16 operation and a->size is MO_16; VMULL.P16
126
+ * is the 16x16->32 operation and a->size is MO_32.
127
+ */
128
+ static MVEGenTwoOpFn * const fns[] = {
129
+ NULL,
130
+ gen_helper_mve_vmullpbh,
131
+ gen_helper_mve_vmullpbw,
132
+ NULL,
133
+ };
134
+ return do_2op(s, a, fns[a->size]);
135
+}
136
+
137
+static bool trans_VMULLP_T(DisasContext *s, arg_2op *a)
138
+{
139
+ /* a->size is as for trans_VMULLP_B */
140
+ static MVEGenTwoOpFn * const fns[] = {
141
+ NULL,
142
+ gen_helper_mve_vmullpth,
143
+ gen_helper_mve_vmullptw,
144
+ NULL,
145
+ };
146
+ return do_2op(s, a, fns[a->size]);
147
+}
148
+
149
/*
150
* VADC and VSBC: these perform an add-with-carry or subtract-with-carry
151
* of the 32-bit elements in each lane of the input vectors, where the
152
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
153
index XXXXXXX..XXXXXXX 100644
154
--- a/target/arm/vec_helper.c
155
+++ b/target/arm/vec_helper.c
156
@@ -XXX,XX +XXX,XX @@ static uint64_t expand_byte_to_half(uint64_t x)
157
| ((x & 0xff000000) << 24);
158
}
159
160
-static uint64_t pmull_h(uint64_t op1, uint64_t op2)
161
+uint64_t pmull_w(uint64_t op1, uint64_t op2)
162
{
163
uint64_t result = 0;
164
int i;
165
+ for (i = 0; i < 16; ++i) {
166
+ uint64_t mask = (op1 & 0x0000000100000001ull) * 0xffffffff;
167
+ result ^= op2 & mask;
168
+ op1 >>= 1;
169
+ op2 <<= 1;
170
+ }
171
+ return result;
172
+}
173
174
+uint64_t pmull_h(uint64_t op1, uint64_t op2)
175
+{
176
+ uint64_t result = 0;
177
+ int i;
178
for (i = 0; i < 8; ++i) {
179
uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
180
result ^= op2 & mask;
181
--
182
2.20.1
183
184
Deleted patch
Implement the MVE integer vector comparison instructions. These are
2
"VCMP (vector)" encodings T1, T2 and T3, and "VPT (vector)" encodings
3
T1, T2 and T3.
4
1
5
These insns compare corresponding elements in each vector, and update
6
the VPR.P0 predicate bits with the results of the comparison. VPT
7
also sets the VPR.MASK01 and VPR.MASK23 fields -- it is effectively
8
"VCMP then VPST".
9
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
target/arm/helper-mve.h | 32 ++++++++++++++++++++++
14
target/arm/mve.decode | 18 +++++++++++-
15
target/arm/mve_helper.c | 56 ++++++++++++++++++++++++++++++++++++++
16
target/arm/translate-mve.c | 47 ++++++++++++++++++++++++++++++++
17
4 files changed, 152 insertions(+), 1 deletion(-)
18
19
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/helper-mve.h
22
+++ b/target/arm/helper-mve.h
23
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
24
DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
25
DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
26
DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32)
27
+
28
+DEF_HELPER_FLAGS_3(mve_vcmpeqb, TCG_CALL_NO_WG, void, env, ptr, ptr)
29
+DEF_HELPER_FLAGS_3(mve_vcmpeqh, TCG_CALL_NO_WG, void, env, ptr, ptr)
30
+DEF_HELPER_FLAGS_3(mve_vcmpeqw, TCG_CALL_NO_WG, void, env, ptr, ptr)
31
+
32
+DEF_HELPER_FLAGS_3(mve_vcmpneb, TCG_CALL_NO_WG, void, env, ptr, ptr)
33
+DEF_HELPER_FLAGS_3(mve_vcmpneh, TCG_CALL_NO_WG, void, env, ptr, ptr)
34
+DEF_HELPER_FLAGS_3(mve_vcmpnew, TCG_CALL_NO_WG, void, env, ptr, ptr)
35
+
36
+DEF_HELPER_FLAGS_3(mve_vcmpcsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
37
+DEF_HELPER_FLAGS_3(mve_vcmpcsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
38
+DEF_HELPER_FLAGS_3(mve_vcmpcsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
39
+
40
+DEF_HELPER_FLAGS_3(mve_vcmphib, TCG_CALL_NO_WG, void, env, ptr, ptr)
41
+DEF_HELPER_FLAGS_3(mve_vcmphih, TCG_CALL_NO_WG, void, env, ptr, ptr)
42
+DEF_HELPER_FLAGS_3(mve_vcmphiw, TCG_CALL_NO_WG, void, env, ptr, ptr)
43
+
44
+DEF_HELPER_FLAGS_3(mve_vcmpgeb, TCG_CALL_NO_WG, void, env, ptr, ptr)
45
+DEF_HELPER_FLAGS_3(mve_vcmpgeh, TCG_CALL_NO_WG, void, env, ptr, ptr)
46
+DEF_HELPER_FLAGS_3(mve_vcmpgew, TCG_CALL_NO_WG, void, env, ptr, ptr)
47
+
48
+DEF_HELPER_FLAGS_3(mve_vcmpltb, TCG_CALL_NO_WG, void, env, ptr, ptr)
49
+DEF_HELPER_FLAGS_3(mve_vcmplth, TCG_CALL_NO_WG, void, env, ptr, ptr)
50
+DEF_HELPER_FLAGS_3(mve_vcmpltw, TCG_CALL_NO_WG, void, env, ptr, ptr)
51
+
52
+DEF_HELPER_FLAGS_3(mve_vcmpgtb, TCG_CALL_NO_WG, void, env, ptr, ptr)
53
+DEF_HELPER_FLAGS_3(mve_vcmpgth, TCG_CALL_NO_WG, void, env, ptr, ptr)
54
+DEF_HELPER_FLAGS_3(mve_vcmpgtw, TCG_CALL_NO_WG, void, env, ptr, ptr)
55
+
56
+DEF_HELPER_FLAGS_3(mve_vcmpleb, TCG_CALL_NO_WG, void, env, ptr, ptr)
57
+DEF_HELPER_FLAGS_3(mve_vcmpleh, TCG_CALL_NO_WG, void, env, ptr, ptr)
58
+DEF_HELPER_FLAGS_3(mve_vcmplew, TCG_CALL_NO_WG, void, env, ptr, ptr)
59
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/arm/mve.decode
62
+++ b/target/arm/mve.decode
63
@@ -XXX,XX +XXX,XX @@
64
&2shift qd qm shift size
65
&vidup qd rn size imm
66
&viwdup qd rn rm size imm
67
+&vcmp qm qn size mask
68
69
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
70
# Note that both Rn and Qd are 3 bits only (no D bit)
71
@@ -XXX,XX +XXX,XX @@
72
@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \
73
size=2 shift=%rshift_i5
74
75
+# Vector comparison; 4-bit Qm but 3-bit Qn
76
+%mask_22_13 22:1 13:3
77
+@vcmp .... .... .. size:2 qn:3 . .... .... .... .... &vcmp qm=%qm mask=%mask_22_13
78
+
79
# Vector loads and stores
80
81
# Widening loads and narrowing stores:
82
@@ -XXX,XX +XXX,XX @@ VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
83
}
84
85
# Predicate operations
86
-%mask_22_13 22:1 13:3
87
VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
88
89
# Logical immediate operations (1 reg and modified-immediate)
90
@@ -XXX,XX +XXX,XX @@ VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
91
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
92
93
VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
94
+
95
+# Comparisons. We expand out the conditions which are split across
96
+# encodings T1, T2, T3 and the fc bits. These include VPT, which is
97
+# effectively "VCMP then VPST". A plain "VCMP" has a mask field of zero.
98
+VCMPEQ 1111 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 0 @vcmp
99
+VCMPNE 1111 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 0 @vcmp
100
+VCMPCS 1111 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 1 @vcmp
101
+VCMPHI 1111 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 1 @vcmp
102
+VCMPGE 1111 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp
103
+VCMPLT 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp
104
+VCMPGT 1111 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp
105
+VCMPLE 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp
106
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/target/arm/mve_helper.c
109
+++ b/target/arm/mve_helper.c
110
@@ -XXX,XX +XXX,XX @@ static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
111
DO_VIDUP_ALL(vidup, DO_ADD)
112
DO_VIWDUP_ALL(viwdup, do_add_wrap)
113
DO_VIWDUP_ALL(vdwdup, do_sub_wrap)
114
+
115
+/*
116
+ * Vector comparison.
117
+ * P0 bits for non-executed beats (where eci_mask is 0) are unchanged.
118
+ * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
119
+ * P0 bits otherwise are updated with the results of the comparisons.
120
+ * We must also keep unchanged the MASK fields at the top of v7m.vpr.
121
+ */
122
+#define DO_VCMP(OP, ESIZE, TYPE, FN) \
123
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \
124
+ { \
125
+ TYPE *n = vn, *m = vm; \
126
+ uint16_t mask = mve_element_mask(env); \
127
+ uint16_t eci_mask = mve_eci_mask(env); \
128
+ uint16_t beatpred = 0; \
129
+ uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
130
+ unsigned e; \
131
+ for (e = 0; e < 16 / ESIZE; e++) { \
132
+ bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \
133
+ /* Comparison sets 0/1 bits for each byte in the element */ \
134
+ beatpred |= r * emask; \
135
+ emask <<= ESIZE; \
136
+ } \
137
+ beatpred &= mask; \
138
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
139
+ (beatpred & eci_mask); \
140
+ mve_advance_vpt(env); \
141
+ }
142
+
143
+#define DO_VCMP_S(OP, FN) \
144
+ DO_VCMP(OP##b, 1, int8_t, FN) \
145
+ DO_VCMP(OP##h, 2, int16_t, FN) \
146
+ DO_VCMP(OP##w, 4, int32_t, FN)
147
+
148
+#define DO_VCMP_U(OP, FN) \
149
+ DO_VCMP(OP##b, 1, uint8_t, FN) \
150
+ DO_VCMP(OP##h, 2, uint16_t, FN) \
151
+ DO_VCMP(OP##w, 4, uint32_t, FN)
152
+
153
+#define DO_EQ(N, M) ((N) == (M))
154
+#define DO_NE(N, M) ((N) != (M))
155
+#define DO_EQ(N, M) ((N) == (M))
156
+#define DO_EQ(N, M) ((N) == (M))
157
+#define DO_GE(N, M) ((N) >= (M))
158
+#define DO_LT(N, M) ((N) < (M))
159
+#define DO_GT(N, M) ((N) > (M))
160
+#define DO_LE(N, M) ((N) <= (M))
161
+
162
+DO_VCMP_U(vcmpeq, DO_EQ)
163
+DO_VCMP_U(vcmpne, DO_NE)
164
+DO_VCMP_U(vcmpcs, DO_GE)
165
+DO_VCMP_U(vcmphi, DO_GT)
166
+DO_VCMP_S(vcmpge, DO_GE)
167
+DO_VCMP_S(vcmplt, DO_LT)
168
+DO_VCMP_S(vcmpgt, DO_GT)
169
+DO_VCMP_S(vcmple, DO_LE)
170
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/target/arm/translate-mve.c
173
+++ b/target/arm/translate-mve.c
174
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
175
typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
176
typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
177
typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
178
+typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
179
180
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
181
static inline long mve_qreg_offset(unsigned reg)
182
@@ -XXX,XX +XXX,XX @@ static bool trans_VDWDUP(DisasContext *s, arg_viwdup *a)
183
};
184
return do_viwdup(s, a, fns[a->size]);
185
}
186
+
187
+static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn)
188
+{
189
+ TCGv_ptr qn, qm;
190
+
191
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qm) ||
192
+ !fn) {
193
+ return false;
194
+ }
195
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
196
+ return true;
197
+ }
198
+
199
+ qn = mve_qreg_ptr(a->qn);
200
+ qm = mve_qreg_ptr(a->qm);
201
+ fn(cpu_env, qn, qm);
202
+ tcg_temp_free_ptr(qn);
203
+ tcg_temp_free_ptr(qm);
204
+ if (a->mask) {
205
+ /* VPT */
206
+ gen_vpst(s, a->mask);
207
+ }
208
+ mve_update_eci(s);
209
+ return true;
210
+}
211
+
212
+#define DO_VCMP(INSN, FN) \
213
+ static bool trans_##INSN(DisasContext *s, arg_vcmp *a) \
214
+ { \
215
+ static MVEGenCmpFn * const fns[] = { \
216
+ gen_helper_mve_##FN##b, \
217
+ gen_helper_mve_##FN##h, \
218
+ gen_helper_mve_##FN##w, \
219
+ NULL, \
220
+ }; \
221
+ return do_vcmp(s, a, fns[a->size]); \
222
+ }
223
+
224
+DO_VCMP(VCMPEQ, vcmpeq)
225
+DO_VCMP(VCMPNE, vcmpne)
226
+DO_VCMP(VCMPCS, vcmpcs)
227
+DO_VCMP(VCMPHI, vcmphi)
228
+DO_VCMP(VCMPGE, vcmpge)
229
+DO_VCMP(VCMPLT, vcmplt)
230
+DO_VCMP(VCMPGT, vcmpgt)
231
+DO_VCMP(VCMPLE, vcmple)
232
--
233
2.20.1
234
235
Deleted patch
Implement the MVE VPSEL insn, which sets each byte of the destination
2
vector Qd to the byte from either Qn or Qm depending on the value of
3
the corresponding bit in VPR.P0.
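
A minimal standalone sketch of the underlying per-byte select, ignoring the usual beat-wise predication of the writes (not the QEMU helper; the names are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    static void vpsel_sketch(uint8_t *d, const uint8_t *n, const uint8_t *m,
                             uint16_t p0)
    {
        for (int b = 0; b < 16; b++) {
            d[b] = (p0 & (1 << b)) ? n[b] : m[b];   /* P0[b] picks Qn or Qm */
        }
    }

    int main(void)
    {
        uint8_t n[16], m[16], d[16];
        for (int i = 0; i < 16; i++) {
            n[i] = 0x10 + i;
            m[i] = 0x20 + i;
        }
        vpsel_sketch(d, n, m, 0x00FF);              /* low 8 bytes come from Qn */
        for (int i = 0; i < 16; i++) {
            printf("%02x ", d[i]);
        }
        printf("\n");
        return 0;
    }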
4
1
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/arm/helper-mve.h | 2 ++
9
target/arm/mve.decode | 7 +++++--
10
target/arm/mve_helper.c | 19 +++++++++++++++++++
11
target/arm/translate-mve.c | 2 ++
12
4 files changed, 28 insertions(+), 2 deletions(-)
13
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
17
+++ b/target/arm/helper-mve.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
19
DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
20
DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
21
22
+DEF_HELPER_FLAGS_4(mve_vpsel, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
23
+
24
DEF_HELPER_FLAGS_4(mve_vaddb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
25
DEF_HELPER_FLAGS_4(mve_vaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
26
DEF_HELPER_FLAGS_4(mve_vaddw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
27
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/mve.decode
30
+++ b/target/arm/mve.decode
31
@@ -XXX,XX +XXX,XX @@ VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
32
# effectively "VCMP then VPST". A plain "VCMP" has a mask field of zero.
33
VCMPEQ 1111 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 0 @vcmp
34
VCMPNE 1111 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 0 @vcmp
35
-VCMPCS 1111 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 1 @vcmp
36
-VCMPHI 1111 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 1 @vcmp
37
+{
38
+ VPSEL 1111 1110 0 . 11 ... 1 ... 0 1111 . 0 . 0 ... 1 @2op_nosz
39
+ VCMPCS 1111 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 1 @vcmp
40
+ VCMPHI 1111 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 1 @vcmp
41
+}
42
VCMPGE 1111 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp
43
VCMPLT 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp
44
VCMPGT 1111 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp
45
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/mve_helper.c
48
+++ b/target/arm/mve_helper.c
49
@@ -XXX,XX +XXX,XX @@ DO_VCMP_S(vcmpge, DO_GE)
50
DO_VCMP_S(vcmplt, DO_LT)
51
DO_VCMP_S(vcmpgt, DO_GT)
52
DO_VCMP_S(vcmple, DO_LE)
53
+
54
+void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
55
+{
56
+ /*
57
+ * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n]
58
+ * but note that whether bytes are written to Qd is still subject
59
+ * to (all forms of) predication in the usual way.
60
+ */
61
+ uint64_t *d = vd, *n = vn, *m = vm;
62
+ uint16_t mask = mve_element_mask(env);
63
+ uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
64
+ unsigned e;
65
+ for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) {
66
+ uint64_t r = m[H8(e)];
67
+ mergemask(&r, n[H8(e)], p0);
68
+ mergemask(&d[H8(e)], r, mask);
69
+ }
70
+ mve_advance_vpt(env);
71
+}
72
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/target/arm/translate-mve.c
75
+++ b/target/arm/translate-mve.c
76
@@ -XXX,XX +XXX,XX @@ DO_LOGIC(VORR, gen_helper_mve_vorr)
77
DO_LOGIC(VORN, gen_helper_mve_vorn)
78
DO_LOGIC(VEOR, gen_helper_mve_veor)
79
80
+DO_LOGIC(VPSEL, gen_helper_mve_vpsel)
81
+
82
#define DO_2OP(INSN, FN) \
83
static bool trans_##INSN(DisasContext *s, arg_2op *a) \
84
{ \
85
--
86
2.20.1
87
88
Deleted patch
Implement the MVE instructions which perform shifts by a scalar.
2
These are VSHL T2, VRSHL T2, VQSHL T1 and VQRSHL T2. They take the
3
shift amount in a general purpose register and shift every element in
4
the vector by that amount.
5
1
6
Mostly we can reuse the helper functions for shift-by-immediate; we
7
do need two new helpers for VQRSHL.
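
A rough standalone sketch of the idea for unsigned halfwords, showing a wrapping VSHL-style shift next to a saturating VQSHL-style one (not the QEMU helpers: negative shift counts and the saturation flag are ignored, and all names are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    static void vshl_u16_sketch(uint16_t *q, unsigned shift)
    {
        for (int e = 0; e < 8; e++) {
            q[e] = (uint16_t)(q[e] << shift);            /* wraps on overflow */
        }
    }

    static void vqshl_u16_sketch(uint16_t *q, unsigned shift)
    {
        for (int e = 0; e < 8; e++) {
            uint32_t r = (uint32_t)q[e] << shift;
            q[e] = r > UINT16_MAX ? UINT16_MAX : r;      /* saturates instead */
        }
    }

    int main(void)
    {
        uint16_t a[8] = { 1, 0x4000, 0x8000, 3, 4, 5, 6, 7 };
        uint16_t b[8] = { 1, 0x4000, 0x8000, 3, 4, 5, 6, 7 };
        vshl_u16_sketch(a, 2);                           /* same shift for every element */
        vqshl_u16_sketch(b, 2);
        printf("vshl:  0x%04x 0x%04x 0x%04x\n", a[0], a[1], a[2]);
        printf("vqshl: 0x%04x 0x%04x 0x%04x\n", b[0], b[1], b[2]);
        return 0;
    }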
8
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
target/arm/helper-mve.h | 8 +++++++
13
target/arm/mve.decode | 23 ++++++++++++++++---
14
target/arm/mve_helper.c | 2 ++
15
target/arm/translate-mve.c | 46 ++++++++++++++++++++++++++++++++++++++
16
4 files changed, 76 insertions(+), 3 deletions(-)
17
18
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-mve.h
21
+++ b/target/arm/helper-mve.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
26
+DEF_HELPER_FLAGS_4(mve_vqrshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
+DEF_HELPER_FLAGS_4(mve_vqrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
+DEF_HELPER_FLAGS_4(mve_vqrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
+
30
+DEF_HELPER_FLAGS_4(mve_vqrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_4(mve_vqrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_4(mve_vqrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
+
34
DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
35
DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
36
DEF_HELPER_FLAGS_4(mve_vshllbub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
37
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/mve.decode
40
+++ b/target/arm/mve.decode
41
@@ -XXX,XX +XXX,XX @@
42
&viwdup qd rn rm size imm
43
&vcmp qm qn size mask
44
&vcmp_scalar qn rm size mask
45
+&shl_scalar qda rm size
46
47
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
48
# Note that both Rn and Qd are 3 bits only (no D bit)
49
@@ -XXX,XX +XXX,XX @@
50
@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \
51
size=2 shift=%rshift_i5
52
53
+@shl_scalar .... .... .... size:2 .. .... .... .... rm:4 &shl_scalar qda=%qd
54
+
55
# Vector comparison; 4-bit Qm but 3-bit Qn
56
%mask_22_13 22:1 13:3
57
@vcmp .... .... .. size:2 qn:3 . .... .... .... .... &vcmp qm=%qm mask=%mask_22_13
58
@@ -XXX,XX +XXX,XX @@ VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_no
59
60
VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
61
VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar
62
-VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
63
+
64
+{
65
+ VSHL_S_scalar 1110 1110 0 . 11 .. 01 ... 1 1110 0110 .... @shl_scalar
66
+ VRSHL_S_scalar 1110 1110 0 . 11 .. 11 ... 1 1110 0110 .... @shl_scalar
67
+ VQSHL_S_scalar 1110 1110 0 . 11 .. 01 ... 1 1110 1110 .... @shl_scalar
68
+ VQRSHL_S_scalar 1110 1110 0 . 11 .. 11 ... 1 1110 1110 .... @shl_scalar
69
+ VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
70
+}
71
+
72
+{
73
+ VSHL_U_scalar 1111 1110 0 . 11 .. 01 ... 1 1110 0110 .... @shl_scalar
74
+ VRSHL_U_scalar 1111 1110 0 . 11 .. 11 ... 1 1110 0110 .... @shl_scalar
75
+ VQSHL_U_scalar 1111 1110 0 . 11 .. 01 ... 1 1110 1110 .... @shl_scalar
76
+ VQRSHL_U_scalar 1111 1110 0 . 11 .. 11 ... 1 1110 1110 .... @shl_scalar
77
+ VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
78
+}
79
+
80
VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
81
VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
82
VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
83
@@ -XXX,XX +XXX,XX @@ VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
84
size=%size_28
85
}
86
87
-VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
88
-
89
VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
90
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
91
92
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/target/arm/mve_helper.c
95
+++ b/target/arm/mve_helper.c
96
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
97
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
98
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
99
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
100
+DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
101
+DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)
102
103
/* Shift-and-insert; we always work with 64 bits at a time */
104
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
105
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/target/arm/translate-mve.c
108
+++ b/target/arm/translate-mve.c
109
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VRSHRI_U, vrshli_u, true)
110
DO_2SHIFT(VSRI, vsri, false)
111
DO_2SHIFT(VSLI, vsli, false)
112
113
+static bool do_2shift_scalar(DisasContext *s, arg_shl_scalar *a,
114
+ MVEGenTwoOpShiftFn *fn)
115
+{
116
+ TCGv_ptr qda;
117
+ TCGv_i32 rm;
118
+
119
+ if (!dc_isar_feature(aa32_mve, s) ||
120
+ !mve_check_qreg_bank(s, a->qda) ||
121
+ a->rm == 13 || a->rm == 15 || !fn) {
122
+ /* Rm cases are UNPREDICTABLE */
123
+ return false;
124
+ }
125
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
126
+ return true;
127
+ }
128
+
129
+ qda = mve_qreg_ptr(a->qda);
130
+ rm = load_reg(s, a->rm);
131
+ fn(cpu_env, qda, qda, rm);
132
+ tcg_temp_free_ptr(qda);
133
+ tcg_temp_free_i32(rm);
134
+ mve_update_eci(s);
135
+ return true;
136
+}
137
+
138
+#define DO_2SHIFT_SCALAR(INSN, FN) \
139
+ static bool trans_##INSN(DisasContext *s, arg_shl_scalar *a) \
140
+ { \
141
+ static MVEGenTwoOpShiftFn * const fns[] = { \
142
+ gen_helper_mve_##FN##b, \
143
+ gen_helper_mve_##FN##h, \
144
+ gen_helper_mve_##FN##w, \
145
+ NULL, \
146
+ }; \
147
+ return do_2shift_scalar(s, a, fns[a->size]); \
148
+ }
149
+
150
+DO_2SHIFT_SCALAR(VSHL_S_scalar, vshli_s)
151
+DO_2SHIFT_SCALAR(VSHL_U_scalar, vshli_u)
152
+DO_2SHIFT_SCALAR(VRSHL_S_scalar, vrshli_s)
153
+DO_2SHIFT_SCALAR(VRSHL_U_scalar, vrshli_u)
154
+DO_2SHIFT_SCALAR(VQSHL_S_scalar, vqshli_s)
155
+DO_2SHIFT_SCALAR(VQSHL_U_scalar, vqshli_u)
156
+DO_2SHIFT_SCALAR(VQRSHL_S_scalar, vqrshli_s)
157
+DO_2SHIFT_SCALAR(VQRSHL_U_scalar, vqrshli_u)
158
+
159
#define DO_VSHLL(INSN, FN) \
160
static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
161
{ \
162
--
163
2.20.1
164
165
Deleted patch
Implement the MVE integer min/max across vector insns
2
VMAXV, VMINV, VMAXAV and VMINAV, which find the maximum (or
3
minimum) of the vector elements and a general purpose register,
4
and store the result back into the general purpose
5
register.
6
1
7
These insns overlap with VRMLALDAVH (they use what would
8
be RdaHi=0b110).
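
A rough standalone sketch of the reduction for signed bytes (not the QEMU helper: lane predication is omitted and the names are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    static int32_t vmaxv_s8_sketch(int32_t rda, const int8_t *q)
    {
        int8_t acc = (int8_t)rda;          /* only the low 8 bits participate */
        for (int e = 0; e < 16; e++) {
            if (q[e] > acc) {
                acc = q[e];
            }
        }
        return acc;                        /* written back to the GPR */
    }

    int main(void)
    {
        int8_t q[16] = { -5, 3, 12, -100, 7, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
        printf("%d\n", vmaxv_s8_sketch(-20, q));   /* prints 12 */
        return 0;
    }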
9
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
target/arm/helper-mve.h | 20 ++++++++++++
14
target/arm/mve.decode | 18 +++++++++--
15
target/arm/mve_helper.c | 66 ++++++++++++++++++++++++++++++++++++++
16
target/arm/translate-mve.c | 48 +++++++++++++++++++++++++++
17
4 files changed, 150 insertions(+), 2 deletions(-)
18
19
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/helper-mve.h
22
+++ b/target/arm/helper-mve.h
23
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
24
DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
25
DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
26
27
+DEF_HELPER_FLAGS_3(mve_vmaxvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
28
+DEF_HELPER_FLAGS_3(mve_vmaxvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
29
+DEF_HELPER_FLAGS_3(mve_vmaxvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
30
+DEF_HELPER_FLAGS_3(mve_vmaxvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
31
+DEF_HELPER_FLAGS_3(mve_vmaxvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
32
+DEF_HELPER_FLAGS_3(mve_vmaxvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
33
+DEF_HELPER_FLAGS_3(mve_vmaxavb, TCG_CALL_NO_WG, i32, env, ptr, i32)
34
+DEF_HELPER_FLAGS_3(mve_vmaxavh, TCG_CALL_NO_WG, i32, env, ptr, i32)
35
+DEF_HELPER_FLAGS_3(mve_vmaxavw, TCG_CALL_NO_WG, i32, env, ptr, i32)
36
+
37
+DEF_HELPER_FLAGS_3(mve_vminvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
38
+DEF_HELPER_FLAGS_3(mve_vminvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
39
+DEF_HELPER_FLAGS_3(mve_vminvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
40
+DEF_HELPER_FLAGS_3(mve_vminvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
41
+DEF_HELPER_FLAGS_3(mve_vminvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
42
+DEF_HELPER_FLAGS_3(mve_vminvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
43
+DEF_HELPER_FLAGS_3(mve_vminavb, TCG_CALL_NO_WG, i32, env, ptr, i32)
44
+DEF_HELPER_FLAGS_3(mve_vminavh, TCG_CALL_NO_WG, i32, env, ptr, i32)
45
+DEF_HELPER_FLAGS_3(mve_vminavw, TCG_CALL_NO_WG, i32, env, ptr, i32)
46
+
47
DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
48
DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
49
50
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/mve.decode
53
+++ b/target/arm/mve.decode
54
@@ -XXX,XX +XXX,XX @@
55
&vcmp qm qn size mask
56
&vcmp_scalar qn rm size mask
57
&shl_scalar qda rm size
58
+&vmaxv qm rda size
59
60
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
61
# Note that both Rn and Qd are 3 bits only (no D bit)
62
@@ -XXX,XX +XXX,XX @@
63
@vcmp_scalar .... .... .. size:2 qn:3 . .... .... .... rm:4 &vcmp_scalar \
64
mask=%mask_22_13
65
66
+@vmaxv .... .... .... size:2 .. rda:4 .... .... .... &vmaxv qm=%qm
67
+
68
# Vector loads and stores
69
70
# Widening loads and narrowing stores:
71
@@ -XXX,XX +XXX,XX @@ VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
72
73
VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
74
75
-VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
76
-VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
77
+{
78
+ VMAXV_S 1110 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
79
+ VMINV_S 1110 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
80
+ VMAXAV 1110 1110 1110 .. 00 .... 1111 0 0 . 0 ... 0 @vmaxv
81
+ VMINAV 1110 1110 1110 .. 00 .... 1111 1 0 . 0 ... 0 @vmaxv
82
+ VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
83
+}
84
+
85
+{
86
+ VMAXV_U 1111 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
87
+ VMINV_U 1111 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
88
+ VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
89
+}
90
91
VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
92
93
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/mve_helper.c
96
+++ b/target/arm/mve_helper.c
97
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvub, 1, uint8_t)
98
DO_VADDV(vaddvuh, 2, uint16_t)
99
DO_VADDV(vaddvuw, 4, uint32_t)
100
101
+/*
102
+ * Vector max/min across vector. Unlike VADDV, we must
103
+ * read ra as the element size, not its full width.
104
+ * We work with int64_t internally for simplicity.
105
+ */
106
+#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN) \
107
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
108
+ uint32_t ra_in) \
109
+ { \
110
+ uint16_t mask = mve_element_mask(env); \
111
+ unsigned e; \
112
+ TYPE *m = vm; \
113
+ int64_t ra = (RATYPE)ra_in; \
114
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
115
+ if (mask & 1) { \
116
+ ra = FN(ra, m[H##ESIZE(e)]); \
117
+ } \
118
+ } \
119
+ mve_advance_vpt(env); \
120
+ return ra; \
121
+ } \
122
+
123
+#define DO_VMAXMINV_U(INSN, FN) \
124
+ DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN) \
125
+ DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN) \
126
+ DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
127
+#define DO_VMAXMINV_S(INSN, FN) \
128
+ DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN) \
129
+ DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN) \
130
+ DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)
131
+
132
+/*
133
+ * Helpers for max and min of absolute values across vector:
134
+ * note that we only take the absolute value of 'm', not 'n'
135
+ */
136
+static int64_t do_maxa(int64_t n, int64_t m)
137
+{
138
+ if (m < 0) {
139
+ m = -m;
140
+ }
141
+ return MAX(n, m);
142
+}
143
+
144
+static int64_t do_mina(int64_t n, int64_t m)
145
+{
146
+ if (m < 0) {
147
+ m = -m;
148
+ }
149
+ return MIN(n, m);
150
+}
151
+
152
+DO_VMAXMINV_S(vmaxvs, DO_MAX)
153
+DO_VMAXMINV_U(vmaxvu, DO_MAX)
154
+DO_VMAXMINV_S(vminvs, DO_MIN)
155
+DO_VMAXMINV_U(vminvu, DO_MIN)
156
+/*
157
+ * VMAXAV, VMINAV treat the general purpose input as unsigned
158
+ * and the vector elements as signed.
159
+ */
160
+DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
161
+DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
162
+DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
163
+DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
164
+DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
165
+DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)
166
+
167
#define DO_VADDLV(OP, TYPE, LTYPE) \
168
uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
169
uint64_t ra) \
170
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/target/arm/translate-mve.c
173
+++ b/target/arm/translate-mve.c
174
@@ -XXX,XX +XXX,XX @@ DO_VCMP(VCMPGE, vcmpge)
175
DO_VCMP(VCMPLT, vcmplt)
176
DO_VCMP(VCMPGT, vcmpgt)
177
DO_VCMP(VCMPLE, vcmple)
178
+
179
+static bool do_vmaxv(DisasContext *s, arg_vmaxv *a, MVEGenVADDVFn fn)
180
+{
181
+ /*
182
+ * MIN/MAX operations across a vector: compute the min or
183
+ * max of the initial value in a general purpose register
184
+ * and all the elements in the vector, and store it back
185
+ * into the general purpose register.
186
+ */
187
+ TCGv_ptr qm;
188
+ TCGv_i32 rda;
189
+
190
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qm) ||
191
+ !fn || a->rda == 13 || a->rda == 15) {
192
+ /* Rda cases are UNPREDICTABLE */
193
+ return false;
194
+ }
195
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
196
+ return true;
197
+ }
198
+
199
+ qm = mve_qreg_ptr(a->qm);
200
+ rda = load_reg(s, a->rda);
201
+ fn(rda, cpu_env, qm, rda);
202
+ store_reg(s, a->rda, rda);
203
+ tcg_temp_free_ptr(qm);
204
+ mve_update_eci(s);
205
+ return true;
206
+}
207
+
208
+#define DO_VMAXV(INSN, FN) \
209
+ static bool trans_##INSN(DisasContext *s, arg_vmaxv *a) \
210
+ { \
211
+ static MVEGenVADDVFn * const fns[] = { \
212
+ gen_helper_mve_##FN##b, \
213
+ gen_helper_mve_##FN##h, \
214
+ gen_helper_mve_##FN##w, \
215
+ NULL, \
216
+ }; \
217
+ return do_vmaxv(s, a, fns[a->size]); \
218
+ }
219
+
220
+DO_VMAXV(VMAXV_S, vmaxvs)
221
+DO_VMAXV(VMAXV_U, vmaxvu)
222
+DO_VMAXV(VMAXAV, vmaxav)
223
+DO_VMAXV(VMINV_S, vminvs)
224
+DO_VMAXV(VMINV_U, vminvu)
225
+DO_VMAXV(VMINAV, vminav)
226
--
227
2.20.1
228
229
Deleted patch
Implement the MVE VABAV insn, which computes absolute differences
2
between elements of two vectors and accumulates the result into
3
a general purpose register.
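
A rough standalone sketch for signed bytes (not the QEMU helper: lane predication is omitted and the names are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vabav_s8_sketch(uint32_t ra, const int8_t *n, const int8_t *m)
    {
        for (int e = 0; e < 16; e++) {
            int32_t d = (int32_t)n[e] - m[e];
            ra += d >= 0 ? d : -d;         /* accumulate |n[e] - m[e]| */
        }
        return ra;
    }

    int main(void)
    {
        int8_t n[16] = { 10, -3, 0, 5 };   /* remaining elements are zero */
        int8_t m[16] = {  7,  4, 0, 9 };
        printf("%u\n", vabav_s8_sketch(100, n, m));   /* 100 + 3 + 7 + 0 + 4 = 114 */
        return 0;
    }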
4
1
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/arm/helper-mve.h | 7 +++++++
9
target/arm/mve.decode | 6 ++++++
10
target/arm/mve_helper.c | 26 +++++++++++++++++++++++
11
target/arm/translate-mve.c | 43 ++++++++++++++++++++++++++++++++++++++
12
4 files changed, 82 insertions(+)
13
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
17
+++ b/target/arm/helper-mve.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vminavw, TCG_CALL_NO_WG, i32, env, ptr, i32)
19
DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
20
DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
21
22
+DEF_HELPER_FLAGS_4(mve_vabavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
23
+DEF_HELPER_FLAGS_4(mve_vabavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
24
+DEF_HELPER_FLAGS_4(mve_vabavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
25
+DEF_HELPER_FLAGS_4(mve_vabavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
26
+DEF_HELPER_FLAGS_4(mve_vabavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
27
+DEF_HELPER_FLAGS_4(mve_vabavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
28
+
29
DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
30
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
31
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
32
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/mve.decode
35
+++ b/target/arm/mve.decode
36
@@ -XXX,XX +XXX,XX @@
37
&vcmp_scalar qn rm size mask
38
&shl_scalar qda rm size
39
&vmaxv qm rda size
40
+&vabav qn qm rda size
41
42
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
43
# Note that both Rn and Qd are 3 bits only (no D bit)
44
@@ -XXX,XX +XXX,XX @@ VMLAS 111- 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
45
rdahi=%rdahi rdalo=%rdalo
46
}
47
48
+@vabav .... .... .. size:2 .... rda:4 .... .... .... &vabav qn=%qn qm=%qm
49
+
50
+VABAV_S 111 0 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav
51
+VABAV_U 111 1 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav
52
+
53
# Logical immediate operations (1 reg and modified-immediate)
54
55
# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
56
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/mve_helper.c
59
+++ b/target/arm/mve_helper.c
60
@@ -XXX,XX +XXX,XX @@ DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
61
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
62
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)
63
64
+#define DO_VABAV(OP, ESIZE, TYPE) \
65
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
66
+ void *vm, uint32_t ra) \
67
+ { \
68
+ uint16_t mask = mve_element_mask(env); \
69
+ unsigned e; \
70
+ TYPE *m = vm, *n = vn; \
71
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
72
+ if (mask & 1) { \
73
+ int64_t n0 = n[H##ESIZE(e)]; \
74
+ int64_t m0 = m[H##ESIZE(e)]; \
75
+ uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0); \
76
+ ra += r; \
77
+ } \
78
+ } \
79
+ mve_advance_vpt(env); \
80
+ return ra; \
81
+ }
82
+
83
+DO_VABAV(vabavsb, 1, int8_t)
84
+DO_VABAV(vabavsh, 2, int16_t)
85
+DO_VABAV(vabavsw, 4, int32_t)
86
+DO_VABAV(vabavub, 1, uint8_t)
87
+DO_VABAV(vabavuh, 2, uint16_t)
88
+DO_VABAV(vabavuw, 4, uint32_t)
89
+
90
#define DO_VADDLV(OP, TYPE, LTYPE) \
91
uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
92
uint64_t ra) \
93
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate-mve.c
96
+++ b/target/arm/translate-mve.c
97
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
98
typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
99
typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
100
typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
101
+typedef void MVEGenVABAVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
102
103
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
104
static inline long mve_qreg_offset(unsigned reg)
105
@@ -XXX,XX +XXX,XX @@ DO_VMAXV(VMAXAV, vmaxav)
106
DO_VMAXV(VMINV_S, vminvs)
107
DO_VMAXV(VMINV_U, vminvu)
108
DO_VMAXV(VMINAV, vminav)
109
+
110
+static bool do_vabav(DisasContext *s, arg_vabav *a, MVEGenVABAVFn *fn)
111
+{
112
+ /* Absolute difference accumulated across vector */
113
+ TCGv_ptr qn, qm;
114
+ TCGv_i32 rda;
115
+
116
+ if (!dc_isar_feature(aa32_mve, s) ||
117
+ !mve_check_qreg_bank(s, a->qm | a->qn) ||
118
+ !fn || a->rda == 13 || a->rda == 15) {
119
+ /* Rda cases are UNPREDICTABLE */
120
+ return false;
121
+ }
122
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
123
+ return true;
124
+ }
125
+
126
+ qm = mve_qreg_ptr(a->qm);
127
+ qn = mve_qreg_ptr(a->qn);
128
+ rda = load_reg(s, a->rda);
129
+ fn(rda, cpu_env, qn, qm, rda);
130
+ store_reg(s, a->rda, rda);
131
+ tcg_temp_free_ptr(qm);
132
+ tcg_temp_free_ptr(qn);
133
+ mve_update_eci(s);
134
+ return true;
135
+}
136
+
137
+#define DO_VABAV(INSN, FN) \
138
+ static bool trans_##INSN(DisasContext *s, arg_vabav *a) \
139
+ { \
140
+ static MVEGenVABAVFn * const fns[] = { \
141
+ gen_helper_mve_##FN##b, \
142
+ gen_helper_mve_##FN##h, \
143
+ gen_helper_mve_##FN##w, \
144
+ NULL, \
145
+ }; \
146
+ return do_vabav(s, a, fns[a->size]); \
147
+ }
148
+
149
+DO_VABAV(VABAV_S, vabavs)
150
+DO_VABAV(VABAV_U, vabavu)
151
--
152
2.20.1
153
154