The following changes since commit 5a67d7735d4162630769ef495cf813244fc850df:

  Merge remote-tracking branch 'remotes/berrange-gitlab/tags/tls-deps-pull-request' into staging (2021-07-02 08:22:39 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210702

for you to fetch changes up to 04ea4d3cfd0a21b248ece8eb7a9436a3d9898dd8:

  target/arm: Implement MVE shifts by register (2021-07-02 11:48:38 +0100)

----------------------------------------------------------------
target-arm queue:
 * more MVE instructions
 * hw/gpio/gpio_pwr: use shutdown function for reboot
 * target/arm: Check NaN mode before silencing NaN
 * tests: Boot and halt a Linux guest on the Raspberry Pi 2 machine
 * hw/arm: Add basic power management to raspi.
 * docs/system/arm: Add quanta-gbs-bmc, quanta-q7l1-bmc

----------------------------------------------------------------
Joe Komlodi (1):
      target/arm: Check NaN mode before silencing NaN

Maxim Uvarov (1):
      hw/gpio/gpio_pwr: use shutdown function for reboot

Nolan Leake (1):
      hw/arm: Add basic power management to raspi.

Patrick Venture (2):
      docs/system/arm: Add quanta-q7l1-bmc reference
      docs/system/arm: Add quanta-gbs-bmc reference

Peter Maydell (18):
      target/arm: Fix MVE widening/narrowing VLDR/VSTR offset calculation
      target/arm: Fix bugs in MVE VRMLALDAVH, VRMLSLDAVH
      target/arm: Make asimd_imm_const() public
      target/arm: Use asimd_imm_const for A64 decode
      target/arm: Use dup_const() instead of bitfield_replicate()
      target/arm: Implement MVE logical immediate insns
      target/arm: Implement MVE vector shift left by immediate insns
      target/arm: Implement MVE vector shift right by immediate insns
      target/arm: Implement MVE VSHLL
      target/arm: Implement MVE VSRI, VSLI
      target/arm: Implement MVE VSHRN, VRSHRN
      target/arm: Implement MVE saturating narrowing shifts
      target/arm: Implement MVE VSHLC
      target/arm: Implement MVE VADDLV
      target/arm: Implement MVE long shifts by immediate
      target/arm: Implement MVE long shifts by register
      target/arm: Implement MVE shifts by immediate
      target/arm: Implement MVE shifts by register

Philippe Mathieu-Daudé (1):
      tests: Boot and halt a Linux guest on the Raspberry Pi 2 machine

 docs/system/arm/aspeed.rst             |   1 +
 docs/system/arm/nuvoton.rst            |   5 +-
 include/hw/arm/bcm2835_peripherals.h   |   3 +-
 include/hw/misc/bcm2835_powermgt.h     |  29 ++
 target/arm/helper-mve.h                | 108 +++++++
 target/arm/translate.h                 |  41 +++
 target/arm/mve.decode                  | 177 ++++++++++-
 target/arm/t32.decode                  |  71 ++++-
 hw/arm/bcm2835_peripherals.c           |  13 +-
 hw/gpio/gpio_pwr.c                     |   2 +-
 hw/misc/bcm2835_powermgt.c             | 160 ++++++++++
 target/arm/helper-a64.c                |  12 +-
 target/arm/mve_helper.c                | 524 +++++++++++++++++++++++++++++++--
 target/arm/translate-a64.c             |  86 +-----
 target/arm/translate-mve.c             | 261 +++++++++++++++-
 target/arm/translate-neon.c            |  81 -----
 target/arm/translate.c                 | 327 +++++++++++++++++++-
 target/arm/vfp_helper.c                |  24 +-
 hw/misc/meson.build                    |   1 +
 tests/acceptance/boot_linux_console.py |  43 +++
 20 files changed, 1760 insertions(+), 209 deletions(-)
 create mode 100644 include/hw/misc/bcm2835_powermgt.h
 create mode 100644 hw/misc/bcm2835_powermgt.c
From: Patrick Venture <venture@google.com>

Adds a line-item reference to the supported quanta-q71l-bmc aspeed
entry.

Signed-off-by: Patrick Venture <venture@google.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20210615192848.1065297-2-venture@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/aspeed.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/aspeed.rst
+++ b/docs/system/arm/aspeed.rst
@@ -XXX,XX +XXX,XX @@ etc.
 AST2400 SoC based machines :
 
 - ``palmetto-bmc`` OpenPOWER Palmetto POWER8 BMC
+- ``quanta-q71l-bmc`` OpenBMC Quanta BMC
 
 AST2500 SoC based machines :
--
2.20.1

From: Patrick Venture <venture@google.com>

Add line item reference to quanta-gbs-bmc machine.

Signed-off-by: Patrick Venture <venture@google.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20210615192848.1065297-3-venture@google.com
[PMM: fixed underline Sphinx warning]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/nuvoton.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/nuvoton.rst
+++ b/docs/system/arm/nuvoton.rst
@@ -XXX,XX +XXX,XX @@
-Nuvoton iBMC boards (``npcm750-evb``, ``quanta-gsj``)
-=====================================================
+Nuvoton iBMC boards (``*-bmc``, ``npcm750-evb``, ``quanta-gsj``)
+================================================================
 
 The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are
 designed to be used as Baseboard Management Controllers (BMCs) in various
@@ -XXX,XX +XXX,XX @@ segment. The following machines are based on this chip :
 The NPCM730 SoC has two Cortex-A9 cores and is targeted for Data Center and
 Hyperscale applications. The following machines are based on this chip :
 
+- ``quanta-gbs-bmc`` Quanta GBS server BMC
 - ``quanta-gsj`` Quanta GSJ server BMC
 
 There are also two more SoCs, NPCM710 and NPCM705, which are single-core
--
2.20.1

From: Nolan Leake <nolan@sigbus.net>

This is just enough to make reboot and poweroff work. Works for
linux, u-boot, and the arm trusted firmware. Not tested, but should
work for plan9, and bare-metal/hobby OSes, since they seem to generally
do what linux does for reset.

The watchdog timer functionality is not yet implemented.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/64
Signed-off-by: Nolan Leake <nolan@sigbus.net>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210625210209.1870217-1-nolan@sigbus.net
[PMM: tweaked commit title; fixed region size to 0x200;
 moved header file to include/]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/bcm2835_peripherals.h |   3 +-
 include/hw/misc/bcm2835_powermgt.h   |  29 +++++
 hw/arm/bcm2835_peripherals.c         |  13 ++-
 hw/misc/bcm2835_powermgt.c           | 160 +++++++++++++++++++++
 hw/misc/meson.build                  |   1 +
 5 files changed, 204 insertions(+), 2 deletions(-)
 create mode 100644 include/hw/misc/bcm2835_powermgt.h
 create mode 100644 hw/misc/bcm2835_powermgt.c

diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/misc/bcm2835_mphi.h"
 #include "hw/misc/bcm2835_thermal.h"
 #include "hw/misc/bcm2835_cprman.h"
+#include "hw/misc/bcm2835_powermgt.h"
 #include "hw/sd/sdhci.h"
 #include "hw/sd/bcm2835_sdhost.h"
 #include "hw/gpio/bcm2835_gpio.h"
@@ -XXX,XX +XXX,XX @@ struct BCM2835PeripheralState {
     BCM2835MphiState mphi;
     UnimplementedDeviceState txp;
     UnimplementedDeviceState armtmr;
-    UnimplementedDeviceState powermgt;
+    BCM2835PowerMgtState powermgt;
     BCM2835CprmanState cprman;
     PL011State uart0;
     BCM2835AuxState aux;
diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/hw/misc/bcm2835_powermgt.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * BCM2835 Power Management emulation
+ *
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BCM2835_POWERMGT_H
+#define BCM2835_POWERMGT_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT)
+
+struct BCM2835PowerMgtState {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+
+    uint32_t rstc;
+    uint32_t rsts;
+    uint32_t wdog;
+};
+
+#endif
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/bcm2835_peripherals.c
+++ b/hw/arm/bcm2835_peripherals.c
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)
 
     object_property_add_const_link(OBJECT(&s->dwc2), "dma-mr",
                                    OBJECT(&s->gpu_bus_mr));
+
+    /* Power Management */
+    object_initialize_child(obj, "powermgt", &s->powermgt,
+                            TYPE_BCM2835_POWERMGT);
 }
 
 static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
                 qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
                                        INTERRUPT_USB));
 
+    /* Power Management */
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->powermgt), errp)) {
+        return;
+    }
+
+    memory_region_add_subregion(&s->peri_mr, PM_OFFSET,
+                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0));
+
     create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000);
     create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40);
-    create_unimp(s, &s->powermgt, "bcm2835-powermgt", PM_OFFSET, 0x114);
     create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100);
     create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100);
     create_unimp(s, &s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20);
diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/misc/bcm2835_powermgt.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * BCM2835 Power Management emulation
+ *
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/misc/bcm2835_powermgt.h"
+#include "migration/vmstate.h"
+#include "sysemu/runstate.h"
+
+#define PASSWORD 0x5a000000
+#define PASSWORD_MASK 0xff000000
+
+#define R_RSTC 0x1c
+#define V_RSTC_RESET 0x20
+#define R_RSTS 0x20
+#define V_RSTS_POWEROFF 0x555 /* Linux uses partition 63 to indicate halt. */
+#define R_WDOG 0x24
+
+static uint64_t bcm2835_powermgt_read(void *opaque, hwaddr offset,
+                                      unsigned size)
+{
+    BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
+    uint32_t res = 0;
+
+    switch (offset) {
+    case R_RSTC:
+        res = s->rstc;
+        break;
+    case R_RSTS:
+        res = s->rsts;
+        break;
+    case R_WDOG:
+        res = s->wdog;
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_read: Unknown offset 0x%08"HWADDR_PRIx
+                      "\n", offset);
+        res = 0;
+        break;
+    }
+
+    return res;
+}
+
+static void bcm2835_powermgt_write(void *opaque, hwaddr offset,
+                                   uint64_t value, unsigned size)
+{
+    BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
+
+    if ((value & PASSWORD_MASK) != PASSWORD) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "bcm2835_powermgt_write: Bad password 0x%"PRIx64
+                      " at offset 0x%08"HWADDR_PRIx"\n",
+                      value, offset);
+        return;
+    }
+
+    value = value & ~PASSWORD_MASK;
+
+    switch (offset) {
+    case R_RSTC:
+        s->rstc = value;
+        if (value & V_RSTC_RESET) {
+            if ((s->rsts & 0xfff) == V_RSTS_POWEROFF) {
+                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+            } else {
+                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+            }
+        }
+        break;
+    case R_RSTS:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: RSTS\n");
+        s->rsts = value;
+        break;
+    case R_WDOG:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: WDOG\n");
+        s->wdog = value;
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: Unknown offset 0x%08"HWADDR_PRIx
+                      "\n", offset);
+        break;
+    }
+}
+
+static const MemoryRegionOps bcm2835_powermgt_ops = {
+    .read = bcm2835_powermgt_read,
+    .write = bcm2835_powermgt_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl.min_access_size = 4,
+    .impl.max_access_size = 4,
+};
+
+static const VMStateDescription vmstate_bcm2835_powermgt = {
+    .name = TYPE_BCM2835_POWERMGT,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(rstc, BCM2835PowerMgtState),
+        VMSTATE_UINT32(rsts, BCM2835PowerMgtState),
+        VMSTATE_UINT32(wdog, BCM2835PowerMgtState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void bcm2835_powermgt_init(Object *obj)
+{
+    BCM2835PowerMgtState *s = BCM2835_POWERMGT(obj);
+
+    memory_region_init_io(&s->iomem, obj, &bcm2835_powermgt_ops, s,
+                          TYPE_BCM2835_POWERMGT, 0x200);
+    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+}
+
+static void bcm2835_powermgt_reset(DeviceState *dev)
+{
+    BCM2835PowerMgtState *s = BCM2835_POWERMGT(dev);
+
+    /* https://elinux.org/BCM2835_registers#PM */
+    s->rstc = 0x00000102;
+    s->rsts = 0x00001000;
+    s->wdog = 0x00000000;
+}
+
+static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = bcm2835_powermgt_reset;
+    dc->vmsd = &vmstate_bcm2835_powermgt;
+}
+
+static TypeInfo bcm2835_powermgt_info = {
+    .name = TYPE_BCM2835_POWERMGT,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(BCM2835PowerMgtState),
+    .class_init = bcm2835_powermgt_class_init,
+    .instance_init = bcm2835_powermgt_init,
+};
+
+static void bcm2835_powermgt_register_types(void)
+{
+    type_register_static(&bcm2835_powermgt_info);
+}
+
+type_init(bcm2835_powermgt_register_types)
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files(
   'bcm2835_rng.c',
   'bcm2835_thermal.c',
   'bcm2835_cprman.c',
+  'bcm2835_powermgt.c',
 ))
 softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c'))
 softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c', 'zynq-xadc.c'))
--
2.20.1

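For context on how a guest drives the model above: every write must carry
the 0x5a password in its top byte, and a guest signals "poweroff" by
parking the 0x555 "partition 63" value in RSTS before triggering a reset
through RSTC. A minimal bare-metal sketch follows; it is not part of the
series, and the 0x3f100000 base address is an assumption taken from the
raspi2 iomem layout shown in the acceptance-test log later in this queue.

#include <stdint.h>

#define PM_BASE          0x3f100000u   /* assumed PM/watchdog block base on raspi2 */
#define PM_RSTC          (PM_BASE + 0x1c)
#define PM_RSTS          (PM_BASE + 0x20)
#define PM_PASSWORD      0x5a000000u   /* writes without this password are ignored */
#define PM_RSTC_RESET    0x20u
#define PM_RSTS_POWEROFF 0x555u        /* "partition 63" halt convention */

static inline void mmio_write32(uint32_t addr, uint32_t val)
{
    *(volatile uint32_t *)(uintptr_t)addr = val;
}

/* Request a clean shutdown: mark partition 63 in RSTS, then reset. */
void guest_poweroff(void)
{
    mmio_write32(PM_RSTS, PM_PASSWORD | PM_RSTS_POWEROFF);
    mmio_write32(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
}

/* Request a reboot: a reset while RSTS does not hold the poweroff value. */
void guest_reboot(void)
{
    mmio_write32(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
}
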
Deleted patch

Allow code elsewhere in the system to check whether the ACPI GHES
table is present, so it can determine whether it is OK to try to
record an error by calling acpi_ghes_record_errors().

(We don't need to migrate the new 'present' field in AcpiGhesState,
because it is set once at system initialization and doesn't change.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Dongjiu Geng <gengdongjiu1@gmail.com>
Message-id: 20210603171259.27962-3-peter.maydell@linaro.org
---
 include/hw/acpi/ghes.h |  9 +++++++++
 hw/acpi/ghes-stub.c    |  5 +++++
 hw/acpi/ghes.c         | 17 +++++++++++++++++
 3 files changed, 31 insertions(+)

diff --git a/include/hw/acpi/ghes.h b/include/hw/acpi/ghes.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/acpi/ghes.h
+++ b/include/hw/acpi/ghes.h
@@ -XXX,XX +XXX,XX @@ enum {
 
 typedef struct AcpiGhesState {
     uint64_t ghes_addr_le;
+    bool present; /* True if GHES is present at all on this board */
 } AcpiGhesState;
 
 void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker);
@@ -XXX,XX +XXX,XX @@ void acpi_build_hest(GArray *table_data, BIOSLinker *linker,
 void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s,
                           GArray *hardware_errors);
 int acpi_ghes_record_errors(uint8_t notify, uint64_t error_physical_addr);
+
+/**
+ * acpi_ghes_present: Report whether ACPI GHES table is present
+ *
+ * Returns: true if the system has an ACPI GHES table and it is
+ * safe to call acpi_ghes_record_errors() to record a memory error.
+ */
+bool acpi_ghes_present(void);
 #endif
diff --git a/hw/acpi/ghes-stub.c b/hw/acpi/ghes-stub.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/acpi/ghes-stub.c
+++ b/hw/acpi/ghes-stub.c
@@ -XXX,XX +XXX,XX @@ int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
 {
     return -1;
 }
+
+bool acpi_ghes_present(void)
+{
+    return false;
+}
diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/acpi/ghes.c
+++ b/hw/acpi/ghes.c
@@ -XXX,XX +XXX,XX @@ void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s,
     /* Create a read-write fw_cfg file for Address */
     fw_cfg_add_file_callback(s, ACPI_GHES_DATA_ADDR_FW_CFG_FILE, NULL, NULL,
         NULL, &(ags->ghes_addr_le), sizeof(ags->ghes_addr_le), false);
+
+    ags->present = true;
 }
 
 int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
@@ -XXX,XX +XXX,XX @@ int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
 
     return ret;
 }
+
+bool acpi_ghes_present(void)
+{
+    AcpiGedState *acpi_ged_state;
+    AcpiGhesState *ags;
+
+    acpi_ged_state = ACPI_GED(object_resolve_path_type("", TYPE_ACPI_GED,
+                                                       NULL));
+
+    if (!acpi_ged_state) {
+        return false;
+    }
+    ags = &acpi_ged_state->ghes_state;
+    return ags->present;
+}
--
2.20.1

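The intended call pattern for boards and accelerators is to guard the
error-recording call behind this predicate. A hedged sketch of such a
call site follows (report_memory_error() is a hypothetical function;
ACPI_HEST_SRC_ID_SEA is assumed to be the error-source id declared in
ghes.h, and the real consumer is the kvm64.c change in the next patch):

#include "qemu/osdep.h"
#include "hw/acpi/ghes.h"

/* Record a guest memory error only if a GHES table was actually built;
 * otherwise acpi_ghes_record_errors() would have nowhere to write. */
static void report_memory_error(uint64_t paddr)
{
    if (acpi_ghes_present()) {
        acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr);
    }
}
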
Deleted patch

The virt_is_acpi_enabled() function is specific to the virt board, as
is the check for its 'ras' property. Use the new acpi_ghes_present()
function to check whether we should report memory errors via
acpi_ghes_record_errors().

This avoids a link error if QEMU was built without support for the
virt board, and provides a mechanism that can be used by any future
board models that want to add ACPI memory error reporting support
(they only need to call acpi_ghes_add_fw_cfg()).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Dongjiu Geng <gengdongjiu1@gmail.com>
Message-id: 20210603171259.27962-4-peter.maydell@linaro.org
---
 target/arm/kvm64.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
 {
     ram_addr_t ram_addr;
     hwaddr paddr;
-    Object *obj = qdev_get_machine();
-    VirtMachineState *vms = VIRT_MACHINE(obj);
-    bool acpi_enabled = virt_is_acpi_enabled(vms);
 
     assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
 
-    if (acpi_enabled && addr &&
-        object_property_get_bool(obj, "ras", NULL)) {
+    if (acpi_ghes_present() && addr) {
         ram_addr = qemu_ram_addr_from_host(addr);
         if (ram_addr != RAM_ADDR_INVALID &&
             kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
--
2.20.1

Deleted patch

These days the Arm architecture has a wide range of fine-grained
optional extra architectural features. We implement quite a lot
of these but by no means all of them. Document what we do implement,
so that users can find out without having to dig through back-issues
of our Changelog on the wiki.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20210617140328.28622-1-peter.maydell@linaro.org
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 docs/system/arm/emulation.rst | 102 ++++++++++++++++++++++++++++++++++
 docs/system/target-arm.rst    |   6 ++
 2 files changed, 108 insertions(+)
 create mode 100644 docs/system/arm/emulation.rst

diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/docs/system/arm/emulation.rst
@@ -XXX,XX +XXX,XX @@
+A-profile CPU architecture support
+==================================
+
+QEMU's TCG emulation includes support for the Armv5, Armv6, Armv7 and
+Armv8 versions of the A-profile architecture. It also has support for
+the following architecture extensions:
+
+- FEAT_AA32BF16 (AArch32 BFloat16 instructions)
+- FEAT_AA32HPD (AArch32 hierarchical permission disables)
+- FEAT_AA32I8MM (AArch32 Int8 matrix multiplication instructions)
+- FEAT_AES (AESD and AESE instructions)
+- FEAT_BF16 (AArch64 BFloat16 instructions)
+- FEAT_BTI (Branch Target Identification)
+- FEAT_DIT (Data Independent Timing instructions)
+- FEAT_DPB (DC CVAP instruction)
+- FEAT_DotProd (Advanced SIMD dot product instructions)
+- FEAT_FCMA (Floating-point complex number instructions)
+- FEAT_FHM (Floating-point half-precision multiplication instructions)
+- FEAT_FP16 (Half-precision floating-point data processing)
+- FEAT_FRINTTS (Floating-point to integer instructions)
+- FEAT_FlagM (Flag manipulation instructions v2)
+- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
+- FEAT_HPDS (Hierarchical permission disables)
+- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
+- FEAT_JSCVT (JavaScript conversion instructions)
+- FEAT_LOR (Limited ordering regions)
+- FEAT_LRCPC (Load-acquire RCpc instructions)
+- FEAT_LRCPC2 (Load-acquire RCpc instructions v2)
+- FEAT_LSE (Large System Extensions)
+- FEAT_MTE (Memory Tagging Extension)
+- FEAT_MTE2 (Memory Tagging Extension)
+- FEAT_PAN (Privileged access never)
+- FEAT_PAN2 (AT S1E1R and AT S1E1W instruction variants affected by PSTATE.PAN)
+- FEAT_PAuth (Pointer authentication)
+- FEAT_PMULL (PMULL, PMULL2 instructions)
+- FEAT_PMUv3p1 (PMU Extensions v3.1)
+- FEAT_PMUv3p4 (PMU Extensions v3.4)
+- FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
+- FEAT_RNG (Random number generator)
+- FEAT_SB (Speculation Barrier)
+- FEAT_SEL2 (Secure EL2)
+- FEAT_SHA1 (SHA1 instructions)
+- FEAT_SHA256 (SHA256 instructions)
+- FEAT_SHA3 (Advanced SIMD SHA3 instructions)
+- FEAT_SHA512 (Advanced SIMD SHA512 instructions)
+- FEAT_SM3 (Advanced SIMD SM3 instructions)
+- FEAT_SM4 (Advanced SIMD SM4 instructions)
+- FEAT_SPECRES (Speculation restriction instructions)
+- FEAT_SSBS (Speculative Store Bypass Safe)
+- FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
+- FEAT_TLBIRANGE (TLB invalidate range instructions)
+- FEAT_TTCNP (Translation table Common not private translations)
+- FEAT_TTST (Small translation tables)
+- FEAT_UAO (Unprivileged Access Override control)
+- FEAT_VHE (Virtualization Host Extensions)
+- FEAT_VMID16 (16-bit VMID)
+- FEAT_XNX (Translation table stage 2 Unprivileged Execute-never)
+- SVE (The Scalable Vector Extension)
+- SVE2 (The Scalable Vector Extension v2)
+
+For information on the specifics of these extensions, please refer
+to the `Armv8-A Arm Architecture Reference Manual
+<https://developer.arm.com/documentation/ddi0487/latest>`_.
+
+When a specific named CPU is being emulated, only those features which
+are present in hardware for that CPU are emulated. (If a feature is
+not in the list above then it is not supported, even if the real
+hardware should have it.) The ``max`` CPU enables all features.
+
+R-profile CPU architecture support
+==================================
+
+QEMU's TCG emulation support for R-profile CPUs is currently limited.
+We emulate only the Cortex-R5 and Cortex-R5F CPUs.
+
+M-profile CPU architecture support
+==================================
+
+QEMU's TCG emulation includes support for Armv6-M, Armv7-M, Armv8-M, and
+Armv8.1-M versions of the M-profile architecture. It also has support
+for the following architecture extensions:
+
+- FP (Floating-point Extension)
+- FPCXT (FPCXT access instructions)
+- HP (Half-precision floating-point instructions)
+- LOB (Low Overhead loops and Branch future)
+- M (Main Extension)
+- MPU (Memory Protection Unit Extension)
+- PXN (Privileged Execute Never)
+- RAS (Reliability, Serviceability and Availability): "minimum RAS Extension" only
+- S (Security Extension)
+- ST (System Timer Extension)
+
+For information on the specifics of these extensions, please refer
+to the `Armv8-M Arm Architecture Reference Manual
+<https://developer.arm.com/documentation/ddi0553/latest>`_.
+
+When a specific named CPU is being emulated, only those features which
+are present in hardware for that CPU are emulated. (If a feature is
+not in the list above then it is not supported, even if the real
+hardware should have it.) There is no equivalent of the ``max`` CPU for
+M-profile.
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/target-arm.rst
+++ b/docs/system/target-arm.rst
@@ -XXX,XX +XXX,XX @@ undocumented; you can get a complete list by running
    arm/virt
    arm/xlnx-versal-virt
 
+Emulated CPU architecture support
+=================================
+
+.. toctree::
+   arm/emulation
+
 Arm CPU features
 ================
 
--
2.20.1

Deleted patch

In the code for handling VFP system register accesses there is some
stray whitespace after a unary '-' operator, and also some incorrect
indent in a couple of function prototypes. We're about to move this
code to another file, so fix the code style issues first so
checkpatch doesn't complain about the code-movement patch.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210618141019.10671-2-peter.maydell@linaro.org
---
 target/arm/translate-vfp.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
 }
 
 static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
-
                                   fp_sysreg_loadfn *loadfn,
-                                 void *opaque)
+                                  void *opaque)
 {
     /* Do a write to an M-profile floating point system register */
     TCGv_i32 tmp;
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
 }
 
 static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
-                                fp_sysreg_storefn *storefn,
-                                void *opaque)
+                                 fp_sysreg_storefn *storefn,
+                                 void *opaque)
 {
     /* Do a read from an M-profile floating point system register */
     TCGv_i32 tmp;
@@ -XXX,XX +XXX,XX @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
     TCGv_i32 addr;
 
     if (!a->a) {
-        offset = - offset;
+        offset = -offset;
     }
 
     addr = load_reg(s, a->rn);
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
     TCGv_i32 value = tcg_temp_new_i32();
 
     if (!a->a) {
-        offset = - offset;
+        offset = -offset;
     }
 
     addr = load_reg(s, a->rn);
--
2.20.1

Deleted patch

If the guest makes an FPCXT_NS access when the FPU is disabled,
one of two things happens:
 * if there is no active FP context, then the insn behaves the
   same way as if the FPU was enabled: writes ignored, reads
   same value as FPDSCR_NS
 * if there is an active FP context, then we take a NOCP
   exception

Add code to the sysreg read/write functions which emits
code to take the NOCP exception in the latter case.

At the moment this will never be used, because the NOCP checks in
m-nocp.decode happen first, and so the trans functions are never
called when the FPU is disabled. The code will be needed when we
move the sysreg access insns to before the NOCP patterns in the
following commit.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210618141019.10671-3-peter.maydell@linaro.org
---
 target/arm/translate-vfp.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
         lab_end = gen_new_label();
         /* fpInactive case: write is a NOP, so branch to end */
         gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
-        /* !fpInactive: PreserveFPState(), and reads same as FPCXT_S */
+        /*
+         * !fpInactive: if FPU disabled, take NOCP exception;
+         * otherwise PreserveFPState(), and then FPCXT_NS writes
+         * behave the same as FPCXT_S writes.
+         */
+        if (s->fp_excp_el) {
+            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+                               syn_uncategorized(), s->fp_excp_el);
+            /*
+             * This was only a conditional exception, so override
+             * gen_exception_insn()'s default to DISAS_NORETURN
+             */
+            s->base.is_jmp = DISAS_NEXT;
+            break;
+        }
         gen_preserve_fp_state(s);
         /* fall through */
     case ARM_VFP_FPCXT_S:
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
         tcg_gen_br(lab_end);
 
         gen_set_label(lab_active);
-        /* !fpInactive: Reads the same as FPCXT_S, but side effects differ */
+        /*
+         * !fpInactive: if FPU disabled, take NOCP exception;
+         * otherwise PreserveFPState(), and then FPCXT_NS
+         * reads the same as FPCXT_S.
+         */
+        if (s->fp_excp_el) {
+            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+                               syn_uncategorized(), s->fp_excp_el);
+            /*
+             * This was only a conditional exception, so override
+             * gen_exception_insn()'s default to DISAS_NORETURN
+             */
+            s->base.is_jmp = DISAS_NEXT;
+            break;
+        }
         gen_preserve_fp_state(s);
         tmp = tcg_temp_new_i32();
         sfpa = tcg_temp_new_i32();
--
2.20.1

79
diff view generated by jsdifflib
1
From: Alexandre Iooss <erdnaxe@crans.org>
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
2
3
This adds the target guide for BBC Micro:bit.
3
Add a test booting and quickly shutdown a raspi2 machine,
4
to test the power management model:
4
5
5
Information is taken from https://wiki.qemu.org/Features/MicroBit
6
(1/1) tests/acceptance/boot_linux_console.py:BootLinuxConsole.test_arm_raspi2_initrd:
6
and from hw/arm/nrf51_soc.c.
7
console: [ 0.000000] Booting Linux on physical CPU 0xf00
8
console: [ 0.000000] Linux version 4.14.98-v7+ (dom@dom-XPS-13-9370) (gcc version 4.9.3 (crosstool-NG crosstool-ng-1.22.0-88-g8460611)) #1200 SMP Tue Feb 12 20:27:48 GMT 2019
9
console: [ 0.000000] CPU: ARMv7 Processor [410fc075] revision 5 (ARMv7), cr=10c5387d
10
console: [ 0.000000] CPU: div instructions available: patching division code
11
console: [ 0.000000] CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
12
console: [ 0.000000] OF: fdt: Machine model: Raspberry Pi 2 Model B
13
...
14
console: Boot successful.
15
console: cat /proc/cpuinfo
16
console: / # cat /proc/cpuinfo
17
...
18
console: processor : 3
19
console: model name : ARMv7 Processor rev 5 (v7l)
20
console: BogoMIPS : 125.00
21
console: Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm
22
console: CPU implementer : 0x41
23
console: CPU architecture: 7
24
console: CPU variant : 0x0
25
console: CPU part : 0xc07
26
console: CPU revision : 5
27
console: Hardware : BCM2835
28
console: Revision : 0000
29
console: Serial : 0000000000000000
30
console: cat /proc/iomem
31
console: / # cat /proc/iomem
32
console: 00000000-3bffffff : System RAM
33
console: 00008000-00afffff : Kernel code
34
console: 00c00000-00d468ef : Kernel data
35
console: 3f006000-3f006fff : dwc_otg
36
console: 3f007000-3f007eff : /soc/dma@7e007000
37
console: 3f00b880-3f00b8bf : /soc/mailbox@7e00b880
38
console: 3f100000-3f100027 : /soc/watchdog@7e100000
39
console: 3f101000-3f102fff : /soc/cprman@7e101000
40
console: 3f200000-3f2000b3 : /soc/gpio@7e200000
41
PASS (24.59 s)
42
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
43
JOB TIME : 25.02 s
7
44
8
Signed-off-by: Alexandre Iooss <erdnaxe@crans.org>
45
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
46
Reviewed-by: Wainer dos Santos Moschetta <wainersm@redhat.com>
10
Reviewed-by: Joel Stanley <joel@jms.id.au>
47
Message-id: 20210531113837.1689775-1-f4bug@amsat.org
11
Message-id: 20210621075625.540471-1-erdnaxe@crans.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
48
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
49
---
14
docs/system/arm/nrf.rst | 51 ++++++++++++++++++++++++++++++++++++++
50
tests/acceptance/boot_linux_console.py | 43 ++++++++++++++++++++++++++
15
docs/system/target-arm.rst | 1 +
51
1 file changed, 43 insertions(+)
16
MAINTAINERS | 1 +
17
3 files changed, 53 insertions(+)
18
create mode 100644 docs/system/arm/nrf.rst
19
52
20
diff --git a/docs/system/arm/nrf.rst b/docs/system/arm/nrf.rst
53
diff --git a/tests/acceptance/boot_linux_console.py b/tests/acceptance/boot_linux_console.py
21
new file mode 100644
54
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX
55
--- a/tests/acceptance/boot_linux_console.py
23
--- /dev/null
56
+++ b/tests/acceptance/boot_linux_console.py
24
+++ b/docs/system/arm/nrf.rst
25
@@ -XXX,XX +XXX,XX @@
57
@@ -XXX,XX +XXX,XX @@
26
+Nordic nRF boards (``microbit``)
58
from avocado import skip
27
+================================
59
from avocado import skipUnless
60
from avocado_qemu import Test
61
+from avocado_qemu import exec_command
62
from avocado_qemu import exec_command_and_wait_for_pattern
63
from avocado_qemu import interrupt_interactive_console_until_pattern
64
from avocado_qemu import wait_for_console_pattern
65
@@ -XXX,XX +XXX,XX @@ def test_arm_raspi2_uart0(self):
66
"""
67
self.do_test_arm_raspi2(0)
68
69
+ def test_arm_raspi2_initrd(self):
70
+ """
71
+ :avocado: tags=arch:arm
72
+ :avocado: tags=machine:raspi2
73
+ """
74
+ deb_url = ('http://archive.raspberrypi.org/debian/'
75
+ 'pool/main/r/raspberrypi-firmware/'
76
+ 'raspberrypi-kernel_1.20190215-1_armhf.deb')
77
+ deb_hash = 'cd284220b32128c5084037553db3c482426f3972'
78
+ deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
79
+ kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img')
80
+ dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb')
28
+
81
+
29
+The `Nordic nRF`_ chips are a family of ARM-based System-on-Chip that
82
+ initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
30
+are designed to be used for low-power and short-range wireless solutions.
83
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
84
+ 'arm/rootfs-armv7a.cpio.gz')
85
+ initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c'
86
+ initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
87
+ initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
88
+ archive.gzip_uncompress(initrd_path_gz, initrd_path)
31
+
89
+
32
+.. _Nordic nRF: https://www.nordicsemi.com/Products
90
+ self.vm.set_console()
91
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
92
+ 'earlycon=pl011,0x3f201000 console=ttyAMA0 '
93
+ 'panic=-1 noreboot ' +
94
+ 'dwc_otg.fiq_fsm_enable=0')
95
+ self.vm.add_args('-kernel', kernel_path,
96
+ '-dtb', dtb_path,
97
+ '-initrd', initrd_path,
98
+ '-append', kernel_command_line,
99
+ '-no-reboot')
100
+ self.vm.launch()
101
+ self.wait_for_console_pattern('Boot successful.')
33
+
102
+
34
+The nRF51 series is the first series for short range wireless applications.
103
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
35
+It is superseded by the nRF52 series.
104
+ 'BCM2835')
36
+The following machines are based on this chip :
105
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
106
+ '/soc/cprman@7e101000')
107
+ exec_command(self, 'halt')
108
+ # Wait for VM to shut down gracefully
109
+ self.vm.wait()
37
+
110
+
38
+- ``microbit`` BBC micro:bit board with nRF51822 SoC
111
def test_arm_exynos4210_initrd(self):
39
+
112
"""
40
+There are other series such as nRF52, nRF53 and nRF91 which are currently not
113
:avocado: tags=arch:arm
41
+supported by QEMU.
42
+
43
+Supported devices
44
+-----------------
45
+
46
+ * ARM Cortex-M0 (ARMv6-M)
47
+ * Serial ports (UART)
48
+ * Clock controller
49
+ * Timers
50
+ * Random Number Generator (RNG)
51
+ * GPIO controller
52
+ * NVMC
53
+ * SWI
54
+
55
+Missing devices
56
+---------------
57
+
58
+ * Watchdog
59
+ * Real-Time Clock (RTC) controller
60
+ * TWI (i2c)
61
+ * SPI controller
62
+ * Analog to Digital Converter (ADC)
63
+ * Quadrature decoder
64
+ * Radio
65
+
66
+Boot options
67
+------------
68
+
69
+The Micro:bit machine can be started using the ``-device`` option to load a
70
+firmware in `ihex format`_. Example:
71
+
72
+.. _ihex format: https://en.wikipedia.org/wiki/Intel_HEX
73
+
74
+.. code-block:: bash
75
+
76
+ $ qemu-system-arm -M microbit -device loader,file=test.hex
77
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
78
index XXXXXXX..XXXXXXX 100644
79
--- a/docs/system/target-arm.rst
80
+++ b/docs/system/target-arm.rst
81
@@ -XXX,XX +XXX,XX @@ undocumented; you can get a complete list by running
82
arm/digic
83
arm/musicpal
84
arm/gumstix
85
+ arm/nrf
86
arm/nseries
87
arm/nuvoton
88
arm/orangepi
89
diff --git a/MAINTAINERS b/MAINTAINERS
90
index XXXXXXX..XXXXXXX 100644
91
--- a/MAINTAINERS
92
+++ b/MAINTAINERS
93
@@ -XXX,XX +XXX,XX @@ F: hw/*/microbit*.c
94
F: include/hw/*/nrf51*.h
95
F: include/hw/*/microbit*.h
96
F: tests/qtest/microbit-test.c
97
+F: docs/system/arm/nrf.rst
98
99
AVR Machines
100
-------------
101
--
114
--
102
2.20.1
115
2.20.1
103
116
104
117
From: Joe Komlodi <joe.komlodi@xilinx.com>

If the CPU is running in default NaN mode (FPCR.DN == 1) and we execute
FRSQRTE, FRECPE, or FRECPX with a signaling NaN, parts_silence_nan_frac() will
assert due to fpst->default_nan_mode being set.

To avoid this, we check to see what NaN mode we're running in before we call
floatxx_silence_nan().

Signed-off-by: Joe Komlodi <joe.komlodi@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1624662174-175828-2-git-send-email-joe.komlodi@xilinx.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-a64.c | 12 +++++++++---
 target/arm/vfp_helper.c | 24 ++++++++++++++++++------
 2 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
         float16 nan = a;
         if (float16_is_signaling_nan(a, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float16_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float16_silence_nan(a, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float16_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
         float32 nan = a;
         if (float32_is_signaling_nan(a, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float32_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float32_silence_nan(a, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float32_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
         float64 nan = a;
         if (float64_is_signaling_nan(a, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float64_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float64_silence_nan(a, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float64_default_nan(fpst);
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
         float16 nan = f16;
         if (float16_is_signaling_nan(f16, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float16_silence_nan(f16, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float16_silence_nan(f16, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float16_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(recpe_f32)(float32 input, void *fpstp)
         float32 nan = f32;
         if (float32_is_signaling_nan(f32, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float32_silence_nan(f32, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float32_silence_nan(f32, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float32_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
         float64 nan = f64;
         if (float64_is_signaling_nan(f64, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float64_silence_nan(f64, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float64_silence_nan(f64, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float64_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
         float16 nan = f16;
         if (float16_is_signaling_nan(f16, s)) {
             float_raise(float_flag_invalid, s);
-            nan = float16_silence_nan(f16, s);
+            if (!s->default_nan_mode) {
+                nan = float16_silence_nan(f16, fpstp);
+            }
         }
         if (s->default_nan_mode) {
             nan = float16_default_nan(s);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
         float32 nan = f32;
         if (float32_is_signaling_nan(f32, s)) {
             float_raise(float_flag_invalid, s);
-            nan = float32_silence_nan(f32, s);
+            if (!s->default_nan_mode) {
+                nan = float32_silence_nan(f32, fpstp);
+            }
         }
         if (s->default_nan_mode) {
             nan = float32_default_nan(s);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
         float64 nan = f64;
         if (float64_is_signaling_nan(f64, s)) {
             float_raise(float_flag_invalid, s);
-            nan = float64_silence_nan(f64, s);
+            if (!s->default_nan_mode) {
+                nan = float64_silence_nan(f64, fpstp);
+            }
         }
         if (s->default_nan_mode) {
             nan = float64_default_nan(s);
--
2.20.1


Implement the MVE VCADD insn, which performs a complex add with
rotate. Note that the size=0b11 encoding is VSBC.

The architecture grants some leeway for the "destination and Vm
source overlap" case for the size MO_32 case, but we choose not to
make use of it, instead always calculating all 16 bytes worth of
results before setting the destination register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-42-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      |  9 +++++++--
 target/arm/mve_helper.c    | 29 +++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  7 +++++++
 4 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vadci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vsbc, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vsbci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vcadd90b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcadd90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcadd90w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vcadd270b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcadd270w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
 VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op

 VADC 1110 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
-VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
 VADCI 1110 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
-VSBCI 1111 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
+
+{
+  VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
+  VSBCI 1111 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
+  VCADD90 1111 1110 0 . .. ... 0 ... 0 1111 . 0 . 0 ... 0 @2op
+  VCADD270 1111 1110 0 . .. ... 0 ... 1 1111 . 0 . 0 ... 0 @2op
+}

 # Vector miscellaneous

diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
     do_vadc(env, vd, vn, vm, -1, 1, true);
 }

+#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
+    { \
+        TYPE *d = vd, *n = vn, *m = vm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        TYPE r[16 / ESIZE]; \
+        /* Calculate all results first to avoid overwriting inputs */ \
+        for (e = 0; e < 16 / ESIZE; e++) { \
+            if (!(e & 1)) { \
+                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \
+            } else { \
+                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \
+            } \
+        } \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            mergemask(&d[H##ESIZE(e)], r[e], mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VCADD_ALL(OP, FN0, FN1) \
+    DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \
+    DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \
+    DO_VCADD(OP##w, 4, int32_t, FN0, FN1)
+
+DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
+DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
+
 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 {
     if (val > max) {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQRDMLSDH, vqrdmlsdh)
 DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
 DO_2OP(VRHADD_S, vrhadds)
 DO_2OP(VRHADD_U, vrhaddu)
+/*
+ * VCADD Qd == Qm at size MO_32 is UNPREDICTABLE; we choose not to diagnose
+ * so we can reuse the DO_2OP macro. (Our implementation calculates the
+ * "expected" results in this case.)
+ */
+DO_2OP(VCADD90, vcadd90)
+DO_2OP(VCADD270, vcadd270)

 static bool trans_VQDMULLB(DisasContext *s, arg_2op *a)
 {
--
2.20.1

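For readers unfamiliar with the complex-add lane pattern, here is a minimal
standalone sketch (plain C, not QEMU code; the function name and fixed 4-lane
layout are illustrative assumptions) of the arithmetic the DO_VCADD macro
above expands to for the word-size VCADD with rotate 90:

    #include <stdint.h>

    /* Illustrative only: one 128-bit vector as 4 x int32 lanes. */
    static void vcadd90_w(int32_t d[4], const int32_t n[4], const int32_t m[4])
    {
        int32_t r[4];
        for (int e = 0; e < 4; e++) {
            if (!(e & 1)) {
                r[e] = n[e] - m[e + 1];   /* even lanes: FN0 == DO_SUB */
            } else {
                r[e] = n[e] + m[e - 1];   /* odd lanes: FN1 == DO_ADD */
            }
        }
        /* Write results only after computing them all, so d == m is safe. */
        for (int e = 0; e < 4; e++) {
            d[e] = r[e];
        }
    }

VCADD270 simply swaps the two operations (even lanes add, odd lanes
subtract), which is why the patch instantiates
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB).
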
From: Maxim Uvarov <maxim.uvarov@linaro.org>

QEMU has two types of function here: shutdown and reboot. The shutdown
function has to be used for machine shutdown. Otherwise we cause
a reset with a bogus "cause" value, when we intended a shutdown.

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210625111842.3790-3-maxim.uvarov@linaro.org
[PMM: tweaked commit message]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/gpio/gpio_pwr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/gpio/gpio_pwr.c
+++ b/hw/gpio/gpio_pwr.c
@@ -XXX,XX +XXX,XX @@ static void gpio_pwr_reset(void *opaque, int n, int level)
 static void gpio_pwr_shutdown(void *opaque, int n, int level)
 {
     if (level) {
-        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
     }
 }

--
2.20.1


From: Peter Collingbourne <pcc@google.com>

MTE3 introduces an asymmetric tag checking mode, in which loads are
checked synchronously and stores are checked asynchronously. Add
support for it.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210616195614.11785-1-pcc@google.com
[PMM: Add line to emulation.rst]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/emulation.rst |  1 +
 target/arm/cpu64.c            |  2 +-
 target/arm/mte_helper.c       | 82 ++++++++++++++++++++-------------
 3 files changed, 53 insertions(+), 32 deletions(-)

diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
 - FEAT_LSE (Large System Extensions)
 - FEAT_MTE (Memory Tagging Extension)
 - FEAT_MTE2 (Memory Tagging Extension)
+- FEAT_MTE3 (MTE Asymmetric Fault Handling)
 - FEAT_PAN (Privileged access never)
 - FEAT_PAN2 (AT S1E1R and AT S1E1W instruction variants affected by PSTATE.PAN)
 - FEAT_PAuth (Pointer authentication)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
      * during realize if the board provides no tag memory, much like
      * we do for EL2 with the virtualization=on property.
      */
-    t = FIELD_DP64(t, ID_AA64PFR1, MTE, 2);
+    t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
     cpu->isar.id_aa64pfr1 = t;

     t = cpu->isar.id_aa64mmfr0;
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
     }
 }

+static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
+                                uint64_t dirty_ptr, uintptr_t ra)
+{
+    int is_write, syn;
+
+    env->exception.vaddress = dirty_ptr;
+
+    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
+    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
+                                0x11);
+    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
+    g_assert_not_reached();
+}
+
+static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
+                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
+{
+    int select;
+
+    if (regime_has_2_ranges(arm_mmu_idx)) {
+        select = extract64(dirty_ptr, 55, 1);
+    } else {
+        select = 0;
+    }
+    env->cp15.tfsr_el[el] |= 1 << select;
+#ifdef CONFIG_USER_ONLY
+    /*
+     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
+     * which then sends a SIGSEGV when the thread is next scheduled.
+     * This cpu will return to the main loop at the end of the TB,
+     * which is rather sooner than "normal". But the alternative
+     * is waiting until the next syscall.
+     */
+    qemu_cpu_kick(env_cpu(env));
+#endif
+}
+
 /* Record a tag check failure. */
 static void mte_check_fail(CPUARMState *env, uint32_t desc,
                            uint64_t dirty_ptr, uintptr_t ra)
 {
     int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
-    int el, reg_el, tcf, select, is_write, syn;
+    int el, reg_el, tcf;
     uint64_t sctlr;

     reg_el = regime_el(env, arm_mmu_idx);
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
     switch (tcf) {
     case 1:
         /* Tag check fail causes a synchronous exception. */
-        env->exception.vaddress = dirty_ptr;
-
-        is_write = FIELD_EX32(desc, MTEDESC, WRITE);
-        syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0,
-                                    is_write, 0x11);
-        raise_exception_ra(env, EXCP_DATA_ABORT, syn,
-                           exception_target_el(env), ra);
-        /* noreturn, but fall through to the assert anyway */
+        mte_sync_check_fail(env, desc, dirty_ptr, ra);
+        break;

     case 0:
         /*
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,

     case 2:
         /* Tag check fail causes asynchronous flag set. */
-        if (regime_has_2_ranges(arm_mmu_idx)) {
-            select = extract64(dirty_ptr, 55, 1);
-        } else {
-            select = 0;
-        }
-        env->cp15.tfsr_el[el] |= 1 << select;
-#ifdef CONFIG_USER_ONLY
-        /*
-         * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
-         * which then sends a SIGSEGV when the thread is next scheduled.
-         * This cpu will return to the main loop at the end of the TB,
-         * which is rather sooner than "normal". But the alternative
-         * is waiting until the next syscall.
-         */
-        qemu_cpu_kick(env_cpu(env));
-#endif
+        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
         break;

-    default:
-        /* Case 3: Reserved. */
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "Tag check failure with SCTLR_EL%d.TCF%s "
-                      "set to reserved value %d\n",
-                      reg_el, el ? "" : "0", tcf);
+    case 3:
+        /*
+         * Tag check fail causes asynchronous flag set for stores, or
+         * a synchronous exception for loads.
+         */
+        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
+            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
+        } else {
+            mte_sync_check_fail(env, desc, dirty_ptr, ra);
+        }
         break;
     }
 }
--
2.20.1

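As a rough standalone sketch (plain C, not QEMU code; the enum and callback
names are invented for illustration), the tag-check-fail dispatch the MTE3
patch above implements reduces to this: mode 3 is the new asymmetric mode,
which faults loads synchronously but records store failures asynchronously.

    #include <stdbool.h>

    enum { TCF_NONE, TCF_SYNC, TCF_ASYNC, TCF_ASYMMETRIC };

    static void tag_check_fail(int tcf, bool is_write,
                               void (*sync_fail)(void),
                               void (*async_fail)(void))
    {
        switch (tcf) {
        case TCF_NONE:
            break;                 /* tag check faults are ignored */
        case TCF_SYNC:
            sync_fail();           /* synchronous data abort */
            break;
        case TCF_ASYNC:
            async_fail();          /* set a TFSR bit, report later */
            break;
        case TCF_ASYMMETRIC:       /* new with MTE3 (FEAT_MTE3) */
            if (is_write) {
                async_fail();      /* stores: asynchronous */
            } else {
                sync_fail();       /* loads: synchronous */
            }
            break;
        }
    }

The two callbacks stand in for mte_sync_check_fail() and
mte_async_check_fail() in the patch; factoring them out is what lets case 3
reuse both behaviours.
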
In a CPU with MVE, the VMOV (vector lane to general-purpose register)
and VMOV (general-purpose register to vector lane) insns are not
predicated, but they are subject to beatwise execution if they
are not in an IT block.

Since our implementation always executes all 4 beats in one tick,
this means only that we need to handle PSR.ECI:
 * we must do the usual check for bad ECI state
 * we must advance ECI state if the insn succeeds
 * if ECI says we should not be executing the beat corresponding
   to the lane of the vector register being accessed then we
   should skip performing the move

Note that if PSR.ECI is non-zero then we cannot be in an IT block.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-45-peter.maydell@linaro.org
---
 target/arm/translate-a32.h |  2 +
 target/arm/translate-mve.c |  4 +-
 target/arm/translate-vfp.c | 77 +++++++++++++++++++++++++++++++++++---
 3 files changed, 75 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a32.h
+++ b/target/arm/translate-a32.h
@@ -XXX,XX +XXX,XX @@ long neon_full_reg_offset(unsigned reg);
 long neon_element_offset(int reg, int element, MemOp memop);
 void gen_rev16(TCGv_i32 dest, TCGv_i32 var);
 void clear_eci_state(DisasContext *s);
+bool mve_eci_check(DisasContext *s);
+void mve_update_and_store_eci(DisasContext *s);

 static inline TCGv_i32 load_cpu_offset(int offset)
 {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool mve_check_qreg_bank(DisasContext *s, int qmask)
     return qmask < 8;
 }

-static bool mve_eci_check(DisasContext *s)
+bool mve_eci_check(DisasContext *s)
 {
     /*
      * This is a beatwise insn: check that ECI is valid (not a
@@ -XXX,XX +XXX,XX @@ static void mve_update_eci(DisasContext *s)
     }
 }

-static void mve_update_and_store_eci(DisasContext *s)
+void mve_update_and_store_eci(DisasContext *s)
 {
     /*
      * For insns which don't call a helper function that will call
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
     return true;
 }

+static bool mve_skip_vmov(DisasContext *s, int vn, int index, int size)
+{
+    /*
+     * In a CPU with MVE, the VMOV (vector lane to general-purpose register)
+     * and VMOV (general-purpose register to vector lane) insns are not
+     * predicated, but they are subject to beatwise execution if they are
+     * not in an IT block.
+     *
+     * Since our implementation always executes all 4 beats in one tick,
+     * this means only that if PSR.ECI says we should not be executing
+     * the beat corresponding to the lane of the vector register being
+     * accessed then we should skip performing the move, and that we need
+     * to do the usual check for bad ECI state and advance of ECI state.
+     *
+     * Note that if PSR.ECI is non-zero then we cannot be in an IT block.
+     *
+     * Return true if this VMOV scalar <-> gpreg should be skipped because
+     * the MVE PSR.ECI state says we skip the beat where the store happens.
+     */
+
+    /* Calculate the byte offset into Qn which we're going to access */
+    int ofs = (index << size) + ((vn & 1) * 8);
+
+    if (!dc_isar_feature(aa32_mve, s)) {
+        return false;
+    }
+
+    switch (s->eci) {
+    case ECI_NONE:
+        return false;
+    case ECI_A0:
+        return ofs < 4;
+    case ECI_A0A1:
+        return ofs < 8;
+    case ECI_A0A1A2:
+    case ECI_A0A1A2B0:
+        return ofs < 12;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
 {
     /* VMOV scalar to general purpose register */
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
         return false;
     }

+    if (dc_isar_feature(aa32_mve, s)) {
+        if (!mve_eci_check(s)) {
+            return true;
+        }
+    }
+
     if (!vfp_access_check(s)) {
         return true;
     }

-    tmp = tcg_temp_new_i32();
-    read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
-    store_reg(s, a->rt, tmp);
+    if (!mve_skip_vmov(s, a->vn, a->index, a->size)) {
+        tmp = tcg_temp_new_i32();
+        read_neon_element32(tmp, a->vn, a->index,
+                            a->size | (a->u ? 0 : MO_SIGN));
+        store_reg(s, a->rt, tmp);
+    }

+    if (dc_isar_feature(aa32_mve, s)) {
+        mve_update_and_store_eci(s);
+    }
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
         return false;
     }

+    if (dc_isar_feature(aa32_mve, s)) {
+        if (!mve_eci_check(s)) {
+            return true;
+        }
+    }
+
     if (!vfp_access_check(s)) {
         return true;
     }

-    tmp = load_reg(s, a->rt);
-    write_neon_element32(tmp, a->vn, a->index, a->size);
-    tcg_temp_free_i32(tmp);
+    if (!mve_skip_vmov(s, a->vn, a->index, a->size)) {
+        tmp = load_reg(s, a->rt);
+        write_neon_element32(tmp, a->vn, a->index, a->size);
+        tcg_temp_free_i32(tmp);
+    }

+    if (dc_isar_feature(aa32_mve, s)) {
+        mve_update_and_store_eci(s);
+    }
     return true;
 }

--
2.20.1


In do_ldst(), the calculation of the offset needs to be based on the
size of the memory access, not the size of the elements in the
vector. This meant we were getting it wrong for the widening and
narrowing variants of the various VLDR and VSTR insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-2-peter.maydell@linaro.org
---
 target/arm/translate-mve.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool mve_skip_first_beat(DisasContext *s)
     }
 }

-static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
+static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn,
+                    unsigned msize)
 {
     TCGv_i32 addr;
     uint32_t offset;
@@ -XXX,XX +XXX,XX @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
         return true;
     }

-    offset = a->imm << a->size;
+    offset = a->imm << msize;
     if (!a->a) {
         offset = -offset;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
         { gen_helper_mve_vstrw, gen_helper_mve_vldrw },
         { NULL, NULL }
     };
-    return do_ldst(s, a, ldstfns[a->size][a->l]);
+    return do_ldst(s, a, ldstfns[a->size][a->l], a->size);
 }

-#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST) \
+#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST, MSIZE) \
     static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a) \
     { \
         static MVEGenLdStFn * const ldstfns[2][2] = { \
             { gen_helper_mve_##ST, gen_helper_mve_##SLD }, \
             { NULL, gen_helper_mve_##ULD }, \
         }; \
-        return do_ldst(s, a, ldstfns[a->u][a->l]); \
+        return do_ldst(s, a, ldstfns[a->u][a->l], MSIZE); \
     }

-DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
-DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
-DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
+DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h, MO_8)
+DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w, MO_8)
+DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w, MO_16)

 static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
 {
--
2.20.1

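A small worked example of the bug fixed above (hypothetical standalone C,
not QEMU code; the helper name is invented): for a widening load such as
VLDRB.U16 each beat accesses one byte of memory even though the vector
elements are halfwords, so the immediate must be scaled by the memory access
size, not the element size.

    #include <stdint.h>

    /* MemOp-style log2 sizes, as in the patch: MO_8 == 0, MO_16 == 1. */
    enum { MO_8 = 0, MO_16 = 1 };

    static uint32_t ldst_offset(uint32_t imm, unsigned msize)
    {
        return imm << msize;   /* scale by memory size, not element size */
    }

    /*
     * For VLDRB.U16 with imm == 8: scaling by the element size (MO_16)
     * would wrongly give 8 << 1 == 16, while the memory access size
     * (MO_8) correctly gives 8 << 0 == 8.
     */

For the non-widening forms the two sizes coincide, which is why
trans_VLDR_VSTR() can still pass a->size as msize.
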
Implement the MVE VRMLALDAVH and VRMLSLDAVH insns, which accumulate
the results of a rounded multiply of pairs of elements into a 72-bit
accumulator, returning the top 64 bits in a pair of general purpose
registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-22-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      |  7 +++++++
 target/arm/mve_helper.c    | 37 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 24 ++++++++++++++++++++++++
 4 files changed, 76 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmlsldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlsldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlsldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlsldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2

 @vmlaldav .... .... . ... ... . ... . .... .... qm:3 . \
           qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
+@vmlaldav_nosz .... .... . ... ... . ... . .... .... qm:3 . \
+               qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
 VMLALDAV_S 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
 VMLALDAV_U 1111 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav

 VMLSLDAV 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav
+
+VRMLALDAVH_S 1110 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz
+VRMLALDAVH_U 1111 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz
+
+VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_nosz
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@
  */

 #include "qemu/osdep.h"
+#include "qemu/int128.h"
 #include "cpu.h"
 #include "internals.h"
 #include "vec_internal.h"
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
 DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
 DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
+
+/*
+ * Rounding multiply add long dual accumulate high: we must keep
+ * a 72-bit internal accumulator value and return the top 64 bits.
+ */
+#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \
+    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+                                    void *vm, uint64_t a) \
+    { \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        TYPE *n = vn, *m = vm; \
+        Int128 acc = int128_lshift(TO128(a), 8); \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            if (mask & 1) { \
+                if (e & 1) { \
+                    acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \
+                                            m[H##ESIZE(e)])); \
+                } else { \
+                    acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
+                                             m[H##ESIZE(e)])); \
+                } \
+                acc = int128_add(acc, 1 << 7); \
+            } \
+        } \
+        mve_advance_vpt(env); \
+        return int128_getlo(int128_rshift(acc, 8)); \
+    }
+
+DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
+DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)
+
+DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)
+
+DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
+DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a)
     };
     return do_long_dual_acc(s, a, fns[a->size][a->x]);
 }
+
+static bool trans_VRMLALDAVH_S(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[] = {
+        gen_helper_mve_vrmlaldavhsw, gen_helper_mve_vrmlaldavhxsw,
+    };
+    return do_long_dual_acc(s, a, fns[a->x]);
+}
+
+static bool trans_VRMLALDAVH_U(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[] = {
+        gen_helper_mve_vrmlaldavhuw, NULL,
+    };
+    return do_long_dual_acc(s, a, fns[a->x]);
+}
+
+static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[] = {
+        gen_helper_mve_vrmlsldavhsw, gen_helper_mve_vrmlsldavhxsw,
+    };
+    return do_long_dual_acc(s, a, fns[a->x]);
+}
--
2.20.1


The initial implementation of the MVE VRMLALDAVH and VRMLSLDAVH
insns had some bugs:
 * the 32x32 multiply of elements was being done as 32x32->32,
   not 32x32->64
 * we were incorrectly maintaining the accumulator in its full
   72-bit form across all 4 beats of the insn; in the pseudocode
   it is squashed back into the 64 bits of the RdaHi:RdaLo
   registers after each beat

In particular, fixing the second of these allows us to recast
the implementation to avoid 128-bit arithmetic entirely.

Since the element size here is always 4, we can also drop the
parameterization of ESIZE to make the code a little more readable.

Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-3-peter.maydell@linaro.org
---
 target/arm/mve_helper.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@
  */

 #include "qemu/osdep.h"
-#include "qemu/int128.h"
 #include "cpu.h"
 #include "internals.h"
 #include "vec_internal.h"
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

 /*
- * Rounding multiply add long dual accumulate high: we must keep
- * a 72-bit internal accumulator value and return the top 64 bits.
+ * Rounding multiply add long dual accumulate high. In the pseudocode
+ * this is implemented with a 72-bit internal accumulator value of which
+ * the top 64 bits are returned. We optimize this to avoid having to
+ * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
+ * is squashed back into 64-bits after each beat.
  */
-#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \
+#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \
     uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                     void *vm, uint64_t a) \
     { \
         uint16_t mask = mve_element_mask(env); \
         unsigned e; \
         TYPE *n = vn, *m = vm; \
-        Int128 acc = int128_lshift(TO128(a), 8); \
-        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
             if (mask & 1) { \
+                LTYPE mul; \
                 if (e & 1) { \
-                    acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \
-                                            m[H##ESIZE(e)])); \
+                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \
+                    if (SUB) { \
+                        mul = -mul; \
+                    } \
                 } else { \
-                    acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
-                                             m[H##ESIZE(e)])); \
+                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \
                 } \
-                acc = int128_add(acc, int128_make64(1 << 7)); \
+                mul = (mul >> 8) + ((mul >> 7) & 1); \
+                a += mul; \
             } \
         } \
         mve_advance_vpt(env); \
-        return int128_getlo(int128_rshift(acc, 8)); \
+        return a; \
     }

-DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
-DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)
+DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
+DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

-DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)
+DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

-DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
-DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
+DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
+DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)

 /* Vector add across vector */
 #define DO_VADDV(OP, ESIZE, TYPE) \
--
2.20.1

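To see why the recast works, here is the per-beat arithmetic as a standalone
sketch (plain C, not QEMU code; the function name is invented). Squashing the
accumulator back to 64 bits after every beat means each beat needs only a
64-bit multiply, a rounding shift, and a 64-bit add:

    #include <stdint.h>

    static uint64_t rmlaldavh_beat(uint64_t a, int32_t n, int32_t m)
    {
        int64_t mul = (int64_t)n * m;        /* 32x32->64, not 32x32->32 */
        mul = (mul >> 8) + ((mul >> 7) & 1); /* same value as (mul + 128) >> 8 */
        return a + (uint64_t)mul;
    }

The rounding term ((mul >> 7) & 1) adds one exactly when bit 7 of the product
is set, which is the per-beat equivalent of the old code's
int128_add(acc, ... 1 << 7) followed by a final right shift by 8.
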
The function asimd_imm_const() in translate-neon.c is an
implementation of the pseudocode AdvSIMDExpandImm(), which we will
also want for MVE. Move the implementation to translate.c, with a
prototype in translate.h.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-4-peter.maydell@linaro.org
---
 target/arm/translate.h      | 16 ++++++++++
 target/arm/translate-neon.c | 63 -------------------------------------
 target/arm/translate.c      | 57 +++++++++++++++++++++++++++++
 3 files changed, 73 insertions(+), 63 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
     return opc | s->be_data;
 }

+/**
+ * asimd_imm_const: Expand an encoded SIMD constant value
+ *
+ * Expand a SIMD constant value. This is essentially the pseudocode
+ * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
+ * VMVN and VBIC (when cmode < 14 && op == 1).
+ *
+ * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
+ * callers must catch this.
+ *
+ * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
+ * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
+ * we produce an immediate constant value of 0 in these cases.
+ */
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -XXX,XX +XXX,XX @@ DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
 DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs)
 DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu)

-static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
-{
-    /*
-     * Expand the encoded constant.
-     * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
-     * We choose to not special-case this and will behave as if a
-     * valid constant encoding of 0 had been given.
-     * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
-     */
-    switch (cmode) {
-    case 0: case 1:
-        /* no-op */
-        break;
-    case 2: case 3:
-        imm <<= 8;
-        break;
-    case 4: case 5:
-        imm <<= 16;
-        break;
-    case 6: case 7:
-        imm <<= 24;
-        break;
-    case 8: case 9:
-        imm |= imm << 16;
-        break;
-    case 10: case 11:
-        imm = (imm << 8) | (imm << 24);
-        break;
-    case 12:
-        imm = (imm << 8) | 0xff;
-        break;
-    case 13:
-        imm = (imm << 16) | 0xffff;
-        break;
-    case 14:
-        if (op) {
-            /*
-             * This is the only case where the top and bottom 32 bits
-             * of the encoded constant differ.
-             */
-            uint64_t imm64 = 0;
-            int n;
-
-            for (n = 0; n < 8; n++) {
-                if (imm & (1 << n)) {
-                    imm64 |= (0xffULL << (n * 8));
-                }
-            }
-            return imm64;
-        }
-        imm |= (imm << 8) | (imm << 16) | (imm << 24);
-        break;
-    case 15:
-        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
-            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
-        break;
-    }
-    if (op) {
-        imm = ~imm;
-    }
-    return dup_const(MO_32, imm);
-}
-
 static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
                         GVecGen2iFn *fn)
 {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ void arm_translate_init(void)
     a64_translate_init();
 }

+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
+{
+    /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
+    switch (cmode) {
+    case 0: case 1:
+        /* no-op */
+        break;
+    case 2: case 3:
+        imm <<= 8;
+        break;
+    case 4: case 5:
+        imm <<= 16;
+        break;
+    case 6: case 7:
+        imm <<= 24;
+        break;
+    case 8: case 9:
+        imm |= imm << 16;
+        break;
+    case 10: case 11:
+        imm = (imm << 8) | (imm << 24);
+        break;
+    case 12:
+        imm = (imm << 8) | 0xff;
+        break;
+    case 13:
+        imm = (imm << 16) | 0xffff;
+        break;
+    case 14:
+        if (op) {
+            /*
+             * This is the only case where the top and bottom 32 bits
+             * of the encoded constant differ.
+             */
+            uint64_t imm64 = 0;
+            int n;
+
+            for (n = 0; n < 8; n++) {
+                if (imm & (1 << n)) {
+                    imm64 |= (0xffULL << (n * 8));
+                }
+            }
+            return imm64;
+        }
+        imm |= (imm << 8) | (imm << 16) | (imm << 24);
+        break;
+    case 15:
+        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
+            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
+        break;
+    }
+    if (op) {
+        imm = ~imm;
+    }
+    return dup_const(MO_32, imm);
+}
+
 /* Generate a label used for skipping this instruction */
 void arm_gen_condlabel(DisasContext *s)
 {
--
2.20.1


Implement the MVE VADC and VSBC insns. These perform an
add-with-carry or subtract-with-carry of the 32-bit elements in each
lane of the input vectors, where the carry-out of each add is the
carry-in of the next. The initial carry input is either 1 or is from
FPSCR.C; the carry out at the end is written back to FPSCR.C.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-41-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  5 ++++
 target/arm/mve.decode      |  5 ++++
 target/arm/mve_helper.c    | 52 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 37 +++++++++++++++++++++++++++
 4 files changed, 99 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vrhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vrhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vadc, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vadci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsbc, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsbci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
 VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
 VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op

+VADC 1110 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
+VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
+VADCI 1110 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
+VSBCI 1111 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vrshlu, DO_VRSHLU)
 DO_2OP_S(vrhadds, DO_RHADD_S)
 DO_2OP_U(vrhaddu, DO_RHADD_U)

+static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
+                    uint32_t inv, uint32_t carry_in, bool update_flags)
+{
+    uint16_t mask = mve_element_mask(env);
+    unsigned e;
+
+    /* If any additions trigger, we will update flags. */
+    if (mask & 0x1111) {
+        update_flags = true;
+    }
+
+    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+        uint64_t r = carry_in;
+        r += n[H4(e)];
+        r += m[H4(e)] ^ inv;
+        if (mask & 1) {
+            carry_in = r >> 32;
+        }
+        mergemask(&d[H4(e)], r, mask);
+    }
+
+    if (update_flags) {
+        /* Store C, clear NZV. */
+        env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
+        env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
+    }
+    mve_advance_vpt(env);
+}
+
+void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
+    do_vadc(env, vd, vn, vm, 0, carry_in, false);
+}
+
+void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
+    do_vadc(env, vd, vn, vm, -1, carry_in, false);
+}
+
+
+void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+    do_vadc(env, vd, vn, vm, 0, 0, true);
+}
+
+void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+    do_vadc(env, vd, vn, vm, -1, 1, true);
+}
+
 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 {
     if (val > max) {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VQDMULLT(DisasContext *s, arg_2op *a)
     return do_2op(s, a, fns[a->size]);
 }

+/*
+ * VADC and VSBC: these perform an add-with-carry or subtract-with-carry
+ * of the 32-bit elements in each lane of the input vectors, where the
+ * carry-out of each add is the carry-in of the next. The initial carry
+ * input is either fixed (0 for VADCI, 1 for VSBCI) or is from FPSCR.C
+ * (for VADC and VSBC); the carry out at the end is written back to FPSCR.C.
+ * These insns are subject to beat-wise execution. Partial execution
+ * of an I=1 (initial carry input fixed) insn which does not
+ * execute the first beat must start with the current FPSCR.NZCV
+ * value, not the fixed constant input.
+ */
+static bool trans_VADC(DisasContext *s, arg_2op *a)
+{
+    return do_2op(s, a, gen_helper_mve_vadc);
+}
+
+static bool trans_VADCI(DisasContext *s, arg_2op *a)
+{
+    if (mve_skip_first_beat(s)) {
+        return trans_VADC(s, a);
+    }
+    return do_2op(s, a, gen_helper_mve_vadci);
+}
+
+static bool trans_VSBC(DisasContext *s, arg_2op *a)
+{
+    return do_2op(s, a, gen_helper_mve_vsbc);
+}
+
+static bool trans_VSBCI(DisasContext *s, arg_2op *a)
+{
+    if (mve_skip_first_beat(s)) {
+        return trans_VSBC(s, a);
+    }
+    return do_2op(s, a, gen_helper_mve_vsbci);
+}
+
 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
 {
--
2.20.1

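Ignoring MVE predication, the lane-to-lane carry chain in do_vadc() above can
be pictured with this standalone sketch (plain C, not QEMU code; the function
name is invented). Subtraction reuses the adder with the second operand
inverted, which is why VSBC passes inv == -1 and VSBCI a fixed carry-in of 1:

    #include <stdbool.h>
    #include <stdint.h>

    static bool vadc_w(uint32_t d[4], const uint32_t n[4],
                       const uint32_t m[4], uint32_t inv, bool carry_in)
    {
        for (int e = 0; e < 4; e++) {
            /* 64-bit add so the carry-out is simply bit 32 of the result */
            uint64_t r = (uint64_t)carry_in + n[e] + (m[e] ^ inv);
            d[e] = (uint32_t)r;
            carry_in = r >> 32;
        }
        return carry_in;   /* written back to FPSCR.C */
    }

The real helper differs in that predicated-out lanes neither update the
carry chain nor the destination, which is what the mask tests in do_vadc()
implement.
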
The A64 AdvSIMD modified-immediate grouping uses almost the same
constant encoding that A32 Neon does; reuse asimd_imm_const() (to
which we add the AArch64-specific case for cmode 15 op 1) instead of
reimplementing it all.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-5-peter.maydell@linaro.org
---
 target/arm/translate.h     |  3 +-
 target/arm/translate-a64.c | 86 ++++----------------------------------
 target/arm/translate.c     | 17 +++++++-
 3 files changed, 24 insertions(+), 82 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
  * VMVN and VBIC (when cmode < 14 && op == 1).
  *
  * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
- * callers must catch this.
+ * callers must catch this; we return the 64-bit constant value defined
+ * for AArch64.
  *
  * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
  * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
 {
     int rd = extract32(insn, 0, 5);
     int cmode = extract32(insn, 12, 4);
-    int cmode_3_1 = extract32(cmode, 1, 3);
-    int cmode_0 = extract32(cmode, 0, 1);
     int o2 = extract32(insn, 11, 1);
     uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
     bool is_neg = extract32(insn, 29, 1);
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
         return;
     }

-    /* See AdvSIMDExpandImm() in ARM ARM */
-    switch (cmode_3_1) {
-    case 0: /* Replicate(Zeros(24):imm8, 2) */
-    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
-    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
-    case 3: /* Replicate(imm8:Zeros(24), 2) */
-    {
-        int shift = cmode_3_1 * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 32);

The M-profile architecture requires that accesses to FPCXT_NS when
there is no active FP state must not take a NOCP fault even if the
FPU is disabled. We were not implementing this correctly, because
in our decode we catch the NOCP faults early in m-nocp.decode.

Fix this bug by moving all the handling of M-profile FP system
register accesses from vfp.decode into m-nocp.decode and putting
it above the NOCP blocks. This provides the correct behaviour:
 * for accesses other than FPCXT_NS the trans functions call
   vfp_access_check(), which will check for FPU disabled and
   raise a NOCP exception if necessary
 * for FPCXT_NS we have the special case code that doesn't
   call vfp_access_check()
 * when these trans functions want to raise an UNDEF they return
   false, so the decoder will fall through into the NOCP blocks.
   This means that NOCP correctly takes precedence over UNDEF
   for these insns. (This is a difference from the other insns
   handled by m-nocp.decode, where UNDEF takes precedence and
   which we implement by having those trans functions call
   unallocated_encoding() in the appropriate places.)

[Note for backport to stable: this commit has a semantic dependency
on commit 9a486856e9173af, which was not marked as cc-stable because
we didn't know we'd need it for a for-stable bugfix.]

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210618141019.10671-4-peter.maydell@linaro.org
---
 target/arm/translate-a32.h    |   1 +
 target/arm/m-nocp.decode      |  24 ++
 target/arm/vfp.decode         |  14 -
 target/arm/translate-m-nocp.c | 514 +++++++++++++++++++++++++++++++++
 target/arm/translate-vfp.c    | 517 +---------------------------------
 5 files changed, 542 insertions(+), 528 deletions(-)

diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a32.h
+++ b/target/arm/translate-a32.h
@@ -XXX,XX +XXX,XX @@ bool disas_neon_shared(DisasContext *s, uint32_t insn);
 void load_reg_var(DisasContext *s, TCGv_i32 var, int reg);
 void arm_gen_condlabel(DisasContext *s);
 bool vfp_access_check(DisasContext *s);
+void gen_preserve_fp_state(DisasContext *s);
 void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop);
 void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop);
 void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop);
diff --git a/target/arm/m-nocp.decode b/target/arm/m-nocp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m-nocp.decode
+++ b/target/arm/m-nocp.decode
@@ -XXX,XX +XXX,XX @@

 &nocp cp

+# M-profile VLDR/VSTR to sysreg
+%vldr_sysreg 22:1 13:3
+%imm7_0x4 0:7 !function=times_4
+
+&vldr_sysreg rn reg imm a w p
+@vldr_sysreg .... ... . a:1 . . . rn:4 ... . ... .. ....... \
+             reg=%vldr_sysreg imm=%imm7_0x4 &vldr_sysreg
+
 {
   # Special cases which do not take an early NOCP: VLLDM and VLSTM
   VLLDM_VLSTM  1110 1100 001 l:1 rn:4 0000 1010 op:1 000 0000
@@ -XXX,XX +XXX,XX @@
   VSCCLRM      1110 1100 1.01 1111 .... 1011 imm:7 0 vd=%vd_dp size=3
   VSCCLRM      1110 1100 1.01 1111 .... 1010 imm:8 vd=%vd_sp size=2

+  # FP system register accesses: these are a special case because accesses
+  # to FPCXT_NS succeed even if the FPU is disabled. We therefore need
+  # to handle them before the big NOCP blocks. Note that within these
+  # insns NOCP still has higher priority than UNDEFs; this is implemented
+  # by their returning 'false' for UNDEF so as to fall through into the
+  # NOCP check (in contrast to VLLDM etc, which call unallocated_encoding()
+  # for the UNDEFs there that must take precedence over NOCP.)
+
+  VMSR_VMRS    ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
+
+  # P=0 W=0 is SEE "Related encodings", so split into two patterns
+  VLDR_sysreg  ---- 110 1 . . w:1 1 .... ... 0 111 11 ....... @vldr_sysreg p=1
+  VLDR_sysreg  ---- 110 0 . . 1 1 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
+  VSTR_sysreg  ---- 110 1 . . w:1 0 .... ... 0 111 11 ....... @vldr_sysreg p=1
+  VSTR_sysreg  ---- 110 0 . . 1 0 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
+
   NOCP         111- 1110 ---- ---- ---- cp:4 ---- ---- &nocp
   NOCP         111- 110- ---- ---- ---- cp:4 ---- ---- &nocp
   # From v8.1M onwards this range will also NOCP:
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR_hp ---- 1101 u:1 .0 l:1 rn:4 .... 1001 imm:8 vd=%vd_sp
 VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 vd=%vd_sp
 VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 vd=%vd_dp

-# M-profile VLDR/VSTR to sysreg
-%vldr_sysreg 22:1 13:3
-%imm7_0x4 0:7 !function=times_4
-
-&vldr_sysreg rn reg imm a w p
-@vldr_sysreg .... ... . a:1 . . . rn:4 ... . ... .. ....... \
-             reg=%vldr_sysreg imm=%imm7_0x4 &vldr_sysreg
-
-# P=0 W=0 is SEE "Related encodings", so split into two patterns
-VLDR_sysreg ---- 110 1 . . w:1 1 .... ... 0 111 11 ....... @vldr_sysreg p=1
-VLDR_sysreg ---- 110 0 . . 1 1 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
-VSTR_sysreg ---- 110 1 . . w:1 0 .... ... 0 111 11 ....... @vldr_sysreg p=1
-VSTR_sysreg ---- 110 0 . . 1 0 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
-
 # We split the load/store multiple up into two patterns to avoid
 # overlap with other insns in the "Advanced SIMD load/store and 64-bit move"
 # grouping:
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-m-nocp.c
+++ b/target/arm/translate-m-nocp.c
@@ -XXX,XX +XXX,XX @@

 #include "qemu/osdep.h"
 #include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
 #include "translate.h"
 #include "translate-a32.h"

@@ -XXX,XX +XXX,XX @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
     return true;
 }

+/*
+ * M-profile provides two different sets of instructions that can
+ * access floating point system registers: VMSR/VMRS (which move
+ * to/from a general purpose register) and VLDR/VSTR sysreg (which
+ * move directly to/from memory). In some cases there are also side
+ * effects which must happen after any write to memory (which could
+ * cause an exception). So we implement the common logic for the
+ * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
+ * which take pointers to callback functions which will perform the
+ * actual "read/write general purpose register" and "read/write
+ * memory" operations.
+ */
+
+/*
+ * Emit code to store the sysreg to its final destination; frees the
+ * TCG temp 'value' it is passed.
+ */
+typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
+/*
+ * Emit code to load the value to be copied to the sysreg; returns
+ * a new TCG temporary
+ */
+typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
+
+/* Common decode/access checks for fp sysreg read/write */
+typedef enum FPSysRegCheckResult {
+    FPSysRegCheckFailed, /* caller should return false */
+    FPSysRegCheckDone, /* caller should return true */
+    FPSysRegCheckContinue, /* caller should continue generating code */
+} FPSysRegCheckResult;
+
+static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
+{
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+        return FPSysRegCheckFailed;
+    }
+
+    switch (regno) {
+    case ARM_VFP_FPSCR:
+    case QEMU_VFP_FPSCR_NZCV:
+        break;
+    case ARM_VFP_FPSCR_NZCVQC:
+        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+            return FPSysRegCheckFailed;
+        }
+        break;
+    case ARM_VFP_FPCXT_S:
+    case ARM_VFP_FPCXT_NS:
+        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+            return FPSysRegCheckFailed;
+        }
+        if (!s->v8m_secure) {
+            return FPSysRegCheckFailed;
+        }
+        break;
+    case ARM_VFP_VPR:
+    case ARM_VFP_P0:
+        if (!dc_isar_feature(aa32_mve, s)) {
+            return FPSysRegCheckFailed;
+        }
+        break;
+    default:
+        return FPSysRegCheckFailed;
+    }
+
+    /*
+     * FPCXT_NS is a special case: it has specific handling for
+     * "current FP state is inactive", and must do the PreserveFPState()
+     * but not the usual full set of actions done by ExecuteFPCheck().
+     * So we don't call vfp_access_check() and the callers must handle this.
+     */
+    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
+        return FPSysRegCheckDone;
+    }
+    return FPSysRegCheckContinue;
+}
+
+static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
+                                  TCGLabel *label)
+{
+    /*
+     * FPCXT_NS is a special case: it has specific handling for
+     * "current FP state is inactive", and must do the PreserveFPState()
+     * but not the usual full set of actions done by ExecuteFPCheck().
+     * We don't have a TB flag that matches the fpInactive check, so we
+     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
+     *
+     * Emit code that checks fpInactive and does a conditional
+     * branch to label based on it:
+     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
+     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
+     */
+    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);
+
+    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
+    TCGv_i32 aspen, fpca;
+    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
+    fpca = load_cpu_field(v7m.control[M_REG_S]);
+    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
+    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
+    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
+    tcg_gen_or_i32(fpca, fpca, aspen);
+    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
+    tcg_temp_free_i32(aspen);
+    tcg_temp_free_i32(fpca);
+}
+
+static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
+                                  fp_sysreg_loadfn *loadfn,
+                                  void *opaque)
+{
+    /* Do a write to an M-profile floating point system register */
+    TCGv_i32 tmp;
+    TCGLabel *lab_end = NULL;
+
+    switch (fp_sysreg_checks(s, regno)) {
+    case FPSysRegCheckFailed:
+        return false;
+    case FPSysRegCheckDone:
+        return true;
+    case FPSysRegCheckContinue:
+        break;
+    }
+
+    switch (regno) {
+    case ARM_VFP_FPSCR:
+        tmp = loadfn(s, opaque);
+        gen_helper_vfp_set_fpscr(cpu_env, tmp);
+        tcg_temp_free_i32(tmp);
+        gen_lookup_tb(s);
+        break;
+    case ARM_VFP_FPSCR_NZCVQC:
+    {
+        TCGv_i32 fpscr;
+        tmp = loadfn(s, opaque);
+        if (dc_isar_feature(aa32_mve, s)) {
+            /* QC is only present for MVE; otherwise RES0 */
+            TCGv_i32 qc = tcg_temp_new_i32();
+            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
+            /*
+             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
+             * here writing the same value into all elements is simplest.
+             */
+            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
+                                 16, 16, qc);
+        }
+        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
+        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
+        tcg_gen_or_i32(fpscr, fpscr, tmp);
+        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
+        tcg_temp_free_i32(tmp);
+        break;
+    }
+    case ARM_VFP_FPCXT_NS:
+        lab_end = gen_new_label();
+        /* fpInactive case: write is a NOP, so branch to end */
+        gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
+        /*
+         * !fpInactive: if FPU disabled, take NOCP exception;
+         * otherwise PreserveFPState(), and then FPCXT_NS writes
+         * behave the same as FPCXT_S writes.
+         */
+        if (s->fp_excp_el) {
+            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+                               syn_uncategorized(), s->fp_excp_el);
+            /*
+             * This was only a conditional exception, so override
+             * gen_exception_insn()'s default to DISAS_NORETURN
+             */
+            s->base.is_jmp = DISAS_NEXT;
+            break;
+        }
+        gen_preserve_fp_state(s);
+        /* fall through */
+    case ARM_VFP_FPCXT_S:
+    {
+        TCGv_i32 sfpa, control;
+        /*
+         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
+         * bits [27:0] from value and zeroes bits [31:28].
+         */
+        tmp = loadfn(s, opaque);
+        sfpa = tcg_temp_new_i32();
+        tcg_gen_shri_i32(sfpa, tmp, 31);
+        control = load_cpu_field(v7m.control[M_REG_S]);
+        tcg_gen_deposit_i32(control, control, sfpa,
+                            R_V7M_CONTROL_SFPA_SHIFT, 1);
+        store_cpu_field(control, v7m.control[M_REG_S]);
+        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
+        gen_helper_vfp_set_fpscr(cpu_env, tmp);
+        tcg_temp_free_i32(tmp);
+        tcg_temp_free_i32(sfpa);
326
+ break;
327
+ }
328
+ case ARM_VFP_VPR:
329
+ /* Behaves as NOP if not privileged */
330
+ if (IS_USER(s)) {
331
+ break;
332
+ }
333
+ tmp = loadfn(s, opaque);
334
+ store_cpu_field(tmp, v7m.vpr);
335
+ break;
336
+ case ARM_VFP_P0:
337
+ {
338
+ TCGv_i32 vpr;
339
+ tmp = loadfn(s, opaque);
340
+ vpr = load_cpu_field(v7m.vpr);
341
+ tcg_gen_deposit_i32(vpr, vpr, tmp,
342
+ R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
343
+ store_cpu_field(vpr, v7m.vpr);
344
+ tcg_temp_free_i32(tmp);
345
+ break;
346
+ }
347
+ default:
348
+ g_assert_not_reached();
349
+ }
350
+ if (lab_end) {
351
+ gen_set_label(lab_end);
352
+ }
353
+ return true;
354
+}
355
+
356
+static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
357
+ fp_sysreg_storefn *storefn,
358
+ void *opaque)
359
+{
360
+ /* Do a read from an M-profile floating point system register */
361
+ TCGv_i32 tmp;
362
+ TCGLabel *lab_end = NULL;
363
+ bool lookup_tb = false;
364
+
365
+ switch (fp_sysreg_checks(s, regno)) {
366
+ case FPSysRegCheckFailed:
367
+ return false;
368
+ case FPSysRegCheckDone:
369
+ return true;
370
+ case FPSysRegCheckContinue:
371
+ break;
372
+ }
373
+
374
+ if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
375
+ /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
376
+ regno = QEMU_VFP_FPSCR_NZCV;
377
+ }
378
+
379
+ switch (regno) {
380
+ case ARM_VFP_FPSCR:
381
+ tmp = tcg_temp_new_i32();
382
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
383
+ storefn(s, opaque, tmp);
384
+ break;
385
+ case ARM_VFP_FPSCR_NZCVQC:
386
+ tmp = tcg_temp_new_i32();
387
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
388
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
389
+ storefn(s, opaque, tmp);
390
+ break;
391
+ case QEMU_VFP_FPSCR_NZCV:
392
+ /*
393
+ * Read just NZCV; this is a special case to avoid the
394
+ * helper call for the "VMRS to CPSR.NZCV" insn.
395
+ */
396
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
397
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
398
+ storefn(s, opaque, tmp);
399
+ break;
400
+ case ARM_VFP_FPCXT_S:
401
+ {
402
+ TCGv_i32 control, sfpa, fpscr;
403
+ /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
404
+ tmp = tcg_temp_new_i32();
405
+ sfpa = tcg_temp_new_i32();
406
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
407
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
408
+ control = load_cpu_field(v7m.control[M_REG_S]);
409
+ tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
410
+ tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
411
+ tcg_gen_or_i32(tmp, tmp, sfpa);
412
+ tcg_temp_free_i32(sfpa);
413
+ /*
414
+ * Store result before updating FPSCR etc, in case
415
+ * it is a memory write which causes an exception.
416
+ */
417
+ storefn(s, opaque, tmp);
418
+ /*
419
+ * Now we must reset FPSCR from FPDSCR_NS, and clear
420
+ * CONTROL.SFPA; so we'll end the TB here.
421
+ */
422
+ tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
423
+ store_cpu_field(control, v7m.control[M_REG_S]);
424
+ fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
425
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
426
+ tcg_temp_free_i32(fpscr);
427
+ lookup_tb = true;
428
+ break;
429
+ }
430
+ case ARM_VFP_FPCXT_NS:
431
+ {
432
+ TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
433
+ TCGLabel *lab_active = gen_new_label();
434
+
435
+ lookup_tb = true;
436
+
437
+ gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
438
+ /* fpInactive case: reads as FPDSCR_NS */
439
+ TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
440
+ storefn(s, opaque, tmp);
441
+ lab_end = gen_new_label();
442
+ tcg_gen_br(lab_end);
443
+
444
+ gen_set_label(lab_active);
445
+ /*
446
+ * !fpInactive: if FPU disabled, take NOCP exception;
447
+ * otherwise PreserveFPState(), and then FPCXT_NS
448
+ * reads the same as FPCXT_S.
449
+ */
450
+ if (s->fp_excp_el) {
451
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
452
+ syn_uncategorized(), s->fp_excp_el);
453
+ /*
454
+ * This was only a conditional exception, so override
455
+ * gen_exception_insn()'s default to DISAS_NORETURN
456
+ */
457
+ s->base.is_jmp = DISAS_NEXT;
458
+ break;
459
+ }
460
+ gen_preserve_fp_state(s);
461
+ tmp = tcg_temp_new_i32();
462
+ sfpa = tcg_temp_new_i32();
463
+ fpscr = tcg_temp_new_i32();
464
+ gen_helper_vfp_get_fpscr(fpscr, cpu_env);
465
+ tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
466
+ control = load_cpu_field(v7m.control[M_REG_S]);
467
+ tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
468
+ tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
469
+ tcg_gen_or_i32(tmp, tmp, sfpa);
470
+ tcg_temp_free_i32(control);
471
+ /* Store result before updating FPSCR, in case it faults */
472
+ storefn(s, opaque, tmp);
473
+ /* If SFPA is zero then set FPSCR from FPDSCR_NS */
474
+ fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
475
+ zero = tcg_const_i32(0);
476
+ tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
477
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
478
+ tcg_temp_free_i32(zero);
479
+ tcg_temp_free_i32(sfpa);
480
+ tcg_temp_free_i32(fpdscr);
481
+ tcg_temp_free_i32(fpscr);
482
+ break;
483
+ }
484
+ case ARM_VFP_VPR:
485
+ /* Behaves as NOP if not privileged */
486
+ if (IS_USER(s)) {
487
+ break;
488
+ }
489
+ tmp = load_cpu_field(v7m.vpr);
490
+ storefn(s, opaque, tmp);
491
+ break;
492
+ case ARM_VFP_P0:
493
+ tmp = load_cpu_field(v7m.vpr);
494
+ tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
495
+ storefn(s, opaque, tmp);
496
+ break;
497
+ default:
498
+ g_assert_not_reached();
499
+ }
500
+
501
+ if (lab_end) {
502
+ gen_set_label(lab_end);
503
+ }
504
+ if (lookup_tb) {
505
+ gen_lookup_tb(s);
506
+ }
507
+ return true;
508
+}
509
+
510
+static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
511
+{
512
+ arg_VMSR_VMRS *a = opaque;
513
+
514
+ if (a->rt == 15) {
515
+ /* Set the 4 flag bits in the CPSR */
516
+ gen_set_nzcv(value);
517
+ tcg_temp_free_i32(value);
518
+ } else {
519
+ store_reg(s, a->rt, value);
520
+ }
521
+}
522
+
523
+static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
524
+{
525
+ arg_VMSR_VMRS *a = opaque;
526
+
527
+ return load_reg(s, a->rt);
528
+}
529
+
530
+static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
531
+{
532
+ /*
533
+ * Accesses to R15 are UNPREDICTABLE; we choose to undef.
534
+ * FPSCR -> r15 is a special case which writes to the PSR flags;
535
+ * set a->reg to a special value to tell gen_M_fp_sysreg_read()
536
+ * we only care about the top 4 bits of FPSCR there.
537
+ */
538
+ if (a->rt == 15) {
539
+ if (a->l && a->reg == ARM_VFP_FPSCR) {
540
+ a->reg = QEMU_VFP_FPSCR_NZCV;
541
+ } else {
542
+ return false;
543
+ }
544
+ }
545
+
546
+ if (a->l) {
547
+ /* VMRS, move FP system register to gp register */
548
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
549
+ } else {
550
+ /* VMSR, move gp register to FP system register */
551
+ return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
552
+ }
553
+}
554
+
555
+static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
556
+{
557
+ arg_vldr_sysreg *a = opaque;
558
+ uint32_t offset = a->imm;
559
+ TCGv_i32 addr;
560
+
561
+ if (!a->a) {
562
+ offset = -offset;
563
+ }
564
+
565
+ addr = load_reg(s, a->rn);
566
+ if (a->p) {
567
+ tcg_gen_addi_i32(addr, addr, offset);
568
+ }
569
+
570
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
571
+ gen_helper_v8m_stackcheck(cpu_env, addr);
572
+ }
573
+
574
+ gen_aa32_st_i32(s, value, addr, get_mem_index(s),
575
+ MO_UL | MO_ALIGN | s->be_data);
576
+ tcg_temp_free_i32(value);
577
+
578
+ if (a->w) {
579
+ /* writeback */
580
+ if (!a->p) {
581
+ tcg_gen_addi_i32(addr, addr, offset);
582
+ }
583
+ store_reg(s, a->rn, addr);
584
+ } else {
585
+ tcg_temp_free_i32(addr);
586
+ }
587
+}
588
+
589
+static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
590
+{
591
+ arg_vldr_sysreg *a = opaque;
592
+ uint32_t offset = a->imm;
593
+ TCGv_i32 addr;
594
+ TCGv_i32 value = tcg_temp_new_i32();
595
+
596
+ if (!a->a) {
597
+ offset = -offset;
598
+ }
599
+
600
+ addr = load_reg(s, a->rn);
601
+ if (a->p) {
602
+ tcg_gen_addi_i32(addr, addr, offset);
603
+ }
604
+
605
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
606
+ gen_helper_v8m_stackcheck(cpu_env, addr);
607
+ }
608
+
609
+ gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
610
+ MO_UL | MO_ALIGN | s->be_data);
611
+
612
+ if (a->w) {
613
+ /* writeback */
614
+ if (!a->p) {
615
+ tcg_gen_addi_i32(addr, addr, offset);
616
+ }
617
+ store_reg(s, a->rn, addr);
618
+ } else {
619
+ tcg_temp_free_i32(addr);
620
+ }
621
+ return value;
622
+}
623
+
624
+static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
625
+{
626
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
627
+ return false;
628
+ }
629
+ if (a->rn == 15) {
630
+ return false;
631
+ }
632
+ return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
633
+}
634
+
635
+static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
636
+{
637
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
638
+ return false;
639
+ }
640
+ if (a->rn == 15) {
641
+ return false;
642
+ }
643
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
644
+}
645
+
646
static bool trans_NOCP(DisasContext *s, arg_nocp *a)
647
{
648
/*
649
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
650
index XXXXXXX..XXXXXXX 100644
651
--- a/target/arm/translate-vfp.c
652
+++ b/target/arm/translate-vfp.c
653
@@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top)
654
* Generate code for M-profile lazy FP state preservation if needed;
655
* this corresponds to the pseudocode PreserveFPState() function.
656
*/
657
-static void gen_preserve_fp_state(DisasContext *s)
658
+void gen_preserve_fp_state(DisasContext *s)
659
{
660
if (s->v7m_lspact) {
661
/*
662
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
663
return true;
664
}
665
666
-/*
667
- * M-profile provides two different sets of instructions that can
668
- * access floating point system registers: VMSR/VMRS (which move
669
- * to/from a general purpose register) and VLDR/VSTR sysreg (which
670
- * move directly to/from memory). In some cases there are also side
671
- * effects which must happen after any write to memory (which could
672
- * cause an exception). So we implement the common logic for the
673
- * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
674
- * which take pointers to callback functions which will perform the
675
- * actual "read/write general purpose register" and "read/write
676
- * memory" operations.
677
- */
678
-
679
-/*
680
- * Emit code to store the sysreg to its final destination; frees the
681
- * TCG temp 'value' it is passed.
682
- */
683
-typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
684
-/*
685
- * Emit code to load the value to be copied to the sysreg; returns
686
- * a new TCG temporary
687
- */
688
-typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
689
-
690
-/* Common decode/access checks for fp sysreg read/write */
691
-typedef enum FPSysRegCheckResult {
692
- FPSysRegCheckFailed, /* caller should return false */
693
- FPSysRegCheckDone, /* caller should return true */
694
- FPSysRegCheckContinue, /* caller should continue generating code */
695
-} FPSysRegCheckResult;
696
-
697
-static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
698
-{
699
- if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
700
- return FPSysRegCheckFailed;
701
- }
702
-
703
- switch (regno) {
704
- case ARM_VFP_FPSCR:
705
- case QEMU_VFP_FPSCR_NZCV:
706
- break;
707
- case ARM_VFP_FPSCR_NZCVQC:
708
- if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
709
- return FPSysRegCheckFailed;
710
- }
711
- break;
712
- case ARM_VFP_FPCXT_S:
713
- case ARM_VFP_FPCXT_NS:
714
- if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
715
- return FPSysRegCheckFailed;
716
- }
717
- if (!s->v8m_secure) {
718
- return FPSysRegCheckFailed;
719
- }
720
- break;
721
- case ARM_VFP_VPR:
722
- case ARM_VFP_P0:
723
- if (!dc_isar_feature(aa32_mve, s)) {
724
- return FPSysRegCheckFailed;
725
- }
726
- break;
727
- default:
728
- return FPSysRegCheckFailed;
729
- }
730
-
731
- /*
732
- * FPCXT_NS is a special case: it has specific handling for
733
- * "current FP state is inactive", and must do the PreserveFPState()
734
- * but not the usual full set of actions done by ExecuteFPCheck().
735
- * So we don't call vfp_access_check() and the callers must handle this.
736
- */
737
- if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
738
- return FPSysRegCheckDone;
739
- }
740
- return FPSysRegCheckContinue;
741
-}
742
-
743
-static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
744
- TCGLabel *label)
745
-{
746
- /*
747
- * FPCXT_NS is a special case: it has specific handling for
748
- * "current FP state is inactive", and must do the PreserveFPState()
749
- * but not the usual full set of actions done by ExecuteFPCheck().
750
- * We don't have a TB flag that matches the fpInactive check, so we
751
- * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
752
- *
753
- * Emit code that checks fpInactive and does a conditional
754
- * branch to label based on it:
755
- * if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
756
- * if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
757
- */
758
- assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);
759
-
760
- /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
761
- TCGv_i32 aspen, fpca;
762
- aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
763
- fpca = load_cpu_field(v7m.control[M_REG_S]);
764
- tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
765
- tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
766
- tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
767
- tcg_gen_or_i32(fpca, fpca, aspen);
768
- tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
769
- tcg_temp_free_i32(aspen);
770
- tcg_temp_free_i32(fpca);
771
-}
772
-
773
-static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
774
- fp_sysreg_loadfn *loadfn,
775
- void *opaque)
776
-{
777
- /* Do a write to an M-profile floating point system register */
778
- TCGv_i32 tmp;
779
- TCGLabel *lab_end = NULL;
780
-
781
- switch (fp_sysreg_checks(s, regno)) {
782
- case FPSysRegCheckFailed:
783
- return false;
784
- case FPSysRegCheckDone:
785
- return true;
786
- case FPSysRegCheckContinue:
787
- break;
-        }
-
-    switch (regno) {
-    case ARM_VFP_FPSCR:
-        tmp = loadfn(s, opaque);
-        gen_helper_vfp_set_fpscr(cpu_env, tmp);
-        tcg_temp_free_i32(tmp);
-        gen_lookup_tb(s);
-        break;
-    case ARM_VFP_FPSCR_NZCVQC:
-    {
-        TCGv_i32 fpscr;
-        tmp = loadfn(s, opaque);
-        if (dc_isar_feature(aa32_mve, s)) {
-            /* QC is only present for MVE; otherwise RES0 */
-            TCGv_i32 qc = tcg_temp_new_i32();
-            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
-            /*
-             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
-             * here writing the same value into all elements is simplest.
-             */
-            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
-                                 16, 16, qc);
-        }
-        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
-        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
-        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
-        tcg_gen_or_i32(fpscr, fpscr, tmp);
-        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
-        tcg_temp_free_i32(tmp);
-        break;
-    }
-    case ARM_VFP_FPCXT_NS:
-        lab_end = gen_new_label();
-        /* fpInactive case: write is a NOP, so branch to end */
-        gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
-        /*
-         * !fpInactive: if FPU disabled, take NOCP exception;
-         * otherwise PreserveFPState(), and then FPCXT_NS writes
-         * behave the same as FPCXT_S writes.
-         */
-        if (s->fp_excp_el) {
-            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
-                               syn_uncategorized(), s->fp_excp_el);
-            /*
-             * This was only a conditional exception, so override
-             * gen_exception_insn()'s default to DISAS_NORETURN
-             */
-            s->base.is_jmp = DISAS_NEXT;
-            break;
-        }
-        gen_preserve_fp_state(s);
-        /* fall through */
-    case ARM_VFP_FPCXT_S:
-    {
-        TCGv_i32 sfpa, control;
-        /*
-         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
-         * bits [27:0] from value and zeroes bits [31:28].
-         */
-        tmp = loadfn(s, opaque);
-        sfpa = tcg_temp_new_i32();
-        tcg_gen_shri_i32(sfpa, tmp, 31);
-        control = load_cpu_field(v7m.control[M_REG_S]);
-        tcg_gen_deposit_i32(control, control, sfpa,
-                            R_V7M_CONTROL_SFPA_SHIFT, 1);
-        store_cpu_field(control, v7m.control[M_REG_S]);
-        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
-        gen_helper_vfp_set_fpscr(cpu_env, tmp);
-        tcg_temp_free_i32(tmp);
-        tcg_temp_free_i32(sfpa);
-        break;
-    }
-    case ARM_VFP_VPR:
-        /* Behaves as NOP if not privileged */
-        if (IS_USER(s)) {
-            break;
-        }
-        tmp = loadfn(s, opaque);
-        store_cpu_field(tmp, v7m.vpr);
-        break;
-    case ARM_VFP_P0:
-    {
-        TCGv_i32 vpr;
-        tmp = loadfn(s, opaque);
-        vpr = load_cpu_field(v7m.vpr);
-        tcg_gen_deposit_i32(vpr, vpr, tmp,
-                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
-        store_cpu_field(vpr, v7m.vpr);
-        tcg_temp_free_i32(tmp);
-        break;
-    }
-    default:
-        g_assert_not_reached();
-    }
-    if (lab_end) {
-        gen_set_label(lab_end);
-    }
-    return true;
-}
-
-static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
-                                 fp_sysreg_storefn *storefn,
-                                 void *opaque)
-{
-    /* Do a read from an M-profile floating point system register */
-    TCGv_i32 tmp;
-    TCGLabel *lab_end = NULL;
-    bool lookup_tb = false;
-
-    switch (fp_sysreg_checks(s, regno)) {
-    case FPSysRegCheckFailed:
-        return false;
-    case FPSysRegCheckDone:
-        return true;
-    case FPSysRegCheckContinue:
-        break;
-    }
-
-    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
-        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
-        regno = QEMU_VFP_FPSCR_NZCV;
-    }
-
-    switch (regno) {
-    case ARM_VFP_FPSCR:
-        tmp = tcg_temp_new_i32();
-        gen_helper_vfp_get_fpscr(tmp, cpu_env);
-        storefn(s, opaque, tmp);
-        break;
-    case ARM_VFP_FPSCR_NZCVQC:
-        tmp = tcg_temp_new_i32();
-        gen_helper_vfp_get_fpscr(tmp, cpu_env);
-        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
-        storefn(s, opaque, tmp);
-        break;
-    case QEMU_VFP_FPSCR_NZCV:
-        /*
-         * Read just NZCV; this is a special case to avoid the
-         * helper call for the "VMRS to CPSR.NZCV" insn.
-         */
-        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
-        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
-        storefn(s, opaque, tmp);
-        break;
-    case ARM_VFP_FPCXT_S:
-    {
-        TCGv_i32 control, sfpa, fpscr;
-        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
-        tmp = tcg_temp_new_i32();
-        sfpa = tcg_temp_new_i32();
-        gen_helper_vfp_get_fpscr(tmp, cpu_env);
-        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
-        control = load_cpu_field(v7m.control[M_REG_S]);
-        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
-        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
-        tcg_gen_or_i32(tmp, tmp, sfpa);
-        tcg_temp_free_i32(sfpa);
-        /*
-         * Store result before updating FPSCR etc, in case
-         * it is a memory write which causes an exception.
-         */
-        storefn(s, opaque, tmp);
-        /*
-         * Now we must reset FPSCR from FPDSCR_NS, and clear
-         * CONTROL.SFPA; so we'll end the TB here.
-         */
-        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
-        store_cpu_field(control, v7m.control[M_REG_S]);
-        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
-        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
-        tcg_temp_free_i32(fpscr);
-        lookup_tb = true;
-        break;
-    }
-    case ARM_VFP_FPCXT_NS:
-    {
-        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
-        TCGLabel *lab_active = gen_new_label();
-
-        lookup_tb = true;
-
-        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
-        /* fpInactive case: reads as FPDSCR_NS */
-        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
-        storefn(s, opaque, tmp);
-        lab_end = gen_new_label();
-        tcg_gen_br(lab_end);
-
-        gen_set_label(lab_active);
-        /*
-         * !fpInactive: if FPU disabled, take NOCP exception;
-         * otherwise PreserveFPState(), and then FPCXT_NS
-         * reads the same as FPCXT_S.
-         */
-        if (s->fp_excp_el) {
-            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
-                               syn_uncategorized(), s->fp_excp_el);
-            /*
-             * This was only a conditional exception, so override
-             * gen_exception_insn()'s default to DISAS_NORETURN
-             */
-            s->base.is_jmp = DISAS_NEXT;
-            break;
-        }
-        gen_preserve_fp_state(s);
-        tmp = tcg_temp_new_i32();
-        sfpa = tcg_temp_new_i32();
-        fpscr = tcg_temp_new_i32();
-        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
-        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
-        control = load_cpu_field(v7m.control[M_REG_S]);
-        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
-        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
-        tcg_gen_or_i32(tmp, tmp, sfpa);
-        tcg_temp_free_i32(control);
-        /* Store result before updating FPSCR, in case it faults */
-        storefn(s, opaque, tmp);
-        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
-        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
-        zero = tcg_const_i32(0);
-        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
-        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
-        tcg_temp_free_i32(zero);
-        tcg_temp_free_i32(sfpa);
-        tcg_temp_free_i32(fpdscr);
-        tcg_temp_free_i32(fpscr);
-        break;
-    }
-    case ARM_VFP_VPR:
-        /* Behaves as NOP if not privileged */
-        if (IS_USER(s)) {
-            break;
-        }
-        tmp = load_cpu_field(v7m.vpr);
-        storefn(s, opaque, tmp);
-        break;
-    case ARM_VFP_P0:
-        tmp = load_cpu_field(v7m.vpr);
-        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
-        storefn(s, opaque, tmp);
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    if (lab_end) {
-        gen_set_label(lab_end);
-    }
-    if (lookup_tb) {
-        gen_lookup_tb(s);
-    }
-    return true;
-}
-
-static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
-{
-    arg_VMSR_VMRS *a = opaque;
-
-    if (a->rt == 15) {
-        /* Set the 4 flag bits in the CPSR */
-        gen_set_nzcv(value);
-        tcg_temp_free_i32(value);
-    } else {
-        store_reg(s, a->rt, value);
-    }
-}
-
-static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
-{
-    arg_VMSR_VMRS *a = opaque;
-
-    return load_reg(s, a->rt);
-}
-
-static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
-{
-    /*
-     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
-     * FPSCR -> r15 is a special case which writes to the PSR flags;
-     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
-     * we only care about the top 4 bits of FPSCR there.
-     */
-    if (a->rt == 15) {
-        if (a->l && a->reg == ARM_VFP_FPSCR) {
-            a->reg = QEMU_VFP_FPSCR_NZCV;
-        } else {
-            return false;
-        }
-    }
-
-    if (a->l) {
-        /* VMRS, move FP system register to gp register */
-        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
-    } else {
-        /* VMSR, move gp register to FP system register */
-        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
-    }
-}
-
 static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
 {
     TCGv_i32 tmp;
     bool ignore_vfp_enabled = false;

     if (arm_dc_feature(s, ARM_FEATURE_M)) {
-        return gen_M_VMSR_VMRS(s, a);
+        /* M profile version was already handled in m-nocp.decode */
+        return false;
     }

     if (!dc_isar_feature(aa32_fpsp_v2, s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
         return true;
     }

-static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
-{
-    arg_vldr_sysreg *a = opaque;
-    uint32_t offset = a->imm;
-    TCGv_i32 addr;
-
-    if (!a->a) {
-        offset = -offset;
-    }
-
-    addr = load_reg(s, a->rn);
-    if (a->p) {
-        tcg_gen_addi_i32(addr, addr, offset);
-    }
-
-    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
-        gen_helper_v8m_stackcheck(cpu_env, addr);
-    }
-
-    gen_aa32_st_i32(s, value, addr, get_mem_index(s),
-                    MO_UL | MO_ALIGN | s->be_data);
-    tcg_temp_free_i32(value);
-
-    if (a->w) {
-        /* writeback */
-        if (!a->p) {
-            tcg_gen_addi_i32(addr, addr, offset);
-        }
-        store_reg(s, a->rn, addr);
-    } else {
-        tcg_temp_free_i32(addr);
-    }
-}
-
-static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
-{
-    arg_vldr_sysreg *a = opaque;
-    uint32_t offset = a->imm;
-    TCGv_i32 addr;
-    TCGv_i32 value = tcg_temp_new_i32();
-
-    if (!a->a) {
-        offset = -offset;
-    }
-
-    addr = load_reg(s, a->rn);
-    if (a->p) {
-        tcg_gen_addi_i32(addr, addr, offset);
-    }
-
-    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
-        gen_helper_v8m_stackcheck(cpu_env, addr);
-    }
-
-    gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
-                    MO_UL | MO_ALIGN | s->be_data);
-
-    if (a->w) {
-        /* writeback */
-        if (!a->p) {
-            tcg_gen_addi_i32(addr, addr, offset);
-        }
-        store_reg(s, a->rn, addr);
-    } else {
-        tcg_temp_free_i32(addr);
-    }
-    return value;
-}
-
-static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
-{
-    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
-        return false;
-    }
-    if (a->rn == 15) {
-        return false;
-    }
-    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
-}
-
-static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
-{
-    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
-        return false;
-    }
-    if (a->rn == 15) {
-        return false;
-    }
-    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
-}

 static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
 {
--
2.20.1

-            break;
-        }
-
-    case 4: /* Replicate(Zeros(8):imm8, 4) */
-    case 5: /* Replicate(imm8:Zeros(8), 4) */
-    {
-        int shift = (cmode_3_1 & 0x1) * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 16);
-        break;
-    }
-    case 6:
-        if (cmode_0) {
-            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
-            imm = (abcdefgh << 16) | 0xffff;
-        } else {
-            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
-            imm = (abcdefgh << 8) | 0xff;
-        }
-        imm = bitfield_replicate(imm, 32);
-        break;
-    case 7:
-        if (!cmode_0 && !is_neg) {
-            imm = bitfield_replicate(abcdefgh, 8);
-        } else if (!cmode_0 && is_neg) {
-            int i;
-            imm = 0;
-            for (i = 0; i < 8; i++) {
-                if ((abcdefgh) & (1 << i)) {
-                    imm |= 0xffULL << (i * 8);
-                }
-            }
-        } else if (cmode_0) {
-            if (is_neg) {
-                imm = (abcdefgh & 0x3f) << 48;
-                if (abcdefgh & 0x80) {
-                    imm |= 0x8000000000000000ULL;
-                }
-                if (abcdefgh & 0x40) {
-                    imm |= 0x3fc0000000000000ULL;
-                } else {
-                    imm |= 0x4000000000000000ULL;
-                }
-            } else {
-                if (o2) {
-                    /* FMOV (vector, immediate) - half-precision */
-                    imm = vfp_expand_imm(MO_16, abcdefgh);
-                    /* now duplicate across the lanes */
-                    imm = bitfield_replicate(imm, 16);
-                } else {
-                    imm = (abcdefgh & 0x3f) << 19;
-                    if (abcdefgh & 0x80) {
-                        imm |= 0x80000000;
-                    }
-                    if (abcdefgh & 0x40) {
-                        imm |= 0x3e000000;
-                    } else {
-                        imm |= 0x40000000;
-                    }
-                    imm |= (imm << 32);
-                }
-            }
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    if (cmode_3_1 != 7 && is_neg) {
-        imm = ~imm;
+    if (cmode == 15 && o2 && !is_neg) {
+        /* FMOV (vector, immediate) - half-precision */
+        imm = vfp_expand_imm(MO_16, abcdefgh);
+        /* now duplicate across the lanes */
+        imm = bitfield_replicate(imm, 16);
+    } else {
+        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
     }

     if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
     case 14:
         if (op) {
             /*
-             * This is the only case where the top and bottom 32 bits
-             * of the encoded constant differ.
+             * This and cmode == 15 op == 1 are the only cases where
+             * the top and bottom 32 bits of the encoded constant differ.
              */
             uint64_t imm64 = 0;
             int n;
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
         imm |= (imm << 8) | (imm << 16) | (imm << 24);
         break;
     case 15:
+        if (op) {
+            /* Reserved encoding for AArch32; valid for AArch64 */
+            uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
+            if (imm & 0x80) {
+                imm64 |= 0x8000000000000000ULL;
+            }
+            if (imm & 0x40) {
+                imm64 |= 0x3fc0000000000000ULL;
+            } else {
+                imm64 |= 0x4000000000000000ULL;
+            }
+            return imm64;
+        }
         imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
               | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
         break;
--
2.20.1
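
As a standalone cross-check of the cmode == 15, op == 1 expansion that the
asimd_imm_const() hunk above introduces (the bit manipulation below is
lifted from that hunk; the example value in main() is my own):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* abcdefgh -> FP64 constant: sign from a, exponent from b, mantissa
     * from cdefgh, matching the patch above. */
    static uint64_t expand_cmode15_op1(uint32_t imm)
    {
        uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;

        if (imm & 0x80) {
            imm64 |= 0x8000000000000000ULL;
        }
        if (imm & 0x40) {
            imm64 |= 0x3fc0000000000000ULL;
        } else {
            imm64 |= 0x4000000000000000ULL;
        }
        return imm64;
    }

    int main(void)
    {
        /* 0x70 should print 0x3ff0000000000000, i.e. FP64 1.0 */
        printf("0x%016" PRIx64 "\n", expand_cmode15_op1(0x70));
        return 0;
    }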
Deleted patch
1
A few subcases of VLDR/VSTR sysreg succeed but do not perform a
2
memory access:
3
* VSTR of VPR when unprivileged
4
* VLDR to VPR when unprivileged
5
* VLDR to FPCXT_NS when fpInactive
6
1
7
In these cases, even though we don't do the memory access we should
8
still update the base register and perform the stack limit check if
9
the insn's addressing mode specifies writeback. Our implementation
10
failed to do this, because we handle these side-effects inside the
11
memory_to_fp_sysreg() and fp_sysreg_to_memory() callback functions,
12
which are only called if there's something to load or store.
13
14
Fix this by adding an extra argument to the callbacks which is set to
15
true to actually perform the access and false to only do side effects
16
like writeback, and calling the callback with do_access = false
17
for the three cases listed above.
18
19
This produces slightly suboptimal code for the case of a write
20
to FPCXT_NS when the FPU is inactive and the insn didn't have
21
side effects (ie no writeback, or via VMSR), in which case we'll
22
generate a conditional branch over an unconditional branch.
23
But this doesn't seem to be important enough to merit requiring
24
the callback to report back whether it generated any code or not.
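
To make the callback shape concrete, here is a standalone C sketch of the
idea (illustrative only, not QEMU code; all names in it are invented for
the example): the store callback takes a do_access flag, and base-register
writeback happens whether or not the access itself is performed.

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        int base;       /* stand-in for the insn's base register */
        int offset;
        bool writeback;
    } AddrMode;

    /* Does the store only when do_access is true, but always applies
     * addressing side effects such as base-register writeback. */
    static void store_cb(AddrMode *a, int value, bool do_access)
    {
        int addr = a->base + a->offset;

        if (do_access) {
            printf("store %d to address %d\n", value, addr);
        }
        if (a->writeback) {
            a->base = addr;  /* side effect even when the store is skipped */
        }
    }

    int main(void)
    {
        AddrMode a = { .base = 100, .offset = 4, .writeback = true };

        store_cb(&a, 42, true);   /* normal case: access plus writeback */
        store_cb(&a, 0, false);   /* NOP case: writeback only */
        printf("final base = %d\n", a.base);
        return 0;
    }

Keeping the side effects inside the callback means the sysreg code only has
to decide whether to pass do_access = true or false, which is exactly the
fix described above.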
25
26
Cc: qemu-stable@nongnu.org
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Message-id: 20210618141019.10671-5-peter.maydell@linaro.org
30
---
31
target/arm/translate-m-nocp.c | 102 ++++++++++++++++++++++++----------
32
1 file changed, 72 insertions(+), 30 deletions(-)
33
34
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/translate-m-nocp.c
37
+++ b/target/arm/translate-m-nocp.c
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
39
40
/*
41
* Emit code to store the sysreg to its final destination; frees the
42
- * TCG temp 'value' it is passed.
43
+ * TCG temp 'value' it is passed. do_access is true to do the store,
44
+ * and false to skip it and only perform side-effects like base
45
+ * register writeback.
46
*/
47
-typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
48
+typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
49
+ bool do_access);
50
/*
51
* Emit code to load the value to be copied to the sysreg; returns
52
- * a new TCG temporary
53
+ * a new TCG temporary. do_access is true to do the store,
54
+ * and false to skip it and only perform side-effects like base
55
+ * register writeback.
56
*/
57
-typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
58
+typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
59
+ bool do_access);
60
61
/* Common decode/access checks for fp sysreg read/write */
62
typedef enum FPSysRegCheckResult {
63
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
64
65
switch (regno) {
66
case ARM_VFP_FPSCR:
67
- tmp = loadfn(s, opaque);
68
+ tmp = loadfn(s, opaque, true);
69
gen_helper_vfp_set_fpscr(cpu_env, tmp);
70
tcg_temp_free_i32(tmp);
71
gen_lookup_tb(s);
72
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
73
case ARM_VFP_FPSCR_NZCVQC:
74
{
75
TCGv_i32 fpscr;
76
- tmp = loadfn(s, opaque);
77
+ tmp = loadfn(s, opaque, true);
78
if (dc_isar_feature(aa32_mve, s)) {
79
/* QC is only present for MVE; otherwise RES0 */
80
TCGv_i32 qc = tcg_temp_new_i32();
81
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
82
break;
83
}
84
case ARM_VFP_FPCXT_NS:
85
+ {
86
+ TCGLabel *lab_active = gen_new_label();
87
+
88
lab_end = gen_new_label();
89
- /* fpInactive case: write is a NOP, so branch to end */
90
- gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
91
+ gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
92
+ /*
93
+ * fpInactive case: write is a NOP, so only do side effects
94
+ * like register writeback before we branch to end
95
+ */
96
+ loadfn(s, opaque, false);
97
+ tcg_gen_br(lab_end);
98
+
99
+ gen_set_label(lab_active);
100
/*
101
* !fpInactive: if FPU disabled, take NOCP exception;
102
* otherwise PreserveFPState(), and then FPCXT_NS writes
103
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
104
break;
105
}
106
gen_preserve_fp_state(s);
107
- /* fall through */
108
+ }
109
+ /* fall through */
110
case ARM_VFP_FPCXT_S:
111
{
112
TCGv_i32 sfpa, control;
113
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
114
* Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
115
* bits [27:0] from value and zeroes bits [31:28].
116
*/
117
- tmp = loadfn(s, opaque);
118
+ tmp = loadfn(s, opaque, true);
119
sfpa = tcg_temp_new_i32();
120
tcg_gen_shri_i32(sfpa, tmp, 31);
121
control = load_cpu_field(v7m.control[M_REG_S]);
122
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
123
case ARM_VFP_VPR:
124
/* Behaves as NOP if not privileged */
125
if (IS_USER(s)) {
126
+ loadfn(s, opaque, false);
127
break;
128
}
129
- tmp = loadfn(s, opaque);
130
+ tmp = loadfn(s, opaque, true);
131
store_cpu_field(tmp, v7m.vpr);
132
break;
133
case ARM_VFP_P0:
134
{
135
TCGv_i32 vpr;
136
- tmp = loadfn(s, opaque);
137
+ tmp = loadfn(s, opaque, true);
138
vpr = load_cpu_field(v7m.vpr);
139
tcg_gen_deposit_i32(vpr, vpr, tmp,
140
R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
141
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
142
case ARM_VFP_FPSCR:
143
tmp = tcg_temp_new_i32();
144
gen_helper_vfp_get_fpscr(tmp, cpu_env);
145
- storefn(s, opaque, tmp);
146
+ storefn(s, opaque, tmp, true);
147
break;
148
case ARM_VFP_FPSCR_NZCVQC:
149
tmp = tcg_temp_new_i32();
150
gen_helper_vfp_get_fpscr(tmp, cpu_env);
151
tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
152
- storefn(s, opaque, tmp);
153
+ storefn(s, opaque, tmp, true);
154
break;
155
case QEMU_VFP_FPSCR_NZCV:
156
/*
157
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
158
*/
159
tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
160
tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
161
- storefn(s, opaque, tmp);
162
+ storefn(s, opaque, tmp, true);
163
break;
164
case ARM_VFP_FPCXT_S:
165
{
166
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
167
* Store result before updating FPSCR etc, in case
168
* it is a memory write which causes an exception.
169
*/
170
- storefn(s, opaque, tmp);
171
+ storefn(s, opaque, tmp, true);
172
/*
173
* Now we must reset FPSCR from FPDSCR_NS, and clear
174
* CONTROL.SFPA; so we'll end the TB here.
175
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
176
gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
177
/* fpInactive case: reads as FPDSCR_NS */
178
TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
179
- storefn(s, opaque, tmp);
180
+ storefn(s, opaque, tmp, true);
181
lab_end = gen_new_label();
182
tcg_gen_br(lab_end);
183
184
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
185
tcg_gen_or_i32(tmp, tmp, sfpa);
186
tcg_temp_free_i32(control);
187
/* Store result before updating FPSCR, in case it faults */
188
- storefn(s, opaque, tmp);
189
+ storefn(s, opaque, tmp, true);
190
/* If SFPA is zero then set FPSCR from FPDSCR_NS */
191
fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
192
zero = tcg_const_i32(0);
193
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
194
case ARM_VFP_VPR:
195
/* Behaves as NOP if not privileged */
196
if (IS_USER(s)) {
197
+ storefn(s, opaque, NULL, false);
198
break;
199
}
200
tmp = load_cpu_field(v7m.vpr);
201
- storefn(s, opaque, tmp);
202
+ storefn(s, opaque, tmp, true);
203
break;
204
case ARM_VFP_P0:
205
tmp = load_cpu_field(v7m.vpr);
206
tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
207
- storefn(s, opaque, tmp);
208
+ storefn(s, opaque, tmp, true);
209
break;
210
default:
211
g_assert_not_reached();
212
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
213
return true;
214
}
215
216
-static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
217
+static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
218
+ bool do_access)
219
{
220
arg_VMSR_VMRS *a = opaque;
221
222
+ if (!do_access) {
223
+ return;
224
+ }
225
+
226
if (a->rt == 15) {
227
/* Set the 4 flag bits in the CPSR */
228
gen_set_nzcv(value);
229
@@ -XXX,XX +XXX,XX @@ static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
230
}
231
}
232
233
-static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
234
+static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
235
{
236
arg_VMSR_VMRS *a = opaque;
237
238
+ if (!do_access) {
239
+ return NULL;
240
+ }
241
return load_reg(s, a->rt);
242
}
243
244
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
245
}
246
}
247
248
-static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
249
+static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
250
+ bool do_access)
251
{
252
arg_vldr_sysreg *a = opaque;
253
uint32_t offset = a->imm;
254
@@ -XXX,XX +XXX,XX @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
255
offset = -offset;
256
}
257
258
+ if (!do_access && !a->w) {
259
+ return;
260
+ }
261
+
262
addr = load_reg(s, a->rn);
263
if (a->p) {
264
tcg_gen_addi_i32(addr, addr, offset);
265
@@ -XXX,XX +XXX,XX @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
266
gen_helper_v8m_stackcheck(cpu_env, addr);
267
}
268
269
- gen_aa32_st_i32(s, value, addr, get_mem_index(s),
270
- MO_UL | MO_ALIGN | s->be_data);
271
- tcg_temp_free_i32(value);
272
+ if (do_access) {
273
+ gen_aa32_st_i32(s, value, addr, get_mem_index(s),
274
+ MO_UL | MO_ALIGN | s->be_data);
275
+ tcg_temp_free_i32(value);
276
+ }
277
278
if (a->w) {
279
/* writeback */
280
@@ -XXX,XX +XXX,XX @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
281
}
282
}
283
284
-static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
285
+static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
286
+ bool do_access)
287
{
288
arg_vldr_sysreg *a = opaque;
289
uint32_t offset = a->imm;
290
TCGv_i32 addr;
291
- TCGv_i32 value = tcg_temp_new_i32();
292
+ TCGv_i32 value = NULL;
293
294
if (!a->a) {
295
offset = -offset;
296
}
297
298
+ if (!do_access && !a->w) {
299
+ return NULL;
300
+ }
301
+
302
addr = load_reg(s, a->rn);
303
if (a->p) {
304
tcg_gen_addi_i32(addr, addr, offset);
305
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
306
gen_helper_v8m_stackcheck(cpu_env, addr);
307
}
308
309
- gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
310
- MO_UL | MO_ALIGN | s->be_data);
311
+ if (do_access) {
312
+ value = tcg_temp_new_i32();
313
+ gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
314
+ MO_UL | MO_ALIGN | s->be_data);
315
+ }
316
317
if (a->w) {
318
/* writeback */
319
--
320
2.20.1
321
322
Deleted patch
1
Factor the code in full_vfp_access_check() which updates the
2
ownership of the FP context and creates a new FP context
3
out into its own function.
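
In shape this is the classic extract-function refactor; a minimal sketch of
the idea (hedged: this miniature is not the QEMU code, and its names are
invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct ctx {
        bool ownership_wrong;
        bool new_ctx_needed;
    };

    /* The block that used to be open-coded in the check function. */
    static void update_fp_context(struct ctx *s)
    {
        if (s->ownership_wrong) {
            s->ownership_wrong = false;  /* ...fix ownership flag... */
        }
        if (s->new_ctx_needed) {
            s->new_ctx_needed = false;   /* ...create the new context... */
        }
    }

    static bool access_check(struct ctx *s)
    {
        /* enable checks and lazy-state preservation elided */
        update_fp_context(s);  /* previously inline here */
        return true;
    }

    int main(void)
    {
        struct ctx s = { true, true };
        printf("%d\n", access_check(&s));
        return 0;
    }

The payoff comes in a later patch in this queue, where another caller wants
everything in the check except this context update.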
4
1
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210618141019.10671-6-peter.maydell@linaro.org
8
---
9
target/arm/translate-vfp.c | 104 +++++++++++++++++++++----------------
10
1 file changed, 58 insertions(+), 46 deletions(-)
11
12
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/translate-vfp.c
15
+++ b/target/arm/translate-vfp.c
16
@@ -XXX,XX +XXX,XX @@ void gen_preserve_fp_state(DisasContext *s)
17
}
18
}
19
20
+/*
21
+ * Generate code for M-profile FP context handling: update the
22
+ * ownership of the FP context, and create a new context if
23
+ * necessary. This corresponds to the parts of the pseudocode
24
+ * ExecuteFPCheck() after the initial PreserveFPState() call.
25
+ */
26
+static void gen_update_fp_context(DisasContext *s)
27
+{
28
+ /* Update ownership of FP context: set FPCCR.S to match current state */
29
+ if (s->v8m_fpccr_s_wrong) {
30
+ TCGv_i32 tmp;
31
+
32
+ tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
33
+ if (s->v8m_secure) {
34
+ tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
35
+ } else {
36
+ tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
37
+ }
38
+ store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
39
+ /* Don't need to do this for any further FP insns in this TB */
40
+ s->v8m_fpccr_s_wrong = false;
41
+ }
42
+
43
+ if (s->v7m_new_fp_ctxt_needed) {
44
+ /*
45
+ * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA,
46
+ * the FPSCR, and VPR.
47
+ */
48
+ TCGv_i32 control, fpscr;
49
+ uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
50
+
51
+ fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
52
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
53
+ tcg_temp_free_i32(fpscr);
54
+ if (dc_isar_feature(aa32_mve, s)) {
55
+ TCGv_i32 z32 = tcg_const_i32(0);
56
+ store_cpu_field(z32, v7m.vpr);
57
+ }
58
+
59
+ /*
60
+ * We don't need to arrange to end the TB, because the only
61
+ * parts of FPSCR which we cache in the TB flags are the VECLEN
62
+ * and VECSTRIDE, and those don't exist for M-profile.
63
+ */
64
+
65
+ if (s->v8m_secure) {
66
+ bits |= R_V7M_CONTROL_SFPA_MASK;
67
+ }
68
+ control = load_cpu_field(v7m.control[M_REG_S]);
69
+ tcg_gen_ori_i32(control, control, bits);
70
+ store_cpu_field(control, v7m.control[M_REG_S]);
71
+ /* Don't need to do this for any further FP insns in this TB */
72
+ s->v7m_new_fp_ctxt_needed = false;
73
+ }
74
+}
75
+
76
/*
77
* Check that VFP access is enabled. If it is, do the necessary
78
* M-profile lazy-FP handling and then return true.
79
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
80
/* Trigger lazy-state preservation if necessary */
81
gen_preserve_fp_state(s);
82
83
- /* Update ownership of FP context: set FPCCR.S to match current state */
84
- if (s->v8m_fpccr_s_wrong) {
85
- TCGv_i32 tmp;
86
-
87
- tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
88
- if (s->v8m_secure) {
89
- tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
90
- } else {
91
- tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
92
- }
93
- store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
94
- /* Don't need to do this for any further FP insns in this TB */
95
- s->v8m_fpccr_s_wrong = false;
96
- }
97
-
98
- if (s->v7m_new_fp_ctxt_needed) {
99
- /*
100
- * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA,
101
- * the FPSCR, and VPR.
102
- */
103
- TCGv_i32 control, fpscr;
104
- uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
105
-
106
- fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
107
- gen_helper_vfp_set_fpscr(cpu_env, fpscr);
108
- tcg_temp_free_i32(fpscr);
109
- if (dc_isar_feature(aa32_mve, s)) {
110
- TCGv_i32 z32 = tcg_const_i32(0);
111
- store_cpu_field(z32, v7m.vpr);
112
- }
113
-
114
- /*
115
- * We don't need to arrange to end the TB, because the only
116
- * parts of FPSCR which we cache in the TB flags are the VECLEN
117
- * and VECSTRIDE, and those don't exist for M-profile.
118
- */
119
-
120
- if (s->v8m_secure) {
121
- bits |= R_V7M_CONTROL_SFPA_MASK;
122
- }
123
- control = load_cpu_field(v7m.control[M_REG_S]);
124
- tcg_gen_ori_i32(control, control, bits);
125
- store_cpu_field(control, v7m.control[M_REG_S]);
126
- /* Don't need to do this for any further FP insns in this TB */
127
- s->v7m_new_fp_ctxt_needed = false;
128
- }
129
+ /* Update ownership of FP context and create new FP context if needed */
130
+ gen_update_fp_context(s);
131
}
132
133
return true;
134
--
135
2.20.1
136
137
Instead of open-coding the "take NOCP exception if FPU disabled,
otherwise call gen_preserve_fp_state()" code in the accessors for
FPCXT_NS, add an argument to vfp_access_check_m() which tells it to
skip the gen_update_fp_context() call, so we can use it for the
FPCXT_NS case.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210618141019.10671-8-peter.maydell@linaro.org
---
target/arm/translate-a32.h | 2 +-
target/arm/translate-m-nocp.c | 10 ++--------
target/arm/translate-vfp.c | 13 ++++++-----
3 files changed, 11 insertions(+), 14 deletions(-)

diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a32.h
+++ b/target/arm/translate-a32.h
@@ -XXX,XX +XXX,XX @@ bool disas_neon_shared(DisasContext *s, uint32_t insn);
 void load_reg_var(DisasContext *s, TCGv_i32 var, int reg);
 void arm_gen_condlabel(DisasContext *s);
 bool vfp_access_check(DisasContext *s);
-void gen_preserve_fp_state(DisasContext *s);
+bool vfp_access_check_m(DisasContext *s, bool skip_context_update);
 void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop);
 void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop);
 void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop);
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-m-nocp.c
+++ b/target/arm/translate-m-nocp.c
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
          * otherwise PreserveFPState(), and then FPCXT_NS writes
          * behave the same as FPCXT_S writes.
          */
-        if (s->fp_excp_el) {
-            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
-                               syn_uncategorized(), s->fp_excp_el);
+        if (!vfp_access_check_m(s, true)) {
             /*
              * This was only a conditional exception, so override
              * gen_exception_insn()'s default to DISAS_NORETURN
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
             s->base.is_jmp = DISAS_NEXT;
             break;
         }
-        gen_preserve_fp_state(s);
     }
     /* fall through */
     case ARM_VFP_FPCXT_S:
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
          * otherwise PreserveFPState(), and then FPCXT_NS
          * reads the same as FPCXT_S.
          */
-        if (s->fp_excp_el) {
-            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
-                               syn_uncategorized(), s->fp_excp_el);
+        if (!vfp_access_check_m(s, true)) {
             /*
              * This was only a conditional exception, so override
              * gen_exception_insn()'s default to DISAS_NORETURN
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
             s->base.is_jmp = DISAS_NEXT;
             break;
         }
-        gen_preserve_fp_state(s);
         tmp = tcg_temp_new_i32();
         sfpa = tcg_temp_new_i32();
         fpscr = tcg_temp_new_i32();
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top)
  * Generate code for M-profile lazy FP state preservation if needed;
  * this corresponds to the pseudocode PreserveFPState() function.
  */
-void gen_preserve_fp_state(DisasContext *s)
+static void gen_preserve_fp_state(DisasContext *s)
 {
     if (s->v7m_lspact) {
         /*
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
  * If VFP is enabled, do the necessary M-profile lazy-FP handling and then
  * return true. If not, emit code to generate an appropriate exception and
  * return false.
+ * skip_context_update is true to skip the "update FP context" part of this.
  */
-static bool vfp_access_check_m(DisasContext *s)
+bool vfp_access_check_m(DisasContext *s, bool skip_context_update)
 {
     if (s->fp_excp_el) {
         /*
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_m(DisasContext *s)
     /* Trigger lazy-state preservation if necessary */
     gen_preserve_fp_state(s);

-    /* Update ownership of FP context and create new FP context if needed */
-    gen_update_fp_context(s);
+    if (!skip_context_update) {
+        /* Update ownership of FP context and create new FP context if needed */
+        gen_update_fp_context(s);
+    }

     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_m(DisasContext *s)
 bool vfp_access_check(DisasContext *s)
 {
     if (arm_dc_feature(s, ARM_FEATURE_M)) {
-        return vfp_access_check_m(s);
+        return vfp_access_check_m(s, false);
     } else {
         return vfp_access_check_a(s, false);
     }
 }
--
2.20.1

Use dup_const() instead of bitfield_replicate() in
disas_simd_mod_imm().

(We can't replace the other use of bitfield_replicate() in this file,
in logic_imm_decode_wmask(), because that location needs to handle 2
and 4 bit elements, which dup_const() cannot.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-6-peter.maydell@linaro.org
---
target/arm/translate-a64.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
         /* FMOV (vector, immediate) - half-precision */
         imm = vfp_expand_imm(MO_16, abcdefgh);
         /* now duplicate across the lanes */
-        imm = bitfield_replicate(imm, 16);
+        imm = dup_const(MO_16, imm);
     } else {
         imm = asimd_imm_const(abcdefgh, cmode, is_neg);
     }
--
2.20.1
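
For reference, a standalone sketch of the replication equivalence the
dup_const() change relies on (illustrative only; QEMU's dup_const() is the
real implementation, this just demonstrates the 16-bit case):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fan a 16-bit value out across 64 bits with one multiply. */
    static uint64_t dup16(uint64_t x)
    {
        return (x & 0xffffULL) * 0x0001000100010001ULL;
    }

    /* The shift-and-or replication the multiply replaces. */
    static uint64_t replicate16(uint64_t x)
    {
        x &= 0xffffULL;
        for (int shift = 16; shift < 64; shift <<= 1) {
            x |= x << shift;
        }
        return x;
    }

    int main(void)
    {
        uint64_t v = 0x3c00;  /* fp16 1.0, as in the FMOV case above */

        printf("0x%016" PRIx64 "\n", dup16(v));       /* 0x3c003c003c003c00 */
        printf("0x%016" PRIx64 "\n", replicate16(v)); /* same value */
        return 0;
    }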
Deleted patch
1
Implement the forms of the MVE VLDR and VSTR insns which perform
2
non-widening loads of bytes, halfwords or words from memory into
3
vector elements of the same width (encodings T5, T6, T7).
4
1
5
(At the moment we know for MVE and M-profile in general that
6
vfp_access_check() can never return false, but we include the
7
conventional return-true-on-failure check for consistency
8
with non-M-profile translation code.)
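
As a rough plain-C model of the non-widening load semantics (a sketch under
simplifying assumptions, not the QEMU helper: the real implementation also
deals with ECI/beat handling and the architectural VPR.P0 predicate rules):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Load each byte lane of a 16-byte vector only if its mask bit is set;
     * unpredicated lanes keep their old contents in this model. */
    static void vldrb_model(uint8_t q[16], const uint8_t *mem, uint16_t mask)
    {
        for (int lane = 0; lane < 16; lane++) {
            if (mask & (1u << lane)) {
                q[lane] = mem[lane];
            }
        }
    }

    int main(void)
    {
        uint8_t mem[16], q[16];

        for (int i = 0; i < 16; i++) {
            mem[i] = (uint8_t)i;
        }
        memset(q, 0xff, sizeof(q));
        vldrb_model(q, mem, 0x00ff);  /* load only the low 8 lanes */
        for (int i = 0; i < 16; i++) {
            printf("%02x ", q[i]);
        }
        printf("\n");
        return 0;
    }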
9
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20210617121628.20116-2-peter.maydell@linaro.org
13
---
14
target/arm/{translate-mve.c => helper-mve.h} | 19 +-
15
target/arm/helper.h | 2 +
16
target/arm/internals.h | 11 ++
17
target/arm/mve.decode | 22 +++
18
target/arm/mve_helper.c | 172 +++++++++++++++++++
19
target/arm/translate-mve.c | 119 +++++++++++++
20
target/arm/meson.build | 1 +
21
7 files changed, 334 insertions(+), 12 deletions(-)
22
copy target/arm/{translate-mve.c => helper-mve.h} (61%)
23
create mode 100644 target/arm/mve_helper.c
24
25
diff --git a/target/arm/translate-mve.c b/target/arm/helper-mve.h
26
similarity index 61%
27
copy from target/arm/translate-mve.c
28
copy to target/arm/helper-mve.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate-mve.c
31
+++ b/target/arm/helper-mve.h
32
@@ -XXX,XX +XXX,XX @@
33
/*
34
- * ARM translation: M-profile MVE instructions
35
+ * M-profile MVE specific helper definitions
36
*
37
* Copyright (c) 2021 Linaro, Ltd.
38
*
39
@@ -XXX,XX +XXX,XX @@
40
* You should have received a copy of the GNU Lesser General Public
41
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
42
*/
43
-
44
-#include "qemu/osdep.h"
45
-#include "tcg/tcg-op.h"
46
-#include "tcg/tcg-op-gvec.h"
47
-#include "exec/exec-all.h"
48
-#include "exec/gen-icount.h"
49
-#include "translate.h"
50
-#include "translate-a32.h"
51
-
52
-/* Include the generated decoder */
53
-#include "decode-mve.c.inc"
54
+DEF_HELPER_FLAGS_3(mve_vldrb, TCG_CALL_NO_WG, void, env, ptr, i32)
55
+DEF_HELPER_FLAGS_3(mve_vldrh, TCG_CALL_NO_WG, void, env, ptr, i32)
56
+DEF_HELPER_FLAGS_3(mve_vldrw, TCG_CALL_NO_WG, void, env, ptr, i32)
57
+DEF_HELPER_FLAGS_3(mve_vstrb, TCG_CALL_NO_WG, void, env, ptr, i32)
58
+DEF_HELPER_FLAGS_3(mve_vstrh, TCG_CALL_NO_WG, void, env, ptr, i32)
59
+DEF_HELPER_FLAGS_3(mve_vstrw, TCG_CALL_NO_WG, void, env, ptr, i32)
60
diff --git a/target/arm/helper.h b/target/arm/helper.h
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/helper.h
63
+++ b/target/arm/helper.h
64
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
65
#include "helper-a64.h"
66
#include "helper-sve.h"
67
#endif
68
+
69
+#include "helper-mve.h"
70
diff --git a/target/arm/internals.h b/target/arm/internals.h
71
index XXXXXXX..XXXXXXX 100644
72
--- a/target/arm/internals.h
73
+++ b/target/arm/internals.h
74
@@ -XXX,XX +XXX,XX @@ static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
75
return ptr;
76
}
77
78
+/* Values for M-profile PSR.ECI for MVE insns */
79
+enum MVEECIState {
80
+ ECI_NONE = 0, /* No completed beats */
81
+ ECI_A0 = 1, /* Completed: A0 */
82
+ ECI_A0A1 = 2, /* Completed: A0, A1 */
83
+ /* 3 is reserved */
84
+ ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
85
+ ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
86
+ /* All other values reserved */
87
+};
88
+
89
#endif
90
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/mve.decode
93
+++ b/target/arm/mve.decode
94
@@ -XXX,XX +XXX,XX @@
95
#
96
# This file is processed by scripts/decodetree.py
97
#
98
+
99
+%qd 22:1 13:3
100
+
101
+&vldr_vstr rn qd imm p a w size l
102
+
103
+@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd
104
+
105
+# Vector loads and stores
106
+
107
+# Non-widening loads/stores (P=0 W=0 is 'related encoding')
108
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111100 ....... @vldr_vstr \
109
+ size=0 p=0 w=1
110
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111101 ....... @vldr_vstr \
111
+ size=1 p=0 w=1
112
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111110 ....... @vldr_vstr \
113
+ size=2 p=0 w=1
114
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111100 ....... @vldr_vstr \
115
+ size=0 p=1
116
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
117
+ size=1 p=1
118
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
119
+ size=2 p=1
120
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
121
new file mode 100644
122
index XXXXXXX..XXXXXXX
123
--- /dev/null
124
+++ b/target/arm/mve_helper.c
125
@@ -XXX,XX +XXX,XX @@
126
+/*
127
+ * M-profile MVE Operations
128
+ *
129
+ * Copyright (c) 2021 Linaro, Ltd.
130
+ *
131
+ * This library is free software; you can redistribute it and/or
132
+ * modify it under the terms of the GNU Lesser General Public
133
+ * License as published by the Free Software Foundation; either
134
+ * version 2.1 of the License, or (at your option) any later version.
135
+ *
136
+ * This library is distributed in the hope that it will be useful,
137
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
138
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
139
+ * Lesser General Public License for more details.
140
+ *
141
+ * You should have received a copy of the GNU Lesser General Public
142
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
143
+ */
144
+
145
+#include "qemu/osdep.h"
146
+#include "cpu.h"
147
+#include "internals.h"
148
+#include "vec_internal.h"
149
+#include "exec/helper-proto.h"
150
+#include "exec/cpu_ldst.h"
151
+#include "exec/exec-all.h"
152
+
153
+static uint16_t mve_element_mask(CPUARMState *env)
154
+{
155
+ /*
156
+ * Return the mask of which elements in the MVE vector should be
157
+ * updated. This is a combination of multiple things:
158
+ * (1) by default, we update every lane in the vector
159
+ * (2) VPT predication stores its state in the VPR register;
160
+ * (3) low-overhead-branch tail predication will mask out part
161
+ * the vector on the final iteration of the loop
162
+ * (4) if EPSR.ECI is set then we must execute only some beats
163
+ * of the insn
164
+ * We combine all these into a 16-bit result with the same semantics
165
+ * as VPR.P0: 0 to mask the lane, 1 if it is active.
166
+ * 8-bit vector ops will look at all bits of the result;
167
+ * 16-bit ops will look at bits 0, 2, 4, ...;
168
+ * 32-bit ops will look at bits 0, 4, 8 and 12.
169
+ * Compare pseudocode GetCurInstrBeat(), though that only returns
170
+ * the 4-bit slice of the mask corresponding to a single beat.
171
+ */
172
+ uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
173
+
174
+ if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
175
+ mask |= 0xff;
176
+ }
177
+ if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
178
+ mask |= 0xff00;
179
+ }
180
+
181
+ if (env->v7m.ltpsize < 4 &&
182
+ env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
183
+ /*
184
+ * Tail predication active, and this is the last loop iteration.
185
+ * The element size is (1 << ltpsize), and we only want to process
186
+ * loopcount elements, so we want to retain the least significant
187
+ * (loopcount * esize) predicate bits and zero out bits above that.
188
+ */
189
+ int masklen = env->regs[14] << env->v7m.ltpsize;
190
+ assert(masklen <= 16);
191
+ mask &= MAKE_64BIT_MASK(0, masklen);
192
+ }
193
+
194
+ if ((env->condexec_bits & 0xf) == 0) {
195
+ /*
196
+ * ECI bits indicate which beats are already executed;
197
+ * we handle this by effectively predicating them out.
198
+ */
199
+ int eci = env->condexec_bits >> 4;
200
+ switch (eci) {
201
+ case ECI_NONE:
202
+ break;
203
+ case ECI_A0:
204
+ mask &= 0xfff0;
205
+ break;
206
+ case ECI_A0A1:
207
+ mask &= 0xff00;
208
+ break;
209
+ case ECI_A0A1A2:
210
+ case ECI_A0A1A2B0:
211
+ mask &= 0xf000;
212
+ break;
213
+ default:
214
+ g_assert_not_reached();
215
+ }
216
+ }
217
+
218
+ return mask;
219
+}
220
+
221
+static void mve_advance_vpt(CPUARMState *env)
222
+{
223
+ /* Advance the VPT and ECI state if necessary */
224
+ uint32_t vpr = env->v7m.vpr;
225
+ unsigned mask01, mask23;
226
+
227
+ if ((env->condexec_bits & 0xf) == 0) {
228
+ env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
229
+ (ECI_A0 << 4) : (ECI_NONE << 4);
230
+ }
231
+
232
+ if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
233
+ /* VPT not enabled, nothing to do */
234
+ return;
235
+ }
236
+
237
+ mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
238
+ mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
239
+ if (mask01 > 8) {
240
+ /* high bit set, but not 0b1000: invert the relevant half of P0 */
241
+ vpr ^= 0xff;
242
+ }
243
+ if (mask23 > 8) {
244
+ /* high bit set, but not 0b1000: invert the relevant half of P0 */
245
+ vpr ^= 0xff00;
246
+ }
247
+ vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
248
+ vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
249
+ env->v7m.vpr = vpr;
250
+}
251
+
252
+
253
+#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
254
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
255
+ { \
256
+ TYPE *d = vd; \
257
+ uint16_t mask = mve_element_mask(env); \
258
+ unsigned b, e; \
259
+ /* \
260
+ * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
261
+ * beats so we don't care if we update part of the dest and \
262
+ * then take an exception. \
263
+ */ \
264
+ for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
265
+ if (mask & (1 << b)) { \
266
+ d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
267
+ } \
268
+ addr += MSIZE; \
269
+ } \
270
+ mve_advance_vpt(env); \
271
+ }
272
+
273
+#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \
274
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
275
+ { \
276
+ TYPE *d = vd; \
277
+ uint16_t mask = mve_element_mask(env); \
278
+ unsigned b, e; \
279
+ for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
280
+ if (mask & (1 << b)) { \
281
+ cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
282
+ } \
283
+ addr += MSIZE; \
284
+ } \
285
+ mve_advance_vpt(env); \
286
+ }
287
+
288
+DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
289
+DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
290
+DO_VLDR(vldrw, 4, ldl, 4, uint32_t)
291
+
292
+DO_VSTR(vstrb, 1, stb, 1, uint8_t)
293
+DO_VSTR(vstrh, 2, stw, 2, uint16_t)
294
+DO_VSTR(vstrw, 4, stl, 4, uint32_t)
295
+
296
+#undef DO_VLDR
297
+#undef DO_VSTR
298
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
299
index XXXXXXX..XXXXXXX 100644
300
--- a/target/arm/translate-mve.c
301
+++ b/target/arm/translate-mve.c
302
@@ -XXX,XX +XXX,XX @@
303
304
/* Include the generated decoder */
305
#include "decode-mve.c.inc"
306
+
307
+typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
308
+
309
+/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
310
+static inline long mve_qreg_offset(unsigned reg)
311
+{
312
+ return offsetof(CPUARMState, vfp.zregs[reg].d[0]);
313
+}
314
+
315
+static TCGv_ptr mve_qreg_ptr(unsigned reg)
316
+{
317
+ TCGv_ptr ret = tcg_temp_new_ptr();
318
+ tcg_gen_addi_ptr(ret, cpu_env, mve_qreg_offset(reg));
319
+ return ret;
320
+}
321
+
322
+static bool mve_check_qreg_bank(DisasContext *s, int qmask)
323
+{
324
+ /*
325
+ * Check whether Qregs are in range. For v8.1M only Q0..Q7
326
+ * are supported, see VFPSmallRegisterBank().
327
+ */
328
+ return qmask < 8;
329
+}
330
+
331
+static bool mve_eci_check(DisasContext *s)
332
+{
333
+ /*
334
+ * This is a beatwise insn: check that ECI is valid (not a
335
+ * reserved value) and note that we are handling it.
336
+ * Return true if OK, false if we generated an exception.
337
+ */
338
+ s->eci_handled = true;
339
+ switch (s->eci) {
340
+ case ECI_NONE:
341
+ case ECI_A0:
342
+ case ECI_A0A1:
343
+ case ECI_A0A1A2:
344
+ case ECI_A0A1A2B0:
345
+ return true;
346
+ default:
347
+ /* Reserved value: INVSTATE UsageFault */
348
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
349
+ default_exception_el(s));
350
+ return false;
351
+ }
352
+}
353
+
354
+static void mve_update_eci(DisasContext *s)
355
+{
356
+ /*
357
+ * The helper function will always update the CPUState field,
358
+ * so we only need to update the DisasContext field.
359
+ */
360
+ if (s->eci) {
361
+ s->eci = (s->eci == ECI_A0A1A2B0) ? ECI_A0 : ECI_NONE;
362
+ }
363
+}
364
+
365
+static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
366
+{
367
+ TCGv_i32 addr;
368
+ uint32_t offset;
369
+ TCGv_ptr qreg;
370
+
371
+ if (!dc_isar_feature(aa32_mve, s) ||
372
+ !mve_check_qreg_bank(s, a->qd) ||
373
+ !fn) {
374
+ return false;
375
+ }
376
+
377
+ /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
378
+ if (a->rn == 15 || (a->rn == 13 && a->w)) {
379
+ return false;
380
+ }
381
+
382
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
383
+ return true;
384
+ }
385
+
386
+ offset = a->imm << a->size;
387
+ if (!a->a) {
388
+ offset = -offset;
389
+ }
390
+ addr = load_reg(s, a->rn);
391
+ if (a->p) {
392
+ tcg_gen_addi_i32(addr, addr, offset);
393
+ }
394
+
395
+ qreg = mve_qreg_ptr(a->qd);
396
+ fn(cpu_env, qreg, addr);
397
+ tcg_temp_free_ptr(qreg);
398
+
399
+ /*
400
+ * Writeback always happens after the last beat of the insn,
401
+ * regardless of predication
402
+ */
403
+ if (a->w) {
404
+ if (!a->p) {
405
+ tcg_gen_addi_i32(addr, addr, offset);
406
+ }
407
+ store_reg(s, a->rn, addr);
408
+ } else {
409
+ tcg_temp_free_i32(addr);
410
+ }
411
+ mve_update_eci(s);
412
+ return true;
413
+}
414
+
415
+static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
416
+{
417
+ static MVEGenLdStFn * const ldstfns[4][2] = {
418
+ { gen_helper_mve_vstrb, gen_helper_mve_vldrb },
419
+ { gen_helper_mve_vstrh, gen_helper_mve_vldrh },
420
+ { gen_helper_mve_vstrw, gen_helper_mve_vldrw },
421
+ { NULL, NULL }
422
+ };
423
+ return do_ldst(s, a, ldstfns[a->size][a->l]);
424
+}
425
diff --git a/target/arm/meson.build b/target/arm/meson.build
426
index XXXXXXX..XXXXXXX 100644
427
--- a/target/arm/meson.build
428
+++ b/target/arm/meson.build
429
@@ -XXX,XX +XXX,XX @@ arm_ss.add(files(
430
'helper.c',
431
'iwmmxt_helper.c',
432
'm_helper.c',
433
+ 'mve_helper.c',
434
'neon_helper.c',
435
'op_helper.c',
436
'tlb_helper.c',
437
--
438
2.20.1
439
440
diff view generated by jsdifflib
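As a companion to the mve_element_mask() logic in the patch above: the mask combines four sources (all lanes active by default, VPR.P0 under VPT masking, low-overhead-loop tail predication, and ECI beat state). Below is a minimal standalone C model of that combination. This is not QEMU code; the MVEEnv struct and every name in it are illustrative stand-ins.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { ECI_NONE = 0, ECI_A0 = 1, ECI_A0A1 = 2, ECI_A0A1A2 = 4, ECI_A0A1A2B0 = 5 };

typedef struct {
    uint16_t vpr_p0;   /* VPR.P0: one predicate bit per byte lane */
    int vpr_mask01;    /* VPR.MASK01 (0 => VPT inactive for bytes 0..7) */
    int vpr_mask23;    /* VPR.MASK23 (0 => VPT inactive for bytes 8..15) */
    int ltpsize;       /* 4 => tail predication inactive */
    uint32_t lr;       /* remaining loop count for LE tail predication */
    int eci;           /* beats already executed before an exception */
} MVEEnv;

static uint16_t element_mask(const MVEEnv *env)
{
    uint16_t mask = env->vpr_p0;

    /* Byte lanes outside an active VPT block are unconditionally active */
    if (!env->vpr_mask01) {
        mask |= 0x00ff;
    }
    if (!env->vpr_mask23) {
        mask |= 0xff00;
    }
    /* Loop tail: keep only the low loopcount * esize predicate bits */
    if (env->ltpsize < 4 && env->lr <= (1u << (4 - env->ltpsize))) {
        int masklen = env->lr << env->ltpsize;
        assert(masklen <= 16);
        mask &= (1u << masklen) - 1;
    }
    /* Beats already completed (ECI) are predicated away */
    switch (env->eci) {
    case ECI_NONE:     break;
    case ECI_A0:       mask &= 0xfff0; break;
    case ECI_A0A1:     mask &= 0xff00; break;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0: mask &= 0xf000; break;
    }
    return mask;
}

int main(void)
{
    /* Last iteration of a tail-predicated loop over 32-bit elements:
     * ltpsize=2, LR=3 => only the low 3 * 4 = 12 predicate bits survive. */
    MVEEnv env = { .vpr_p0 = 0xffff, .ltpsize = 2, .lr = 3 };
    printf("mask = 0x%04x\n", (unsigned)element_mask(&env)); /* 0x0fff */
    return 0;
}
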
Deleted patch

Implement the variants of MVE VLDR (encodings T1, T2) which perform
"widening" loads where bytes or halfwords are loaded from memory and
zero or sign-extended into halfword or word length vector elements,
and the narrowing MVE VSTR (encodings T1, T2) where bytes or
halfwords are stored from halfword or word elements.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-3-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 10 ++++++++++
target/arm/mve.decode | 25 +++++++++++++++++++++++--
target/arm/mve_helper.c | 11 +++++++++++
target/arm/translate-mve.c | 14 ++++++++++++++
4 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vldrw, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrb, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrh, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vldrb_sh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_sw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_uh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_uw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrh_sw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrh_uw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@

%qd 22:1 13:3

-&vldr_vstr rn qd imm p a w size l
+&vldr_vstr rn qd imm p a w size l u

-@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd
+@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
+# Note that both Rn and Qd are 3 bits only (no D bit)
+@vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr

# Vector loads and stores

+# Widening loads and narrowing stores:
+# for these P=0 W=0 is 'related encoding'; sz=11 is 'related encoding'
+# This means we need to expand out to multiple patterns for P, W, SZ.
+# For stores the U bit must be 0 but we catch that in the trans_ function.
+# The naming scheme here is "VLDSTB_H == in-memory byte load/store to/from
+# signed halfword element in register", etc.
+VLDSTB_H 111 . 110 0 a:1 0 1 . 0 ... ... 0 111 01 ....... @vldst_wn \
+ p=0 w=1 size=1
+VLDSTB_H 111 . 110 1 a:1 0 w:1 . 0 ... ... 0 111 01 ....... @vldst_wn \
+ p=1 size=1
+VLDSTB_W 111 . 110 0 a:1 0 1 . 0 ... ... 0 111 10 ....... @vldst_wn \
+ p=0 w=1 size=2
+VLDSTB_W 111 . 110 1 a:1 0 w:1 . 0 ... ... 0 111 10 ....... @vldst_wn \
+ p=1 size=2
+VLDSTH_W 111 . 110 0 a:1 0 1 . 1 ... ... 0 111 10 ....... @vldst_wn \
+ p=0 w=1 size=2
+VLDSTH_W 111 . 110 1 a:1 0 w:1 . 1 ... ... 0 111 10 ....... @vldst_wn \
+ p=1 size=2
+
# Non-widening loads/stores (P=0 W=0 is 'related encoding')
VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111100 ....... @vldr_vstr \
 size=0 p=0 w=1
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

+DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
+DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
+DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
+DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
+DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
+DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)
+
+DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
+DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
+DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
+
#undef DO_VLDR
#undef DO_VSTR
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
};
return do_ldst(s, a, ldstfns[a->size][a->l]);
}
+
+#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST) \
+ static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a) \
+ { \
+ static MVEGenLdStFn * const ldstfns[2][2] = { \
+ { gen_helper_mve_##ST, gen_helper_mve_##SLD }, \
+ { NULL, gen_helper_mve_##ULD }, \
+ }; \
+ return do_ldst(s, a, ldstfns[a->u][a->l]); \
+ }
+
+DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
+DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
+DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
--
2.20.1

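A reading aid for the widening forms above: a predicated widening load sign- or zero-extends each active in-memory element into a wider register element, with the predicate consulted at byte granularity of the destination lane. A minimal standalone sketch of the VLDRB.S32 flavour, assuming a 4-element vector; the names here are illustrative, not QEMU's.

#include <stdint.h>
#include <stdio.h>

#define ESIZE 4   /* element size in the register */
#define MSIZE 1   /* element size in memory */

static void vldrb_sw(int32_t d[4], const int8_t *mem, uint16_t mask)
{
    /* Predicate bits are byte-granular: element e uses bit e * ESIZE */
    for (unsigned b = 0, e = 0; b < 16; b += ESIZE, e++) {
        if (mask & (1u << b)) {
            d[e] = mem[e * MSIZE];   /* int8_t -> int32_t sign-extends */
        }
    }
}

int main(void)
{
    int8_t mem[4] = { -1, 2, -3, 4 };
    int32_t q[4] = { 100, 100, 100, 100 };
    vldrb_sw(q, mem, 0x00ff);        /* only elements 0 and 1 active */
    printf("%d %d %d %d\n", (int)q[0], (int)q[1], (int)q[2], (int)q[3]);
    /* prints: -1 2 100 100 */
    return 0;
}
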
Implement the MVE vector logical operations operating
on two registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-12-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 6 ++++++
target/arm/mve.decode | 9 +++++++++
target/arm/mve_helper.c | 26 ++++++++++++++++++++++++++
target/arm/translate-mve.c | 37 +++++++++++++++++++++++++++++++++++++
4 files changed, 78 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vfnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vfnegs, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vand, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vbic, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@

&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
+&2op qd qm qn size

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@

@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
+@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0

# Vector loads and stores

@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
 size=2 p=1

+# Vector 2-op
+VAND 1110 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VBIC 1110 1111 0 . 01 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VORR 1110 1111 0 . 10 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VORN 1110 1111 0 . 11 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VEOR 1111 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+
# Vector miscellaneous

VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
+
+#define DO_2OP(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_AND(N, M) ((N) & (M))
+#define DO_BIC(N, M) ((N) & ~(M))
+#define DO_ORR(N, M) ((N) | (M))
+#define DO_ORN(N, M) ((N) | ~(M))
+#define DO_EOR(N, M) ((N) ^ (M))
+
+DO_2OP(vand, 8, uint64_t, DO_AND)
+DO_2OP(vbic, 8, uint64_t, DO_BIC)
+DO_2OP(vorr, 8, uint64_t, DO_ORR)
+DO_2OP(vorn, 8, uint64_t, DO_ORN)
+DO_2OP(veor, 8, uint64_t, DO_EOR)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@

typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);

/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static bool trans_VNEG_fp(DisasContext *s, arg_1op *a)
}
return do_1op(s, a, fns[a->size]);
}
+
+static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn)
+{
+ TCGv_ptr qd, qn, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qn | a->qm) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qn = mve_qreg_ptr(a->qn);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qn, qm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qn);
+ tcg_temp_free_ptr(qm);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_LOGIC(INSN, HELPER) \
+ static bool trans_##INSN(DisasContext *s, arg_2op *a) \
+ { \
+ return do_2op(s, a, HELPER); \
+ }
+
+DO_LOGIC(VAND, gen_helper_mve_vand)
+DO_LOGIC(VBIC, gen_helper_mve_vbic)
+DO_LOGIC(VORR, gen_helper_mve_vorr)
+DO_LOGIC(VORN, gen_helper_mve_vorn)
+DO_LOGIC(VEOR, gen_helper_mve_veor)
--
2.20.1

Implement the MVE logical-immediate insns (VMOV, VMVN,
VORR and VBIC). These have essentially the same encoding
as their Neon equivalents, and we implement the decode
in the same way.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-7-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 4 +++
target/arm/mve.decode | 17 +++++++++++
target/arm/mve_helper.c | 24 ++++++++++++
target/arm/translate-mve.c | 50 ++++++++++++++++++++++++++++++++++++++
4 files changed, 95 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
# VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
%size_28 28:1 !function=plus_1

+# 1imm format immediate
+%imm_28_16_0 28:1 16:3 0:4
+
&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
&2op qd qm qn size
&2scalar qd qn rm size
+&1imm qd imm cmode op

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
@2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
 size=%size_28
+@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0

# The _rev suffix indicates that Vn and Vm are reversed. This is
# the case for shifts. In the Arm ARM these insns are documented
@@ -XXX,XX +XXX,XX @@ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rd
# Predicate operations
%mask_22_13 22:1 13:3
VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
+
+# Logical immediate operations (1 reg and modified-immediate)
+
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
+# not in a way we can conveniently represent in decodetree without
+# a lot of repetition:
+# VORR: op=0, (cmode & 1) && cmode < 12
+# VBIC: op=1, (cmode & 1) && cmode < 12
+# VMOV: everything else
+# So we have a single decode line and check the cmode/op in the
+# trans function.
+Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)

+/*
+ * 1 operand immediates: Vda is destination and possibly also one source.
+ * All these insns work at 64-bit widths.
+ */
+#define DO_1OP_IMM(OP, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
+ { \
+ uint64_t *da = vda; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+ mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_MOVI(N, I) (I)
+#define DO_ANDI(N, I) ((N) & (I))
+#define DO_ORRI(N, I) ((N) | (I))
+
+DO_1OP_IMM(vmovi, DO_MOVI)
+DO_1OP_IMM(vandi, DO_ANDI)
+DO_1OP_IMM(vorri, DO_ORRI)
+
#define DO_2OP(OP, ESIZE, TYPE, FN) \
 void HELPER(glue(mve_, OP))(CPUARMState *env, \
 void *vd, void *vn, void *vm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);

/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
mve_update_eci(s);
return true;
}
+
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
+{
+ TCGv_ptr qd;
+ uint64_t imm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+ qd = mve_qreg_ptr(a->qd);
+ fn(cpu_env, qd, tcg_constant_i64(imm));
+ tcg_temp_free_ptr(qd);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
+{
+ /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
+ MVEGenOneOpImmFn *fn;
+
+ if ((a->cmode & 1) && a->cmode < 12) {
+ if (a->op) {
+ /*
+ * For op=1, the immediate will be inverted by asimd_imm_const(),
+ * so the VBIC becomes a logical AND operation.
+ */
+ fn = gen_helper_mve_vandi;
+ } else {
+ fn = gen_helper_mve_vorri;
+ }
+ } else {
+ /* There is one unallocated cmode/op combination in this space */
+ if (a->cmode == 15 && a->op == 1) {
+ return false;
+ }
+ /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
+ fn = gen_helper_mve_vmovi;
+ }
+ return do_1imm(s, a, fn);
+}
--
2.20.1

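A reading aid for the cmode/op handling in the logical-immediate patch above: the classification the decode comment describes lives in the trans function, not in decodetree. A minimal standalone sketch of that classification; the enum and function names are illustrative, not QEMU's decoder.

#include <stdio.h>

enum OneRegImmKind { KIND_VORR, KIND_VBIC, KIND_VMOV_VMVN, KIND_UNALLOCATED };

static enum OneRegImmKind classify_1imm(int cmode, int op)
{
    if ((cmode & 1) && cmode < 12) {
        return op ? KIND_VBIC : KIND_VORR;
    }
    if (cmode == 15 && op == 1) {
        return KIND_UNALLOCATED;   /* the one reserved combination */
    }
    return KIND_VMOV_VMVN;         /* asimd_imm_const() picks MOV vs MVN */
}

int main(void)
{
    printf("%d %d %d %d\n",
           classify_1imm(1, 0),    /* VORR */
           classify_1imm(3, 1),    /* VBIC (immediate inverted => AND) */
           classify_1imm(14, 0),   /* VMOV */
           classify_1imm(15, 1));  /* UNALLOCATED */
    return 0;
}
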
Implement the MVE VCLZ insn (and the necessary machinery
for MVE 1-input vector ops).

Note that for non-load instructions predication is always performed
at a byte level granularity regardless of element size (R_ZLSJ),
and so the masking logic here differs from that used in the VLDR
and VSTR helpers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-4-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 4 ++
target/arm/mve.decode | 8 ++++
target/arm/mve_helper.c | 82 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 38 ++++++++++++++++++
4 files changed, 132 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vldrh_uw, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vclzb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclzh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclzw, TCG_CALL_NO_WG, void, env, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
#

%qd 22:1 13:3
+%qm 5:1 1:3

&vldr_vstr rn qd imm p a w size l u
+&1op qd qm size

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr

+@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
+
# Vector loads and stores

# Widening loads and narrowing stores:
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
 size=1 p=1
VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
 size=2 p=1
+
+# Vector miscellaneous
+
+VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR
+
+/*
+ * The mergemask(D, R, M) macro performs the operation "*D = R" but
+ * storing only the bytes which correspond to 1 bits in M,
+ * leaving other bytes in *D unchanged. We use _Generic
+ * to select the correct implementation based on the type of D.
+ */
+
+static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
+{
+ if (mask & 1) {
+ *d = r;
+ }
+}
+
+static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
+{
+ mergemask_ub((uint8_t *)d, r, mask);
+}
+
+static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
+{
+ uint16_t bmask = expand_pred_b_data[mask & 3];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
+{
+ mergemask_uh((uint16_t *)d, r, mask);
+}
+
+static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
+{
+ uint32_t bmask = expand_pred_b_data[mask & 0xf];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
+{
+ mergemask_uw((uint32_t *)d, r, mask);
+}
+
+static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
+{
+ uint64_t bmask = expand_pred_b_data[mask & 0xff];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
+{
+ mergemask_uq((uint64_t *)d, r, mask);
+}
+
+#define mergemask(D, R, M) \
+ _Generic(D, \
+ uint8_t *: mergemask_ub, \
+ int8_t *: mergemask_sb, \
+ uint16_t *: mergemask_uh, \
+ int16_t *: mergemask_sh, \
+ uint32_t *: mergemask_uw, \
+ int32_t *: mergemask_sw, \
+ uint64_t *: mergemask_uq, \
+ int64_t *: mergemask_sq)(D, R, M)
+
+#define DO_1OP(OP, ESIZE, TYPE, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_CLZ_B(N) (clz32(N) - 24)
+#define DO_CLZ_H(N) (clz32(N) - 16)
+
+DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
+DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
+DO_1OP(vclzw, 4, uint32_t, clz32)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@
#include "decode-mve.c.inc"

typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);

/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
+
+static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
+{
+ TCGv_ptr qd, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
+ !fn) {
+ return false;
+ }
+
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_1OP(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
+ { \
+ static MVEGenOneOpFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_1op(s, a, fns[a->size]); \
+ }
+
+DO_1OP(VCLZ, vclz)
--
2.20.1

Implement the MVE shift-vector-left-by-immediate insns VSHL, VQSHL
and VQSHLU.

The size-and-immediate encoding here is the same as Neon, and we
handle it the same way neon-dp.decode does.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-8-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 16 +++++++++++
target/arm/mve.decode | 23 +++++++++++++++
target/arm/mve_helper.c | 57 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 51 ++++++++++++++++++++++++++++++++++
4 files changed, 147 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
&2op qd qm qn size
&2scalar qd qn rm size
&1imm qd imm cmode op
+&2shift qd qm shift size

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@
@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn

+@2_shl_b .... .... .. 001 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
+@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
+@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2
+
# Vector loads and stores

# Widening loads and narrowing stores:
@@ -XXX,XX +XXX,XX @@ VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
# So we have a single decode line and check the cmode/op in the
# trans function.
Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
+
+# Shifts by immediate
+
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
 WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
 WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
+#define DO_SUQSHL_OP(N, M, satp) \
+ WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)

DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvsw, 4, uint32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)
+
+/* Shifts by immediate */
+#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(m[H##ESIZE(e)], shift), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(m[H##ESIZE(e)], shift, &sat), mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* provide unsigned 2-op shift helpers for all sizes */
+#define DO_2SHIFT_U(OP, FN) \
+ DO_2SHIFT(OP##b, 1, uint8_t, FN) \
+ DO_2SHIFT(OP##h, 2, uint16_t, FN) \
+ DO_2SHIFT(OP##w, 4, uint32_t, FN)
+
+#define DO_2SHIFT_SAT_U(OP, FN) \
+ DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
+ DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
+ DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
+#define DO_2SHIFT_SAT_S(OP, FN) \
+ DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
+ DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
+ DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
+
+DO_2SHIFT_U(vshli_u, DO_VSHLU)
+DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
+DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
+DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
}
return do_1imm(s, a, fn);
}
+
+static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
+ bool negateshift)
+{
+ TCGv_ptr qd, qm;
+ int shift = a->shift;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * When we handle a right shift insn using a left-shift helper
+ * which permits a negative shift count to indicate a right-shift,
+ * we must negate the shift count.
+ */
+ if (negateshift) {
+ shift = -shift;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm, tcg_constant_i32(shift));
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+ { \
+ static MVEGenTwoOpShiftFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_2shift(s, a, fns[a->size], NEGATESHIFT); \
+ }
+
+DO_2SHIFT(VSHLI, vshli_u, false)
+DO_2SHIFT(VQSHLI_S, vqshli_s, false)
+DO_2SHIFT(VQSHLI_U, vqshli_u, false)
+DO_2SHIFT(VQSHLUI, vqshlui_s, false)
--
2.20.1

diff view generated by jsdifflib
Deleted patch
1
Implement the MVE VCLS insn.
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210617121628.20116-5-peter.maydell@linaro.org
6
---
7
target/arm/helper-mve.h | 4 ++++
8
target/arm/mve.decode | 1 +
9
target/arm/mve_helper.c | 7 +++++++
10
target/arm/translate-mve.c | 1 +
11
4 files changed, 13 insertions(+)
12
13
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-mve.h
16
+++ b/target/arm/helper-mve.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32)
18
DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32)
19
DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)
20
21
+DEF_HELPER_FLAGS_3(mve_vclsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
22
+DEF_HELPER_FLAGS_3(mve_vclsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
23
+DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
24
+
25
DEF_HELPER_FLAGS_3(mve_vclzb, TCG_CALL_NO_WG, void, env, ptr, ptr)
26
DEF_HELPER_FLAGS_3(mve_vclzh, TCG_CALL_NO_WG, void, env, ptr, ptr)
27
DEF_HELPER_FLAGS_3(mve_vclzw, TCG_CALL_NO_WG, void, env, ptr, ptr)
28
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/mve.decode
31
+++ b/target/arm/mve.decode
32
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
33
34
# Vector miscellaneous
35
36
+VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
37
VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
38
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/mve_helper.c
41
+++ b/target/arm/mve_helper.c
42
@@ -XXX,XX +XXX,XX @@ static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
43
mve_advance_vpt(env); \
44
}
45
46
+#define DO_CLS_B(N) (clrsb32(N) - 24)
47
+#define DO_CLS_H(N) (clrsb32(N) - 16)
48
+
49
+DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
50
+DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
51
+DO_1OP(vclsw, 4, int32_t, clrsb32)
52
+
53
#define DO_CLZ_B(N) (clz32(N) - 24)
54
#define DO_CLZ_H(N) (clz32(N) - 16)
55
56
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/translate-mve.c
59
+++ b/target/arm/translate-mve.c
60
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
61
}
62
63
DO_1OP(VCLZ, vclz)
64
+DO_1OP(VCLS, vcls)
65
--
66
2.20.1
67
68
diff view generated by jsdifflib
Deleted patch
1
Implement the MVE instructions VREV16, VREV32 and VREV64.
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210617121628.20116-6-peter.maydell@linaro.org
6
---
7
target/arm/helper-mve.h | 7 +++++++
8
target/arm/mve.decode | 4 ++++
9
target/arm/mve_helper.c | 7 +++++++
10
target/arm/translate-mve.c | 33 +++++++++++++++++++++++++++++++++
11
4 files changed, 51 insertions(+)
12
13
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-mve.h
16
+++ b/target/arm/helper-mve.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
18
DEF_HELPER_FLAGS_3(mve_vclzb, TCG_CALL_NO_WG, void, env, ptr, ptr)
19
DEF_HELPER_FLAGS_3(mve_vclzh, TCG_CALL_NO_WG, void, env, ptr, ptr)
20
DEF_HELPER_FLAGS_3(mve_vclzw, TCG_CALL_NO_WG, void, env, ptr, ptr)
21
+
22
+DEF_HELPER_FLAGS_3(mve_vrev16b, TCG_CALL_NO_WG, void, env, ptr, ptr)
23
+DEF_HELPER_FLAGS_3(mve_vrev32b, TCG_CALL_NO_WG, void, env, ptr, ptr)
24
+DEF_HELPER_FLAGS_3(mve_vrev32h, TCG_CALL_NO_WG, void, env, ptr, ptr)
25
+DEF_HELPER_FLAGS_3(mve_vrev64b, TCG_CALL_NO_WG, void, env, ptr, ptr)
26
+DEF_HELPER_FLAGS_3(mve_vrev64h, TCG_CALL_NO_WG, void, env, ptr, ptr)
27
+DEF_HELPER_FLAGS_3(mve_vrev64w, TCG_CALL_NO_WG, void, env, ptr, ptr)
28
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/mve.decode
31
+++ b/target/arm/mve.decode
32
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
33
34
VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
35
VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
36
+
37
+VREV16 1111 1111 1 . 11 .. 00 ... 0 0001 01 . 0 ... 0 @1op
38
+VREV32 1111 1111 1 . 11 .. 00 ... 0 0000 11 . 0 ... 0 @1op
39
+VREV64 1111 1111 1 . 11 .. 00 ... 0 0000 01 . 0 ... 0 @1op
40
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/mve_helper.c
43
+++ b/target/arm/mve_helper.c
44
@@ -XXX,XX +XXX,XX @@ DO_1OP(vclsw, 4, int32_t, clrsb32)
45
DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
46
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
47
DO_1OP(vclzw, 4, uint32_t, clz32)
48
+
49
+DO_1OP(vrev16b, 2, uint16_t, bswap16)
50
+DO_1OP(vrev32b, 4, uint32_t, bswap32)
51
+DO_1OP(vrev32h, 4, uint32_t, hswap32)
52
+DO_1OP(vrev64b, 8, uint64_t, bswap64)
53
+DO_1OP(vrev64h, 8, uint64_t, hswap64)
54
+DO_1OP(vrev64w, 8, uint64_t, wswap64)
55
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/translate-mve.c
58
+++ b/target/arm/translate-mve.c
59
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
60
61
DO_1OP(VCLZ, vclz)
62
DO_1OP(VCLS, vcls)
63
+
64
+static bool trans_VREV16(DisasContext *s, arg_1op *a)
65
+{
66
+ static MVEGenOneOpFn * const fns[] = {
67
+ gen_helper_mve_vrev16b,
68
+ NULL,
69
+ NULL,
70
+ NULL,
71
+ };
72
+ return do_1op(s, a, fns[a->size]);
73
+}
74
+
75
+static bool trans_VREV32(DisasContext *s, arg_1op *a)
76
+{
77
+ static MVEGenOneOpFn * const fns[] = {
78
+ gen_helper_mve_vrev32b,
79
+ gen_helper_mve_vrev32h,
80
+ NULL,
81
+ NULL,
82
+ };
83
+ return do_1op(s, a, fns[a->size]);
84
+}
85
+
86
+static bool trans_VREV64(DisasContext *s, arg_1op *a)
87
+{
88
+ static MVEGenOneOpFn * const fns[] = {
89
+ gen_helper_mve_vrev64b,
90
+ gen_helper_mve_vrev64h,
91
+ gen_helper_mve_vrev64w,
92
+ NULL,
93
+ };
94
+ return do_1op(s, a, fns[a->size]);
95
+}
96
--
97
2.20.1
98
99
diff view generated by jsdifflib
Deleted patch
1
Implement the MVE VMVN(register) operation. Note that for
2
predication this operation is byte-by-byte.
3
1
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210617121628.20116-7-peter.maydell@linaro.org
7
---
8
target/arm/helper-mve.h | 2 ++
9
target/arm/mve.decode | 3 +++
10
target/arm/mve_helper.c | 4 ++++
11
target/arm/translate-mve.c | 5 +++++
12
4 files changed, 14 insertions(+)
13
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
17
+++ b/target/arm/helper-mve.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vrev32h, TCG_CALL_NO_WG, void, env, ptr, ptr)
19
DEF_HELPER_FLAGS_3(mve_vrev64b, TCG_CALL_NO_WG, void, env, ptr, ptr)
20
DEF_HELPER_FLAGS_3(mve_vrev64h, TCG_CALL_NO_WG, void, env, ptr, ptr)
21
DEF_HELPER_FLAGS_3(mve_vrev64w, TCG_CALL_NO_WG, void, env, ptr, ptr)
22
+
23
+DEF_HELPER_FLAGS_3(mve_vmvn, TCG_CALL_NO_WG, void, env, ptr, ptr)
24
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/mve.decode
27
+++ b/target/arm/mve.decode
28
@@ -XXX,XX +XXX,XX @@
29
@vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr
30
31
@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
32
+@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
33
34
# Vector loads and stores
35
36
@@ -XXX,XX +XXX,XX @@ VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
37
VREV16 1111 1111 1 . 11 .. 00 ... 0 0001 01 . 0 ... 0 @1op
38
VREV32 1111 1111 1 . 11 .. 00 ... 0 0000 11 . 0 ... 0 @1op
39
VREV64 1111 1111 1 . 11 .. 00 ... 0 0000 01 . 0 ... 0 @1op
40
+
41
+VMVN 1111 1111 1 . 11 00 00 ... 0 0101 11 . 0 ... 0 @1op_nosz
42
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/mve_helper.c
45
+++ b/target/arm/mve_helper.c
46
@@ -XXX,XX +XXX,XX @@ DO_1OP(vrev32h, 4, uint32_t, hswap32)
47
DO_1OP(vrev64b, 8, uint64_t, bswap64)
48
DO_1OP(vrev64h, 8, uint64_t, hswap64)
49
DO_1OP(vrev64w, 8, uint64_t, wswap64)
50
+
51
+#define DO_NOT(N) (~(N))
52
+
53
+DO_1OP(vmvn, 8, uint64_t, DO_NOT)
54
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/arm/translate-mve.c
57
+++ b/target/arm/translate-mve.c
58
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_1op *a)
59
};
60
return do_1op(s, a, fns[a->size]);
61
}
62
+
63
+static bool trans_VMVN(DisasContext *s, arg_1op *a)
64
+{
65
+ return do_1op(s, a, gen_helper_mve_vmvn);
66
+}
67
--
68
2.20.1
69
70
diff view generated by jsdifflib
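A reading aid for the byte-by-byte predication note above: a 64-bit lane can be merged under an expanded byte mask, so an operation like VMVN works 64 bits at a time while still honouring per-byte predication. A standalone sketch of the mergemask idea; expand_mask8() is an illustrative stand-in for QEMU's expand_pred_b_data[] table.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t expand_mask8(uint8_t mask)
{
    /* turn each predicate bit into 0x00 or 0xff for one byte lane */
    uint64_t r = 0;
    for (int i = 0; i < 8; i++) {
        if (mask & (1u << i)) {
            r |= 0xffull << (i * 8);
        }
    }
    return r;
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint8_t mask)
{
    /* write r into *d, but only in the bytes the predicate enables */
    uint64_t bmask = expand_mask8(mask);
    *d = (*d & ~bmask) | (r & bmask);
}

int main(void)
{
    uint64_t d = 0;
    mergemask_uq(&d, ~0ull, 0x0f);   /* a predicated VMVN of zero */
    printf("%016" PRIx64 "\n", d);   /* 00000000ffffffff */
    return 0;
}
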
Deleted patch

Implement the MVE VABS functions (both integer and floating point).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-8-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 6 ++++++
target/arm/mve.decode | 3 +++
target/arm/mve_helper.c | 13 +++++++++++++
target/arm/translate-mve.c | 15 +++++++++++++++
4 files changed, 37 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vrev64h, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vrev64w, TCG_CALL_NO_WG, void, env, ptr, ptr)

DEF_HELPER_FLAGS_3(mve_vmvn, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vabsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vabsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfabss, TCG_CALL_NO_WG, void, env, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VREV32 1111 1111 1 . 11 .. 00 ... 0 0000 11 . 0 ... 0 @1op
VREV64 1111 1111 1 . 11 .. 00 ... 0 0000 01 . 0 ... 0 @1op

VMVN 1111 1111 1 . 11 00 00 ... 0 0101 11 . 0 ... 0 @1op_nosz
+
+VABS 1111 1111 1 . 11 .. 01 ... 0 0011 01 . 0 ... 0 @1op
+VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
+#include "tcg/tcg.h"

static uint16_t mve_element_mask(CPUARMState *env)
{
@@ -XXX,XX +XXX,XX @@ DO_1OP(vrev64w, 8, uint64_t, wswap64)
#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)
+
+#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
+#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
+#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))
+
+DO_1OP(vabsb, 1, int8_t, DO_ABS)
+DO_1OP(vabsh, 2, int16_t, DO_ABS)
+DO_1OP(vabsw, 4, int32_t, DO_ABS)
+
+/* We can do these 64 bits at a time */
+DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
+DO_1OP(vfabss, 8, uint64_t, DO_FABSS)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)

DO_1OP(VCLZ, vclz)
DO_1OP(VCLS, vcls)
+DO_1OP(VABS, vabs)

static bool trans_VREV16(DisasContext *s, arg_1op *a)
{
@@ -XXX,XX +XXX,XX @@ static bool trans_VMVN(DisasContext *s, arg_1op *a)
{
return do_1op(s, a, gen_helper_mve_vmvn);
}
+
+static bool trans_VABS_fp(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vfabsh,
+ gen_helper_mve_vfabss,
+ NULL,
+ };
+ if (!dc_isar_feature(aa32_mve_fp, s)) {
+ return false;
+ }
+ return do_1op(s, a, fns[a->size]);
+}
--
2.20.1

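DO_FABSH/DO_FABSS above work because IEEE abs only touches the sign bit, so a constant replicated across lanes lets one 64-bit AND handle several elements at once; the VNEG patch that follows plays the same trick with XOR. A standalone illustration in plain C (illustrative sketch only, not the QEMU macros):

    #include <stdint.h>
    #include <stdio.h>

    /* Replicate a 16-bit constant into all four half-lanes of a 64-bit
     * word, the same idea as dup_const(MO_16, ...). */
    static uint64_t dup16(uint16_t c)
    {
        return (uint64_t)c * 0x0001000100010001ULL;
    }

    int main(void)
    {
        /* four packed float16 values; 0xbc00 is -1.0, 0x8000 is -0.0 */
        uint64_t lanes = 0xbc00440045008000ULL;

        uint64_t vabs = lanes & dup16(0x7fff);  /* clear sign bits */
        uint64_t vneg = lanes ^ dup16(0x8000);  /* flip sign bits */

        printf("abs: %016llx\n", (unsigned long long)vabs);
        printf("neg: %016llx\n", (unsigned long long)vneg);
        return 0;
    }

The integer DO_ABS, by contrast, must know the element type (the comparison with zero is signed), which is why vabsb/vabsh/vabsw are expanded per size while the float forms run 64 bits at a time.
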
Deleted patch

Implement the MVE VNEG insn (both integer and floating point forms).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-9-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 6 ++++++
target/arm/mve.decode | 2 ++
target/arm/mve_helper.c | 12 ++++++++++++
target/arm/translate-mve.c | 15 +++++++++++++++
4 files changed, 35 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vabsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vfabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vfabss, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vnegb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfnegs, TCG_CALL_NO_WG, void, env, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMVN 1111 1111 1 . 11 00 00 ... 0 0101 11 . 0 ... 0 @1op_nosz

VABS 1111 1111 1 . 11 .. 01 ... 0 0011 01 . 0 ... 0 @1op
VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op
+VNEG 1111 1111 1 . 11 .. 01 ... 0 0011 11 . 0 ... 0 @1op
+VNEG_fp 1111 1111 1 . 11 .. 01 ... 0 0111 11 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vabsw, 4, int32_t, DO_ABS)
/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)
+
+#define DO_NEG(N) (-(N))
+#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
+#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))
+
+DO_1OP(vnegb, 1, int8_t, DO_NEG)
+DO_1OP(vnegh, 2, int16_t, DO_NEG)
+DO_1OP(vnegw, 4, int32_t, DO_NEG)
+
+/* We can do these 64 bits at a time */
+DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
+DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
DO_1OP(VCLZ, vclz)
DO_1OP(VCLS, vcls)
DO_1OP(VABS, vabs)
+DO_1OP(VNEG, vneg)

static bool trans_VREV16(DisasContext *s, arg_1op *a)
{
@@ -XXX,XX +XXX,XX @@ static bool trans_VABS_fp(DisasContext *s, arg_1op *a)
}
return do_1op(s, a, fns[a->size]);
}
+
+static bool trans_VNEG_fp(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vfnegh,
+ gen_helper_mve_vfnegs,
+ NULL,
+ };
+ if (!dc_isar_feature(aa32_mve_fp, s)) {
+ return false;
+ }
+ return do_1op(s, a, fns[a->size]);
+}
--
2.20.1

Deleted patch

The Arm MVE VDUP implementation would like to be able to emit code to
duplicate a byte or halfword value into an i32. We have code to do
this already in tcg-op-gvec.c, so all we need to do is make the
functions global.

For consistency with other functions made available to the frontends:
* we rename to tcg_gen_dup_*
* we expose both the _i32 and _i64 forms
* we provide the #define for a _tl form

Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210617121628.20116-10-peter.maydell@linaro.org
---
include/tcg/tcg-op.h | 8 ++++++++
include/tcg/tcg.h | 1 -
tcg/tcg-op-gvec.c | 20 ++++++++++----------
3 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -XXX,XX +XXX,XX @@ void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_abs_i32(TCGv_i32, TCGv_i32);

+/* Replicate a value of size @vece from @in to all the lanes in @out */
+void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in);
+
static inline void tcg_gen_discard_i32(TCGv_i32 arg)
{
tcg_gen_op1_i32(INDEX_op_discard, arg);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_abs_i64(TCGv_i64, TCGv_i64);

+/* Replicate a value of size @vece from @in to all the lanes in @out */
+void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in);
+
#if TCG_TARGET_REG_BITS == 64
static inline void tcg_gen_discard_i64(TCGv_i64 arg)
{
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i64
#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64
#define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec
+#define tcg_gen_dup_tl tcg_gen_dup_i64
#else
#define tcg_gen_movi_tl tcg_gen_movi_i32
#define tcg_gen_mov_tl tcg_gen_mov_i32
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i32
#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32
#define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec
+#define tcg_gen_dup_tl tcg_gen_dup_i32
#endif

#if UINTPTR_MAX == UINT32_MAX
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
: (qemu_build_not_reached_always(), 0)) \
: dup_const(VECE, C))

-
/*
* Memory helpers that will be used by TCG generated code.
*/
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -XXX,XX +XXX,XX @@ uint64_t (dup_const)(unsigned vece, uint64_t c)
}

/* Duplicate IN into OUT as per VECE. */
-static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
+void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
{
switch (vece) {
case MO_8:
@@ -XXX,XX +XXX,XX @@ static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
}
}

-static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
+void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
{
switch (vece) {
case MO_8:
@@ -XXX,XX +XXX,XX @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
&& (vece != MO_32 || !check_size_impl(oprsz, 4))) {
t_64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t_64, in_32);
- gen_dup_i64(vece, t_64, t_64);
+ tcg_gen_dup_i64(vece, t_64, t_64);
} else {
t_32 = tcg_temp_new_i32();
- gen_dup_i32(vece, t_32, in_32);
+ tcg_gen_dup_i32(vece, t_32, in_32);
}
} else if (in_64) {
/* We are given a 64-bit variable input. */
t_64 = tcg_temp_new_i64();
- gen_dup_i64(vece, t_64, in_64);
+ tcg_gen_dup_i64(vece, t_64, in_64);
} else {
/* We are given a constant input. */
/* For 64-bit hosts, use 64-bit constants for "simple" constants
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
} else if (g->fni8 && check_size_impl(oprsz, 8)) {
TCGv_i64 t64 = tcg_temp_new_i64();

- gen_dup_i64(g->vece, t64, c);
+ tcg_gen_dup_i64(g->vece, t64, c);
expand_2s_i64(dofs, aofs, oprsz, t64, g->scalar_first, g->fni8);
tcg_temp_free_i64(t64);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
TCGv_i32 t32 = tcg_temp_new_i32();

tcg_gen_extrl_i64_i32(t32, c);
- gen_dup_i32(g->vece, t32, t32);
+ tcg_gen_dup_i32(g->vece, t32, t32);
expand_2s_i32(dofs, aofs, oprsz, t32, g->scalar_first, g->fni4);
tcg_temp_free_i32(t32);
} else {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_dup_i64(vece, tmp, c);
+ tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
tcg_temp_free_i64(tmp);
}
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_dup_i64(vece, tmp, c);
+ tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
tcg_temp_free_i64(tmp);
}
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_dup_i64(vece, tmp, c);
+ tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
tcg_temp_free_i64(tmp);
}
--
2.20.1

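The duplication these functions perform is plain bit arithmetic; for a byte value the generated code simply combines shifted copies (or multiplies by a replicating constant). A sketch of the value-level effect, assuming nothing beyond standard C (the TCG functions emit IR rather than computing values directly):

    #include <stdint.h>
    #include <stdio.h>

    /* Splat the low byte of x across all four byte lanes of an i32;
     * this is the effect tcg_gen_dup_i32(MO_8, out, in) generates
     * code for. */
    static uint32_t dup_byte_i32(uint32_t x)
    {
        x &= 0xff;
        x |= x << 8;
        x |= x << 16;
        return x;
    }

    int main(void)
    {
        printf("%08x\n", dup_byte_i32(0xab)); /* prints abababab */
        return 0;
    }
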
Changed patch

Old version:

Implement the scalar form of the MVE VADD insn. This takes the
scalar operand from a general purpose register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-23-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 4 ++++
target/arm/mve.decode | 7 ++++++
target/arm/mve_helper.c | 22 +++++++++++++++++++
target/arm/translate-mve.c | 45 ++++++++++++++++++++++++++++++++++++++
4 files changed, 78 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
&2op qd qm qn size
+&2scalar qd qn rm size

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@
@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0

+@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
+
# Vector loads and stores

# Widening loads and narrowing stores:
@@ -XXX,XX +XXX,XX @@ VRMLALDAVH_S 1110 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_no
VRMLALDAVH_U 1111 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz

VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_nosz
+
+# Scalar operations
+
+VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)


+#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ uint32_t rm) \
+ { \
+ TYPE *d = vd, *n = vn; \
+ TYPE m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* provide unsigned 2-op scalar helpers for all sizes */
+#define DO_2OP_SCALAR_U(OP, FN) \
+ DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
+ DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
+ DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
+
+DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
+
/*
* Multiply add long dual accumulate ops.
*/
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@
typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);

/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULL_BU, vmullbu)
DO_2OP(VMULL_TS, vmullts)
DO_2OP(VMULL_TU, vmulltu)

+static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
+ MVEGenTwoOpScalarFn fn)
+{
+ TCGv_ptr qd, qn;
+ TCGv_i32 rm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qn) ||
+ !fn) {
+ return false;
+ }
+ if (a->rm == 13 || a->rm == 15) {
+ /* UNPREDICTABLE */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qn = mve_qreg_ptr(a->qn);
+ rm = load_reg(s, a->rm);
+ fn(cpu_env, qd, qn, rm);
+ tcg_temp_free_i32(rm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qn);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_2OP_SCALAR(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2scalar *a) \
+ { \
+ static MVEGenTwoOpScalarFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_2op_scalar(s, a, fns[a->size]); \
+ }
+
+DO_2OP_SCALAR(VADD_scalar, vadd_scalar)
+
static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
MVEGenDualAccOpFn *fn)
{
--
2.20.1

New version:

Implement the MVE vector shift right by immediate insns VSHRI and
VRSHRI. As with Neon, we implement these by using helper functions
which perform left shifts but allow negative shift counts to indicate
right shifts.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-9-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 12 ++++++++++++
target/arm/translate.h | 20 ++++++++++++++++++++
target/arm/mve.decode | 28 ++++++++++++++++++++++++++++
target/arm/mve_helper.c | 7 +++++++
target/arm/translate-mve.c | 5 +++++
target/arm/translate-neon.c | 18 ------------------
6 files changed, 72 insertions(+), 18 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)

+DEF_HELPER_FLAGS_4(mve_vshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline int times_2_plus_1(DisasContext *s, int x)
return x * 2 + 1;
}

+static inline int rsub_64(DisasContext *s, int x)
+{
+ return 64 - x;
+}
+
+static inline int rsub_32(DisasContext *s, int x)
+{
+ return 32 - x;
+}
+
+static inline int rsub_16(DisasContext *s, int x)
+{
+ return 16 - x;
+}
+
+static inline int rsub_8(DisasContext *s, int x)
+{
+ return 8 - x;
+}
+
static inline int arm_dc_feature(DisasContext *dc, int feature)
{
return (dc->features & (1ULL << feature)) != 0;
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2

+# Right shifts are encoded as N - shift, where N is the element size in bits.
+%rshift_i5 16:5 !function=rsub_32
+%rshift_i4 16:4 !function=rsub_16
+%rshift_i3 16:3 !function=rsub_8
+
+@2_shr_b .... .... .. 001 ... .... .... .... .... &2shift qd=%qd qm=%qm \
+ size=0 shift=%rshift_i3
+@2_shr_h .... .... .. 01 .... .... .... .... .... &2shift qd=%qd qm=%qm \
+ size=1 shift=%rshift_i4
+@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \
+ size=2 shift=%rshift_i5
+
# Vector loads and stores

# Widening loads and narrowing stores:
@@ -XXX,XX +XXX,XX @@ VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
+
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
+
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
+
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
+
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvuw, 4, uint32_t)
DO_2SHIFT(OP##b, 1, uint8_t, FN) \
DO_2SHIFT(OP##h, 2, uint16_t, FN) \
DO_2SHIFT(OP##w, 4, uint32_t, FN)
+#define DO_2SHIFT_S(OP, FN) \
+ DO_2SHIFT(OP##b, 1, int8_t, FN) \
+ DO_2SHIFT(OP##h, 2, int16_t, FN) \
+ DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN) \
DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvuw, 4, uint32_t)
DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

DO_2SHIFT_U(vshli_u, DO_VSHLU)
+DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
+DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
+DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHLI, vshli_u, false)
DO_2SHIFT(VQSHLI_S, vqshli_s, false)
DO_2SHIFT(VQSHLI_U, vqshli_u, false)
DO_2SHIFT(VQSHLUI, vqshlui_s, false)
+/* These right shifts use a left-shift helper with negated shift count */
+DO_2SHIFT(VSHRI_S, vshli_s, true)
+DO_2SHIFT(VSHRI_U, vshli_u, true)
+DO_2SHIFT(VRSHRI_S, vrshli_s, true)
+DO_2SHIFT(VRSHRI_U, vrshli_u, true)
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -XXX,XX +XXX,XX @@ static inline int plus1(DisasContext *s, int x)
return x + 1;
}

-static inline int rsub_64(DisasContext *s, int x)
-{
- return 64 - x;
-}
-
-static inline int rsub_32(DisasContext *s, int x)
-{
- return 32 - x;
-}
-static inline int rsub_16(DisasContext *s, int x)
-{
- return 16 - x;
-}
-
-static inline int rsub_8(DisasContext *s, int x)
-{
- return 8 - x;
-}
-
static inline int neon_3same_fp_size(DisasContext *s, int x)
{
/* Convert 0==fp32, 1==fp16 into a MO_* value */
--
2.20.1

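Two conventions from the new version of this patch are worth restating. At decode time the immediate field of a right shift holds N - shift (hence the rsub_* functions), and at translate time the right shift is handed to a left-shift helper as a negated count. A small standalone model of both steps, with hypothetical simplified helpers (the real ones also handle saturation, rounding and predication):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode-time transform: the immediate field holds esize - shift. */
    static int rsub_32(int field)
    {
        return 32 - field;
    }

    /* A left-shift helper where a negative count means shift right. */
    static int32_t vshl_s32(int32_t n, int shift)
    {
        if (shift >= 0) {
            return (int32_t)((uint32_t)n << shift);
        }
        /* >> of a negative value is arithmetic on the hosts QEMU supports */
        return n >> -shift;
    }

    int main(void)
    {
        int field = 29;              /* VSHRI #3 is encoded as 32 - 3 = 29 */
        int shift = rsub_32(field);  /* recovered shift count: 3 */
        printf("%d\n", vshl_s32(-64, -shift)); /* -64 >> 3 = -8 */
        return 0;
    }
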
Changed patch

Old version:

Implement the MVE VQDMULL scalar insn. This multiplies the top or
bottom half of each element by the scalar, doubles and saturates
to a double-width result.

Note that this encoding overlaps with VQADD and VQSUB; it uses
what in VQADD and VQSUB would be the 'size=0b11' encoding.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-30-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 5 +++
target/arm/mve.decode | 23 +++++++++++---
target/arm/mve_helper.c | 65 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 30 ++++++++++++++++++
4 files changed, 119 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
%qm 5:1 1:3
%qn 7:1 17:3

+# VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
+%size_28 28:1 !function=plus_1
+
&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
&2op qd qm qn size
@@ -XXX,XX +XXX,XX @@
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0

@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
+@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn

# Vector loads and stores

@@ -XXX,XX +XXX,XX @@ VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar

-VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
-VQADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
-VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
-VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+{
+ VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+ VQADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+ VQDMULLB_scalar 111 . 1110 0 . 11 ... 0 ... 0 1111 . 110 .... @2scalar_nosz \
+ size=%size_28
+}
+
+{
+ VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+ VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+ VQDMULLT_scalar 111 . 1110 0 . 11 ... 0 ... 1 1111 . 110 .... @2scalar_nosz \
+ size=%size_28
+}
+
VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar

VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar

+
# Predicate operations
%mask_22_13 22:1 13:3
VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

+/*
+ * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
+ * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
+ * SATMASK specifies which bits of the predicate mask matter for determining
+ * whether to propagate a saturation indication into FPSCR.QC -- for
+ * the 16x16->32 case we must check only the bit corresponding to the T or B
+ * half that we used, but for the 32x32->64 case we propagate if the mask
+ * bit is set for either half.
+ */
+#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ uint32_t rm) \
+ { \
+ LTYPE *d = vd; \
+ TYPE *n = vn; \
+ TYPE m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ bool qc = false; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ bool sat = false; \
+ LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \
+ mergemask(&d[H##LESIZE(le)], r, mask); \
+ qc |= sat && (mask & SATMASK); \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
+{
+ int64_t r = ((int64_t)n * m) * 2;
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
+}
+
+static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
+{
+ /* The multiply can't overflow, but the doubling might */
+ int64_t r = (int64_t)n * m;
+ if (r > INT64_MAX / 2) {
+ *sat = true;
+ return INT64_MAX;
+ } else if (r < INT64_MIN / 2) {
+ *sat = true;
+ return INT64_MIN;
+ } else {
+ return r * 2;
+ }
+}
+
+#define SATMASK16B 1
+#define SATMASK16T (1 << 2)
+#define SATMASK32 ((1 << 4) | 1)
+
+DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
+ do_qdmullh, SATMASK16B)
+DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
+ do_qdmullw, SATMASK32)
+DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
+ do_qdmullh, SATMASK16T)
+DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
+ do_qdmullw, SATMASK32)
+
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
m &= 0xff;
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
DO_2OP_SCALAR(VBRSR, vbrsr)

+static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
+{
+ static MVEGenTwoOpScalarFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vqdmullb_scalarh,
+ gen_helper_mve_vqdmullb_scalarw,
+ NULL,
+ };
+ if (a->qd == a->qn && a->size == MO_32) {
+ /* UNPREDICTABLE; we choose to undef */
+ return false;
+ }
+ return do_2op_scalar(s, a, fns[a->size]);
+}
+
+static bool trans_VQDMULLT_scalar(DisasContext *s, arg_2scalar *a)
+{
+ static MVEGenTwoOpScalarFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vqdmullt_scalarh,
+ gen_helper_mve_vqdmullt_scalarw,
+ NULL,
+ };
+ if (a->qd == a->qn && a->size == MO_32) {
+ /* UNPREDICTABLE; we choose to undef */
+ return false;
+ }
+ return do_2op_scalar(s, a, fns[a->size]);
+}
+
static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
MVEGenDualAccOpFn *fn)
{
--
2.20.1

New version:

Implement the MVE VSHLL (vector shift left long) insn. This has two
encodings: the T1 encoding is the usual shift-by-immediate format,
and the T2 encoding is a special case where the shift count is always
equal to the element size.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-10-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 9 +++++++
target/arm/mve.decode | 53 +++++++++++++++++++++++++++++++++++---
target/arm/mve_helper.c | 32 +++++++++++++++++++++++
target/arm/translate-mve.c | 15 +++++++++++
4 files changed, 105 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2

+@2_shll_b .... .... ... 01 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
+@2_shll_h .... .... ... 1 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
+# VSHLL encoding T2 where shift == esize
+@2_shll_esize_b .... .... .... 00 .. .... .... .... .... &2shift \
+ qd=%qd qm=%qm size=0 shift=8
+@2_shll_esize_h .... .... .... 01 .. .... .... .... .... &2shift \
+ qd=%qd qm=%qm size=1 shift=16
+
# Right shifts are encoded as N - shift, where N is the element size in bits.
%rshift_i5 16:5 !function=rsub_32
%rshift_i4 16:4 !function=rsub_16
@@ -XXX,XX +XXX,XX @@ VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op

-VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
-VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+# The VSHLL T2 encoding is not a @2op pattern, but is here because it
+# overlaps what would be size=0b11 VMULH/VRMULH
+{
+ VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
+ VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+ VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+ VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
+ VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+ VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+}

-VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
-VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+{
+ VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
+ VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+ VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+ VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
+ VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+ VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+}

VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
@@ -XXX,XX +XXX,XX @@ VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
+
+# VSHLL T1 encoding; the T2 VSHLL encoding is elsewhere in this file
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
+
+/*
+ * Long shifts taking half-sized inputs from top or bottom of the input
+ * vector and producing a double-width result. ESIZE, TYPE are for
+ * the input, and LESIZE, LTYPE for the output.
+ * Unlike the normal shift helpers, we do not handle negative shift counts,
+ * because the long shift is strictly left-only.
+ */
+#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ LTYPE *d = vd; \
+ TYPE *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ assert(shift <= 16); \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
+ mergemask(&d[H##LESIZE(le)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VSHLL_ALL(OP, TOP) \
+ DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \
+ DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \
+ DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \
+ DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \
+
+DO_VSHLL_ALL(vshllb, false)
+DO_VSHLL_ALL(vshllt, true)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_S, vshli_s, true)
DO_2SHIFT(VSHRI_U, vshli_u, true)
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
DO_2SHIFT(VRSHRI_U, vrshli_u, true)
+
+#define DO_VSHLL(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+ { \
+ static MVEGenTwoOpShiftFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ }; \
+ return do_2shift(s, a, fns[a->size], false); \
+ }
+
+DO_VSHLL(VSHLL_BS, vshllbs)
+DO_VSHLL(VSHLL_BU, vshllbu)
+DO_VSHLL(VSHLL_TS, vshllts)
+DO_VSHLL(VSHLL_TU, vshlltu)
--
2.20.1

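The subtle point in the old version's do_qdmullw() is worth spelling out: the 32x32 multiply always fits in an int64_t, but the architectural doubling can overflow it, so the result is compared against INT64_MAX / 2 before doubling. A standalone version of that check, following the patch's own logic:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static int64_t qdmull_w(int32_t n, int32_t m, bool *sat)
    {
        int64_t r = (int64_t)n * m;  /* the multiply itself cannot overflow */
        if (r > INT64_MAX / 2) {
            *sat = true;
            return INT64_MAX;
        } else if (r < INT64_MIN / 2) {
            *sat = true;
            return INT64_MIN;
        }
        return r * 2;
    }

    int main(void)
    {
        bool sat = false;
        /* INT32_MIN * INT32_MIN = 2^62, which overflows once doubled */
        int64_t r = qdmull_w(INT32_MIN, INT32_MIN, &sat);
        printf("%lld sat=%d\n", (long long)r, (int)sat);
        return 0;
    }

The 16x16 case needs no such care: the product doubled always fits in 64 bits, so do_qdmullh() can just compute and saturate afterwards.
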
Changed patch

Old version:

Implement the vector forms of the MVE VQDMULH and VQRDMULH insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-31-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 8 ++++++++
target/arm/mve.decode | 3 +++
target/arm/mve_helper.c | 27 +++++++++++++++++++++++++++
target/arm/translate-mve.c | 2 ++
4 files changed, 40 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vqdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op

+VQDMULH 1110 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
+VQRDMULH 1111 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
+
# Vector miscellaneous

VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
mve_advance_vpt(env); \
}

+#define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
#define DO_AND(N, M) ((N) & (M))
#define DO_BIC(N, M) ((N) & ~(M))
#define DO_ORR(N, M) ((N) | (M))
@@ -XXX,XX +XXX,XX @@ static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
INT32_MIN, INT32_MAX, s)

+DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
+DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
+DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)
+
+DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
+DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
+DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)
+
#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
uint32_t rm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULL_BS, vmullbs)
DO_2OP(VMULL_BU, vmullbu)
DO_2OP(VMULL_TS, vmullts)
DO_2OP(VMULL_TU, vmulltu)
+DO_2OP(VQDMULH, vqdmulh)
+DO_2OP(VQRDMULH, vqrdmulh)

static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
MVEGenTwoOpScalarFn fn)
--
2.20.1

New version:

Implement the MVE VSRI and VSLI insns, which perform a
shift-and-insert operation.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-11-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 8 ++++++++
target/arm/mve.decode | 9 ++++++++
target/arm/mve_helper.c | 42 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 3 +++
4 files changed, 62 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h

VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+# Shift-and-insert
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
+
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)

+/* Shift-and-insert; we always work with 64 bits at a time */
+#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ uint64_t *d = vd, *m = vm; \
+ uint16_t mask; \
+ uint64_t shiftmask; \
+ unsigned e; \
+ if (shift == 0 || shift == ESIZE * 8) { \
+ /* \
+ * Only VSLI can shift by 0; only VSRI can shift by <dt>. \
+ * The generic logic would give the right answer for 0 but \
+ * fails for <dt>. \
+ */ \
+ goto done; \
+ } \
+ assert(shift < ESIZE * 8); \
+ mask = mve_element_mask(env); \
+ /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \
+ shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
+ for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+ uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
+ (d[H8(e)] & ~shiftmask); \
+ mergemask(&d[H8(e)], r, mask); \
+ } \
+done: \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
+#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
+#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
+#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
+
+DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
+
/*
* Long shifts taking half-sized inputs from top or bottom of the input
* vector and producing a double-width result. ESIZE, TYPE are for
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_U, vshli_u, true)
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
DO_2SHIFT(VRSHRI_U, vrshli_u, true)

+DO_2SHIFT(VSRI, vsri, false)
+DO_2SHIFT(VSLI, vsli, false)
+
#define DO_VSHLL(INSN, FN) \
static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
{ \
--
2.20.1

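The heart of DO_2SHIFT_INSERT() in the new version is the shiftmask: VSLI keeps the low 'shift' bits of each destination element, VSRI keeps the high ones, and everything else comes from the shifted source. A sketch for a single 16-bit element, assuming nothing beyond standard C (the real helper applies the same mask to a whole 64-bit lane group via dup_const()):

    #include <stdint.h>
    #include <stdio.h>

    /* Local helper: a mask of 'len' one-bits starting at bit 'start'
     * (same role SHL_MASK plays via MAKE_64BIT_MASK above). */
    static uint16_t bit_mask16(unsigned start, unsigned len)
    {
        return (uint16_t)(((1u << len) - 1) << start);
    }

    /* VSLI on one 16-bit element: shifted-in bits come from m, the low
     * 'shift' bits of d are preserved. */
    static uint16_t vsli16(uint16_t d, uint16_t m, unsigned shift)
    {
        uint16_t mask = bit_mask16(shift, 16 - shift);
        return (uint16_t)((m << shift) & mask) | (d & ~mask);
    }

    int main(void)
    {
        /* insert 0xabcd << 4 into 0x1234, keeping the low nibble: bcd4 */
        printf("%04x\n", vsli16(0x1234, 0xabcd, 4));
        return 0;
    }
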
Changed patch

Old version:

Implement the MVE VQDMLADH and VQRDMLADH insns. These multiply
elements, and then add pairs of products, double, possibly round,
saturate and return the high half of the result.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-37-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 16 +++++++
target/arm/mve.decode | 5 +++
target/arm/mve_helper.c | 89 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 4 ++
4 files changed, 114 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vqrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vqrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vqdmladhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmladhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmladhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
VQRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
VQRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev

+VQDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
+VQDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
+VQRDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
+VQRDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+
# Vector miscellaneous

VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)

+/*
+ * Multiply add dual returning high half
+ * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
+ * whether to add the rounding constant, and the pointer to the
+ * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
+ * saturate to twice the input size and return the high half; or
+ * (A * B - C * D) etc for VQDMLSDH.
+ */
+#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ if ((e & 1) == XCHG) { \
+ TYPE r = FN(n[H##ESIZE(e)], \
+ m[H##ESIZE(e - XCHG)], \
+ n[H##ESIZE(e + (1 - 2 * XCHG))], \
+ m[H##ESIZE(e + (1 - XCHG))], \
+ ROUND, &sat); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ qc |= sat & mask & 1; \
+ } \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
+ int round, bool *sat)
+{
+ int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
+ return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
+}
+
+static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
+ int round, bool *sat)
+{
+ int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
+}
+
+static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
+ int round, bool *sat)
+{
+ int64_t m1 = (int64_t)a * b;
+ int64_t m2 = (int64_t)c * d;
+ int64_t r;
+ /*
+ * Architecturally we should do the entire add, double, round
+ * and then check for saturation. We do three saturating adds,
+ * but we need to be careful about the order. If the first
+ * m1 + m2 saturates then it's impossible for the *2+rc to
+ * bring it back into the non-saturated range. However, if
+ * m1 + m2 is negative then it's possible that doing the doubling
+ * would take the intermediate result below INT64_MAX and the

New version:

Implement the MVE shift-right-and-narrow insns VSHRN and VRSHRN.

do_urshr() is borrowed from sve_helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-12-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 10 ++++++++++
target/arm/mve.decode | 11 +++++++++++
target/arm/mve_helper.c | 40 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 15 ++++++++++++++
4 files changed, 76 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
+
+# Narrowing shifts (which only support b and h sizes)
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)
+
+/*
+ * Narrowing right shifts, taking a double sized input, shifting it
+ * and putting the result in either the top or bottom half of the output.
+ * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
+ */
+#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ LTYPE *m = vm; \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ TYPE r = FN(m[H##LESIZE(le)], shift); \
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VSHRN_ALL(OP, FN) \
+ DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
+ DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
+ DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
+ DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
+
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else if (sh == 64) {
+ return x >> 63;
+ } else {
+ return 0;
+ }
+}
+
+DO_VSHRN_ALL(vshrn, DO_SHR)
+DO_VSHRN_ALL(vrshrn, do_urshr)
126
+ * m1 + m2 is negative then it's possible that doing the doubling
127
+ * would take the intermediate result below INT64_MAX and the
128
+ * addition of the rounding constant then brings it back in range.
129
+ * So we add half the rounding constant before doubling rather
130
+ * than adding the rounding constant after the doubling.
131
+ */
132
+ if (sadd64_overflow(m1, m2, &r) ||
133
+ sadd64_overflow(r, (round << 30), &r) ||
134
+ sadd64_overflow(r, r, &r)) {
135
+ *sat = true;
136
+ return r < 0 ? INT32_MAX : INT32_MIN;
137
+ }
138
+ return r >> 32;
139
+}
140
+
141
+DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
142
+DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
143
+DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
144
+DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
145
+DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
146
+DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)
147
+
148
+DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
149
+DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
150
+DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
151
+DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
152
+DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
153
+DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)
154
+
155
#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
156
void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
157
uint32_t rm) \
158
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
100
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
159
index XXXXXXX..XXXXXXX 100644
101
index XXXXXXX..XXXXXXX 100644
160
--- a/target/arm/translate-mve.c
102
--- a/target/arm/translate-mve.c
161
+++ b/target/arm/translate-mve.c
103
+++ b/target/arm/translate-mve.c
162
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQSHL_S, vqshls)
104
@@ -XXX,XX +XXX,XX @@ DO_VSHLL(VSHLL_BS, vshllbs)
163
DO_2OP(VQSHL_U, vqshlu)
105
DO_VSHLL(VSHLL_BU, vshllbu)
164
DO_2OP(VQRSHL_S, vqrshls)
106
DO_VSHLL(VSHLL_TS, vshllts)
165
DO_2OP(VQRSHL_U, vqrshlu)
107
DO_VSHLL(VSHLL_TU, vshlltu)
166
+DO_2OP(VQDMLADH, vqdmladh)
108
+
167
+DO_2OP(VQDMLADHX, vqdmladhx)
109
+#define DO_2SHIFT_N(INSN, FN) \
168
+DO_2OP(VQRDMLADH, vqrdmladh)
110
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
169
+DO_2OP(VQRDMLADHX, vqrdmladhx)
111
+ { \
170
112
+ static MVEGenTwoOpShiftFn * const fns[] = { \
171
static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
113
+ gen_helper_mve_##FN##b, \
172
MVEGenTwoOpScalarFn fn)
114
+ gen_helper_mve_##FN##h, \
115
+ }; \
116
+ return do_2shift(s, a, fns[a->size], false); \
117
+ }
118
+
119
+DO_2SHIFT_N(VSHRNB, vshrnb)
120
+DO_2SHIFT_N(VSHRNT, vshrnt)
121
+DO_2SHIFT_N(VRSHRNB, vrshrnb)
122
+DO_2SHIFT_N(VRSHRNT, vrshrnt)
173
--
123
--
174
2.20.1
124
2.20.1
175
125
176
126
diff view generated by jsdifflib
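
As a quick standalone illustration of the bottom/top narrowing pattern and
the rounding shift used above -- a minimal sketch with made-up lane values,
not code from the QEMU tree (rshrn, m, d are illustrative names):

#include <stdint.h>
#include <stdio.h>

/* Rounded unsigned right shift for sh >= 1, as in do_urshr() above. */
static uint8_t rshrn(uint16_t x, unsigned sh)
{
    return (uint8_t)((x >> sh) + ((x >> (sh - 1)) & 1));
}

int main(void)
{
    uint16_t m[2] = { 0x0128, 0x0ff8 };         /* wide input lanes */
    uint8_t d[4]  = { 0xaa, 0xbb, 0xcc, 0xdd }; /* narrow output lanes */
    unsigned shift = 4;
    int top = 1;                                /* 0 = VRSHRNB, 1 = VRSHRNT */

    for (unsigned le = 0; le < 2; le++) {
        d[le * 2 + top] = rshrn(m[le], shift);  /* other half untouched */
    }
    /* 0x0128 >> 4 rounds to 0x13; 0x0ff8 >> 4 rounds to 0x100, which
     * wraps to 0x00 -- the saturating forms in the next patch fix that. */
    printf("%02x %02x %02x %02x\n", d[0], d[1], d[2], d[3]); /* aa 13 cc 00 */
    return 0;
}
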
Implement the MVE saturating shift-right-and-narrow insns
VQSHRN, VQSHRUN, VQRSHRN and VQRSHRUN.

do_srshr() is borrowed from sve_helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-13-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  30 +++++++++++
 target/arm/mve.decode      |  28 ++++++++++
 target/arm/mve_helper.c    | 104 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  12 +++++
 4 files changed, 174 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
 VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
 VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
 VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh)
     }
 }

+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else {
+        /* Rounding the sign bit always produces 0. */
+        return 0;
+    }
+}
+
 DO_VSHRN_ALL(vshrn, DO_SHR)
 DO_VSHRN_ALL(vrshrn, do_urshr)
+
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
+                                 bool *satp)
+{
+    if (val > max) {
+        *satp = true;
+        return max;
+    } else if (val < min) {
+        *satp = true;
+        return min;
+    } else {
+        return val;
+    }
+}
+
+/* Saturating narrowing right shifts */
+#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        LTYPE *m = vm; \
+        TYPE *d = vd; \
+        uint16_t mask = mve_element_mask(env); \
+        bool qc = false; \
+        unsigned le; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            bool sat = false; \
+            TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
+            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+            qc |= sat && (mask & 1 << (TOP * ESIZE)); \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
+
+#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
+
+#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
+
+#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
+
+#define DO_SHRN_SB(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
+#define DO_SHRN_UB(N, M, SATP) \
+    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+#define DO_SHRUN_B(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+
+#define DO_SHRN_SH(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
+#define DO_SHRN_UH(N, M, SATP) \
+    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+#define DO_SHRUN_H(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+
+#define DO_RSHRN_SB(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
+#define DO_RSHRN_UB(N, M, SATP) \
+    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
+#define DO_RSHRUN_B(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)
+
+#define DO_RSHRN_SH(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
+#define DO_RSHRN_UH(N, M, SATP) \
+    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
+#define DO_RSHRUN_H(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)
+
+DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
+DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
+DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
+DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
+DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
+DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)
+
+DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
+DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
+DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
+DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
+DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
+DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VSHRNB, vshrnb)
 DO_2SHIFT_N(VSHRNT, vshrnt)
 DO_2SHIFT_N(VRSHRNB, vrshrnb)
 DO_2SHIFT_N(VRSHRNT, vrshrnt)
+DO_2SHIFT_N(VQSHRNB_S, vqshrnb_s)
+DO_2SHIFT_N(VQSHRNT_S, vqshrnt_s)
+DO_2SHIFT_N(VQSHRNB_U, vqshrnb_u)
+DO_2SHIFT_N(VQSHRNT_U, vqshrnt_u)
+DO_2SHIFT_N(VQSHRUNB, vqshrunb)
+DO_2SHIFT_N(VQSHRUNT, vqshrunt)
+DO_2SHIFT_N(VQRSHRNB_S, vqrshrnb_s)
+DO_2SHIFT_N(VQRSHRNT_S, vqrshrnt_s)
+DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
+DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
+DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
+DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
--
2.20.1

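
A minimal standalone model of how the rounded shift and the saturation
compose for a VQRSHRNB.S16 lane, assuming the same semantics as the
patch's do_srshr() and do_sat_bhs() (srshr, sat and the values here are
illustrative, not QEMU APIs):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static int64_t srshr(int64_t x, unsigned sh)
{
    return sh < 64 ? (x >> sh) + ((x >> (sh - 1)) & 1) : 0;
}

static int64_t sat(int64_t val, int64_t min, int64_t max, bool *satp)
{
    if (val > max) { *satp = true; return max; }
    if (val < min) { *satp = true; return min; }
    return val;
}

int main(void)
{
    bool q = false;
    /* shift 4: 0x7ff8 rounds up to 0x800, then saturates to INT8_MAX */
    int8_t r = (int8_t)sat(srshr(0x7ff8, 4), INT8_MIN, INT8_MAX, &q);
    printf("r=%d qc=%d\n", r, q);   /* r=127 qc=1 */
    return 0;
}
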
Implement the MVE VSHLC insn, which performs a shift left of the
entire vector with carry in bits provided from a general purpose
register and carry out bits written back to that register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-14-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  2 ++
 target/arm/mve.decode      |  2 ++
 target/arm/mve_helper.c    | 38 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 30 ++++++++++++++++++++++++++++++
 4 files changed, 72 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
 VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
 VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
 VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
 DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
 DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
 DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
+
+uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
+                           uint32_t shift)
+{
+    uint32_t *d = vd;
+    uint16_t mask = mve_element_mask(env);
+    unsigned e;
+    uint32_t r;
+
+    /*
+     * For each 32-bit element, we shift it left, bringing in the
+     * low 'shift' bits of rdm at the bottom. Bits shifted out at
+     * the top become the new rdm, if the predicate mask permits.
+     * The final rdm value is returned to update the register.
+     * shift == 0 here means "shift by 32 bits".
+     */
+    if (shift == 0) {
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+            r = rdm;
+            if (mask & 1) {
+                rdm = d[H4(e)];
+            }
+            mergemask(&d[H4(e)], r, mask);
+        }
+    } else {
+        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);
+
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+            r = (d[H4(e)] << shift) | (rdm & shiftmask);
+            if (mask & 1) {
+                rdm = d[H4(e)] >> (32 - shift);
+            }
+            mergemask(&d[H4(e)], r, mask);
+        }
+    }
+    mve_advance_vpt(env);
+    return rdm;
+}
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
 DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
 DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
 DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
+
+static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
+{
+    /*
+     * Whole Vector Left Shift with Carry. The carry is taken
+     * from a general purpose register and written back there.
+     * An imm of 0 means "shift by 32".
+     */
+    TCGv_ptr qd;
+    TCGv_i32 rdm;
+
+    if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
+        return false;
+    }
+    if (a->rdm == 13 || a->rdm == 15) {
+        /* CONSTRAINED UNPREDICTABLE: we UNDEF */
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    qd = mve_qreg_ptr(a->qd);
+    rdm = load_reg(s, a->rdm);
+    gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
+    store_reg(s, a->rdm, rdm);
+    tcg_temp_free_ptr(qd);
+    mve_update_eci(s);
+    return true;
+}
--
2.20.1

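
A standalone sketch of the VSHLC data flow described above, for an
unpredicated vector and a shift of 8 (plain C with illustrative values;
the real helper additionally applies the predicate mask and treats a
shift of 0 as "shift by 32"):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Each lane shifts left, taking its low bits from the previous
     * lane's carry-out; rdm seeds the first lane and receives the
     * final carry-out, so looping VSHLC can shift a whole array. */
    uint32_t d[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
    uint32_t rdm = 0x000000a5;
    unsigned shift = 8;
    uint32_t shiftmask = (1u << shift) - 1;

    for (unsigned e = 0; e < 4; e++) {
        uint32_t r = (d[e] << shift) | (rdm & shiftmask);
        rdm = d[e] >> (32 - shift);
        d[e] = r;
    }
    printf("d = %08x %08x %08x %08x, rdm = %08x\n",
           d[0], d[1], d[2], d[3], rdm);
    /* d = 223344a5 66778811 aabbcc55 eeff0099, rdm = 000000dd */
    return 0;
}
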
Deleted patch

Implement the MVE VADD, VSUB and VMUL insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-13-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 12 ++++++++++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 14 ++++++++++++++
 target/arm/translate-mve.c | 16 ++++++++++++++++
 4 files changed, 47 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vbic, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vaddb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vaddw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vsubb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsubh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsubw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@

 @1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
 @1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
+@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
 @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0

 # Vector loads and stores
@@ -XXX,XX +XXX,XX @@ VORR 1110 1111 0 . 10 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
 VORN 1110 1111 0 . 11 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
 VEOR 1111 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz

+VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
+VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
+VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
         mve_advance_vpt(env); \
     }

+/* provide unsigned 2-op helpers for all sizes */
+#define DO_2OP_U(OP, FN) \
+    DO_2OP(OP##b, 1, uint8_t, FN) \
+    DO_2OP(OP##h, 2, uint16_t, FN) \
+    DO_2OP(OP##w, 4, uint32_t, FN)
+
 #define DO_AND(N, M) ((N) & (M))
 #define DO_BIC(N, M) ((N) & ~(M))
 #define DO_ORR(N, M) ((N) | (M))
@@ -XXX,XX +XXX,XX @@ DO_2OP(vbic, 8, uint64_t, DO_BIC)
 DO_2OP(vorr, 8, uint64_t, DO_ORR)
 DO_2OP(vorn, 8, uint64_t, DO_ORN)
 DO_2OP(veor, 8, uint64_t, DO_EOR)
+
+#define DO_ADD(N, M) ((N) + (M))
+#define DO_SUB(N, M) ((N) - (M))
+#define DO_MUL(N, M) ((N) * (M))
+
+DO_2OP_U(vadd, DO_ADD)
+DO_2OP_U(vsub, DO_SUB)
+DO_2OP_U(vmul, DO_MUL)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_LOGIC(VBIC, gen_helper_mve_vbic)
 DO_LOGIC(VORR, gen_helper_mve_vorr)
 DO_LOGIC(VORN, gen_helper_mve_vorn)
 DO_LOGIC(VEOR, gen_helper_mve_veor)
+
+#define DO_2OP(INSN, FN) \
+    static bool trans_##INSN(DisasContext *s, arg_2op *a) \
+    { \
+        static MVEGenTwoOpFn * const fns[] = { \
+            gen_helper_mve_##FN##b, \
+            gen_helper_mve_##FN##h, \
+            gen_helper_mve_##FN##w, \
+            NULL, \
+        }; \
+        return do_2op(s, a, fns[a->size]); \
+    }
+
+DO_2OP(VADD, vadd)
+DO_2OP(VSUB, vsub)
+DO_2OP(VMUL, vmul)
--
2.20.1

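
The per-byte predication these element-wise helpers implement through
mergemask() can be modelled in isolation. A rough standalone sketch of
one predicated VADD.16 beat (toy values; the names and the mask layout
shown here are illustrative, not taken from the QEMU tree):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t n[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint16_t m[8] = {10, 20, 30, 40, 50, 60, 70, 80};
    uint16_t d[8] = {0};
    uint16_t mask = 0x0f0f;    /* one predicate bit per result byte */

    for (int e = 0; e < 8; e++, mask >>= 2) {
        uint16_t r = n[e] + m[e];
        uint16_t bytemask = ((mask & 1) ? 0x00ffu : 0) |
                            ((mask & 2) ? 0xff00u : 0);
        d[e] = (d[e] & ~bytemask) | (r & bytemask); /* merge per byte */
    }
    for (int e = 0; e < 8; e++) {
        printf("%u ", d[e]);   /* 11 22 0 0 55 66 0 0 */
    }
    printf("\n");
    return 0;
}
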
Deleted patch

Implement the MVE VMULH insn, which performs a vector
multiply and returns the high half of the result.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-14-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  7 +++++++
 target/arm/mve.decode      |  3 +++
 target/arm/mve_helper.c    | 26 ++++++++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 38 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vsubw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulhsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
 VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
 VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op

+VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(veor, 8, uint64_t, DO_EOR)
 DO_2OP_U(vadd, DO_ADD)
 DO_2OP_U(vsub, DO_SUB)
 DO_2OP_U(vmul, DO_MUL)
+
+/*
+ * Because the computation type is at least twice as large as required,
+ * these work for both signed and unsigned source types.
+ */
+static inline uint8_t do_mulh_b(int32_t n, int32_t m)
+{
+    return (n * m) >> 8;
+}
+
+static inline uint16_t do_mulh_h(int32_t n, int32_t m)
+{
+    return (n * m) >> 16;
+}
+
+static inline uint32_t do_mulh_w(int64_t n, int64_t m)
+{
+    return (n * m) >> 32;
+}
+
+DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
+DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
+DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
+DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
+DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
+DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_LOGIC(VEOR, gen_helper_mve_veor)
 DO_2OP(VADD, vadd)
 DO_2OP(VSUB, vsub)
 DO_2OP(VMUL, vmul)
+DO_2OP(VMULH_S, vmulhs)
+DO_2OP(VMULH_U, vmulhu)
--
2.20.1

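
A standalone check of the widen-multiply-take-high-half trick used by
do_mulh_h() above (plain C; the values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Widen to int32_t, multiply, keep the high 16 bits; the wide
 * computation type makes the same code correct for signed and
 * unsigned 16-bit sources. */
static uint16_t mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

int main(void)
{
    printf("%u\n", mulh_h(20000, 30000));            /* 9155 */
    printf("%d\n", (int16_t)mulh_h(-20000, 30000));  /* -9156 */
    return 0;
}
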
Deleted patch

Implement the MVE VRMULH insn, which performs a rounding multiply
and then returns the high half.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-15-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  7 +++++++
 target/arm/mve.decode      |  3 +++
 target/arm/mve_helper.c    | 22 ++++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 34 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrmulhsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
 VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
 VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op

+VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint32_t do_mulh_w(int64_t n, int64_t m)
     return (n * m) >> 32;
 }

+static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
+{
+    return (n * m + (1U << 7)) >> 8;
+}
+
+static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
+{
+    return (n * m + (1U << 15)) >> 16;
+}
+
+static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
+{
+    return (n * m + (1U << 31)) >> 32;
+}
+
 DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
 DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
 DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
 DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
 DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
 DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)
+
+DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
+DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
+DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
+DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
+DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
+DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VSUB, vsub)
 DO_2OP(VMUL, vmul)
 DO_2OP(VMULH_S, vmulhs)
 DO_2OP(VMULH_U, vmulhu)
+DO_2OP(VRMULH_S, vrmulhs)
+DO_2OP(VRMULH_U, vrmulhu)
--
2.20.1

diff view generated by jsdifflib
Deleted patch
1
Implement the MVE VMAX and VMIN insns.
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210617121628.20116-16-peter.maydell@linaro.org
6
---
7
target/arm/helper-mve.h | 14 ++++++++++++++
8
target/arm/mve.decode | 5 +++++
9
target/arm/mve_helper.c | 14 ++++++++++++++
10
target/arm/translate-mve.c | 4 ++++
11
4 files changed, 37 insertions(+)
12
13
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-mve.h
16
+++ b/target/arm/helper-mve.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
18
DEF_HELPER_FLAGS_4(mve_vrmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
19
DEF_HELPER_FLAGS_4(mve_vrmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
20
DEF_HELPER_FLAGS_4(mve_vrmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
21
+
22
+DEF_HELPER_FLAGS_4(mve_vmaxsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
23
+DEF_HELPER_FLAGS_4(mve_vmaxsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
24
+DEF_HELPER_FLAGS_4(mve_vmaxsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
25
+DEF_HELPER_FLAGS_4(mve_vmaxub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
26
+DEF_HELPER_FLAGS_4(mve_vmaxuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
27
+DEF_HELPER_FLAGS_4(mve_vmaxuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
28
+
29
+DEF_HELPER_FLAGS_4(mve_vminsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
30
+DEF_HELPER_FLAGS_4(mve_vminsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
31
+DEF_HELPER_FLAGS_4(mve_vminsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
32
+DEF_HELPER_FLAGS_4(mve_vminub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
33
+DEF_HELPER_FLAGS_4(mve_vminuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
34
+DEF_HELPER_FLAGS_4(mve_vminuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
35
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/mve.decode
38
+++ b/target/arm/mve.decode
39
@@ -XXX,XX +XXX,XX @@ VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
40
VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
41
VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
42
43
+VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
44
+VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
45
+VMIN_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
46
+VMIN_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
47
+
48
# Vector miscellaneous
49
50
VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
51
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/target/arm/mve_helper.c
54
+++ b/target/arm/mve_helper.c
55
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
56
DO_2OP(OP##h, 2, uint16_t, FN) \
57
DO_2OP(OP##w, 4, uint32_t, FN)
58
59
+/* provide signed 2-op helpers for all sizes */
60
+#define DO_2OP_S(OP, FN) \
61
+ DO_2OP(OP##b, 1, int8_t, FN) \
62
+ DO_2OP(OP##h, 2, int16_t, FN) \
63
+ DO_2OP(OP##w, 4, int32_t, FN)
64
+
65
#define DO_AND(N, M) ((N) & (M))
66
#define DO_BIC(N, M) ((N) & ~(M))
67
#define DO_ORR(N, M) ((N) | (M))
68
@@ -XXX,XX +XXX,XX @@ DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
69
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
70
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
71
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
72
+
73
+#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
74
+#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))
75
+
76
+DO_2OP_S(vmaxs, DO_MAX)
77
+DO_2OP_U(vmaxu, DO_MAX)
78
+DO_2OP_S(vmins, DO_MIN)
79
+DO_2OP_U(vminu, DO_MIN)
80
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/target/arm/translate-mve.c
83
+++ b/target/arm/translate-mve.c
84
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULH_S, vmulhs)
85
DO_2OP(VMULH_U, vmulhu)
86
DO_2OP(VRMULH_S, vrmulhs)
87
DO_2OP(VRMULH_U, vrmulhu)
88
+DO_2OP(VMAX_S, vmaxs)
89
+DO_2OP(VMAX_U, vmaxu)
90
+DO_2OP(VMIN_S, vmins)
91
+DO_2OP(VMIN_U, vminu)
92
--
93
2.20.1
94
95
diff view generated by jsdifflib
Deleted patch

Implement the MVE VABD insn.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-17-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 7 +++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 5 +++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 17 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vminsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vminub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vminuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vminuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vabdsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
 VMIN_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
 VMIN_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op

+VABD_S 111 0 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
+VABD_U 111 1 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_S(vmaxs, DO_MAX)
 DO_2OP_U(vmaxu, DO_MAX)
 DO_2OP_S(vmins, DO_MIN)
 DO_2OP_U(vminu, DO_MIN)
+
+#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))
+
+DO_2OP_S(vabds, DO_ABD)
+DO_2OP_U(vabdu, DO_ABD)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMAX_S, vmaxs)
 DO_2OP(VMAX_U, vmaxu)
 DO_2OP(VMIN_S, vmins)
 DO_2OP(VMIN_U, vminu)
+DO_2OP(VABD_S, vabds)
+DO_2OP(VABD_U, vabdu)
--
2.20.1

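
The DO_ABD macro above avoids abs() entirely; a standalone sketch of why
the compare-and-subtract form is overflow-safe once the 8- and 16-bit
lane types have gone through the usual integer promotions (toy values):

#include <stdint.h>
#include <stdio.h>

#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))

int main(void)
{
    int8_t a = -128, b = 127;   /* worst case for a signed byte lane */
    uint8_t u1 = 250, u2 = 3;

    /* Both operands promote to int, so 127 - (-128) = 255 is computed
     * exactly; the helper then truncates back to the lane type. */
    printf("%d %d\n", DO_ABD(a, b), DO_ABD(u1, u2)); /* 255 247 */
    return 0;
}
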
Implement the MVE VADDLV insn; this is similar to VADDV, except
that it accumulates 32-bit elements into a 64-bit accumulator
stored in a pair of general-purpose registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-15-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  3 ++
 target/arm/mve.decode      |  6 +++-
 target/arm/mve_helper.c    | 19 ++++++++++++
 target/arm/translate-mve.c | 63 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)

+DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
+
 DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
 VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar

 # Vector add across vector
-VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
+{
+  VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
+  VADDLV 111 u:1 1110 1 ... 1001 ... 0 1111 00 a:1 0 qm:3 0 \
+         rdahi=%rdahi rdalo=%rdalo
+}

 # Predicate operations
 %mask_22_13 22:1 13:3
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvub, 1, uint8_t)
 DO_VADDV(vaddvuh, 2, uint16_t)
 DO_VADDV(vaddvuw, 4, uint32_t)

+#define DO_VADDLV(OP, TYPE, LTYPE) \
+    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
+                                    uint64_t ra) \
+    { \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        TYPE *m = vm; \
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
+            if (mask & 1) { \
+                ra += (LTYPE)m[H4(e)]; \
+            } \
+        } \
+        mve_advance_vpt(env); \
+        return ra; \
+    } \
+
+DO_VADDLV(vaddlv_s, int32_t, int64_t)
+DO_VADDLV(vaddlv_u, uint32_t, uint64_t)
+
 /* Shifts by immediate */
 #define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
     return true;
 }

+static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
+{
+    /*
87
+ * Vector Add Long Across Vector: accumulate the 32-bit
145
+ case ECI_NONE:
88
+ * elements of the vector into a 64-bit result stored in
146
+ return false;
89
+ * a pair of general-purpose registers.
147
+ case ECI_A0:
90
+ * No need to check Qm's bank: it is only 3 bits in decode.
148
+ case ECI_A0A1:
91
+ */
149
+ case ECI_A0A1A2:
92
+ TCGv_ptr qm;
150
+ case ECI_A0A1A2B0:
151
+ return true;
152
+ default:
153
+ g_assert_not_reached();
154
+ }
155
+}
156
+
157
static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
158
{
159
TCGv_i32 addr;
160
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULL_BS, vmullbs)
161
DO_2OP(VMULL_BU, vmullbu)
162
DO_2OP(VMULL_TS, vmullts)
163
DO_2OP(VMULL_TU, vmulltu)
164
+
165
+static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
166
+ MVEGenDualAccOpFn *fn)
167
+{
168
+ TCGv_ptr qn, qm;
169
+ TCGv_i64 rda;
93
+ TCGv_i64 rda;
170
+ TCGv_i32 rdalo, rdahi;
94
+ TCGv_i32 rdalo, rdahi;
171
+
95
+
172
+ if (!dc_isar_feature(aa32_mve, s) ||
96
+ if (!dc_isar_feature(aa32_mve, s)) {
173
+ !mve_check_qreg_bank(s, a->qn | a->qm) ||
174
+ !fn) {
175
+ return false;
97
+ return false;
176
+ }
98
+ }
177
+ /*
99
+ /*
178
+ * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
100
+ * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
179
+ * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
101
+ * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
...
...
183
+ }
105
+ }
184
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
106
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
185
+ return true;
107
+ return true;
186
+ }
108
+ }
187
+
109
+
188
+ qn = mve_qreg_ptr(a->qn);
189
+ qm = mve_qreg_ptr(a->qm);
190
+
191
+ /*
110
+ /*
192
+ * This insn is subject to beat-wise execution. Partial execution
111
+ * This insn is subject to beat-wise execution. Partial execution
193
+ * of an A=0 (no-accumulate) insn which does not execute the first
112
+ * of an A=0 (no-accumulate) insn which does not execute the first
194
+ * beat must start with the current rda value, not 0.
113
+ * beat must start with the current value of RdaHi:RdaLo, not zero.
195
+ */
114
+ */
196
+ if (a->a || mve_skip_first_beat(s)) {
115
+ if (a->a || mve_skip_first_beat(s)) {
116
+ /* Accumulate input from RdaHi:RdaLo */
197
+ rda = tcg_temp_new_i64();
117
+ rda = tcg_temp_new_i64();
198
+ rdalo = load_reg(s, a->rdalo);
118
+ rdalo = load_reg(s, a->rdalo);
199
+ rdahi = load_reg(s, a->rdahi);
119
+ rdahi = load_reg(s, a->rdahi);
200
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
120
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
201
+ tcg_temp_free_i32(rdalo);
121
+ tcg_temp_free_i32(rdalo);
202
+ tcg_temp_free_i32(rdahi);
122
+ tcg_temp_free_i32(rdahi);
203
+ } else {
123
+ } else {
124
+ /* Accumulate starting at zero */
204
+ rda = tcg_const_i64(0);
125
+ rda = tcg_const_i64(0);
205
+ }
126
+ }
206
+
127
+
207
+ fn(rda, cpu_env, qn, qm, rda);
128
+ qm = mve_qreg_ptr(a->qm);
208
+ tcg_temp_free_ptr(qn);
129
+ if (a->u) {
130
+ gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda);
131
+ } else {
132
+ gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda);
133
+ }
209
+ tcg_temp_free_ptr(qm);
134
+ tcg_temp_free_ptr(qm);
210
+
135
+
211
+ rdalo = tcg_temp_new_i32();
136
+ rdalo = tcg_temp_new_i32();
212
+ rdahi = tcg_temp_new_i32();
137
+ rdahi = tcg_temp_new_i32();
213
+ tcg_gen_extrl_i64_i32(rdalo, rda);
138
+ tcg_gen_extrl_i64_i32(rdalo, rda);
...
...
217
+ tcg_temp_free_i64(rda);
142
+ tcg_temp_free_i64(rda);
218
+ mve_update_eci(s);
143
+ mve_update_eci(s);
219
+ return true;
144
+ return true;
220
+}
145
+}
221
+
146
+
222
+static bool trans_VMLALDAV_S(DisasContext *s, arg_vmlaldav *a)
147
static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
223
+{
148
{
224
+ static MVEGenDualAccOpFn * const fns[4][2] = {
149
TCGv_ptr qd;
225
+ { NULL, NULL },
226
+ { gen_helper_mve_vmlaldavsh, gen_helper_mve_vmlaldavxsh },
227
+ { gen_helper_mve_vmlaldavsw, gen_helper_mve_vmlaldavxsw },
228
+ { NULL, NULL },
229
+ };
230
+ return do_long_dual_acc(s, a, fns[a->size][a->x]);
231
+}
232
+
233
+static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a)
234
+{
235
+ static MVEGenDualAccOpFn * const fns[4][2] = {
236
+ { NULL, NULL },
237
+ { gen_helper_mve_vmlaldavuh, NULL },
238
+ { gen_helper_mve_vmlaldavuw, NULL },
239
+ { NULL, NULL },
240
+ };
241
+ return do_long_dual_acc(s, a, fns[a->size][a->x]);
242
+}
243
--
150
--
244
2.20.1
151
2.20.1
245
152
246
153
1
Implement the vector form of the MVE VQDMULL insn.
1
The MVE extension to v8.1M includes some new shift instructions which
2
sit entirely within the non-coprocessor part of the encoding space
3
and which operate only on general-purpose registers. They take up
4
the space which was previously UNPREDICTABLE MOVS and ORRS encodings
5
with Rm == 13 or 15.
6
7
Implement the long shifts by immediate, which perform shifts on a
8
pair of general-purpose registers treated as a 64-bit quantity, with
9
an immediate shift count between 1 and 32.
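(As a rough C sketch of what "a pair of registers treated as a 64-bit
quantity" means -- the generated code below does this with
tcg_gen_concat_i32_i64() and tcg_gen_extrl/extrh_i64_i32(); the
function name here is made up for illustration:

    #include <stdint.h>

    /* Sketch: LSLL by immediate on the RdaHi:RdaLo pair, shift in [1,32] */
    static uint64_t lsll_ri(uint32_t rdalo, uint32_t rdahi, int shift)
    {
        uint64_t rda = ((uint64_t)rdahi << 32) | rdalo; /* RdaHi:RdaLo */
        rda <<= shift;            /* the whole 64-bit quantity shifts */
        return rda;  /* caller splits back: lo = rda, hi = rda >> 32 */
    }
)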
10
11
Awkwardly, because the MOVS and ORRS trans functions do not UNDEF for
12
the Rm==13,15 case, we need to explicitly emit code to UNDEF for the
13
cases where v8.1M now requires that. (Trying to change MOVS and ORRS
14
is too difficult, because the functions that generate the code are
15
shared between a dozen different kinds of arithmetic or logical
16
instruction for all A32, T16 and T32 encodings, and for some insns
17
and some encodings Rm==13,15 are valid.)
18
19
We make the helper functions we need for UQSHLL and SQSHLL take
20
a 32-bit value which the helpers cast to int8_t, because we'll need
21
these helpers also for the shift-by-register insns, where the shift
22
count might be < 0 or > 32.
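(An illustrative sketch of that cast, not code from this patch, with a
made-up function name: taking the count as a 32-bit value and narrowing
it inside the helper means the same helper works later when the count
comes from a register:

    #include <stdint.h>

    /* 0x01..0x20 mean left shifts; 0xff sign-extends to -1, i.e. a
     * one-bit right shift.
     */
    static int effective_shift(uint32_t count)
    {
        return (int8_t)count;  /* only the low 8 bits are significant */
    }
)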
2
23
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
25
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210617121628.20116-39-peter.maydell@linaro.org
26
Message-id: 20210628135835.6690-16-peter.maydell@linaro.org
6
---
27
---
7
target/arm/helper-mve.h | 5 +++++
28
target/arm/helper-mve.h | 3 ++
8
target/arm/mve.decode | 5 +++++
29
target/arm/translate.h | 1 +
9
target/arm/mve_helper.c | 30 ++++++++++++++++++++++++++++++
30
target/arm/t32.decode | 28 +++++++++++++
10
target/arm/translate-mve.c | 30 ++++++++++++++++++++++++++++++
31
target/arm/mve_helper.c | 10 +++++
11
4 files changed, 70 insertions(+)
32
target/arm/translate.c | 90 +++++++++++++++++++++++++++++++++++++++++
33
5 files changed, 132 insertions(+)
12
34
13
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
35
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-mve.h
37
--- a/target/arm/helper-mve.h
16
+++ b/target/arm/helper-mve.h
38
+++ b/target/arm/helper-mve.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
39
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
40
DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
41
20
42
DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
21
+DEF_HELPER_FLAGS_4(mve_vqdmullbh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
43
+
22
+DEF_HELPER_FLAGS_4(mve_vqdmullbw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
44
+DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
23
+DEF_HELPER_FLAGS_4(mve_vqdmullth, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
45
+DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
24
+DEF_HELPER_FLAGS_4(mve_vqdmulltw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
46
diff --git a/target/arm/translate.h b/target/arm/translate.h
25
+
47
index XXXXXXX..XXXXXXX 100644
26
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
48
--- a/target/arm/translate.h
27
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
49
+++ b/target/arm/translate.h
28
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
50
@@ -XXX,XX +XXX,XX @@ typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
29
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
51
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
30
index XXXXXXX..XXXXXXX 100644
52
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
31
--- a/target/arm/mve.decode
53
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
32
+++ b/target/arm/mve.decode
54
+typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
55
56
/**
57
* arm_tbflags_from_tb:
58
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/t32.decode
61
+++ b/target/arm/t32.decode
33
@@ -XXX,XX +XXX,XX @@
62
@@ -XXX,XX +XXX,XX @@
34
@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
63
&mcr !extern cp opc1 crn crm opc2 rt
35
@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
64
&mcrr !extern cp opc1 crm rt rt2
36
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
65
37
+@2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
66
+&mve_shl_ri rdalo rdahi shim
38
+ size=%size_28
67
+
39
68
+# rdahi: bits [3:1] from insn, bit 0 is 1
40
# The _rev suffix indicates that Vn and Vm are reversed. This is
69
+# rdalo: bits [3:1] from insn, bit 0 is 0
41
# the case for shifts. In the Arm ARM these insns are documented
70
+%rdahi_9 9:3 !function=times_2_plus_1
42
@@ -XXX,XX +XXX,XX @@ VQDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
71
+%rdalo_17 17:3 !function=times_2
43
VQRDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
72
+
44
VQRDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
73
# Data-processing (register)
45
74
46
+VQDMULLB 111 . 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 1 @2op_sz28
75
%imm5_12_6 12:3 6:2
47
+VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
76
@@ -XXX,XX +XXX,XX @@
48
+
77
@S_xrr_shi ....... .... . rn:4 .... .... .. shty:2 rm:4 \
49
# Vector miscellaneous
78
&s_rrr_shi shim=%imm5_12_6 s=1 rd=0
50
79
51
VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
80
+@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
81
+ &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
82
+
83
{
84
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
85
AND_rrri 1110101 0000 . .... 0 ... .... .... .... @s_rrr_shi
86
}
87
BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
88
{
89
+ # The v8.1M MVE shift insns overlap in encoding with MOVS/ORRS
90
+ # and are distinguished by having Rm==13 or 15. Those are UNPREDICTABLE
91
+ # cases for MOVS/ORRS. We decode the MVE cases first, ensuring that
92
+ # they explicitly call unallocated_encoding() for cases that must UNDEF
93
+ # (eg "using a new shift insn on a v8.1M CPU without MVE"), and letting
94
+ # the rest fall through (where ORR_rrri and MOV_rxri will end up
95
+ # handling them as r13 and r15 accesses with the same semantics as A32).
96
+ [
97
+ LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
98
+ LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
99
+ ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
100
+
101
+ UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
102
+ URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
103
+ SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
104
+ SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
105
+ ]
106
+
107
MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
108
ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
109
}
52
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
110
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
53
index XXXXXXX..XXXXXXX 100644
111
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/mve_helper.c
112
--- a/target/arm/mve_helper.c
55
+++ b/target/arm/mve_helper.c
113
+++ b/target/arm/mve_helper.c
56
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
114
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
57
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
115
mve_advance_vpt(env);
58
do_qdmullw, SATMASK32)
116
return rdm;
117
}
118
+
119
+uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
120
+{
121
+ return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
122
+}
123
+
124
+uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
125
+{
126
+ return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
127
+}
128
diff --git a/target/arm/translate.c b/target/arm/translate.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/target/arm/translate.c
131
+++ b/target/arm/translate.c
132
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
133
return true;
134
}
59
135
60
+/*
136
+/*
61
+ * Long saturating ops
137
+ * v8.1M MVE wide-shifts
62
+ */
138
+ */
63
+#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
139
+static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
64
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
140
+ WideShiftImmFn *fn)
65
+ void *vm) \
141
+{
66
+ { \
142
+ TCGv_i64 rda;
67
+ LTYPE *d = vd; \
143
+ TCGv_i32 rdalo, rdahi;
68
+ TYPE *n = vn, *m = vm; \
144
+
69
+ uint16_t mask = mve_element_mask(env); \
145
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
70
+ unsigned le; \
146
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
71
+ bool qc = false; \
72
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
73
+ bool sat = false; \
74
+ LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \
75
+ LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \
76
+ mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \
77
+ qc |= sat && (mask & SATMASK); \
78
+ } \
79
+ if (qc) { \
80
+ env->vfp.qc[0] = qc; \
81
+ } \
82
+ mve_advance_vpt(env); \
83
+ }
84
+
85
+DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
86
+DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
87
+DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
88
+DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
89
+
90
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
91
{
92
m &= 0xff;
93
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate-mve.c
96
+++ b/target/arm/translate-mve.c
97
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQDMLSDHX, vqdmlsdhx)
98
DO_2OP(VQRDMLSDH, vqrdmlsdh)
99
DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
100
101
+static bool trans_VQDMULLB(DisasContext *s, arg_2op *a)
102
+{
103
+ static MVEGenTwoOpFn * const fns[] = {
104
+ NULL,
105
+ gen_helper_mve_vqdmullbh,
106
+ gen_helper_mve_vqdmullbw,
107
+ NULL,
108
+ };
109
+ if (a->size == MO_32 && (a->qd == a->qm || a->qd == a->qn)) {
110
+ /* UNPREDICTABLE; we choose to undef */
111
+ return false;
147
+ return false;
112
+ }
148
+ }
113
+ return do_2op(s, a, fns[a->size]);
149
+ if (a->rdahi == 15) {
114
+}
150
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
115
+
116
+static bool trans_VQDMULLT(DisasContext *s, arg_2op *a)
117
+{
118
+ static MVEGenTwoOpFn * const fns[] = {
119
+ NULL,
120
+ gen_helper_mve_vqdmullth,
121
+ gen_helper_mve_vqdmulltw,
122
+ NULL,
123
+ };
124
+ if (a->size == MO_32 && (a->qd == a->qm || a->qd == a->qn)) {
125
+ /* UNPREDICTABLE; we choose to undef */
126
+ return false;
151
+ return false;
127
+ }
152
+ }
128
+ return do_2op(s, a, fns[a->size]);
153
+ if (!dc_isar_feature(aa32_mve, s) ||
129
+}
154
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
130
+
155
+ a->rdahi == 13) {
131
static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
156
+ /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
132
MVEGenTwoOpScalarFn fn)
157
+ unallocated_encoding(s);
133
{
158
+ return true;
159
+ }
160
+
161
+ if (a->shim == 0) {
162
+ a->shim = 32;
163
+ }
164
+
165
+ rda = tcg_temp_new_i64();
166
+ rdalo = load_reg(s, a->rdalo);
167
+ rdahi = load_reg(s, a->rdahi);
168
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
169
+
170
+ fn(rda, rda, a->shim);
171
+
172
+ tcg_gen_extrl_i64_i32(rdalo, rda);
173
+ tcg_gen_extrh_i64_i32(rdahi, rda);
174
+ store_reg(s, a->rdalo, rdalo);
175
+ store_reg(s, a->rdahi, rdahi);
176
+ tcg_temp_free_i64(rda);
177
+
178
+ return true;
179
+}
180
+
181
+static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
182
+{
183
+ return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
184
+}
185
+
186
+static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
187
+{
188
+ return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
189
+}
190
+
191
+static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
192
+{
193
+ return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
194
+}
195
+
196
+static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
197
+{
198
+ gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
199
+}
200
+
201
+static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
202
+{
203
+ return do_mve_shl_ri(s, a, gen_mve_sqshll);
204
+}
205
+
206
+static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
207
+{
208
+ gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
209
+}
210
+
211
+static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
212
+{
213
+ return do_mve_shl_ri(s, a, gen_mve_uqshll);
214
+}
215
+
216
+static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
217
+{
218
+ return do_mve_shl_ri(s, a, gen_srshr64_i64);
219
+}
220
+
221
+static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
222
+{
223
+ return do_mve_shl_ri(s, a, gen_urshr64_i64);
224
+}
225
+
226
/*
227
* Multiply and multiply accumulate
228
*/
134
--
229
--
135
2.20.1
230
2.20.1
136
231
137
232
1
Implement the MVE VBRSR insn, which reverses a specified
1
Implement the MVE long shifts by register, which perform shifts on a
2
number of bits in each element, setting the rest to zero.
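(A C sketch of the per-element operation for the 8-bit lane case,
mirroring the do_vbrsrb() helper in this patch; the loop form is for
clarity only:

    #include <stdint.h>

    static uint8_t vbrsr8(uint8_t n, uint32_t m)
    {
        uint8_t r = 0;
        unsigned i;

        m &= 0xff;
        if (m == 0) {
            return 0;
        }
        if (m > 8) {
            m = 8;      /* cannot reverse more bits than the lane has */
        }
        for (i = 0; i < m; i++) {
            r |= ((n >> i) & 1) << (m - 1 - i); /* bit i -> bit m-1-i */
        }
        return r;       /* bits m..7 stay zero */
    }
)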
2
pair of general-purpose registers treated as a 64-bit quantity, with
3
the shift count in another general-purpose register, which might be
4
either positive or negative.
5
6
Like the long-shifts-by-immediate, these encodings sit in the space
7
that was previously the UNPREDICTABLE MOVS/ORRS with Rm==13,15.
8
Because LSLL_rr and ASRL_rr overlap with both MOV_rxri/ORR_rrri and
9
also with CSEL (as one of the previously-UNPREDICTABLE Rm==13 cases),
10
we have to move the CSEL pattern into the same decodetree group.
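(Sketch of the "signed count" convention, under the same assumption as
the helpers below that the count has already been narrowed to int8_t;
saturation and rounding are omitted here for brevity:

    #include <stdint.h>

    static int64_t shl_by_signed_count(int64_t n, int count)
    {
        if (count >= 0) {                       /* shift left */
            return count >= 64 ? 0 : (int64_t)((uint64_t)n << count);
        }
        count = -count;                         /* negative: shift right */
        return count >= 64 ? n >> 63 : n >> count;
    }

This is why e.g. ASRL can be implemented by negating the count and
reusing the saturating-left-shift primitive, as mve_sshrl does below.)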
3
11
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210617121628.20116-26-peter.maydell@linaro.org
14
Message-id: 20210628135835.6690-17-peter.maydell@linaro.org
7
---
15
---
8
target/arm/helper-mve.h | 4 ++++
16
target/arm/helper-mve.h | 6 +++
9
target/arm/mve.decode | 1 +
17
target/arm/translate.h | 1 +
10
target/arm/mve_helper.c | 43 ++++++++++++++++++++++++++++++++++++++
18
target/arm/t32.decode | 16 +++++--
11
target/arm/translate-mve.c | 1 +
19
target/arm/mve_helper.c | 93 +++++++++++++++++++++++++++++++++++++++++
12
4 files changed, 49 insertions(+)
20
target/arm/translate.c | 69 ++++++++++++++++++++++++++++++
21
5 files changed, 182 insertions(+), 3 deletions(-)
13
22
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
23
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
25
--- a/target/arm/helper-mve.h
17
+++ b/target/arm/helper-mve.h
26
+++ b/target/arm/helper-mve.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vhsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
DEF_HELPER_FLAGS_4(mve_vhsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
20
DEF_HELPER_FLAGS_4(mve_vhsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
21
30
22
+DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_3(mve_sshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
23
+DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_3(mve_ushll, TCG_CALL_NO_RWG, i64, env, i64, i32)
24
+DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
25
+
34
DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
26
DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
35
+DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
27
DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
36
+DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
28
DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
37
+DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
29
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
38
+DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
30
index XXXXXXX..XXXXXXX 100644
39
diff --git a/target/arm/translate.h b/target/arm/translate.h
31
--- a/target/arm/mve.decode
40
index XXXXXXX..XXXXXXX 100644
32
+++ b/target/arm/mve.decode
41
--- a/target/arm/translate.h
33
@@ -XXX,XX +XXX,XX @@ VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
42
+++ b/target/arm/translate.h
34
VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
43
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
35
VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
44
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
36
VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
45
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
37
+VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
46
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
47
+typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
48
49
/**
50
* arm_tbflags_from_tb:
51
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
52
index XXXXXXX..XXXXXXX 100644
53
--- a/target/arm/t32.decode
54
+++ b/target/arm/t32.decode
55
@@ -XXX,XX +XXX,XX @@
56
&mcrr !extern cp opc1 crm rt rt2
57
58
&mve_shl_ri rdalo rdahi shim
59
+&mve_shl_rr rdalo rdahi rm
60
61
# rdahi: bits [3:1] from insn, bit 0 is 1
62
# rdalo: bits [3:1] from insn, bit 0 is 0
63
@@ -XXX,XX +XXX,XX @@
64
65
@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
66
&mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
67
+@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
68
+ &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
69
70
{
71
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
72
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
73
URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
74
SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
75
SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
76
+
77
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
78
+ ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
79
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
80
+ SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
81
+ UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
82
+ SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
83
]
84
85
MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
86
ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
87
+
88
+ # v8.1M CSEL and friends
89
+ CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
90
}
91
{
92
MVN_rxri 1110101 0011 . 1111 0 ... .... .... .... @s_rxr_shi
93
@@ -XXX,XX +XXX,XX @@ SBC_rrri 1110101 1011 . .... 0 ... .... .... .... @s_rrr_shi
94
}
95
RSB_rrri 1110101 1110 . .... 0 ... .... .... .... @s_rrr_shi
96
97
-# v8.1M CSEL and friends
98
-CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
99
-
100
# Data-processing (register-shifted register)
101
102
MOV_rxrr 1111 1010 0 shty:2 s:1 rm:4 1111 rd:4 0000 rs:4 \
38
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
103
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
39
index XXXXXXX..XXXXXXX 100644
104
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/mve_helper.c
105
--- a/target/arm/mve_helper.c
41
+++ b/target/arm/mve_helper.c
106
+++ b/target/arm/mve_helper.c
42
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
107
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
43
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
108
return rdm;
44
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)
109
}
45
110
46
+static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
111
+uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
47
+{
112
+{
48
+ m &= 0xff;
113
+ return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
49
+ if (m == 0) {
114
+}
115
+
116
+uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
117
+{
118
+ return do_uqrshl_d(n, (int8_t)shift, false, NULL);
119
+}
120
+
121
uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
122
{
123
return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
124
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
125
{
126
return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
127
}
128
+
129
+uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
130
+{
131
+ return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
132
+}
133
+
134
+uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
135
+{
136
+ return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
137
+}
138
+
139
+/* Operate on 64-bit values, but saturate at 48 bits */
140
+static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
141
+ bool round, uint32_t *sat)
142
+{
143
+ if (shift <= -48) {
144
+ /* Rounding the sign bit always produces 0. */
145
+ if (round) {
146
+ return 0;
147
+ }
148
+ return src >> 63;
149
+ } else if (shift < 0) {
150
+ if (round) {
151
+ src >>= -shift - 1;
152
+ return (src >> 1) + (src & 1);
153
+ }
154
+ return src >> -shift;
155
+ } else if (shift < 48) {
156
+ int64_t val = src << shift;
157
+ int64_t extval = sextract64(val, 0, 48);
158
+ if (!sat || val == extval) {
159
+ return extval;
160
+ }
161
+ } else if (!sat || src == 0) {
50
+ return 0;
162
+ return 0;
51
+ }
163
+ }
52
+ n = revbit8(n);
164
+
53
+ if (m < 8) {
165
+ *sat = 1;
54
+ n >>= 8 - m;
166
+ return (1ULL << 47) - (src >= 0);
55
+ }
167
+}
56
+ return n;
168
+
57
+}
169
+/* Operate on 64-bit values, but saturate at 48 bits */
58
+
170
+static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
59
+static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
171
+ bool round, uint32_t *sat)
60
+{
172
+{
61
+ m &= 0xff;
173
+ uint64_t val, extval;
62
+ if (m == 0) {
174
+
175
+ if (shift <= -(48 + round)) {
63
+ return 0;
176
+ return 0;
64
+ }
177
+ } else if (shift < 0) {
65
+ n = revbit16(n);
178
+ if (round) {
66
+ if (m < 16) {
179
+ val = src >> (-shift - 1);
67
+ n >>= 16 - m;
180
+ val = (val >> 1) + (val & 1);
68
+ }
181
+ } else {
69
+ return n;
182
+ val = src >> -shift;
70
+}
183
+ }
71
+
184
+ extval = extract64(val, 0, 48);
72
+static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
185
+ if (!sat || val == extval) {
73
+{
186
+ return extval;
74
+ m &= 0xff;
187
+ }
75
+ if (m == 0) {
188
+ } else if (shift < 48) {
189
+ uint64_t val = src << shift;
190
+ uint64_t extval = extract64(val, 0, 48);
191
+ if (!sat || val == extval) {
192
+ return extval;
193
+ }
194
+ } else if (!sat || src == 0) {
76
+ return 0;
195
+ return 0;
77
+ }
196
+ }
78
+ n = revbit32(n);
197
+
79
+ if (m < 32) {
198
+ *sat = 1;
80
+ n >>= 32 - m;
199
+ return MAKE_64BIT_MASK(0, 48);
81
+ }
200
+}
82
+ return n;
201
+
83
+}
202
+uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
84
+
203
+{
85
+DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
204
+ return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
86
+DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
205
+}
87
+DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
206
+
207
+uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
208
+{
209
+ return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
210
+}
211
diff --git a/target/arm/translate.c b/target/arm/translate.c
212
index XXXXXXX..XXXXXXX 100644
213
--- a/target/arm/translate.c
214
+++ b/target/arm/translate.c
215
@@ -XXX,XX +XXX,XX @@ static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
216
return do_mve_shl_ri(s, a, gen_urshr64_i64);
217
}
218
219
+static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
220
+{
221
+ TCGv_i64 rda;
222
+ TCGv_i32 rdalo, rdahi;
223
+
224
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
225
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
226
+ return false;
227
+ }
228
+ if (a->rdahi == 15) {
229
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
230
+ return false;
231
+ }
232
+ if (!dc_isar_feature(aa32_mve, s) ||
233
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
234
+ a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
235
+ a->rm == a->rdahi || a->rm == a->rdalo) {
236
+ /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
237
+ unallocated_encoding(s);
238
+ return true;
239
+ }
240
+
241
+ rda = tcg_temp_new_i64();
242
+ rdalo = load_reg(s, a->rdalo);
243
+ rdahi = load_reg(s, a->rdahi);
244
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
245
+
246
+ /* The helper takes care of the sign-extension of the low 8 bits of Rm */
247
+ fn(rda, cpu_env, rda, cpu_R[a->rm]);
248
+
249
+ tcg_gen_extrl_i64_i32(rdalo, rda);
250
+ tcg_gen_extrh_i64_i32(rdahi, rda);
251
+ store_reg(s, a->rdalo, rdalo);
252
+ store_reg(s, a->rdahi, rdahi);
253
+ tcg_temp_free_i64(rda);
254
+
255
+ return true;
256
+}
257
+
258
+static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
259
+{
260
+ return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
261
+}
262
+
263
+static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
264
+{
265
+ return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
266
+}
267
+
268
+static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
269
+{
270
+ return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
271
+}
272
+
273
+static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
274
+{
275
+ return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
276
+}
277
+
278
+static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
279
+{
280
+ return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
281
+}
282
+
283
+static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
284
+{
285
+ return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
286
+}
88
+
287
+
89
/*
288
/*
90
* Multiply add long dual accumulate ops.
289
* Multiply and multiply accumulate
91
*/
290
*/
92
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/target/arm/translate-mve.c
95
+++ b/target/arm/translate-mve.c
96
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VHADD_S_scalar, vhadds_scalar)
97
DO_2OP_SCALAR(VHADD_U_scalar, vhaddu_scalar)
98
DO_2OP_SCALAR(VHSUB_S_scalar, vhsubs_scalar)
99
DO_2OP_SCALAR(VHSUB_U_scalar, vhsubu_scalar)
100
+DO_2OP_SCALAR(VBRSR, vbrsr)
101
102
static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
103
MVEGenDualAccOpFn *fn)
104
--
291
--
105
2.20.1
292
2.20.1
106
293
107
294
1
Implement the MVE VADDV insn, which performs an addition
1
Implement the MVE shifts by immediate, which perform shifts
2
across vector lanes.
2
on a single general-purpose register.
3
4
These patterns overlap with the long-shifts-by-immediate,
5
so we have to rearrange the grouping a little here.
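(For reference, the value computed by e.g. URSHR is the rounding right
shift sketched below; sh == 32 is a valid immediate (encoded as
shim == 0 and fixed up in the trans function), which is why
gen_urshr32_i32() grows a special case in this patch:

    #include <stdint.h>

    static uint32_t urshr32(uint32_t n, int sh)       /* sh in [1, 32] */
    {
        uint64_t wide = (uint64_t)n + (1ULL << (sh - 1)); /* rounding bit */
        return (uint32_t)(wide >> sh);  /* 64-bit math makes sh==32 safe */
    }
)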
3
6
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210617121628.20116-44-peter.maydell@linaro.org
9
Message-id: 20210628135835.6690-18-peter.maydell@linaro.org
7
---
10
---
8
target/arm/helper-mve.h | 7 +++++++
11
target/arm/helper-mve.h | 3 ++
9
target/arm/mve.decode | 2 ++
12
target/arm/translate.h | 1 +
10
target/arm/mve_helper.c | 24 +++++++++++++++++++++
13
target/arm/t32.decode | 31 ++++++++++++++-----
11
target/arm/translate-mve.c | 43 ++++++++++++++++++++++++++++++++++++++
14
target/arm/mve_helper.c | 10 ++++++
12
4 files changed, 76 insertions(+)
15
target/arm/translate.c | 68 +++++++++++++++++++++++++++++++++++++++--
16
5 files changed, 104 insertions(+), 9 deletions(-)
13
17
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
18
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
20
--- a/target/arm/helper-mve.h
17
+++ b/target/arm/helper-mve.h
21
+++ b/target/arm/helper-mve.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
19
23
DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
20
DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
24
DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
21
DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
25
DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
22
+
26
+
23
+DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
27
+DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
24
+DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
28
+DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
25
+DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
29
diff --git a/target/arm/translate.h b/target/arm/translate.h
26
+DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
30
index XXXXXXX..XXXXXXX 100644
27
+DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
31
--- a/target/arm/translate.h
28
+DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
32
+++ b/target/arm/translate.h
29
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
33
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
30
index XXXXXXX..XXXXXXX 100644
34
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
31
--- a/target/arm/mve.decode
35
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
32
+++ b/target/arm/mve.decode
36
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
33
@@ -XXX,XX +XXX,XX @@ VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
37
+typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
34
VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
38
35
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
39
/**
36
40
* arm_tbflags_from_tb:
37
+# Vector add across vector
41
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
38
+VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
42
index XXXXXXX..XXXXXXX 100644
39
43
--- a/target/arm/t32.decode
40
# Predicate operations
44
+++ b/target/arm/t32.decode
41
%mask_22_13 22:1 13:3
45
@@ -XXX,XX +XXX,XX @@
46
47
&mve_shl_ri rdalo rdahi shim
48
&mve_shl_rr rdalo rdahi rm
49
+&mve_sh_ri rda shim
50
51
# rdahi: bits [3:1] from insn, bit 0 is 1
52
# rdalo: bits [3:1] from insn, bit 0 is 0
53
@@ -XXX,XX +XXX,XX @@
54
&mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
55
@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
56
&mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
57
+@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
58
+ &mve_sh_ri shim=%imm5_12_6
59
60
{
61
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
62
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
63
# the rest fall through (where ORR_rrri and MOV_rxri will end up
64
# handling them as r13 and r15 accesses with the same semantics as A32).
65
[
66
- LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
67
- LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
68
- ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
69
+ {
70
+ UQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 00 1111 @mve_sh_ri
71
+ LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
72
+ UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
73
+ }
74
75
- UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
76
- URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
77
- SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
78
- SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
79
+ {
80
+ URSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 01 1111 @mve_sh_ri
81
+ LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
82
+ URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
83
+ }
84
+
85
+ {
86
+ SRSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 10 1111 @mve_sh_ri
87
+ ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
88
+ SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
89
+ }
90
+
91
+ {
92
+ SQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 11 1111 @mve_sh_ri
93
+ SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
94
+ }
95
96
LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
97
ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
42
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
98
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
43
index XXXXXXX..XXXXXXX 100644
99
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/mve_helper.c
100
--- a/target/arm/mve_helper.c
45
+++ b/target/arm/mve_helper.c
101
+++ b/target/arm/mve_helper.c
46
@@ -XXX,XX +XXX,XX @@ DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64
102
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
47
103
{
48
DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
104
return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
49
DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
50
+
51
+/* Vector add across vector */
52
+#define DO_VADDV(OP, ESIZE, TYPE) \
53
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
54
+ uint32_t ra) \
55
+ { \
56
+ uint16_t mask = mve_element_mask(env); \
57
+ unsigned e; \
58
+ TYPE *m = vm; \
59
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
60
+ if (mask & 1) { \
61
+ ra += m[H##ESIZE(e)]; \
62
+ } \
63
+ } \
64
+ mve_advance_vpt(env); \
65
+ return ra; \
66
+ } \
67
+
68
+DO_VADDV(vaddvsb, 1, uint8_t)
69
+DO_VADDV(vaddvsh, 2, uint16_t)
70
+DO_VADDV(vaddvsw, 4, uint32_t)
71
+DO_VADDV(vaddvub, 1, uint8_t)
72
+DO_VADDV(vaddvuh, 2, uint16_t)
73
+DO_VADDV(vaddvuw, 4, uint32_t)
74
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/translate-mve.c
77
+++ b/target/arm/translate-mve.c
78
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
79
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
80
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
81
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
82
+typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
83
84
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
85
static inline long mve_qreg_offset(unsigned reg)
86
@@ -XXX,XX +XXX,XX @@ static bool trans_VPST(DisasContext *s, arg_VPST *a)
87
mve_update_and_store_eci(s);
88
return true;
89
}
105
}
90
+
106
+
91
+static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
107
+uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
92
+{
108
+{
93
+ /* VADDV: vector add across vector */
109
+ return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
94
+ static MVEGenVADDVFn * const fns[4][2] = {
110
+}
95
+ { gen_helper_mve_vaddvsb, gen_helper_mve_vaddvub },
111
+
96
+ { gen_helper_mve_vaddvsh, gen_helper_mve_vaddvuh },
112
+uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
97
+ { gen_helper_mve_vaddvsw, gen_helper_mve_vaddvuw },
113
+{
98
+ { NULL, NULL }
114
+ return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
99
+ };
115
+}
100
+ TCGv_ptr qm;
116
diff --git a/target/arm/translate.c b/target/arm/translate.c
101
+ TCGv_i32 rda;
117
index XXXXXXX..XXXXXXX 100644
102
+
118
--- a/target/arm/translate.c
119
+++ b/target/arm/translate.c
120
@@ -XXX,XX +XXX,XX @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
121
122
static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
123
{
124
- TCGv_i32 t = tcg_temp_new_i32();
125
+ TCGv_i32 t;
126
127
+ /* Handle shift by the input size for the benefit of trans_SRSHR_ri */
128
+ if (sh == 32) {
129
+ tcg_gen_movi_i32(d, 0);
130
+ return;
131
+ }
132
+ t = tcg_temp_new_i32();
133
tcg_gen_extract_i32(t, a, sh - 1, 1);
134
tcg_gen_sari_i32(d, a, sh);
135
tcg_gen_add_i32(d, d, t);
136
@@ -XXX,XX +XXX,XX @@ static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
137
138
static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
139
{
140
- TCGv_i32 t = tcg_temp_new_i32();
141
+ TCGv_i32 t;
142
143
+ /* Handle shift by the input size for the benefit of trans_URSHR_ri */
144
+ if (sh == 32) {
145
+ tcg_gen_extract_i32(d, a, sh - 1, 1);
146
+ return;
147
+ }
148
+ t = tcg_temp_new_i32();
149
tcg_gen_extract_i32(t, a, sh - 1, 1);
150
tcg_gen_shri_i32(d, a, sh);
151
tcg_gen_add_i32(d, d, t);
152
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
153
return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
154
}
155
156
+static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
157
+{
158
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
159
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
160
+ return false;
161
+ }
103
+ if (!dc_isar_feature(aa32_mve, s) ||
162
+ if (!dc_isar_feature(aa32_mve, s) ||
104
+ a->size == 3) {
163
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
105
+ return false;
164
+ a->rda == 13 || a->rda == 15) {
106
+ }
165
+ /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
107
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
166
+ unallocated_encoding(s);
108
+ return true;
167
+ return true;
109
+ }
168
+ }
110
+
169
+
111
+ /*
170
+ if (a->shim == 0) {
112
+ * This insn is subject to beat-wise execution. Partial execution
171
+ a->shim = 32;
113
+ * of an A=0 (no-accumulate) insn which does not execute the first
172
+ }
114
+ * beat must start with the current value of Rda, not zero.
173
+ fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
115
+ */
174
+
116
+ if (a->a || mve_skip_first_beat(s)) {
117
+ /* Accumulate input from Rda */
118
+ rda = load_reg(s, a->rda);
119
+ } else {
120
+ /* Accumulate starting at zero */
121
+ rda = tcg_const_i32(0);
122
+ }
123
+
124
+ qm = mve_qreg_ptr(a->qm);
125
+ fns[a->size][a->u](rda, cpu_env, qm, rda);
126
+ store_reg(s, a->rda, rda);
127
+ tcg_temp_free_ptr(qm);
128
+
129
+ mve_update_eci(s);
130
+ return true;
175
+ return true;
131
+}
176
+}
177
+
178
+static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
179
+{
180
+ return do_mve_sh_ri(s, a, gen_urshr32_i32);
181
+}
182
+
183
+static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
184
+{
185
+ return do_mve_sh_ri(s, a, gen_srshr32_i32);
186
+}
187
+
188
+static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
189
+{
190
+ gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
191
+}
192
+
193
+static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
194
+{
195
+ return do_mve_sh_ri(s, a, gen_mve_sqshl);
196
+}
197
+
198
+static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
199
+{
200
+ gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
201
+}
202
+
203
+static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
204
+{
205
+ return do_mve_sh_ri(s, a, gen_mve_uqshl);
206
+}
207
+
208
/*
209
* Multiply and multiply accumulate
210
*/
132
--
211
--
133
2.20.1
212
2.20.1
134
213
135
214
1
Implement MVE VHADD and VHSUB insns, which perform an addition
1
Implement the MVE shifts by register, which perform
2
or subtraction and then halve the result.
2
shifts on a single general-purpose register.
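(Sketch of the halving trick used for VHADD/VHSUB: doing the add or
subtract at double width keeps the carry or borrow bit that a 32-bit
operation would lose, exactly as the do_vhadd_*/do_vhsub_* helpers
below do:

    #include <stdint.h>

    static int32_t vhadd_s32(int32_t n, int32_t m)
    {
        return (int32_t)(((int64_t)n + m) >> 1); /* no overflow at 64 bits */
    }

    static uint32_t vhsub_u32(uint32_t n, uint32_t m)
    {
        return (uint32_t)(((uint64_t)n - m) >> 1); /* wrap keeps the borrow */
    }
)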
3
3
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210617121628.20116-18-peter.maydell@linaro.org
6
Message-id: 20210628135835.6690-19-peter.maydell@linaro.org
7
---
7
---
8
target/arm/helper-mve.h | 14 ++++++++++++++
8
target/arm/helper-mve.h | 2 ++
9
target/arm/mve.decode | 5 +++++
9
target/arm/translate.h | 1 +
10
target/arm/mve_helper.c | 25 +++++++++++++++++++++++++
10
target/arm/t32.decode | 18 ++++++++++++++----
11
target/arm/translate-mve.c | 4 ++++
11
target/arm/mve_helper.c | 10 ++++++++++
12
4 files changed, 48 insertions(+)
12
target/arm/translate.c | 30 ++++++++++++++++++++++++++++++
13
5 files changed, 57 insertions(+), 4 deletions(-)
13
14
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
17
--- a/target/arm/helper-mve.h
17
+++ b/target/arm/helper-mve.h
18
+++ b/target/arm/helper-mve.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vabdsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
19
DEF_HELPER_FLAGS_4(mve_vabdub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
20
20
DEF_HELPER_FLAGS_4(mve_vabduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
21
DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
21
DEF_HELPER_FLAGS_4(mve_vabduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
22
DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
23
+DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
24
+DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32)
25
diff --git a/target/arm/translate.h b/target/arm/translate.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate.h
28
+++ b/target/arm/translate.h
29
@@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
30
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
31
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
32
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
33
+typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
34
35
/**
36
* arm_tbflags_from_tb:
37
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/t32.decode
40
+++ b/target/arm/t32.decode
41
@@ -XXX,XX +XXX,XX @@
42
&mve_shl_ri rdalo rdahi shim
43
&mve_shl_rr rdalo rdahi rm
44
&mve_sh_ri rda shim
45
+&mve_sh_rr rda rm
46
47
# rdahi: bits [3:1] from insn, bit 0 is 1
48
# rdalo: bits [3:1] from insn, bit 0 is 0
49
@@ -XXX,XX +XXX,XX @@
50
&mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
51
@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
52
&mve_sh_ri shim=%imm5_12_6
53
+@mve_sh_rr ....... .... . rda:4 rm:4 .... .... .... &mve_sh_rr
54
55
{
56
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
57
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
58
SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
59
}
60
61
- LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
62
- ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
63
- UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
64
- SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
65
+ {
66
+ UQRSHL_rr 1110101 0010 1 .... .... 1111 0000 1101 @mve_sh_rr
67
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
68
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
69
+ }
22
+
70
+
23
+DEF_HELPER_FLAGS_4(mve_vhaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
71
+ {
24
+DEF_HELPER_FLAGS_4(mve_vhaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
72
+    SQRSHR_rr    1110101 0010 1 .... .... 1111 0010 1101 @mve_sh_rr
+    ASRL_rr      1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
+    SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
+  }
+
   UQRSHLL48_rr  1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
   SQRSHRL48_rr  1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
 ]
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
 {
     return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
 }
+
+uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
+}
+
+uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
     return do_mve_sh_ri(s, a, gen_mve_uqshl);
 }
 
+static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
+        a->rm == a->rda) {
+        /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+    fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
+    return true;
+}
+
+static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+    return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
+}
+
+static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+    return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--
2.20.1


+DEF_HELPER_FLAGS_4(mve_vhaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vhsubsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMIN_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
 VABD_S 111 0 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
 VABD_U 111 1 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
 
+VHADD_S 111 0 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
+VHADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
+VHSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
+VHSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vminu, DO_MIN)
 
 DO_2OP_S(vabds, DO_ABD)
 DO_2OP_U(vabdu, DO_ABD)
+
+static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
+{
+    return ((uint64_t)n + m) >> 1;
+}
+
+static inline int32_t do_vhadd_s(int32_t n, int32_t m)
+{
+    return ((int64_t)n + m) >> 1;
+}
+
+static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
+{
+    return ((uint64_t)n - m) >> 1;
+}
+
+static inline int32_t do_vhsub_s(int32_t n, int32_t m)
+{
+    return ((int64_t)n - m) >> 1;
+}
+
+DO_2OP_S(vhadds, do_vhadd_s)
+DO_2OP_U(vhaddu, do_vhadd_u)
+DO_2OP_S(vhsubs, do_vhsub_s)
+DO_2OP_U(vhsubu, do_vhsub_u)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMIN_S, vmins)
 DO_2OP(VMIN_U, vminu)
 DO_2OP(VABD_S, vabds)
 DO_2OP(VABD_U, vabdu)
+DO_2OP(VHADD_S, vhadds)
+DO_2OP(VHADD_U, vhaddu)
+DO_2OP(VHSUB_S, vhsubs)
+DO_2OP(VHSUB_U, vhsubu)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VMULL insn, which multiplies two single
width integer elements to produce a double width result.
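
A rough standalone sketch of the per-element semantics, with predication omitted (the function name, the fixed 8-bit element size and the element count below are invented for illustration, not taken from the patch):

#include <stdint.h>

/*
 * Illustrative only: one 128-bit beat of a signed 8->16 bit VMULL.
 * The "B" form reads the even (bottom) input elements, the "T" form
 * the odd (top) ones; widening before the multiply keeps the full
 * product, so it cannot overflow.
 */
static void vmull_s8_sketch(int16_t d[8], const int8_t *n,
                            const int8_t *m, int top)
{
    for (int le = 0; le < 8; le++) {
        int idx = le * 2 + top;           /* bottom: even, top: odd */
        d[le] = (int16_t)((int16_t)n[idx] * (int16_t)m[idx]);
    }
}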

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-19-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 14 ++++++++++++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 34 ++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 57 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vhsubsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vhsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vhsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vhsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmullbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VHADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
 VHSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
 VHSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
 
+VMULL_BS 111 0 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
+VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
+VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
+VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
     DO_2OP(OP##h, 2, int16_t, FN) \
     DO_2OP(OP##w, 4, int32_t, FN)
 
+/*
+ * "Long" operations where two half-sized inputs (taken from either the
+ * top or the bottom of the input vector) produce a double-width result.
+ * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
+ */
+#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)               \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
+    {                                                                   \
+        LTYPE *d = vd;                                                  \
+        TYPE *n = vn, *m = vm;                                          \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned le;                                                    \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
+            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)],              \
+                         m[H##ESIZE(le * 2 + TOP)]);                    \
+            mergemask(&d[H##LESIZE(le)], r, mask);                      \
+        }                                                               \
+        mve_advance_vpt(env);                                           \
+    }
+
 #define DO_AND(N, M)  ((N) & (M))
 #define DO_BIC(N, M)  ((N) & ~(M))
 #define DO_ORR(N, M)  ((N) | (M))
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vadd, DO_ADD)
 DO_2OP_U(vsub, DO_SUB)
 DO_2OP_U(vmul, DO_MUL)
 
+DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
+DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
+DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
+DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
+DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
+DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)
+
+DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
+DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
+DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
+DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
+DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
+DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)
+
 /*
  * Because the computation type is at least twice as large as required,
  * these work for both signed and unsigned source types.
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VHADD_S, vhadds)
 DO_2OP(VHADD_U, vhaddu)
 DO_2OP(VHSUB_S, vhsubs)
 DO_2OP(VHSUB_U, vhsubu)
+DO_2OP(VMULL_BS, vmullbs)
+DO_2OP(VMULL_BU, vmullbu)
+DO_2OP(VMULL_TS, vmullts)
+DO_2OP(VMULL_TU, vmulltu)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE insn VMLSLDAV, which multiplies source elements,
alternately adding and subtracting them, and accumulates into a
64-bit result in a pair of general purpose registers.
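
A minimal sketch of the accumulation pattern, assuming eight 16-bit elements and ignoring predication and beat-wise execution (the name and fixed sizes are invented for the example):

#include <stdint.h>

/* Illustrative only: even-numbered element products are added,
 * odd-numbered ones subtracted, into a 64-bit accumulator. */
static int64_t vmlsldav_s16_sketch(const int16_t n[8], const int16_t m[8],
                                   int64_t acc)
{
    for (int e = 0; e < 8; e++) {
        int64_t prod = (int64_t)n[e] * m[e];
        acc += (e & 1) ? -prod : prod;
    }
    return acc;
}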

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-21-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  5 +++++
 target/arm/mve.decode      |  2 ++
 target/arm/mve_helper.c    |  5 +++++
 target/arm/translate-mve.c | 11 +++++++++++
 4 files changed, 23 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmlaldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 
 DEF_HELPER_FLAGS_4(mve_vmlaldavuh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vmlsldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
              qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
 VMLALDAV_S   1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
 VMLALDAV_U   1111 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
+
+VMLSLDAV     1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)
 
 DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
 DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)
+
+DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
+DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
+DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
+DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a)
     };
     return do_long_dual_acc(s, a, fns[a->size][a->x]);
 }
+
+static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[4][2] = {
+        { NULL, NULL },
+        { gen_helper_mve_vmlsldavsh, gen_helper_mve_vmlsldavxsh },
+        { gen_helper_mve_vmlsldavsw, gen_helper_mve_vmlsldavxsw },
+        { NULL, NULL },
+    };
+    return do_long_dual_acc(s, a, fns[a->size][a->x]);
+}
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the scalar forms of the MVE VSUB and VMUL insns.
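
A sketch of what a scalar form does, assuming four 32-bit lanes and no predication (the name and fixed sizes are invented for the example):

#include <stdint.h>

/* Illustrative only: the same general-purpose register value is
 * applied to every element of the vector operand. */
static void vmul_scalar_s32_sketch(int32_t d[4], const int32_t n[4],
                                   int32_t rm)
{
    for (int e = 0; e < 4; e++) {
        d[e] = n[e] * rm;
    }
}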

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-24-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 2 ++
 target/arm/mve_helper.c    | 2 ++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 14 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(mve_vsub_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsub_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsub_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmul_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmul_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmul_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_no
 # Scalar operations
 
 VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
+VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar
+VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhsubu, do_vhsub_u)
     DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
 
 DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
+DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
+DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
 
 /*
  * Multiply add long dual accumulate ops.
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
 }
 
 DO_2OP_SCALAR(VADD_scalar, vadd_scalar)
+DO_2OP_SCALAR(VSUB_scalar, vsub_scalar)
+DO_2OP_SCALAR(VMUL_scalar, vmul_scalar)
 
 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
                              MVEGenDualAccOpFn *fn)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the scalar variants of the MVE VHADD and VHSUB insns.
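
For reference, the per-lane arithmetic these insns reuse (mirroring the existing do_vhadd_s/do_vhsub_s helpers in the diff below) can be sketched as:

#include <stdint.h>

/* Compute the sum or difference in a wider type, then halve it,
 * so the intermediate value never overflows. */
static int32_t vhadd_s32_sketch(int32_t n, int32_t m)
{
    return (int32_t)(((int64_t)n + m) >> 1);
}

static int32_t vhsub_s32_sketch(int32_t n, int32_t m)
{
    return (int32_t)(((int64_t)n - m) >> 1);
}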

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-25-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 16 ++++++++++++++++
 target/arm/mve.decode      |  4 ++++
 target/arm/mve_helper.c    |  8 ++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 32 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmul_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vmul_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vmul_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(mve_vhadds_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhadds_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhadds_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_no
 VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
 VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar
 VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
+VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
+VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
+VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
+VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhsubu, do_vhsub_u)
     DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
     DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
     DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
+#define DO_2OP_SCALAR_S(OP, FN) \
+    DO_2OP_SCALAR(OP##b, 1, int8_t, FN) \
+    DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
+    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)
 
 DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
 DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
 DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
+DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
+DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
+DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
+DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)
 
 /*
  * Multiply add long dual accumulate ops.
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
 DO_2OP_SCALAR(VADD_scalar, vadd_scalar)
 DO_2OP_SCALAR(VSUB_scalar, vsub_scalar)
 DO_2OP_SCALAR(VMUL_scalar, vmul_scalar)
+DO_2OP_SCALAR(VHADD_S_scalar, vhadds_scalar)
+DO_2OP_SCALAR(VHADD_U_scalar, vhaddu_scalar)
+DO_2OP_SCALAR(VHSUB_S_scalar, vhsubs_scalar)
+DO_2OP_SCALAR(VHSUB_U_scalar, vhsubu_scalar)
 
 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
                              MVEGenDualAccOpFn *fn)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VPST insn, which sets the predicate mask
fields in the VPR to the immediate value encoded in the insn.
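
A sketch of the VPR field update, relying only on MASK01 and MASK23 being adjacent 4-bit fields as the patch does; the field positions below are assumptions made for illustration, not the R_V7M_VPR_* definitions the patch uses:

#include <stdint.h>

#define MASK01_SHIFT 16   /* assumed position of VPR.MASK01 */
#define MASK23_SHIFT 20   /* assumed position of VPR.MASK23 */

/* If beat 1 was already executed (per PSR.ECI), only MASK23 may be
 * written; otherwise both adjacent fields are deposited at once. */
static uint32_t vpst_update_sketch(uint32_t vpr, uint32_t mask,
                                   int skip_beat1)
{
    if (skip_beat1) {
        return (vpr & ~(0xfu << MASK23_SHIFT)) | (mask << MASK23_SHIFT);
    }
    return (vpr & ~(0xffu << MASK01_SHIFT)) |
           ((mask | (mask << 4)) << MASK01_SHIFT);
}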

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-27-peter.maydell@linaro.org
---
 target/arm/mve.decode      |  4 +++
 target/arm/translate-mve.c | 59 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
 VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
 VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
 VBRSR          1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
+
+# Predicate operations
+%mask_22_13    22:1 13:3
+VPST           1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static void mve_update_eci(DisasContext *s)
     }
 }
 
+static void mve_update_and_store_eci(DisasContext *s)
+{
+    /*
+     * For insns which don't call a helper function that will call
+     * mve_advance_vpt(), this version updates s->eci and also stores
+     * it out to the CPUState field.
+     */
+    if (s->eci) {
+        mve_update_eci(s);
+        store_cpu_field(tcg_constant_i32(s->eci << 4), condexec_bits);
+    }
+}
+
 static bool mve_skip_first_beat(DisasContext *s)
 {
     /* Return true if PSR.ECI says we must skip the first beat of this insn */
@@ -XXX,XX +XXX,XX @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
     };
     return do_long_dual_acc(s, a, fns[a->x]);
 }
+
+static bool trans_VPST(DisasContext *s, arg_VPST *a)
+{
+    TCGv_i32 vpr;
+
+    /* mask == 0 is a "related encoding" */
+    if (!dc_isar_feature(aa32_mve, s) || !a->mask) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+    /*
+     * Set the VPR mask fields. We take advantage of MASK01 and MASK23
+     * being adjacent fields in the register.
+     *
+     * This insn is not predicated, but it is subject to beat-wise
+     * execution, and the mask is updated on the odd-numbered beats.
+     * So if PSR.ECI says we should skip beat 1, we mustn't update the
+     * 01 mask field.
+     */
+    vpr = load_cpu_field(v7m.vpr);
+    switch (s->eci) {
+    case ECI_NONE:
+    case ECI_A0:
+        /* Update both 01 and 23 fields */
+        tcg_gen_deposit_i32(vpr, vpr,
+                            tcg_constant_i32(a->mask | (a->mask << 4)),
+                            R_V7M_VPR_MASK01_SHIFT,
+                            R_V7M_VPR_MASK01_LENGTH + R_V7M_VPR_MASK23_LENGTH);
+        break;
+    case ECI_A0A1:
+    case ECI_A0A1A2:
+    case ECI_A0A1A2B0:
+        /* Update only the 23 mask field */
+        tcg_gen_deposit_i32(vpr, vpr,
+                            tcg_constant_i32(a->mask),
+                            R_V7M_VPR_MASK23_SHIFT, R_V7M_VPR_MASK23_LENGTH);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    store_cpu_field(vpr, v7m.vpr);
+    mve_update_and_store_eci(s);
+    return true;
+}
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VQDMULH and VQRDMULH scalar insns, which multiply
elements by the scalar, double, possibly round, take the high half
and saturate.
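
A sketch of one 16-bit lane under those rules, mirroring the folded shift described in the patch comment below (the function name and the fixed lane size are illustrative):

#include <stdint.h>
#include <stdbool.h>

/* "(2*n*m) >> 16" is folded into ">> 15"; the rounding variant adds
 * 1 << 14 before the shift. Out-of-range results saturate and set
 * the sticky saturation flag. */
static int16_t qrdmulh_h_sketch(int16_t n, int16_t m, bool round, bool *sat)
{
    int64_t r = ((int64_t)n * m + (round ? 1 << 14 : 0)) >> 15;
    if (r > INT16_MAX) { *sat = true; return INT16_MAX; }
    if (r < INT16_MIN) { *sat = true; return INT16_MIN; }
    return (int16_t)r;
}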

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-29-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      |  3 +++
 target/arm/mve_helper.c    | 25 +++++++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 38 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
 VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
 VBRSR          1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
 
+VQDMULH_scalar  1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
+VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
+
 # Predicate operations
 %mask_22_13    22:1 13:3
 VPST           1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 #define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
 #define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)
 
+/*
+ * For QDMULH and QRDMULH we simplify "double and shift by esize" into
+ * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
+ */
+#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
+                                        INT8_MIN, INT8_MAX, s)
+#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
+                                        INT16_MIN, INT16_MAX, s)
+#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
+                                        INT32_MIN, INT32_MAX, s)
+
+#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
+                                         INT8_MIN, INT8_MAX, s)
+#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
+                                         INT16_MIN, INT16_MAX, s)
+#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
+                                         INT32_MIN, INT32_MAX, s)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                 uint32_t rm) \
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
 DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
 DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)
 
+DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
+DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
+DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
+
 static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
 {
     m &= 0xff;
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VQADD_S_scalar, vqadds_scalar)
 DO_2OP_SCALAR(VQADD_U_scalar, vqaddu_scalar)
 DO_2OP_SCALAR(VQSUB_S_scalar, vqsubs_scalar)
 DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar)
+DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
+DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
 DO_2OP_SCALAR(VBRSR, vbrsr)
 
 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the vector forms of the MVE VQADD and VQSUB insns.
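
A sketch of one signed 8-bit lane, assuming the usual widen-then-clamp approach the DO_SQADD_* macros take (names are illustrative):

#include <stdint.h>
#include <stdbool.h>

/* Add in a wider type, clamp to the element range, and record
 * saturation in the sticky flag. */
static int8_t sqadd8_sketch(int8_t n, int8_t m, bool *sat)
{
    int32_t r = (int32_t)n + m;
    if (r > INT8_MAX) { *sat = true; return INT8_MAX; }
    if (r < INT8_MIN) { *sat = true; return INT8_MIN; }
    return (int8_t)r;
}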

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-32-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 16 ++++++++++++++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 14 ++++++++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 39 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vqaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqsubsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
 VQDMULH  1110 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
 VQRDMULH 1111 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
 
+VQADD_S 111 0 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
+VQADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
+VQSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
+VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
 DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
 DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)
 
+DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
+DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
+DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
+DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
+DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
+DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)
+
+DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
+DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
+DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
+DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
+DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
+DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                 uint32_t rm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULL_TS, vmullts)
 DO_2OP(VMULL_TU, vmulltu)
 DO_2OP(VQDMULH, vqdmulh)
 DO_2OP(VQRDMULH, vqrdmulh)
+DO_2OP(VQADD_S, vqadds)
+DO_2OP(VQADD_U, vqaddu)
+DO_2OP(VQSUB_S, vqsubs)
+DO_2OP(VQSUB_U, vqsubu)
 
 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VQSHL insn (encoding T4, which is the
vector-shift-by-vector version).

The DO_SQSHL_OP and DO_UQSHL_OP macros here are derived from
the neon_helper.c code for qshl_u{8,16,32} and qshl_s{8,16,32}.
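
A sketch of an unsigned 8-bit lane with qshl_u8-style semantics: the shift count comes from the other vector and is signed, so negative counts shift right, and left shifts that lose set bits saturate. This is an illustration only, not the neon_helper.c code:

#include <stdint.h>
#include <stdbool.h>

static uint8_t uqshl8_sketch(uint8_t n, int8_t sh, bool *sat)
{
    if (sh >= 8) {                 /* everything shifted out left */
        if (n) { *sat = true; return UINT8_MAX; }
        return 0;
    }
    if (sh <= -8) {                /* everything shifted out right */
        return 0;
    }
    if (sh < 0) {
        return n >> -sh;           /* negative count: logical right shift */
    }
    uint8_t r = (uint8_t)(n << sh);
    if ((r >> sh) != n) {          /* bits were lost: saturate */
        *sat = true;
        return UINT8_MAX;
    }
    return r;
}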

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-33-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      | 12 ++++++++++++
 target/arm/mve_helper.c    | 34 ++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 56 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vqshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 @2op .... .... .. size:2 .... .... .... .... ....   &2op qd=%qd qm=%qm qn=%qn
 @2op_nosz .... .... .... .... .... .... .... ....   &2op qd=%qd qm=%qm qn=%qn size=0
 
+# The _rev suffix indicates that Vn and Vm are reversed. This is
+# the case for shifts. In the Arm ARM these insns are documented
+# with the Vm and Vn fields in their usual places, but in the
+# assembly the operands are listed "backwards", ie in the order
+# Qd, Qm, Qn where other insns use Qd, Qn, Qm. For QEMU we choose
+# to consider Vm and Vn as being in different fields in the insn.
+# This gives us consistency with A64 and Neon.
+@2op_rev .... .... .. size:2 .... .... .... .... ....   &2op qd=%qd qm=%qn qn=%qm
+
 @2scalar .... .... .. size:2 .... .... .... .... rm:4   &2scalar qd=%qd qn=%qn
 @2scalar_nosz .... .... .... .... .... .... .... rm:4   &2scalar qd=%qd qn=%qn
 
@@ -XXX,XX +XXX,XX @@ VQADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
 VQSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 
+VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
+VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
         mve_advance_vpt(env); \
     }
 
+/* provide unsigned 2-op helpers for all sizes */
+#define DO_2OP_SAT_U(OP, FN) \
+    DO_2OP_SAT(OP##b, 1, uint8_t, FN) \
+    DO_2OP_SAT(OP##h, 2, uint16_t, FN) \
+    DO_2OP_SAT(OP##w, 4, uint32_t, FN)
+
+/* provide signed 2-op helpers for all sizes */
+#define DO_2OP_SAT_S(OP, FN) \
+    DO_2OP_SAT(OP##b, 1, int8_t, FN) \
+    DO_2OP_SAT(OP##h, 2, int16_t, FN) \
+    DO_2OP_SAT(OP##w, 4, int32_t, FN)
+
 #define DO_AND(N, M)  ((N) & (M))
 #define DO_BIC(N, M)  ((N) & ~(M))
 #define DO_ORR(N, M)  ((N) | (M))
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
 DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
 DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
 
+/*
+ * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
+ * and friends wanting a uint32_t* sat and our needing a bool*.
+ */
+#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp)                        \
+    ({                                                                  \
+        uint32_t su32 = 0;                                              \
+        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32);  \
+        if (su32) {                                                     \
+            *satp = true;                                               \
+        }                                                               \
+        r;                                                              \
+    })
+
+#define DO_SQSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
+#define DO_UQSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
+
+DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
+DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                 uint32_t rm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQADD_S, vqadds)
 DO_2OP(VQADD_U, vqaddu)
 DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
+DO_2OP(VQSHL_S, vqshls)
+DO_2OP(VQSHL_U, vqshlu)
 
 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VQRSHL (vector) insn. Again, the code to perform
the actual shifts is borrowed from neon_helper.c.
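
The rounding that distinguishes VQRSHL from VQSHL can be sketched for a right shift as follows (an illustration only, with a fixed 8-bit lane and an assumed shift range):

#include <stdint.h>

/* Add half of the weight of the last bit shifted out before shifting,
 * so the result rounds to nearest instead of truncating. */
static uint8_t urshr8_sketch(uint8_t n, int sh)   /* assumes 1 <= sh <= 8 */
{
    return (uint8_t)(((uint32_t)n + (1u << (sh - 1))) >> sh);
}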

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-34-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 6 ++++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 19 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vqrshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 
+VQRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
+VQRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
     WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
 #define DO_UQSHL_OP(N, M, satp) \
     WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
+#define DO_SQRSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
+#define DO_UQRSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
 
 DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
 DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
+DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
+DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)
 
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
 DO_2OP(VQSHL_S, vqshls)
 DO_2OP(VQSHL_U, vqshlu)
+DO_2OP(VQRSHL_S, vqrshls)
+DO_2OP(VQRSHL_U, vqrshlu)
 
 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VSHL insn (vector form).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-35-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 6 ++++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 19 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vqshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
 VQSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 
+VSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
+VSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
+
 VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhaddu, do_vhadd_u)
 DO_2OP_S(vhsubs, do_vhsub_s)
 DO_2OP_U(vhsubu, do_vhsub_u)
 
+#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
+#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
+
+DO_2OP_S(vshls, DO_VSHLS)
+DO_2OP_U(vshlu, DO_VSHLU)
+
 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 {
     if (val > max) {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQADD_S, vqadds)
 DO_2OP(VQADD_U, vqaddu)
 DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
+DO_2OP(VSHL_S, vshls)
+DO_2OP(VSHL_U, vshlu)
 DO_2OP(VQSHL_S, vqshls)
 DO_2OP(VQSHL_U, vqshlu)
 DO_2OP(VQRSHL_S, vqrshls)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VRSHL insn (vector form).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-36-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 4 ++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 17 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vrshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vqshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
 VSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
 
+VRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 0 ... 0 @2op_rev
+VRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 0 ... 0 @2op_rev
+
 VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhsubu, do_vhsub_u)
 
 #define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
 #define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
+#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
+#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
 
 DO_2OP_S(vshls, DO_VSHLS)
 DO_2OP_U(vshlu, DO_VSHLU)
+DO_2OP_S(vrshls, DO_VRSHLS)
+DO_2OP_U(vrshlu, DO_VRSHLU)
 
 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
 DO_2OP(VSHL_S, vshls)
 DO_2OP(VSHL_U, vshlu)
+DO_2OP(VRSHL_S, vrshls)
+DO_2OP(VRSHL_U, vrshlu)
 DO_2OP(VQSHL_S, vqshls)
 DO_2OP(VQSHL_U, vqshlu)
 DO_2OP(VQRSHL_S, vqrshls)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VQDMLSDH and VQRDMLSDH insns, which are
like VQDMLADH and VQRDMLADH except that products are subtracted
rather than added.
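
A sketch of one 16-bit output lane with the rounding term omitted, mirroring the shape of the do_vqdmlsdh_h helper in the diff below (the name is illustrative):

#include <stdint.h>
#include <stdbool.h>

/* The difference of the two products is doubled, saturated to the
 * double-width (32-bit) range, and the high half is returned. */
static int16_t qdmlsdh_h_sketch(int16_t a, int16_t b, int16_t c, int16_t d,
                                bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2;
    if (r > INT32_MAX) { *sat = true; r = INT32_MAX; }
    else if (r < INT32_MIN) { *sat = true; r = INT32_MIN; }
    return (int16_t)(r >> 16);
}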

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-38-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 16 ++++++++++++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 44 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 69 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrdmladhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrdmladhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrdmladhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
 VQRDMLADH  1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
 VQRDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
 
+VQDMLSDH   1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
+VQDMLSDHX  1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
+VQRDMLSDH  1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
+VQRDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
     return r >> 32;
 }
 
+static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
+                            int round, bool *sat)
+{
+    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
+    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
+}
+
+static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
+                             int round, bool *sat)
+{
+    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
+    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
+}
+
+static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
+                             int round, bool *sat)
+{
+    int64_t m1 = (int64_t)a * b;
+    int64_t m2 = (int64_t)c * d;
+    int64_t r;
+    /* The same ordering issue as in do_vqdmladh_w applies here too */
+    if (ssub64_overflow(m1, m2, &r) ||
+        sadd64_overflow(r, (round << 30), &r) ||
+        sadd64_overflow(r, r, &r)) {
+        *sat = true;
+        return r < 0 ? INT32_MAX : INT32_MIN;
+    }
+    return r >> 32;
+}
+
 DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
 DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
 DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
@@ -XXX,XX +XXX,XX @@ DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
 DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
 DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)
 
+DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
+DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
+DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
+DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
+DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
+DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)
+
+DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
+DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
+DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
+DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
+DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
+DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                 uint32_t rm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQDMLADH, vqdmladh)
 DO_2OP(VQDMLADHX, vqdmladhx)
 DO_2OP(VQRDMLADH, vqrdmladh)
 DO_2OP(VQRDMLADHX, vqrdmladhx)
+DO_2OP(VQDMLSDH, vqdmlsdh)
+DO_2OP(VQDMLSDHX, vqdmlsdhx)
+DO_2OP(VQRDMLSDH, vqrdmlsdh)
+DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
 
 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Implement the MVE VRHADD insn, which performs a rounded halving
addition.
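
The per-lane arithmetic, mirroring the DO_RHADD_U macro added in the diff below, can be sketched as:

#include <stdint.h>

/* Like a halving add, but the +1 before the shift makes the result
 * round up instead of truncating. The wider type keeps the sum exact. */
static uint32_t rhadd_u32_sketch(uint32_t n, uint32_t m)
{
    return (uint32_t)(((uint64_t)n + m + 1) >> 1);
}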

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-40-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 6 ++++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 19 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqdmullbw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqdmullth, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqdmulltw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vrhaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQRDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
 VQDMULLB 111 . 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 1 @2op_sz28
 VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
 
+VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
+VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vshlu, DO_VSHLU)
 DO_2OP_S(vrshls, DO_VRSHLS)
 DO_2OP_U(vrshlu, DO_VRSHLU)
 
+#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
+#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)
+
+DO_2OP_S(vrhadds, DO_RHADD_S)
+DO_2OP_U(vrhaddu, DO_RHADD_U)
+
 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 {
     if (val > max) {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQDMLSDH, vqdmlsdh)
 DO_2OP(VQDMLSDHX, vqdmlsdhx)
 DO_2OP(VQRDMLSDH, vqrdmlsdh)
 DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
+DO_2OP(VRHADD_S, vrhadds)
+DO_2OP(VRHADD_U, vrhaddu)
 
 static bool trans_VQDMULLB(DisasContext *s, arg_2op *a)
 {
--
2.20.1
diff view generated by jsdifflib