First arm pullreq of the cycle; this is mostly my softfloat NaN
handling series. (Lots more in my to-review queue, but I don't
like pullreqs growing too close to a hundred patches at a time :-))

thanks
-- PMM

The following changes since commit 97f2796a3736ed37a1b85dc1c76a6c45b829dd17:

  Open 10.0 development tree (2024-12-10 17:41:17 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20241211

for you to fetch changes up to 1abe28d519239eea5cf9620bb13149423e5665f8:

  MAINTAINERS: Add correct email address for Vikram Garhwal (2024-12-11 15:31:09 +0000)

----------------------------------------------------------------
target-arm queue:
 * hw/net/lan9118: Extract PHY model, reuse with imx_fec, fix bugs
 * fpu: Make muladd NaN handling runtime-selected, not compile-time
   (a sketch of the new API follows at the end of this mail)
 * fpu: Make default NaN pattern runtime-selected, not compile-time
 * fpu: Minor NaN-related cleanups
 * MAINTAINERS: email address updates

----------------------------------------------------------------
Bernhard Beschow (5):
      hw/net/lan9118: Extract lan9118_phy
      hw/net/lan9118_phy: Reuse in imx_fec and consolidate implementations
      hw/net/lan9118_phy: Fix off-by-one error in MII_ANLPAR register
      hw/net/lan9118_phy: Reuse MII constants
      hw/net/lan9118_phy: Add missing 100 mbps full duplex advertisement

Leif Lindholm (1):
      MAINTAINERS: update email address for Leif Lindholm

Peter Maydell (54):
      fpu: handle raising Invalid for infzero in pick_nan_muladd
      fpu: Check for default_nan_mode before calling pickNaNMulAdd
      softfloat: Allow runtime choice of inf * 0 + NaN result
      tests/fp: Explicitly set inf-zero-nan rule
      target/arm: Set FloatInfZeroNaNRule explicitly
      target/s390: Set FloatInfZeroNaNRule explicitly
      target/ppc: Set FloatInfZeroNaNRule explicitly
      target/mips: Set FloatInfZeroNaNRule explicitly
      target/sparc: Set FloatInfZeroNaNRule explicitly
      target/xtensa: Set FloatInfZeroNaNRule explicitly
      target/x86: Set FloatInfZeroNaNRule explicitly
      target/loongarch: Set FloatInfZeroNaNRule explicitly
      target/hppa: Set FloatInfZeroNaNRule explicitly
      softfloat: Pass have_snan to pickNaNMulAdd
      softfloat: Allow runtime choice of NaN propagation for muladd
      tests/fp: Explicitly set 3-NaN propagation rule
      target/arm: Set Float3NaNPropRule explicitly
      target/loongarch: Set Float3NaNPropRule explicitly
      target/ppc: Set Float3NaNPropRule explicitly
      target/s390x: Set Float3NaNPropRule explicitly
      target/sparc: Set Float3NaNPropRule explicitly
      target/mips: Set Float3NaNPropRule explicitly
      target/xtensa: Set Float3NaNPropRule explicitly
      target/i386: Set Float3NaNPropRule explicitly
      target/hppa: Set Float3NaNPropRule explicitly
      fpu: Remove use_first_nan field from float_status
      target/m68k: Don't pass NULL float_status to floatx80_default_nan()
      softfloat: Create floatx80 default NaN from parts64_default_nan
      target/loongarch: Use normal float_status in fclass_s and fclass_d helpers
      target/m68k: In frem helper, initialize local float_status from env->fp_status
      target/m68k: Init local float_status from env fp_status in gdb get/set reg
      target/sparc: Initialize local scratch float_status from env->fp_status
      target/ppc: Use env->fp_status in helper_compute_fprf functions
      fpu: Allow runtime choice of default NaN value
      tests/fp: Set default NaN pattern explicitly
      target/microblaze: Set default NaN pattern explicitly
      target/i386: Set default NaN pattern explicitly
      target/hppa: Set default NaN pattern explicitly
      target/alpha: Set default NaN pattern explicitly
      target/arm: Set default NaN pattern explicitly
      target/loongarch: Set default NaN pattern explicitly
      target/m68k: Set default NaN pattern explicitly
      target/mips: Set default NaN pattern explicitly
      target/openrisc: Set default NaN pattern explicitly
      target/ppc: Set default NaN pattern explicitly
      target/sh4: Set default NaN pattern explicitly
      target/rx: Set default NaN pattern explicitly
      target/s390x: Set default NaN pattern explicitly
      target/sparc: Set default NaN pattern explicitly
      target/xtensa: Set default NaN pattern explicitly
      target/hexagon: Set default NaN pattern explicitly
      target/riscv: Set default NaN pattern explicitly
      target/tricore: Set default NaN pattern explicitly
      fpu: Remove default handling for dnan_pattern

Richard Henderson (11):
      target/arm: Copy entire float_status in is_ebf
      softfloat: Inline pickNaNMulAdd
      softfloat: Use goto for default nan case in pick_nan_muladd
      softfloat: Remove which from parts_pick_nan_muladd
      softfloat: Pad array size in pick_nan_muladd
      softfloat: Move propagateFloatx80NaN to softfloat.c
      softfloat: Use parts_pick_nan in propagateFloatx80NaN
      softfloat: Inline pickNaN
      softfloat: Share code between parts_pick_nan cases
      softfloat: Sink frac_cmp in parts_pick_nan until needed
      softfloat: Replace WHICH with RET in parts_pick_nan

Vikram Garhwal (1):
      MAINTAINERS: Add correct email address for Vikram Garhwal
 MAINTAINERS                       |   4 +-
 include/fpu/softfloat-helpers.h   |  38 +++-
 include/fpu/softfloat-types.h     |  89 +++++++-
 include/hw/net/imx_fec.h          |   9 +-
 include/hw/net/lan9118_phy.h      |  37 ++++
 include/hw/net/mii.h              |   6 +
 target/mips/fpu_helper.h          |  20 ++
 target/sparc/helper.h             |   4 +-
 fpu/softfloat.c                   |  19 ++
 hw/net/imx_fec.c                  | 146 ++------------
 hw/net/lan9118.c                  | 137 ++-----------
 hw/net/lan9118_phy.c              | 222 ++++++++++++++++++++
 linux-user/arm/nwfpe/fpa11.c      |   5 +
 target/alpha/cpu.c                |   2 +
 target/arm/cpu.c                  |  10 +
 target/arm/tcg/vec_helper.c       |  20 +-
 target/hexagon/cpu.c              |   2 +
 target/hppa/fpu_helper.c          |  12 ++
 target/i386/tcg/fpu_helper.c      |  12 ++
 target/loongarch/tcg/fpu_helper.c |  14 +-
 target/m68k/cpu.c                 |  14 +-
 target/m68k/fpu_helper.c          |   6 +-
 target/m68k/helper.c              |   6 +-
 target/microblaze/cpu.c           |   2 +
 target/mips/msa.c                 |  10 +
 target/openrisc/cpu.c             |   2 +
 target/ppc/cpu_init.c             |  19 ++
 target/ppc/fpu_helper.c           |   3 +-
 target/riscv/cpu.c                |   2 +
 target/rx/cpu.c                   |   2 +
 target/s390x/cpu.c                |   5 +
 target/sh4/cpu.c                  |   2 +
 target/sparc/cpu.c                |   6 +
 target/sparc/fop_helper.c         |   8 +-
 target/sparc/translate.c          |   4 +-
 target/tricore/helper.c           |   2 +
 target/xtensa/cpu.c               |   4 +
 target/xtensa/fpu_helper.c        |   3 +-
 tests/fp/fp-bench.c               |   7 +
 tests/fp/fp-test-log2.c           |   1 +
 tests/fp/fp-test.c                |   7 +
 fpu/softfloat-parts.c.inc         | 152 +++++++++++---
 fpu/softfloat-specialize.c.inc    | 412 ++------------------------------------
 .mailmap                          |   5 +-
 hw/net/Kconfig                    |   5 +
 hw/net/meson.build                |   1 +
 hw/net/trace-events               |  10 +-
 47 files changed, 778 insertions(+), 730 deletions(-)
 create mode 100644 include/hw/net/lan9118_phy.h
 create mode 100644 hw/net/lan9118_phy.c
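For readers unfamiliar with the softfloat changes summarized above: the
series replaces compile-time NaN-handling ifdefs with per-float_status
rules that each target now sets explicitly at CPU init. A minimal sketch
of the idiom, using the setters the series adds to
include/fpu/softfloat-helpers.h ("frob" is a made-up target; the rule
and pattern values shown are the Arm-flavoured choices and are not the
right answer for every architecture):

    #include "fpu/softfloat-helpers.h"

    static void frob_cpu_init_fp(float_status *fp_status) /* hypothetical */
    {
        /* inf * 0 + NaN: produce the default NaN only when the input
         * NaN is a quiet NaN */
        set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, fp_status);
        /* which NaN propagates out of a 3-input muladd */
        set_float_3nan_prop_rule(float_3nan_prop_s_cab, fp_status);
        /* default NaN pattern: sign clear, top fraction bit set */
        set_float_default_nan_pattern(0b01000000, fp_status);
    }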
From: Bernhard Beschow <shentey@gmail.com>

A very similar implementation of the same device exists in imx_fec. Prepare for
a common implementation by extracting a device model into its own files.

Some migration state has been moved into the new device model which breaks
migration compatibility for the following machines:
* smdkc210
* realview-*
* vexpress-*
* kzm
* mps2-*

While breaking migration ABI, fix the size of the MII registers to be 16 bit,
as defined by IEEE 802.3u.

Signed-off-by: Bernhard Beschow <shentey@gmail.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20241102125724.532843-2-shentey@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/net/lan9118_phy.h |  37 ++
 hw/net/lan9118.c             | 137 +++++-----------------------
 hw/net/lan9118_phy.c         | 169 +++++++++++++++++++++++++++++++++++
 hw/net/Kconfig               |   4 +
 hw/net/meson.build           |   1 +
 5 files changed, 233 insertions(+), 115 deletions(-)
 create mode 100644 include/hw/net/lan9118_phy.h
 create mode 100644 hw/net/lan9118_phy.c
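The migration break above is enforced by the vmstate version bump in the
patch. A simplified sketch of the check the migration core applies on
load (see vmstate_load_state() in migration/vmstate.c for the real
logic):

    /* Sketch: the incoming stream carries the source QEMU's version. */
    if (incoming_version > vmsd->version_id ||
        incoming_version < vmsd->minimum_version_id) {
        return -EINVAL;  /* stream rejected, migration fails */
    }

So moving lan9118 to .version_id = 3 / .minimum_version_id = 3 rejects
streams from older QEMUs (which send version 2 or 1), and older QEMUs in
turn reject version-3 streams.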
diff --git a/include/hw/net/lan9118_phy.h b/include/hw/net/lan9118_phy.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/hw/net/lan9118_phy.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * SMSC LAN9118 PHY emulation
+ *
+ * Copyright (c) 2009 CodeSourcery, LLC.
+ * Written by Paul Brook
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_NET_LAN9118_PHY_H
+#define HW_NET_LAN9118_PHY_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+#define TYPE_LAN9118_PHY "lan9118-phy"
+OBJECT_DECLARE_SIMPLE_TYPE(Lan9118PhyState, LAN9118_PHY)
+
+typedef struct Lan9118PhyState {
+    SysBusDevice parent_obj;
+
+    uint16_t status;
+    uint16_t control;
+    uint16_t advertise;
+    uint16_t ints;
+    uint16_t int_mask;
+    qemu_irq irq;
+    bool link_down;
+} Lan9118PhyState;
+
+void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down);
+void lan9118_phy_reset(Lan9118PhyState *s);
+uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg);
+void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val);
+
+#endif
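To summarize the new contract before the consumer-side changes: a device
embeds Lan9118PhyState, realizes it, and routes the PHY's single GPIO
output into its own interrupt logic. A condensed sketch (MyNicState,
MY_NIC and my_nic_phy_irq are hypothetical names; the calls are the ones
the lan9118 hunks below actually use):

    typedef struct {
        SysBusDevice parent_obj;
        Lan9118PhyState mii;   /* embedded PHY model */
        IRQState mii_irq;      /* receives the PHY's irq line */
        /* ... */
    } MyNicState;

    static void my_nic_realize(DeviceState *dev, Error **errp)
    {
        MyNicState *s = MY_NIC(dev);

        /* level changes on the PHY irq land in my_nic_phy_irq(s, 0, level) */
        qemu_init_irq(&s->mii_irq, my_nic_phy_irq, s, 0);
        object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
        if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
            return;
        }
        qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);
    }

MII management accesses then simply delegate to
lan9118_phy_read(&s->mii, reg) and lan9118_phy_write(&s->mii, reg, val).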
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/lan9118.c
+++ b/hw/net/lan9118.c
@@ -XXX,XX +XXX,XX @@
 #include "net/net.h"
 #include "net/eth.h"
 #include "hw/irq.h"
+#include "hw/net/lan9118_phy.h"
 #include "hw/net/lan9118.h"
 #include "hw/ptimer.h"
 #include "hw/qdev-properties.h"
@@ -XXX,XX +XXX,XX @@ do { printf("lan9118: " fmt , ## __VA_ARGS__); } while (0)
 #define MAC_CR_RXEN 0x00000004
 #define MAC_CR_RESERVED 0x7f404213
 
-#define PHY_INT_ENERGYON 0x80
-#define PHY_INT_AUTONEG_COMPLETE 0x40
-#define PHY_INT_FAULT 0x20
-#define PHY_INT_DOWN 0x10
-#define PHY_INT_AUTONEG_LP 0x08
-#define PHY_INT_PARFAULT 0x04
-#define PHY_INT_AUTONEG_PAGE 0x02
-
 #define GPT_TIMER_EN 0x20000000
 
 /*
@@ -XXX,XX +XXX,XX @@ struct lan9118_state {
     uint32_t mac_mii_data;
     uint32_t mac_flow;
 
-    uint32_t phy_status;
-    uint32_t phy_control;
-    uint32_t phy_advertise;
-    uint32_t phy_int;
-    uint32_t phy_int_mask;
+    Lan9118PhyState mii;
+    IRQState mii_irq;
 
     int32_t eeprom_writable;
     uint8_t eeprom[128];
@@ -XXX,XX +XXX,XX @@ struct lan9118_state {
 
 static const VMStateDescription vmstate_lan9118 = {
     .name = "lan9118",
-    .version_id = 2,
-    .minimum_version_id = 1,
+    .version_id = 3,
+    .minimum_version_id = 3,
     .fields = (const VMStateField[]) {
         VMSTATE_PTIMER(timer, lan9118_state),
         VMSTATE_UINT32(irq_cfg, lan9118_state),
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_lan9118 = {
         VMSTATE_UINT32(mac_mii_acc, lan9118_state),
         VMSTATE_UINT32(mac_mii_data, lan9118_state),
         VMSTATE_UINT32(mac_flow, lan9118_state),
-        VMSTATE_UINT32(phy_status, lan9118_state),
-        VMSTATE_UINT32(phy_control, lan9118_state),
-        VMSTATE_UINT32(phy_advertise, lan9118_state),
-        VMSTATE_UINT32(phy_int, lan9118_state),
-        VMSTATE_UINT32(phy_int_mask, lan9118_state),
         VMSTATE_INT32(eeprom_writable, lan9118_state),
         VMSTATE_UINT8_ARRAY(eeprom, lan9118_state, 128),
         VMSTATE_INT32(tx_fifo_size, lan9118_state),
@@ -XXX,XX +XXX,XX @@ static void lan9118_reload_eeprom(lan9118_state *s)
         lan9118_mac_changed(s);
     }
 }
 
-static void phy_update_irq(lan9118_state *s)
+static void lan9118_update_irq(void *opaque, int n, int level)
 {
-    if (s->phy_int & s->phy_int_mask) {
+    lan9118_state *s = opaque;
+
+    if (level) {
         s->int_sts |= PHY_INT;
     } else {
         s->int_sts &= ~PHY_INT;
@@ -XXX,XX +XXX,XX @@ static void phy_update_irq(lan9118_state *s)
     lan9118_update(s);
 }
 
-static void phy_update_link(lan9118_state *s)
-{
-    /* Autonegotiation status mirrors link status. */
-    if (qemu_get_queue(s->nic)->link_down) {
-        s->phy_status &= ~0x0024;
-        s->phy_int |= PHY_INT_DOWN;
-    } else {
-        s->phy_status |= 0x0024;
-        s->phy_int |= PHY_INT_ENERGYON;
-        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
-    }
-    phy_update_irq(s);
-}
-
 static void lan9118_set_link(NetClientState *nc)
 {
-    phy_update_link(qemu_get_nic_opaque(nc));
-}
-
-static void phy_reset(lan9118_state *s)
-{
-    s->phy_status = 0x7809;
-    s->phy_control = 0x3000;
-    s->phy_advertise = 0x01e1;
-    s->phy_int_mask = 0;
-    s->phy_int = 0;
-    phy_update_link(s);
+    lan9118_phy_update_link(&LAN9118(qemu_get_nic_opaque(nc))->mii,
+                            nc->link_down);
 }
 
 static void lan9118_reset(DeviceState *d)
@@ -XXX,XX +XXX,XX @@ static void lan9118_reset(DeviceState *d)
     s->read_word_n = 0;
     s->write_word_n = 0;
 
-    phy_reset(s);
-
     s->eeprom_writable = 0;
     lan9118_reload_eeprom(s);
 }
@@ -XXX,XX +XXX,XX @@ static void do_tx_packet(lan9118_state *s)
     uint32_t status;
 
     /* FIXME: Honor TX disable, and allow queueing of packets. */
-    if (s->phy_control & 0x4000) {
+    if (s->mii.control & 0x4000) {
         /* This assumes the receive routine doesn't touch the VLANClient. */
         qemu_receive_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len);
     } else {
@@ -XXX,XX +XXX,XX @@ static void tx_fifo_push(lan9118_state *s, uint32_t val)
         }
     }
 }
 
-static uint32_t do_phy_read(lan9118_state *s, int reg)
-{
-    uint32_t val;
-
-    switch (reg) {
-    case 0: /* Basic Control */
-        return s->phy_control;
-    case 1: /* Basic Status */
-        return s->phy_status;
-    case 2: /* ID1 */
-        return 0x0007;
-    case 3: /* ID2 */
-        return 0xc0d1;
-    case 4: /* Auto-neg advertisement */
-        return s->phy_advertise;
-    case 5: /* Auto-neg Link Partner Ability */
-        return 0x0f71;
-    case 6: /* Auto-neg Expansion */
-        return 1;
-        /* TODO 17, 18, 27, 29, 30, 31 */
-    case 29: /* Interrupt source. */
-        val = s->phy_int;
-        s->phy_int = 0;
-        phy_update_irq(s);
-        return val;
-    case 30: /* Interrupt mask */
-        return s->phy_int_mask;
-    default:
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "do_phy_read: PHY read reg %d\n", reg);
-        return 0;
-    }
-}
-
-static void do_phy_write(lan9118_state *s, int reg, uint32_t val)
-{
-    switch (reg) {
-    case 0: /* Basic Control */
-        if (val & 0x8000) {
-            phy_reset(s);
-            break;
-        }
-        s->phy_control = val & 0x7980;
-        /* Complete autonegotiation immediately. */
-        if (val & 0x1000) {
-            s->phy_status |= 0x0020;
-        }
-        break;
-    case 4: /* Auto-neg advertisement */
-        s->phy_advertise = (val & 0x2d7f) | 0x80;
-        break;
-        /* TODO 17, 18, 27, 31 */
-    case 30: /* Interrupt mask */
-        s->phy_int_mask = val & 0xff;
-        phy_update_irq(s);
-        break;
-    default:
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "do_phy_write: PHY write reg %d = 0x%04x\n", reg, val);
-    }
-}
-
 static void do_mac_write(lan9118_state *s, int reg, uint32_t val)
 {
     switch (reg) {
@@ -XXX,XX +XXX,XX @@ static void do_mac_write(lan9118_state *s, int reg, uint32_t val)
         if (val & 2) {
             DPRINTF("PHY write %d = 0x%04x\n",
                     (val >> 6) & 0x1f, s->mac_mii_data);
-            do_phy_write(s, (val >> 6) & 0x1f, s->mac_mii_data);
+            lan9118_phy_write(&s->mii, (val >> 6) & 0x1f, s->mac_mii_data);
         } else {
-            s->mac_mii_data = do_phy_read(s, (val >> 6) & 0x1f);
+            s->mac_mii_data = lan9118_phy_read(&s->mii, (val >> 6) & 0x1f);
             DPRINTF("PHY read %d = 0x%04x\n",
                     (val >> 6) & 0x1f, s->mac_mii_data);
         }
@@ -XXX,XX +XXX,XX @@ static void lan9118_writel(void *opaque, hwaddr offset,
         break;
     case CSR_PMT_CTRL:
         if (val & 0x400) {
-            phy_reset(s);
+            lan9118_phy_reset(&s->mii);
         }
         s->pmt_ctrl &= ~0x34e;
         s->pmt_ctrl |= (val & 0x34e);
@@ -XXX,XX +XXX,XX @@ static void lan9118_realize(DeviceState *dev, Error **errp)
     const MemoryRegionOps *mem_ops =
             s->mode_16bit ? &lan9118_16bit_mem_ops : &lan9118_mem_ops;
 
+    qemu_init_irq(&s->mii_irq, lan9118_update_irq, s, 0);
+    object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
+    if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
+        return;
+    }
+    qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);
+
     memory_region_init_io(&s->mmio, OBJECT(dev), mem_ops, s,
                           "lan9118-mmio", 0x100);
     sysbus_init_mmio(sbd, &s->mmio);
diff --git a/hw/net/lan9118_phy.c b/hw/net/lan9118_phy.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/net/lan9118_phy.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * SMSC LAN9118 PHY emulation
+ *
+ * Copyright (c) 2009 CodeSourcery, LLC.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GNU GPL v2
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/net/lan9118_phy.h"
+#include "hw/irq.h"
+#include "hw/resettable.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+
+#define PHY_INT_ENERGYON (1 << 7)
+#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
+#define PHY_INT_FAULT (1 << 5)
+#define PHY_INT_DOWN (1 << 4)
+#define PHY_INT_AUTONEG_LP (1 << 3)
+#define PHY_INT_PARFAULT (1 << 2)
+#define PHY_INT_AUTONEG_PAGE (1 << 1)
+
+static void lan9118_phy_update_irq(Lan9118PhyState *s)
+{
+    qemu_set_irq(s->irq, !!(s->ints & s->int_mask));
+}
+
+uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg)
+{
+    uint16_t val;
+
+    switch (reg) {
+    case 0: /* Basic Control */
+        return s->control;
+    case 1: /* Basic Status */
+        return s->status;
+    case 2: /* ID1 */
+        return 0x0007;
+    case 3: /* ID2 */
+        return 0xc0d1;
+    case 4: /* Auto-neg advertisement */
+        return s->advertise;
+    case 5: /* Auto-neg Link Partner Ability */
+        return 0x0f71;
+    case 6: /* Auto-neg Expansion */
+        return 1;
+        /* TODO 17, 18, 27, 29, 30, 31 */
+    case 29: /* Interrupt source. */
+        val = s->ints;
+        s->ints = 0;
+        lan9118_phy_update_irq(s);
+        return val;
+    case 30: /* Interrupt mask */
+        return s->int_mask;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "lan9118_phy_read: PHY read reg %d\n", reg);
+        return 0;
+    }
+}
+
+void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val)
+{
+    switch (reg) {
+    case 0: /* Basic Control */
+        if (val & 0x8000) {
+            lan9118_phy_reset(s);
+            break;
+        }
+        s->control = val & 0x7980;
+        /* Complete autonegotiation immediately. */
+        if (val & 0x1000) {
+            s->status |= 0x0020;
+        }
+        break;
+    case 4: /* Auto-neg advertisement */
+        s->advertise = (val & 0x2d7f) | 0x80;
+        break;
+        /* TODO 17, 18, 27, 31 */
+    case 30: /* Interrupt mask */
+        s->int_mask = val & 0xff;
+        lan9118_phy_update_irq(s);
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "lan9118_phy_write: PHY write reg %d = 0x%04x\n", reg, val);
+    }
+}
+
+void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down)
+{
+    s->link_down = link_down;
+
+    /* Autonegotiation status mirrors link status. */
+    if (link_down) {
+        s->status &= ~0x0024;
+        s->ints |= PHY_INT_DOWN;
+    } else {
+        s->status |= 0x0024;
+        s->ints |= PHY_INT_ENERGYON;
+        s->ints |= PHY_INT_AUTONEG_COMPLETE;
+    }
+    lan9118_phy_update_irq(s);
+}
+
+void lan9118_phy_reset(Lan9118PhyState *s)
+{
+    s->control = 0x3000;
+    s->status = 0x7809;
+    s->advertise = 0x01e1;
+    s->int_mask = 0;
+    s->ints = 0;
+    lan9118_phy_update_link(s, s->link_down);
+}
+
+static void lan9118_phy_reset_hold(Object *obj, ResetType type)
+{
+    Lan9118PhyState *s = LAN9118_PHY(obj);
+
+    lan9118_phy_reset(s);
+}
+
+static void lan9118_phy_init(Object *obj)
+{
+    Lan9118PhyState *s = LAN9118_PHY(obj);
+
+    qdev_init_gpio_out(DEVICE(s), &s->irq, 1);
+}
+
+static const VMStateDescription vmstate_lan9118_phy = {
+    .name = "lan9118-phy",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINT16(control, Lan9118PhyState),
+        VMSTATE_UINT16(status, Lan9118PhyState),
+        VMSTATE_UINT16(advertise, Lan9118PhyState),
+        VMSTATE_UINT16(ints, Lan9118PhyState),
+        VMSTATE_UINT16(int_mask, Lan9118PhyState),
+        VMSTATE_BOOL(link_down, Lan9118PhyState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void lan9118_phy_class_init(ObjectClass *klass, void *data)
+{
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    rc->phases.hold = lan9118_phy_reset_hold;
+    dc->vmsd = &vmstate_lan9118_phy;
+}
+
+static const TypeInfo types[] = {
+    {
+        .name = TYPE_LAN9118_PHY,
+        .parent = TYPE_SYS_BUS_DEVICE,
+        .instance_size = sizeof(Lan9118PhyState),
+        .instance_init = lan9118_phy_init,
+        .class_init = lan9118_phy_class_init,
+    }
+};
+
+DEFINE_TYPES(types)
diff --git a/hw/net/Kconfig b/hw/net/Kconfig
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/Kconfig
+++ b/hw/net/Kconfig
@@ -XXX,XX +XXX,XX @@ config VMXNET3_PCI
 config SMC91C111
     bool
 
+config LAN9118_PHY
+    bool
+
 config LAN9118
     bool
+    select LAN9118_PHY
     select PTIMER
 
 config NE2000_ISA
diff --git a/hw/net/meson.build b/hw/net/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/meson.build
+++ b/hw/net/meson.build
@@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('vmxnet3.c'))
 
 system_ss.add(when: 'CONFIG_SMC91C111', if_true: files('smc91c111.c'))
 system_ss.add(when: 'CONFIG_LAN9118', if_true: files('lan9118.c'))
+system_ss.add(when: 'CONFIG_LAN9118_PHY', if_true: files('lan9118_phy.c'))
 system_ss.add(when: 'CONFIG_NE2000_ISA', if_true: files('ne2000-isa.c'))
 system_ss.add(when: 'CONFIG_OPENCORES_ETH', if_true: files('opencores_eth.c'))
 system_ss.add(when: 'CONFIG_XGMAC', if_true: files('xgmac.c'))
-- 
2.34.1
From: Bernhard Beschow <shentey@gmail.com>

imx_fec models the same PHY as lan9118_phy. The code is almost the same, with
imx_fec having more logging and tracing. Merge these improvements into
lan9118_phy and reuse it in imx_fec to fix the code duplication.

Some migration state now resides in the new device model, which breaks migration
compatibility for the following machines:
* imx25-pdk
* sabrelite
* mcimx7d-sabre
* mcimx6ul-evk

Signed-off-by: Bernhard Beschow <shentey@gmail.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20241102125724.532843-3-shentey@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/net/imx_fec.h |   9 ++-
 hw/net/imx_fec.c         | 146 ++-----------------------------------
 hw/net/lan9118_phy.c     |  82 ++++++++++++------
 hw/net/Kconfig           |   1 +
 hw/net/trace-events      |  10 +--
 5 files changed, 85 insertions(+), 163 deletions(-)
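One behaviour worth calling out: the consolidated model keeps the
register-level semantics both copies already implemented, including the
destructive read of MII register 29 (interrupt source). A test-harness
style sketch (the setup of "phy" is elided; the calls are the real
lan9118_phy API from the previous patch):

    lan9118_phy_update_link(phy, false);        /* link comes up */
    uint16_t ints = lan9118_phy_read(phy, 29);  /* PHY_INT_ENERGYON |
                                                 * PHY_INT_AUTONEG_COMPLETE */
    assert(lan9118_phy_read(phy, 29) == 0);     /* reading reg 29 cleared it */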
diff --git a/include/hw/net/imx_fec.h b/include/hw/net/imx_fec.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/net/imx_fec.h
+++ b/include/hw/net/imx_fec.h
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(IMXFECState, IMX_FEC)
 #define TYPE_IMX_ENET "imx.enet"
 
 #include "hw/sysbus.h"
+#include "hw/net/lan9118_phy.h"
+#include "hw/irq.h"
 #include "net/net.h"
 
 #define ENET_EIR 1
@@ -XXX,XX +XXX,XX @@ struct IMXFECState {
     uint32_t tx_descriptor[ENET_TX_RING_NUM];
     uint32_t tx_ring_num;
 
-    uint32_t phy_status;
-    uint32_t phy_control;
-    uint32_t phy_advertise;
-    uint32_t phy_int;
-    uint32_t phy_int_mask;
+    Lan9118PhyState mii;
+    IRQState mii_irq;
     uint32_t phy_num;
     bool phy_connected;
     struct IMXFECState *phy_consumer;
diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/imx_fec.c
+++ b/hw/net/imx_fec.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_imx_eth_txdescs = {
 
 static const VMStateDescription vmstate_imx_eth = {
     .name = TYPE_IMX_FEC,
-    .version_id = 2,
-    .minimum_version_id = 2,
+    .version_id = 3,
+    .minimum_version_id = 3,
     .fields = (const VMStateField[]) {
         VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
         VMSTATE_UINT32(rx_descriptor, IMXFECState),
         VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
-        VMSTATE_UINT32(phy_status, IMXFECState),
-        VMSTATE_UINT32(phy_control, IMXFECState),
-        VMSTATE_UINT32(phy_advertise, IMXFECState),
-        VMSTATE_UINT32(phy_int, IMXFECState),
-        VMSTATE_UINT32(phy_int_mask, IMXFECState),
         VMSTATE_END_OF_LIST()
     },
     .subsections = (const VMStateDescription * const []) {
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_imx_eth = {
     },
 };
 
-#define PHY_INT_ENERGYON (1 << 7)
-#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
-#define PHY_INT_FAULT (1 << 5)
-#define PHY_INT_DOWN (1 << 4)
-#define PHY_INT_AUTONEG_LP (1 << 3)
-#define PHY_INT_PARFAULT (1 << 2)
-#define PHY_INT_AUTONEG_PAGE (1 << 1)
-
 static void imx_eth_update(IMXFECState *s);
 
 /*
@@ -XXX,XX +XXX,XX @@ static void imx_eth_update(IMXFECState *s);
  * For now we don't handle any GPIO/interrupt line, so the OS will
  * have to poll for the PHY status.
  */
-static void imx_phy_update_irq(IMXFECState *s)
+static void imx_phy_update_irq(void *opaque, int n, int level)
 {
-    imx_eth_update(s);
-}
-
-static void imx_phy_update_link(IMXFECState *s)
-{
-    /* Autonegotiation status mirrors link status. */
-    if (qemu_get_queue(s->nic)->link_down) {
-        trace_imx_phy_update_link("down");
-        s->phy_status &= ~0x0024;
-        s->phy_int |= PHY_INT_DOWN;
-    } else {
-        trace_imx_phy_update_link("up");
-        s->phy_status |= 0x0024;
-        s->phy_int |= PHY_INT_ENERGYON;
-        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
-    }
-    imx_phy_update_irq(s);
+    imx_eth_update(opaque);
 }
 
 static void imx_eth_set_link(NetClientState *nc)
 {
-    imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
-}
-
-static void imx_phy_reset(IMXFECState *s)
-{
-    trace_imx_phy_reset();
-
-    s->phy_status = 0x7809;
-    s->phy_control = 0x3000;
-    s->phy_advertise = 0x01e1;
-    s->phy_int_mask = 0;
-    s->phy_int = 0;
-    imx_phy_update_link(s);
+    lan9118_phy_update_link(&IMX_FEC(qemu_get_nic_opaque(nc))->mii,
+                            nc->link_down);
 }
 
 static uint32_t imx_phy_read(IMXFECState *s, int reg)
 {
-    uint32_t val;
     uint32_t phy = reg / 32;
 
     if (!s->phy_connected) {
@@ -XXX,XX +XXX,XX @@ static uint32_t imx_phy_read(IMXFECState *s, int reg)
 
     reg %= 32;
 
-    switch (reg) {
-    case 0: /* Basic Control */
-        val = s->phy_control;
-        break;
-    case 1: /* Basic Status */
-        val = s->phy_status;
-        break;
-    case 2: /* ID1 */
-        val = 0x0007;
-        break;
-    case 3: /* ID2 */
-        val = 0xc0d1;
-        break;
-    case 4: /* Auto-neg advertisement */
-        val = s->phy_advertise;
-        break;
-    case 5: /* Auto-neg Link Partner Ability */
-        val = 0x0f71;
-        break;
-    case 6: /* Auto-neg Expansion */
-        val = 1;
-        break;
-    case 29: /* Interrupt source. */
-        val = s->phy_int;
-        s->phy_int = 0;
-        imx_phy_update_irq(s);
-        break;
-    case 30: /* Interrupt mask */
-        val = s->phy_int_mask;
-        break;
-    case 17:
-    case 18:
-    case 27:
-    case 31:
-        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
-                      TYPE_IMX_FEC, __func__, reg);
-        val = 0;
-        break;
-    default:
-        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
-                      TYPE_IMX_FEC, __func__, reg);
-        val = 0;
-        break;
-    }
-
-    trace_imx_phy_read(val, phy, reg);
-
-    return val;
+    return lan9118_phy_read(&s->mii, reg);
 }
 
 static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
@@ -XXX,XX +XXX,XX @@ static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
 
     reg %= 32;
 
-    trace_imx_phy_write(val, phy, reg);
-
-    switch (reg) {
-    case 0: /* Basic Control */
-        if (val & 0x8000) {
-            imx_phy_reset(s);
-        } else {
-            s->phy_control = val & 0x7980;
-            /* Complete autonegotiation immediately. */
-            if (val & 0x1000) {
-                s->phy_status |= 0x0020;
-            }
-        }
-        break;
-    case 4: /* Auto-neg advertisement */
-        s->phy_advertise = (val & 0x2d7f) | 0x80;
-        break;
-    case 30: /* Interrupt mask */
-        s->phy_int_mask = val & 0xff;
-        imx_phy_update_irq(s);
-        break;
-    case 17:
-    case 18:
-    case 27:
-    case 31:
-        qemu_log_mask(LOG_UNIMP, "[%s.phy)%s: reg %d not implemented\n",
-                      TYPE_IMX_FEC, __func__, reg);
-        break;
-    default:
-        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
-                      TYPE_IMX_FEC, __func__, reg);
-        break;
-    }
+    lan9118_phy_write(&s->mii, reg, val);
 }
 
 static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
@@ -XXX,XX +XXX,XX @@ static void imx_eth_reset(DeviceState *d)
 
     s->rx_descriptor = 0;
     memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));
-
-    /* We also reset the PHY */
-    imx_phy_reset(s);
 }
 
 static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
@@ -XXX,XX +XXX,XX @@ static void imx_eth_realize(DeviceState *dev, Error **errp)
     sysbus_init_irq(sbd, &s->irq[0]);
     sysbus_init_irq(sbd, &s->irq[1]);
 
+    qemu_init_irq(&s->mii_irq, imx_phy_update_irq, s, 0);
+    object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
+    if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
+        return;
+    }
+    qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);
+
     qemu_macaddr_default_if_unset(&s->conf.macaddr);
 
     s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
diff --git a/hw/net/lan9118_phy.c b/hw/net/lan9118_phy.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/lan9118_phy.c
+++ b/hw/net/lan9118_phy.c
@@ -XXX,XX +XXX,XX @@
  * Copyright (c) 2009 CodeSourcery, LLC.
  * Written by Paul Brook
  *
+ * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
+ *
  * This code is licensed under the GNU GPL v2
  *
  * Contributions after 2012-01-13 are licensed under the terms of the
@@ -XXX,XX +XXX,XX @@
 #include "hw/resettable.h"
 #include "migration/vmstate.h"
 #include "qemu/log.h"
+#include "trace.h"
 
 #define PHY_INT_ENERGYON (1 << 7)
 #define PHY_INT_AUTONEG_COMPLETE (1 << 6)
@@ -XXX,XX +XXX,XX @@ uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg)
 
     switch (reg) {
     case 0: /* Basic Control */
-        return s->control;
+        val = s->control;
+        break;
     case 1: /* Basic Status */
-        return s->status;
+        val = s->status;
+        break;
     case 2: /* ID1 */
-        return 0x0007;
+        val = 0x0007;
+        break;
     case 3: /* ID2 */
-        return 0xc0d1;
+        val = 0xc0d1;
+        break;
     case 4: /* Auto-neg advertisement */
-        return s->advertise;
+        val = s->advertise;
+        break;
     case 5: /* Auto-neg Link Partner Ability */
-        return 0x0f71;
+        val = 0x0f71;
+        break;
     case 6: /* Auto-neg Expansion */
-        return 1;
-        /* TODO 17, 18, 27, 29, 30, 31 */
+        val = 1;
+        break;
     case 29: /* Interrupt source. */
         val = s->ints;
         s->ints = 0;
         lan9118_phy_update_irq(s);
-        return val;
+        break;
     case 30: /* Interrupt mask */
-        return s->int_mask;
+        val = s->int_mask;
+        break;
+    case 17:
+    case 18:
+    case 27:
+    case 31:
+        qemu_log_mask(LOG_UNIMP, "%s: reg %d not implemented\n",
+                      __func__, reg);
+        val = 0;
+        break;
     default:
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "lan9118_phy_read: PHY read reg %d\n", reg);
-        return 0;
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset %d\n",
+                      __func__, reg);
+        val = 0;
+        break;
     }
 
+    trace_lan9118_phy_read(val, reg);
+
+    return val;
 }
 
 void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val)
 {
+    trace_lan9118_phy_write(val, reg);
+
     switch (reg) {
     case 0: /* Basic Control */
         if (val & 0x8000) {
             lan9118_phy_reset(s);
-            break;
-        }
-        s->control = val & 0x7980;
-        /* Complete autonegotiation immediately. */
-        if (val & 0x1000) {
-            s->status |= 0x0020;
+        } else {
+            s->control = val & 0x7980;
+            /* Complete autonegotiation immediately. */
+            if (val & 0x1000) {
+                s->status |= 0x0020;
+            }
         }
         break;
     case 4: /* Auto-neg advertisement */
         s->advertise = (val & 0x2d7f) | 0x80;
         break;
-        /* TODO 17, 18, 27, 31 */
     case 30: /* Interrupt mask */
         s->int_mask = val & 0xff;
         lan9118_phy_update_irq(s);
         break;
+    case 17:
+    case 18:
+    case 27:
+    case 31:
+        qemu_log_mask(LOG_UNIMP, "%s: reg %d not implemented\n",
+                      __func__, reg);
+        break;
     default:
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "lan9118_phy_write: PHY write reg %d = 0x%04x\n", reg, val);
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset %d\n",
+                      __func__, reg);
+        break;
     }
 }
 
@@ -XXX,XX +XXX,XX @@ void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down)
 
     /* Autonegotiation status mirrors link status. */
401
if (link_down) {
257
- int r = *(int16_t *)(a + i) + b;
402
+ trace_lan9118_phy_update_link("down");
258
- if (r > INT16_MAX) {
403
s->status &= ~0x0024;
259
- r = INT16_MAX;
404
s->ints |= PHY_INT_DOWN;
260
- } else if (r < INT16_MIN) {
405
} else {
261
- r = INT16_MIN;
406
+ trace_lan9118_phy_update_link("up");
262
- }
407
s->status |= 0x0024;
263
- *(int16_t *)(d + i) = r;
408
s->ints |= PHY_INT_ENERGYON;
264
+ *(int16_t *)(d + i) = DO_SQADD_H(b, *(int16_t *)(a + i));
409
s->ints |= PHY_INT_AUTONEG_COMPLETE;
265
}
410
@@ -XXX,XX +XXX,XX @@ void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down)
266
}
411
267
412
void lan9118_phy_reset(Lan9118PhyState *s)
268
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_sqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
413
{
269
intptr_t i, oprsz = simd_oprsz(desc);
414
+ trace_lan9118_phy_reset();
270
415
+
271
for (i = 0; i < oprsz; i += sizeof(int32_t)) {
416
s->control = 0x3000;
272
- int64_t r = *(int32_t *)(a + i) + b;
417
s->status = 0x7809;
273
- if (r > INT32_MAX) {
418
s->advertise = 0x01e1;
274
- r = INT32_MAX;
419
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_lan9118_phy = {
275
- } else if (r < INT32_MIN) {
420
.version_id = 1,
276
- r = INT32_MIN;
421
.minimum_version_id = 1,
277
- }
422
.fields = (const VMStateField[]) {
278
- *(int32_t *)(d + i) = r;
423
- VMSTATE_UINT16(control, Lan9118PhyState),
279
+ *(int32_t *)(d + i) = DO_SQADD_S(b, *(int32_t *)(a + i));
424
VMSTATE_UINT16(status, Lan9118PhyState),
280
}
425
+ VMSTATE_UINT16(control, Lan9118PhyState),
281
}
426
VMSTATE_UINT16(advertise, Lan9118PhyState),
282
427
VMSTATE_UINT16(ints, Lan9118PhyState),
283
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_sqaddi_d)(void *d, void *a, int64_t b, uint32_t desc)
428
VMSTATE_UINT16(int_mask, Lan9118PhyState),
284
intptr_t i, oprsz = simd_oprsz(desc);
429
diff --git a/hw/net/Kconfig b/hw/net/Kconfig
285
286
for (i = 0; i < oprsz; i += sizeof(int64_t)) {
287
- int64_t ai = *(int64_t *)(a + i);
288
- int64_t r = ai + b;
289
- if (((r ^ ai) & ~(ai ^ b)) < 0) {
290
- /* Signed overflow. */
291
- r = (r < 0 ? INT64_MAX : INT64_MIN);
292
- }
293
- *(int64_t *)(d + i) = r;
294
+ *(int64_t *)(d + i) = do_sqadd_d(b, *(int64_t *)(a + i));
295
}
296
}
297
298
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
299
intptr_t i, oprsz = simd_oprsz(desc);
300
301
for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
302
- int r = *(uint8_t *)(a + i) + b;
303
- if (r > UINT8_MAX) {
304
- r = UINT8_MAX;
305
- } else if (r < 0) {
306
- r = 0;
307
- }
308
- *(uint8_t *)(d + i) = r;
309
+ *(uint8_t *)(d + i) = DO_UQADD_B(b, *(uint8_t *)(a + i));
310
}
311
}
312
313
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
314
intptr_t i, oprsz = simd_oprsz(desc);
315
316
for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
317
- int r = *(uint16_t *)(a + i) + b;
318
- if (r > UINT16_MAX) {
319
- r = UINT16_MAX;
320
- } else if (r < 0) {
321
- r = 0;
322
- }
323
- *(uint16_t *)(d + i) = r;
324
+ *(uint16_t *)(d + i) = DO_UQADD_H(b, *(uint16_t *)(a + i));
325
}
326
}
327
328
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
329
intptr_t i, oprsz = simd_oprsz(desc);
330
331
for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
332
- int64_t r = *(uint32_t *)(a + i) + b;
333
- if (r > UINT32_MAX) {
334
- r = UINT32_MAX;
335
- } else if (r < 0) {
336
- r = 0;
337
- }
338
- *(uint32_t *)(d + i) = r;
339
+ *(uint32_t *)(d + i) = DO_UQADD_S(b, *(uint32_t *)(a + i));
340
}
341
}
342
343
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqaddi_d)(void *d, void *a, uint64_t b, uint32_t desc)
344
intptr_t i, oprsz = simd_oprsz(desc);
345
346
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
347
- uint64_t r = *(uint64_t *)(a + i) + b;
348
- if (r < b) {
349
- r = UINT64_MAX;
350
- }
351
- *(uint64_t *)(d + i) = r;
352
+ *(uint64_t *)(d + i) = do_uqadd_d(b, *(uint64_t *)(a + i));
353
}
354
}
355
356
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_uqsubi_d)(void *d, void *a, uint64_t b, uint32_t desc)
357
intptr_t i, oprsz = simd_oprsz(desc);
358
359
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
360
- uint64_t ai = *(uint64_t *)(a + i);
361
- *(uint64_t *)(d + i) = (ai < b ? 0 : ai - b);
362
+ *(uint64_t *)(d + i) = do_uqsub_d(*(uint64_t *)(a + i), b);
363
}
364
}
365
366
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
367
index XXXXXXX..XXXXXXX 100644
430
index XXXXXXX..XXXXXXX 100644
368
--- a/target/arm/translate-sve.c
431
--- a/hw/net/Kconfig
369
+++ b/target/arm/translate-sve.c
432
+++ b/hw/net/Kconfig
370
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ(SMAXP, smaxp)
433
@@ -XXX,XX +XXX,XX @@ config ALLWINNER_SUN8I_EMAC
371
DO_SVE2_ZPZZ(UMAXP, umaxp)
434
372
DO_SVE2_ZPZZ(SMINP, sminp)
435
config IMX_FEC
373
DO_SVE2_ZPZZ(UMINP, uminp)
436
bool
374
+
437
+ select LAN9118_PHY
375
+DO_SVE2_ZPZZ(SQADD_zpzz, sqadd)
438
376
+DO_SVE2_ZPZZ(UQADD_zpzz, uqadd)
439
config CADENCE
377
+DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub)
440
bool
378
+DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub)
441
diff --git a/hw/net/trace-events b/hw/net/trace-events
379
+DO_SVE2_ZPZZ(SUQADD, suqadd)
442
index XXXXXXX..XXXXXXX 100644
380
+DO_SVE2_ZPZZ(USQADD, usqadd)
443
--- a/hw/net/trace-events
444
+++ b/hw/net/trace-events
445
@@ -XXX,XX +XXX,XX @@ allwinner_sun8i_emac_set_link(bool active) "Set link: active=%u"
446
allwinner_sun8i_emac_read(uint64_t offset, uint64_t val) "MMIO read: offset=0x%" PRIx64 " value=0x%" PRIx64
447
allwinner_sun8i_emac_write(uint64_t offset, uint64_t val) "MMIO write: offset=0x%" PRIx64 " value=0x%" PRIx64
448
449
+# lan9118_phy.c
450
+lan9118_phy_read(uint16_t val, int reg) "[0x%02x] -> 0x%04" PRIx16
451
+lan9118_phy_write(uint16_t val, int reg) "[0x%02x] <- 0x%04" PRIx16
452
+lan9118_phy_update_link(const char *s) "%s"
453
+lan9118_phy_reset(void) ""
454
+
455
# lance.c
456
lance_mem_readw(uint64_t addr, uint32_t ret) "addr=0x%"PRIx64"val=0x%04x"
457
lance_mem_writew(uint64_t addr, uint32_t val) "addr=0x%"PRIx64"val=0x%04x"
458
@@ -XXX,XX +XXX,XX @@ i82596_set_multicast(uint16_t count) "Added %d multicast entries"
459
i82596_channel_attention(void *s) "%p: Received CHANNEL ATTENTION"
460
461
# imx_fec.c
462
-imx_phy_read(uint32_t val, int phy, int reg) "0x%04"PRIx32" <= phy[%d].reg[%d]"
463
imx_phy_read_num(int phy, int configured) "read request from unconfigured phy %d (configured %d)"
464
-imx_phy_write(uint32_t val, int phy, int reg) "0x%04"PRIx32" => phy[%d].reg[%d]"
465
imx_phy_write_num(int phy, int configured) "write request to unconfigured phy %d (configured %d)"
466
-imx_phy_update_link(const char *s) "%s"
467
-imx_phy_reset(void) ""
468
imx_fec_read_bd(uint64_t addr, int flags, int len, int data) "tx_bd 0x%"PRIx64" flags 0x%04x len %d data 0x%08x"
469
imx_enet_read_bd(uint64_t addr, int flags, int len, int data, int options, int status) "tx_bd 0x%"PRIx64" flags 0x%04x len %d data 0x%08x option 0x%04x status 0x%04x"
470
imx_eth_tx_bd_busy(void) "tx_bd ran out of descriptors to transmit"
381
--
471
--
382
2.20.1
472
2.34.1
383
384
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Bernhard Beschow <shentey@gmail.com>
2
2
3
Turns 0x70 into 0xe0 (== 0x70 << 1) which adds the missing MII_ANLPAR_TX and
4
fixes the MSB of the selector field to be zero, as specified in the datasheet.
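
For reference, here is how the two constants decompose (bit values as in
include/hw/net/mii.h; this breakdown is mine, not part of the patch):

    /* 0x0f71 (old) = PAUSEASY|PAUSE|T4|TXFD | 10FD|10 | BIT(4) | CSMACD */
    /* 0x0fe1 (new) = PAUSEASY|PAUSE|T4|TXFD | TX|10FD|10      | CSMACD */

i.e. the spurious bit 4 (the MSB of the 5-bit selector field) is cleared
and the missing bit 7 (MII_ANLPAR_TX) is set.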
5
6
Fixes: 2a424990170b ("LAN9118 emulation")
7
Signed-off-by: Bernhard Beschow <shentey@gmail.com>
8
Tested-by: Guenter Roeck <linux@roeck-us.net>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Stephen Long <steplong@quicinc.com>
10
Message-id: 20241102125724.532843-4-shentey@gmail.com
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210525010358.152808-73-richard.henderson@linaro.org
7
Message-Id: <20200428174332.17162-2-steplong@quicinc.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
12
---
11
target/arm/helper-sve.h | 5 +++++
13
hw/net/lan9118_phy.c | 2 +-
12
target/arm/sve.decode | 4 ++++
14
1 file changed, 1 insertion(+), 1 deletion(-)
13
target/arm/sve_helper.c | 20 ++++++++++++++++++++
14
target/arm/translate-sve.c | 16 ++++++++++++++++
15
4 files changed, 45 insertions(+)
16
15
17
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
16
diff --git a/hw/net/lan9118_phy.c b/hw/net/lan9118_phy.c
18
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-sve.h
18
--- a/hw/net/lan9118_phy.c
20
+++ b/target/arm/helper-sve.h
19
+++ b/hw/net/lan9118_phy.c
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_cdot_idx_s, TCG_CALL_NO_RWG,
20
@@ -XXX,XX +XXX,XX @@ uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg)
22
void, ptr, ptr, ptr, ptr, i32)
21
val = s->advertise;
23
DEF_HELPER_FLAGS_5(sve2_cdot_idx_d, TCG_CALL_NO_RWG,
22
break;
24
void, ptr, ptr, ptr, ptr, i32)
23
case 5: /* Auto-neg Link Partner Ability */
25
+
24
- val = 0x0f71;
26
+DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG,
25
+ val = 0x0fe1;
27
+ void, ptr, ptr, ptr, ptr, i32)
26
break;
28
+DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG,
27
case 6: /* Auto-neg Expansion */
29
+ void, ptr, ptr, ptr, ptr, i32)
28
val = 1;
30
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/sve.decode
33
+++ b/target/arm/sve.decode
34
@@ -XXX,XX +XXX,XX @@ SM4E 01000101 00 10001 1 11100 0 ..... ..... @rdn_rm_e0
35
# SVE2 crypto constructive binary operations
36
SM4EKEY 01000101 00 1 ..... 11110 0 ..... ..... @rd_rn_rm_e0
37
RAX1 01000101 00 1 ..... 11110 1 ..... ..... @rd_rn_rm_e0
38
+
39
+### SVE2 floating-point convert precision odd elements
40
+FCVTNT_sh 01100100 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0
41
+FCVTNT_ds 01100100 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0
42
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/sve_helper.c
45
+++ b/target/arm/sve_helper.c
46
@@ -XXX,XX +XXX,XX @@ void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va,
47
d[3] = float64_add(a[3], float64_add(p0, p1, status), status);
48
}
49
}
50
+
51
+#define DO_FCVTNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
52
+void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
53
+{ \
54
+ intptr_t i = simd_oprsz(desc); \
55
+ uint64_t *g = vg; \
56
+ do { \
57
+ uint64_t pg = g[(i - 1) >> 6]; \
58
+ do { \
59
+ i -= sizeof(TYPEW); \
60
+ if (likely((pg >> (i & 63)) & 1)) { \
61
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
62
+ *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, status); \
63
+ } \
64
+ } while (i & 63); \
65
+ } while (i != 0); \
66
+}
67
+
68
+DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16)
69
+DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, , H1_4, float64_to_float32)
70
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/target/arm/translate-sve.c
73
+++ b/target/arm/translate-sve.c
74
@@ -XXX,XX +XXX,XX @@ static bool trans_RAX1(DisasContext *s, arg_rrr_esz *a)
75
}
76
return true;
77
}
78
+
79
+static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a)
80
+{
81
+ if (!dc_isar_feature(aa64_sve2, s)) {
82
+ return false;
83
+ }
84
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh);
85
+}
86
+
87
+static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a)
88
+{
89
+ if (!dc_isar_feature(aa64_sve2, s)) {
90
+ return false;
91
+ }
92
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_ds);
93
+}
94
--
29
--
95
2.20.1
30
2.34.1
96
97
1
From: Stephen Long <steplong@quicinc.com>
1
From: Bernhard Beschow <shentey@gmail.com>
2
3
Prefer named constants over magic values for better readability.
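
As one illustration of what the hunks below do, the BMCR write mask
0x7980 decomposes as (my arithmetic, using the mii.h values):

    0x4000 MII_BMCR_LOOPBACK | 0x2000 MII_BMCR_SPEED100 |
    0x1000 MII_BMCR_AUTOEN   | 0x0800 MII_BMCR_PDOWN    |
    0x0100 MII_BMCR_FD       | 0x0080 MII_BMCR_CTST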
2
4
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Stephen Long <steplong@quicinc.com>
6
Signed-off-by: Bernhard Beschow <shentey@gmail.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Tested-by: Guenter Roeck <linux@roeck-us.net>
6
Message-id: 20210525010358.152808-72-richard.henderson@linaro.org
8
Message-id: 20241102125724.532843-5-shentey@gmail.com
7
Message-Id: <20200428144352.9275-1-steplong@quicinc.com>
8
[rth: rearrange the macros a little and rebase]
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
target/arm/helper-sve.h | 10 +++++
11
include/hw/net/mii.h | 6 +++++
13
target/arm/sve.decode | 5 +++
12
hw/net/lan9118_phy.c | 63 ++++++++++++++++++++++++++++----------------
14
target/arm/sve_helper.c | 90 ++++++++++++++++++++++++++++++--------
13
2 files changed, 46 insertions(+), 23 deletions(-)
15
target/arm/translate-sve.c | 33 ++++++++++++++
16
4 files changed, 119 insertions(+), 19 deletions(-)
17
14
18
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
15
diff --git a/include/hw/net/mii.h b/include/hw/net/mii.h
19
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-sve.h
17
--- a/include/hw/net/mii.h
21
+++ b/target/arm/helper-sve.h
18
+++ b/include/hw/net/mii.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
19
@@ -XXX,XX +XXX,XX @@
23
DEF_HELPER_FLAGS_4(sve_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
20
#define MII_BMSR_JABBER (1 << 1) /* Jabber detected */
24
DEF_HELPER_FLAGS_4(sve_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
21
#define MII_BMSR_EXTCAP (1 << 0) /* Ext-reg capability */
25
22
26
+DEF_HELPER_FLAGS_5(sve2_tbl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
23
+#define MII_ANAR_RFAULT (1 << 13) /* Say we can detect faults */
27
+DEF_HELPER_FLAGS_5(sve2_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
24
#define MII_ANAR_PAUSE_ASYM (1 << 11) /* Try for asymmetric pause */
28
+DEF_HELPER_FLAGS_5(sve2_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
25
#define MII_ANAR_PAUSE (1 << 10) /* Try for pause */
29
+DEF_HELPER_FLAGS_5(sve2_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
26
#define MII_ANAR_TXFD (1 << 8)
27
@@ -XXX,XX +XXX,XX @@
28
#define MII_ANAR_10FD (1 << 6)
29
#define MII_ANAR_10 (1 << 5)
30
#define MII_ANAR_CSMACD (1 << 0)
31
+#define MII_ANAR_SELECT (0x001f) /* Selector bits */
32
33
#define MII_ANLPAR_ACK (1 << 14)
34
#define MII_ANLPAR_PAUSEASY (1 << 11) /* can pause asymmetrically */
35
@@ -XXX,XX +XXX,XX @@
36
#define RTL8201CP_PHYID1 0x0000
37
#define RTL8201CP_PHYID2 0x8201
38
39
+/* SMSC LAN9118 */
40
+#define SMSCLAN9118_PHYID1 0x0007
41
+#define SMSCLAN9118_PHYID2 0xc0d1
30
+
42
+
31
+DEF_HELPER_FLAGS_4(sve2_tbx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
43
/* RealTek 8211E */
32
+DEF_HELPER_FLAGS_4(sve2_tbx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
44
#define RTL8211E_PHYID1 0x001c
33
+DEF_HELPER_FLAGS_4(sve2_tbx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
45
#define RTL8211E_PHYID2 0xc915
34
+DEF_HELPER_FLAGS_4(sve2_tbx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
46
diff --git a/hw/net/lan9118_phy.c b/hw/net/lan9118_phy.c
35
+
36
DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
37
DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
38
DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
39
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
40
index XXXXXXX..XXXXXXX 100644
47
index XXXXXXX..XXXXXXX 100644
41
--- a/target/arm/sve.decode
48
--- a/hw/net/lan9118_phy.c
42
+++ b/target/arm/sve.decode
49
+++ b/hw/net/lan9118_phy.c
43
@@ -XXX,XX +XXX,XX @@ TBL 00000101 .. 1 ..... 001100 ..... ..... @rd_rn_rm
50
@@ -XXX,XX +XXX,XX @@
44
# SVE unpack vector elements
51
45
UNPK 00000101 esz:2 1100 u:1 h:1 001110 rn:5 rd:5
52
#include "qemu/osdep.h"
46
53
#include "hw/net/lan9118_phy.h"
47
+# SVE2 Table Lookup (three sources)
54
+#include "hw/net/mii.h"
48
+
55
#include "hw/irq.h"
49
+TBL_sve2 00000101 .. 1 ..... 001010 ..... ..... @rd_rn_rm
56
#include "hw/resettable.h"
50
+TBX 00000101 .. 1 ..... 001011 ..... ..... @rd_rn_rm
57
#include "migration/vmstate.h"
51
+
58
@@ -XXX,XX +XXX,XX @@ uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg)
52
### SVE Permute - Predicates Group
59
uint16_t val;
53
60
54
# SVE permute predicate elements
61
switch (reg) {
55
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
62
- case 0: /* Basic Control */
56
index XXXXXXX..XXXXXXX 100644
63
+ case MII_BMCR:
57
--- a/target/arm/sve_helper.c
64
val = s->control;
58
+++ b/target/arm/sve_helper.c
65
break;
59
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc)
66
- case 1: /* Basic Status */
67
+ case MII_BMSR:
68
val = s->status;
69
break;
70
- case 2: /* ID1 */
71
- val = 0x0007;
72
+ case MII_PHYID1:
73
+ val = SMSCLAN9118_PHYID1;
74
break;
75
- case 3: /* ID2 */
76
- val = 0xc0d1;
77
+ case MII_PHYID2:
78
+ val = SMSCLAN9118_PHYID2;
79
break;
80
- case 4: /* Auto-neg advertisement */
81
+ case MII_ANAR:
82
val = s->advertise;
83
break;
84
- case 5: /* Auto-neg Link Partner Ability */
85
- val = 0x0fe1;
86
+ case MII_ANLPAR:
87
+ val = MII_ANLPAR_PAUSEASY | MII_ANLPAR_PAUSE | MII_ANLPAR_T4 |
88
+ MII_ANLPAR_TXFD | MII_ANLPAR_TX | MII_ANLPAR_10FD |
89
+ MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
90
break;
91
- case 6: /* Auto-neg Expansion */
92
- val = 1;
93
+ case MII_ANER:
94
+ val = MII_ANER_NWAY;
95
break;
96
case 29: /* Interrupt source. */
97
val = s->ints;
98
@@ -XXX,XX +XXX,XX @@ void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val)
99
trace_lan9118_phy_write(val, reg);
100
101
switch (reg) {
102
- case 0: /* Basic Control */
103
- if (val & 0x8000) {
104
+ case MII_BMCR:
105
+ if (val & MII_BMCR_RESET) {
106
lan9118_phy_reset(s);
107
} else {
108
- s->control = val & 0x7980;
109
+ s->control = val & (MII_BMCR_LOOPBACK | MII_BMCR_SPEED100 |
110
+ MII_BMCR_AUTOEN | MII_BMCR_PDOWN | MII_BMCR_FD |
111
+ MII_BMCR_CTST);
112
/* Complete autonegotiation immediately. */
113
- if (val & 0x1000) {
114
- s->status |= 0x0020;
115
+ if (val & MII_BMCR_AUTOEN) {
116
+ s->status |= MII_BMSR_AN_COMP;
117
}
118
}
119
break;
120
- case 4: /* Auto-neg advertisement */
121
- s->advertise = (val & 0x2d7f) | 0x80;
122
+ case MII_ANAR:
123
+ s->advertise = (val & (MII_ANAR_RFAULT | MII_ANAR_PAUSE_ASYM |
124
+ MII_ANAR_PAUSE | MII_ANAR_10FD | MII_ANAR_10 |
125
+ MII_ANAR_SELECT))
126
+ | MII_ANAR_TX;
127
break;
128
case 30: /* Interrupt mask */
129
s->int_mask = val & 0xff;
130
@@ -XXX,XX +XXX,XX @@ void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down)
131
/* Autonegotiation status mirrors link status. */
132
if (link_down) {
133
trace_lan9118_phy_update_link("down");
134
- s->status &= ~0x0024;
135
+ s->status &= ~(MII_BMSR_AN_COMP | MII_BMSR_LINK_ST);
136
s->ints |= PHY_INT_DOWN;
137
} else {
138
trace_lan9118_phy_update_link("up");
139
- s->status |= 0x0024;
140
+ s->status |= MII_BMSR_AN_COMP | MII_BMSR_LINK_ST;
141
s->ints |= PHY_INT_ENERGYON;
142
s->ints |= PHY_INT_AUTONEG_COMPLETE;
60
}
143
}
61
}
144
@@ -XXX,XX +XXX,XX @@ void lan9118_phy_reset(Lan9118PhyState *s)
62
63
-#define DO_TBL(NAME, TYPE, H) \
64
-void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
65
-{ \
66
- intptr_t i, opr_sz = simd_oprsz(desc); \
67
- uintptr_t elem = opr_sz / sizeof(TYPE); \
68
- TYPE *d = vd, *n = vn, *m = vm; \
69
- ARMVectorReg tmp; \
70
- if (unlikely(vd == vn)) { \
71
- n = memcpy(&tmp, vn, opr_sz); \
72
- } \
73
- for (i = 0; i < elem; i++) { \
74
- TYPE j = m[H(i)]; \
75
- d[H(i)] = j < elem ? n[H(j)] : 0; \
76
- } \
77
+typedef void tb_impl_fn(void *, void *, void *, void *, uintptr_t, bool);
78
+
79
+static inline void do_tbl1(void *vd, void *vn, void *vm, uint32_t desc,
80
+ bool is_tbx, tb_impl_fn *fn)
81
+{
82
+ ARMVectorReg scratch;
83
+ uintptr_t oprsz = simd_oprsz(desc);
84
+
85
+ if (unlikely(vd == vn)) {
86
+ vn = memcpy(&scratch, vn, oprsz);
87
+ }
88
+
89
+ fn(vd, vn, NULL, vm, oprsz, is_tbx);
90
}
91
92
-DO_TBL(sve_tbl_b, uint8_t, H1)
93
-DO_TBL(sve_tbl_h, uint16_t, H2)
94
-DO_TBL(sve_tbl_s, uint32_t, H4)
95
-DO_TBL(sve_tbl_d, uint64_t, )
96
+static inline void do_tbl2(void *vd, void *vn0, void *vn1, void *vm,
97
+ uint32_t desc, bool is_tbx, tb_impl_fn *fn)
98
+{
99
+ ARMVectorReg scratch;
100
+ uintptr_t oprsz = simd_oprsz(desc);
101
102
-#undef TBL
103
+ if (unlikely(vd == vn0)) {
104
+ vn0 = memcpy(&scratch, vn0, oprsz);
105
+ if (vd == vn1) {
106
+ vn1 = vn0;
107
+ }
108
+ } else if (unlikely(vd == vn1)) {
109
+ vn1 = memcpy(&scratch, vn1, oprsz);
110
+ }
111
+
112
+ fn(vd, vn0, vn1, vm, oprsz, is_tbx);
113
+}
114
+
115
+#define DO_TB(SUFF, TYPE, H) \
116
+static inline void do_tb_##SUFF(void *vd, void *vt0, void *vt1, \
117
+ void *vm, uintptr_t oprsz, bool is_tbx) \
118
+{ \
119
+ TYPE *d = vd, *tbl0 = vt0, *tbl1 = vt1, *indexes = vm; \
120
+ uintptr_t i, nelem = oprsz / sizeof(TYPE); \
121
+ for (i = 0; i < nelem; ++i) { \
122
+ TYPE index = indexes[H1(i)], val = 0; \
123
+ if (index < nelem) { \
124
+ val = tbl0[H(index)]; \
125
+ } else { \
126
+ index -= nelem; \
127
+ if (tbl1 && index < nelem) { \
128
+ val = tbl1[H(index)]; \
129
+ } else if (is_tbx) { \
130
+ continue; \
131
+ } \
132
+ } \
133
+ d[H(i)] = val; \
134
+ } \
135
+} \
136
+void HELPER(sve_tbl_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \
137
+{ \
138
+ do_tbl1(vd, vn, vm, desc, false, do_tb_##SUFF); \
139
+} \
140
+void HELPER(sve2_tbl_##SUFF)(void *vd, void *vn0, void *vn1, \
141
+ void *vm, uint32_t desc) \
142
+{ \
143
+ do_tbl2(vd, vn0, vn1, vm, desc, false, do_tb_##SUFF); \
144
+} \
145
+void HELPER(sve2_tbx_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \
146
+{ \
147
+ do_tbl1(vd, vn, vm, desc, true, do_tb_##SUFF); \
148
+}
149
+
150
+DO_TB(b, uint8_t, H1)
151
+DO_TB(h, uint16_t, H2)
152
+DO_TB(s, uint32_t, H4)
153
+DO_TB(d, uint64_t, )
154
+
155
+#undef DO_TB
156
157
#define DO_UNPK(NAME, TYPED, TYPES, HD, HS) \
158
void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
159
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/target/arm/translate-sve.c
162
+++ b/target/arm/translate-sve.c
163
@@ -XXX,XX +XXX,XX @@ static bool trans_TBL(DisasContext *s, arg_rrr_esz *a)
164
return true;
165
}
166
167
+static bool trans_TBL_sve2(DisasContext *s, arg_rrr_esz *a)
168
+{
169
+ static gen_helper_gvec_4 * const fns[4] = {
170
+ gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h,
171
+ gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d
172
+ };
173
+
174
+ if (!dc_isar_feature(aa64_sve2, s)) {
175
+ return false;
176
+ }
177
+ if (sve_access_check(s)) {
178
+ gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn,
179
+ (a->rn + 1) % 32, a->rm, 0);
180
+ }
181
+ return true;
182
+}
183
+
184
+static bool trans_TBX(DisasContext *s, arg_rrr_esz *a)
185
+{
186
+ static gen_helper_gvec_3 * const fns[4] = {
187
+ gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
188
+ gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
189
+ };
190
+
191
+ if (!dc_isar_feature(aa64_sve2, s)) {
192
+ return false;
193
+ }
194
+ if (sve_access_check(s)) {
195
+ gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
196
+ }
197
+ return true;
198
+}
199
+
200
static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
201
{
145
{
202
static gen_helper_gvec_2 * const fns[4][2] = {
146
trace_lan9118_phy_reset();
147
148
- s->control = 0x3000;
149
- s->status = 0x7809;
150
- s->advertise = 0x01e1;
151
+ s->control = MII_BMCR_AUTOEN | MII_BMCR_SPEED100;
152
+ s->status = MII_BMSR_100TX_FD
153
+ | MII_BMSR_100TX_HD
154
+ | MII_BMSR_10T_FD
155
+ | MII_BMSR_10T_HD
156
+ | MII_BMSR_AUTONEG
157
+ | MII_BMSR_EXTCAP;
158
+ s->advertise = MII_ANAR_TXFD
159
+ | MII_ANAR_TX
160
+ | MII_ANAR_10FD
161
+ | MII_ANAR_10
162
+ | MII_ANAR_CSMACD;
163
s->int_mask = 0;
164
s->ints = 0;
165
lan9118_phy_update_link(s, s->link_down);
203
--
166
--
204
2.20.1
167
2.34.1
205
206
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Bernhard Beschow <shentey@gmail.com>
2
3
The real device advertises 100 mbps full duplex, and the device model already advertises
4
100 mbps half duplex and 10 mbps full+half duplex. So advertise this mode to
5
make the model more realistic.
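
Concretely (my reading of the hunk below, not wording from the patch):
the old guest-writable mask lacked MII_ANAR_TXFD,

    s->advertise = (val & (... | MII_ANAR_10FD | MII_ANAR_10 | ...))
                 | MII_ANAR_TX;

so any guest write to MII_ANAR cleared the 100 mbps full duplex bit,
even though the reset value of s->advertise includes MII_ANAR_TXFD.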
2
6
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Bernhard Beschow <shentey@gmail.com>
5
Message-id: 20210525010358.152808-69-richard.henderson@linaro.org
9
Tested-by: Guenter Roeck <linux@roeck-us.net>
10
Message-id: 20241102125724.532843-6-shentey@gmail.com
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
12
---
8
target/arm/sve.decode | 6 ++++++
13
hw/net/lan9118_phy.c | 4 ++--
9
target/arm/translate-sve.c | 11 +++++++++++
14
1 file changed, 2 insertions(+), 2 deletions(-)
10
2 files changed, 17 insertions(+)
11
15
12
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
16
diff --git a/hw/net/lan9118_phy.c b/hw/net/lan9118_phy.c
13
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/sve.decode
18
--- a/hw/net/lan9118_phy.c
15
+++ b/target/arm/sve.decode
19
+++ b/hw/net/lan9118_phy.c
16
@@ -XXX,XX +XXX,XX @@ STNT1_zprz 1110010 .. 00 ..... 001 ... ..... ..... \
20
@@ -XXX,XX +XXX,XX @@ void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val)
17
# SVE2 32-bit scatter non-temporal store (vector plus scalar)
21
break;
18
STNT1_zprz 1110010 .. 10 ..... 001 ... ..... ..... \
22
case MII_ANAR:
19
@rprr_scatter_store xs=0 esz=2 scale=0
23
s->advertise = (val & (MII_ANAR_RFAULT | MII_ANAR_PAUSE_ASYM |
20
+
24
- MII_ANAR_PAUSE | MII_ANAR_10FD | MII_ANAR_10 |
21
+### SVE2 Crypto Extensions
25
- MII_ANAR_SELECT))
22
+
26
+ MII_ANAR_PAUSE | MII_ANAR_TXFD | MII_ANAR_10FD |
23
+# SVE2 crypto unary operations
27
+ MII_ANAR_10 | MII_ANAR_SELECT))
24
+# AESMC and AESIMC
28
| MII_ANAR_TX;
25
+AESMC 01000101 00 10000011100 decrypt:1 00000 rd:5
29
break;
26
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
30
case 30: /* Interrupt mask */
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/translate-sve.c
29
+++ b/target/arm/translate-sve.c
30
@@ -XXX,XX +XXX,XX @@ static bool trans_USDOT_zzzz(DisasContext *s, arg_USDOT_zzzz *a)
31
}
32
return true;
33
}
34
+
35
+static bool trans_AESMC(DisasContext *s, arg_AESMC *a)
36
+{
37
+ if (!dc_isar_feature(aa64_sve2_aes, s)) {
38
+ return false;
39
+ }
40
+ if (sve_access_check(s)) {
41
+ gen_gvec_ool_zz(s, gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt);
42
+ }
43
+ return true;
44
+}
45
--
31
--
46
2.20.1
32
2.34.1
47
48
1
From: Richard Henderson <richard.henderson@linaro.org>
1
For IEEE fused multiply-add, the (0 * inf) + NaN case should raise
2
Invalid for the multiplication of 0 by infinity. Currently we handle
3
this in the per-architecture ifdef ladder in pickNaNMulAdd().
4
However, since this isn't really architecture-specific, we can hoist
5
it up to the generic code.
2
6
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
For the cases where the infzero test in pickNaNMulAdd was
4
Message-id: 20210525010358.152808-63-richard.henderson@linaro.org
8
returning 2, we can delete the check entirely and allow the
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
code to fall into the normal pick-a-NaN handling, because this
10
will return 2 anyway (input 'c' being the only NaN in this case).
11
For the cases where infzero was returning 3 to indicate "return
12
the default NaN", we must retain that "return 3".
13
14
For Arm, this looks like it might be a behaviour change because we
15
used to set float_flag_invalid | float_flag_invalid_imz only if C is
16
a quiet NaN. However, it is not, because Arm target code never looks
17
at float_flag_invalid_imz, and for the (0 * inf) + SNaN case we
18
already raised float_flag_invalid via the "abc_mask &
19
float_cmask_snan" check in pick_nan_muladd.
20
21
For any target architecture using the "default implementation" at the
22
bottom of the ifdef, this is a behaviour change but will be fixing a
23
bug (where we failed to raise the Invalid exception for (0 * inf +
24
QNaN). The architectures using the default case are:
25
* hppa
26
* i386
27
* sh4
28
* tricore
29
30
The x86, Tricore and SH4 CPU architecture manuals are clear that this
31
should have raised Invalid; HPPA is a bit vaguer but still seems
32
clear enough.
33
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
34
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
35
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
36
Message-id: 20241202131347.498124-2-peter.maydell@linaro.org
7
---
37
---
8
target/arm/helper-sve.h | 9 +++++++++
38
fpu/softfloat-parts.c.inc | 13 +++++++------
9
target/arm/sve.decode | 12 ++++++++++++
39
fpu/softfloat-specialize.c.inc | 29 +----------------------------
10
target/arm/sve_helper.c | 28 ++++++++++++++++++++++++++++
40
2 files changed, 8 insertions(+), 34 deletions(-)
11
target/arm/translate-sve.c | 15 +++++++++++++++
12
4 files changed, 64 insertions(+)
13
41
14
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
42
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
15
index XXXXXXX..XXXXXXX 100644
43
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-sve.h
44
--- a/fpu/softfloat-parts.c.inc
17
+++ b/target/arm/helper-sve.h
45
+++ b/fpu/softfloat-parts.c.inc
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_smull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
46
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
19
DEF_HELPER_FLAGS_4(sve2_smull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
47
int ab_mask, int abc_mask)
20
DEF_HELPER_FLAGS_4(sve2_umull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
48
{
21
DEF_HELPER_FLAGS_4(sve2_umull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
49
int which;
22
+
50
+ bool infzero = (ab_mask == float_cmask_infzero);
23
+DEF_HELPER_FLAGS_5(sve2_cmla_idx_h, TCG_CALL_NO_RWG,
51
24
+ void, ptr, ptr, ptr, ptr, i32)
52
if (unlikely(abc_mask & float_cmask_snan)) {
25
+DEF_HELPER_FLAGS_5(sve2_cmla_idx_s, TCG_CALL_NO_RWG,
53
float_raise(float_flag_invalid | float_flag_invalid_snan, s);
26
+ void, ptr, ptr, ptr, ptr, i32)
54
}
27
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_h, TCG_CALL_NO_RWG,
55
28
+ void, ptr, ptr, ptr, ptr, i32)
56
- which = pickNaNMulAdd(a->cls, b->cls, c->cls,
29
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_s, TCG_CALL_NO_RWG,
57
- ab_mask == float_cmask_infzero, s);
30
+ void, ptr, ptr, ptr, ptr, i32)
58
+ if (infzero) {
31
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
59
+ /* This is (0 * inf) + NaN or (inf * 0) + NaN */
32
index XXXXXXX..XXXXXXX 100644
60
+ float_raise(float_flag_invalid | float_flag_invalid_imz, s);
33
--- a/target/arm/sve.decode
34
+++ b/target/arm/sve.decode
35
@@ -XXX,XX +XXX,XX @@ SQDMLSLB_zzxw_d 01000100 11 1 ..... 0011.0 ..... ..... @rrxr_2a esz=3
36
SQDMLSLT_zzxw_s 01000100 10 1 ..... 0011.1 ..... ..... @rrxr_3a esz=2
37
SQDMLSLT_zzxw_d 01000100 11 1 ..... 0011.1 ..... ..... @rrxr_2a esz=3
38
39
+# SVE2 complex integer multiply-add (indexed)
40
+CMLA_zzxz_h 01000100 10 1 index:2 rm:3 0110 rot:2 rn:5 rd:5 \
41
+ ra=%reg_movprfx
42
+CMLA_zzxz_s 01000100 11 1 index:1 rm:4 0110 rot:2 rn:5 rd:5 \
43
+ ra=%reg_movprfx
44
+
45
+# SVE2 complex saturating integer multiply-add (indexed)
46
+SQRDCMLAH_zzxz_h 01000100 10 1 index:2 rm:3 0111 rot:2 rn:5 rd:5 \
47
+ ra=%reg_movprfx
48
+SQRDCMLAH_zzxz_s 01000100 11 1 index:1 rm:4 0111 rot:2 rn:5 rd:5 \
49
+ ra=%reg_movprfx
50
+
51
# SVE2 multiply-add long (indexed)
52
SMLALB_zzxw_s 01000100 10 1 ..... 1000.0 ..... ..... @rrxr_3a esz=2
53
SMLALB_zzxw_d 01000100 11 1 ..... 1000.0 ..... ..... @rrxr_2a esz=3
54
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/arm/sve_helper.c
57
+++ b/target/arm/sve_helper.c
58
@@ -XXX,XX +XXX,XX @@ DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_h, int16_t, H2, DO_SQRDMLAH_H)
59
DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_s, int32_t, H4, DO_SQRDMLAH_S)
60
DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t, , DO_SQRDMLAH_D)
61
62
+#define DO_CMLA_IDX_FUNC(NAME, TYPE, H, OP) \
63
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
64
+{ \
65
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
66
+ int rot = extract32(desc, SIMD_DATA_SHIFT, 2); \
67
+ int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2) * 2; \
68
+ int sel_a = rot & 1, sel_b = sel_a ^ 1; \
69
+ bool sub_r = rot == 1 || rot == 2; \
70
+ bool sub_i = rot >= 2; \
71
+ TYPE *d = vd, *n = vn, *m = vm, *a = va; \
72
+ for (i = 0; i < oprsz / sizeof(TYPE); i += 16 / sizeof(TYPE)) { \
73
+ TYPE elt2_a = m[H(i + idx + sel_a)]; \
74
+ TYPE elt2_b = m[H(i + idx + sel_b)]; \
75
+ for (j = 0; j < 16 / sizeof(TYPE); j += 2) { \
76
+ TYPE elt1_a = n[H(i + j + sel_a)]; \
77
+ d[H2(i + j)] = OP(elt1_a, elt2_a, a[H(i + j)], sub_r); \
78
+ d[H2(i + j + 1)] = OP(elt1_a, elt2_b, a[H(i + j + 1)], sub_i); \
79
+ } \
80
+ } \
81
+}
82
+
83
+DO_CMLA_IDX_FUNC(sve2_cmla_idx_h, int16_t, H2, DO_CMLA)
84
+DO_CMLA_IDX_FUNC(sve2_cmla_idx_s, int32_t, H4, DO_CMLA)
85
+
86
+DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H)
87
+DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S)
88
+
89
#undef DO_CMLA
90
#undef DO_CMLA_FUNC
91
+#undef DO_CMLA_IDX_FUNC
92
#undef DO_SQRDMLAH_B
93
#undef DO_SQRDMLAH_H
94
#undef DO_SQRDMLAH_S
95
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
96
index XXXXXXX..XXXXXXX 100644
97
--- a/target/arm/translate-sve.c
98
+++ b/target/arm/translate-sve.c
99
@@ -XXX,XX +XXX,XX @@ DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true)
100
101
#undef DO_SVE2_RRXR_TB
102
103
+#define DO_SVE2_RRXR_ROT(NAME, FUNC) \
104
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
105
+ { \
106
+ return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, \
107
+ (a->index << 2) | a->rot, FUNC); \
108
+ }
61
+ }
109
+
62
+
110
+DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h)
63
+ which = pickNaNMulAdd(a->cls, b->cls, c->cls, infzero, s);
111
+DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s)
64
65
if (s->default_nan_mode || which == 3) {
66
- /*
67
- * Note that this check is after pickNaNMulAdd so that function
68
- * has an opportunity to set the Invalid flag for infzero.
69
- */
70
parts_default_nan(a, s);
71
return a;
72
}
73
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
74
index XXXXXXX..XXXXXXX 100644
75
--- a/fpu/softfloat-specialize.c.inc
76
+++ b/fpu/softfloat-specialize.c.inc
77
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
78
* the default NaN
79
*/
80
if (infzero && is_qnan(c_cls)) {
81
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
82
return 3;
83
}
84
85
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
86
* case sets InvalidOp and returns the default NaN
87
*/
88
if (infzero) {
89
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
90
return 3;
91
}
92
/* Prefer sNaN over qNaN, in the a, b, c order. */
93
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
94
* For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan)
95
* case sets InvalidOp and returns the input value 'c'
96
*/
97
- if (infzero) {
98
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
99
- return 2;
100
- }
101
/* Prefer sNaN over qNaN, in the c, a, b order. */
102
if (is_snan(c_cls)) {
103
return 2;
104
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
105
* For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
106
* case sets InvalidOp and returns the input value 'c'
107
*/
108
- if (infzero) {
109
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
110
- return 2;
111
- }
112
+
112
+
113
+DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h)
113
/* Prefer sNaN over qNaN, in the c, a, b order. */
114
+DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s)
114
if (is_snan(c_cls)) {
115
+
115
return 2;
116
+#undef DO_SVE2_RRXR_ROT
116
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
117
+
117
* to return an input NaN if we have one (ie c) rather than generating
118
/*
118
* a default NaN
119
*** SVE Floating Point Multiply-Add Indexed Group
119
*/
120
*/
120
- if (infzero) {
121
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
122
- return 2;
123
- }
124
125
/* If fRA is a NaN return it; otherwise if fRB is a NaN return it;
126
* otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB
127
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
128
return 1;
129
}
130
#elif defined(TARGET_RISCV)
131
- /* For RISC-V, InvalidOp is set when multiplicands are Inf and zero */
132
- if (infzero) {
133
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
134
- }
135
return 3; /* default NaN */
136
#elif defined(TARGET_S390X)
137
if (infzero) {
138
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
139
return 3;
140
}
141
142
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
143
return 2;
144
}
145
#elif defined(TARGET_SPARC)
146
- /* For (inf,0,nan) return c. */
147
- if (infzero) {
148
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
149
- return 2;
150
- }
151
/* Prefer SNaN over QNaN, order C, B, A. */
152
if (is_snan(c_cls)) {
153
return 2;
154
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
155
* For Xtensa, the (inf,zero,nan) case sets InvalidOp and returns
156
* an input NaN if we have one (ie c).
157
*/
158
- if (infzero) {
159
- float_raise(float_flag_invalid | float_flag_invalid_imz, status);
160
- return 2;
161
- }
162
if (status->use_first_nan) {
163
if (is_nan(a_cls)) {
164
return 0;
121
--
165
--
122
2.20.1
166
2.34.1
123
124
1
From: Richard Henderson <richard.henderson@linaro.org>
1
If the target sets default_nan_mode then we're always going to return
2
the default NaN, and pickNaNMulAdd() no longer has any side effects.
3
For consistency with pickNaN(), check for default_nan_mode before
4
calling pickNaNMulAdd().
2
5
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
When we convert pickNaNMulAdd() to allow runtime selection of the NaN
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
propagation rule, this means we won't have to make the targets which
5
Message-id: 20210525010358.152808-68-richard.henderson@linaro.org
8
use default_nan_mode also set a propagation rule.
9
10
Since RiscV always uses default_nan_mode, this allows us to remove
11
its ifdef case from pickNaNMulAdd().
12
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20241202131347.498124-3-peter.maydell@linaro.org
7
---
16
---
8
target/arm/helper.h | 1 +
17
fpu/softfloat-parts.c.inc | 8 ++++++--
9
target/arm/sve.decode | 4 ++++
18
fpu/softfloat-specialize.c.inc | 9 +++++++--
10
target/arm/translate-sve.c | 16 ++++++++++++++++
19
2 files changed, 13 insertions(+), 4 deletions(-)
11
target/arm/vec_helper.c | 1 +
12
4 files changed, 22 insertions(+)
13
20
14
diff --git a/target/arm/helper.h b/target/arm/helper.h
21
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
15
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.h
23
--- a/fpu/softfloat-parts.c.inc
17
+++ b/target/arm/helper.h
24
+++ b/fpu/softfloat-parts.c.inc
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
25
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
19
DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
26
float_raise(float_flag_invalid | float_flag_invalid_imz, s);
20
DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
27
}
21
DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
28
22
+DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
29
- which = pickNaNMulAdd(a->cls, b->cls, c->cls, infzero, s);
23
30
+ if (s->default_nan_mode) {
24
DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
31
+ which = 3;
25
void, ptr, ptr, ptr, ptr, i32)
32
+ } else {
26
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
33
+ which = pickNaNMulAdd(a->cls, b->cls, c->cls, infzero, s);
34
+ }
35
36
- if (s->default_nan_mode || which == 3) {
37
+ if (which == 3) {
38
parts_default_nan(a, s);
39
return a;
40
}
41
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
27
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/sve.decode
43
--- a/fpu/softfloat-specialize.c.inc
29
+++ b/target/arm/sve.decode
44
+++ b/fpu/softfloat-specialize.c.inc
30
@@ -XXX,XX +XXX,XX @@ UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm
45
@@ -XXX,XX +XXX,XX @@ static int pickNaN(FloatClass a_cls, FloatClass b_cls,
31
CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx
46
static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
32
SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx
47
bool infzero, float_status *status)
33
48
{
34
+## SVE mixed sign dot product
49
+ /*
35
+
50
+ * We guarantee not to require the target to tell us how to
36
+USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm
51
+ * pick a NaN if we're always returning the default NaN.
37
+
52
+ * But if we're not in default-NaN mode then the target must
38
### SVE2 floating point matrix multiply accumulate
53
+ * specify.
39
54
+ */
40
FMMLA 01100100 .. 1 ..... 111001 ..... ..... @rda_rn_rm
55
+ assert(!status->default_nan_mode);
41
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
56
#if defined(TARGET_ARM)
42
index XXXXXXX..XXXXXXX 100644
57
/* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns
43
--- a/target/arm/translate-sve.c
58
* the default NaN
44
+++ b/target/arm/translate-sve.c
59
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
45
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a)
60
} else {
61
return 1;
46
}
62
}
47
return true;
63
-#elif defined(TARGET_RISCV)
48
}
64
- return 3; /* default NaN */
49
+
65
#elif defined(TARGET_S390X)
50
+static bool trans_USDOT_zzzz(DisasContext *s, arg_USDOT_zzzz *a)
66
if (infzero) {
51
+{
67
return 3;
52
+ if (a->esz != 2 || !dc_isar_feature(aa64_sve_i8mm, s)) {
53
+ return false;
54
+ }
55
+ if (sve_access_check(s)) {
56
+ unsigned vsz = vec_full_reg_size(s);
57
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd),
58
+ vec_full_reg_offset(s, a->rn),
59
+ vec_full_reg_offset(s, a->rm),
60
+ vec_full_reg_offset(s, a->ra),
61
+ vsz, vsz, 0, gen_helper_gvec_usdot_b);
62
+ }
63
+ return true;
64
+}
65
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/target/arm/vec_helper.c
68
+++ b/target/arm/vec_helper.c
69
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
70
71
DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t)
72
DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t)
73
+DO_DOT(gvec_usdot_b, uint32_t, uint8_t, int8_t)
74
DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t)
75
DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t)
76
77
--
68
--
78
2.20.1
69
2.34.1
79
80
1
From: Stephen Long <steplong@quicinc.com>
1
IEEE 754 does not define a fixed rule for what NaN to return in
2
2
the case of a fused multiply-add of inf * 0 + NaN. Different
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
architectures thus do different things:
4
Signed-off-by: Stephen Long <steplong@quicinc.com>
4
* some return the default NaN
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
* some return the input NaN
6
Message-id: 20210525010358.152808-47-richard.henderson@linaro.org
6
* Arm returns the default NaN if the input NaN is quiet,
7
Message-Id: <20200422165503.13511-1-steplong@quicinc.com>
7
and the input NaN if it is signalling
8
[rth: Fix indexing in helpers, expand macro to straight functions.]
8
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
We want to make this logic be runtime selected rather than
10
hardcoded into the binary, because:
11
* this will let us have multiple targets in one QEMU binary
12
* the Arm FEAT_AFP architectural feature includes letting
13
the guest select a NaN propagation rule at runtime
14
15
In this commit we add an enum for the propagation rule, the field in
16
float_status, and the corresponding getters and setters. We change
17
pickNaNMulAdd to honour this, but because all targets still leave
18
this field at its default 0 value, the fallback logic will pick the
19
rule type with the old ifdef ladder.
20
21
Note that four architectures both use the muladd softfloat functions
22
and did not have a branch of the ifdef ladder to specify their
23
behaviour (and so were ending up with the "default" case, probably
24
wrongly): i386, HPPA, SH4 and Tricore. SH4 and Tricore both set
25
default_nan_mode, and so will never get into pickNaNMulAdd(). For
26
HPPA and i386 we retain the same behaviour as the old default-case,
27
which is to not ever return the default NaN. This might not be
28
correct but it is not a behaviour change.
29
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
30
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
31
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
32
Message-id: 20241202131347.498124-4-peter.maydell@linaro.org
11
---
33
---
12
target/arm/cpu.h | 10 ++++++
34
include/fpu/softfloat-helpers.h | 11 ++++
13
target/arm/helper-sve.h | 3 ++
35
include/fpu/softfloat-types.h | 23 +++++++++
14
target/arm/sve.decode | 4 +++
36
fpu/softfloat-specialize.c.inc | 91 ++++++++++++++++++++++-----------
15
target/arm/sve_helper.c | 74 ++++++++++++++++++++++++++++++++++++++
37
3 files changed, 95 insertions(+), 30 deletions(-)
16
target/arm/translate-sve.c | 34 ++++++++++++++++++
38
17
5 files changed, 125 insertions(+)
39
diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h
18
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
41
--- a/include/fpu/softfloat-helpers.h
22
+++ b/target/arm/cpu.h
42
+++ b/include/fpu/softfloat-helpers.h
23
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
43
@@ -XXX,XX +XXX,XX @@ static inline void set_float_2nan_prop_rule(Float2NaNPropRule rule,
24
return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
44
status->float_2nan_prop_rule = rule;
25
}
45
}
26
46
27
+static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id)
47
+static inline void set_float_infzeronan_rule(FloatInfZeroNaNRule rule,
48
+ float_status *status)
28
+{
49
+{
29
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0;
50
+ status->float_infzeronan_rule = rule;
30
+}
51
+}
31
+
52
+
32
+static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id)
53
static inline void set_flush_to_zero(bool val, float_status *status)
54
{
55
status->flush_to_zero = val;
56
@@ -XXX,XX +XXX,XX @@ static inline Float2NaNPropRule get_float_2nan_prop_rule(float_status *status)
57
return status->float_2nan_prop_rule;
58
}
59
60
+static inline FloatInfZeroNaNRule get_float_infzeronan_rule(float_status *status)
33
+{
61
+{
34
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0;
62
+ return status->float_infzeronan_rule;
35
+}
63
+}
36
+
64
+
65
static inline bool get_flush_to_zero(float_status *status)
66
{
67
return status->flush_to_zero;
68
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
69
index XXXXXXX..XXXXXXX 100644
70
--- a/include/fpu/softfloat-types.h
71
+++ b/include/fpu/softfloat-types.h
72
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
73
float_2nan_prop_x87,
74
} Float2NaNPropRule;
75
76
+/*
77
+ * Rule for result of fused multiply-add 0 * Inf + NaN.
78
+ * This must be a NaN, but implementations differ on whether this
79
+ * is the input NaN or the default NaN.
80
+ *
81
+ * You don't need to set this if default_nan_mode is enabled.
82
+ * When not in default-NaN mode, it is an error for the target
83
+ * not to set the rule in float_status if it uses muladd, and we
84
+ * will assert if we need to handle an input NaN and no rule was
85
+ * selected.
86
+ */
87
+typedef enum __attribute__((__packed__)) {
88
+ /* No propagation rule specified */
89
+ float_infzeronan_none = 0,
90
+    /* Result is never the default NaN (so always the input NaN) */
+    float_infzeronan_dnan_never,
+    /* Result is always the default NaN */
+    float_infzeronan_dnan_always,
+    /* Result is the default NaN if the input NaN is quiet */
+    float_infzeronan_dnan_if_qnan,
+} FloatInfZeroNaNRule;
+
 /*
  * Floating Point Status. Individual architectures may maintain
  * several versions of float_status for different functions. The
@@ -XXX,XX +XXX,XX @@ typedef struct float_status {
     FloatRoundMode float_rounding_mode;
     FloatX80RoundPrec floatx80_rounding_precision;
     Float2NaNPropRule float_2nan_prop_rule;
+    FloatInfZeroNaNRule float_infzeronan_rule;
     bool tininess_before_rounding;
     /* should denormalised results go to zero and set the inexact flag? */
     bool flush_to_zero;
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaN(FloatClass a_cls, FloatClass b_cls,
 static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
                          bool infzero, float_status *status)
 {
+    FloatInfZeroNaNRule rule = status->float_infzeronan_rule;
+
     /*
      * We guarantee not to require the target to tell us how to
      * pick a NaN if we're always returning the default NaN.
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
      * specify.
      */
     assert(!status->default_nan_mode);
+
+    if (rule == float_infzeronan_none) {
+        /*
+         * Temporarily fall back to ifdef ladder
+         */
 #if defined(TARGET_ARM)
-    /* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns
-     * the default NaN
-     */
-    if (infzero && is_qnan(c_cls)) {
-        return 3;
+        /*
+         * For ARM, the (inf,zero,qnan) case returns the default NaN,
+         * but (inf,zero,snan) returns the input NaN.
+         */
+        rule = float_infzeronan_dnan_if_qnan;
+#elif defined(TARGET_MIPS)
+        if (snan_bit_is_one(status)) {
+            /*
+             * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan)
+             * case sets InvalidOp and returns the default NaN
+             */
+            rule = float_infzeronan_dnan_always;
+        } else {
+            /*
+             * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan)
+             * case sets InvalidOp and returns the input value 'c'
+             */
+            rule = float_infzeronan_dnan_never;
+        }
+#elif defined(TARGET_PPC) || defined(TARGET_SPARC) || \
+    defined(TARGET_XTENSA) || defined(TARGET_HPPA) || \
+    defined(TARGET_I386) || defined(TARGET_LOONGARCH)
+        /*
+         * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
+         * case sets InvalidOp and returns the input value 'c'
+         */
+        /*
+         * For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer
+         * to return an input NaN if we have one (ie c) rather than generating
+         * a default NaN
+         */
+        rule = float_infzeronan_dnan_never;
+#elif defined(TARGET_S390X)
+        rule = float_infzeronan_dnan_always;
+#endif
     }
 
+    if (infzero) {
+        /*
+         * Inf * 0 + NaN -- some implementations return the default NaN here,
+         * and some return the input NaN.
+         */
+        switch (rule) {
+        case float_infzeronan_dnan_never:
+            return 2;
+        case float_infzeronan_dnan_always:
+            return 3;
+        case float_infzeronan_dnan_if_qnan:
+            return is_qnan(c_cls) ? 3 : 2;
+        default:
+            g_assert_not_reached();
+        }
+    }
+
+#if defined(TARGET_ARM)
+
 /* This looks different from the ARM ARM pseudocode, because the ARM ARM
  * puts the operands to a fused mac operation (a*b)+c in the order c,a,b.
  */
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
     }
 #elif defined(TARGET_MIPS)
     if (snan_bit_is_one(status)) {
-        /*
-         * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan)
-         * case sets InvalidOp and returns the default NaN
-         */
-        if (infzero) {
-            return 3;
-        }
         /* Prefer sNaN over qNaN, in the a, b, c order. */
         if (is_snan(a_cls)) {
             return 0;
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
             return 2;
         }
     } else {
-        /*
-         * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan)
-         * case sets InvalidOp and returns the input value 'c'
-         */
         /* Prefer sNaN over qNaN, in the c, a, b order. */
         if (is_snan(c_cls)) {
             return 2;
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
         }
     }
 #elif defined(TARGET_LOONGARCH64)
-    /*
-     * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
-     * case sets InvalidOp and returns the input value 'c'
-     */
-
     /* Prefer sNaN over qNaN, in the c, a, b order. */
     if (is_snan(c_cls)) {
         return 2;
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
         return 1;
     }
 #elif defined(TARGET_PPC)
-    /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer
-     * to return an input NaN if we have one (ie c) rather than generating
-     * a default NaN
-     */
-
     /* If fRA is a NaN return it; otherwise if fRB is a NaN return it;
      * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB
      */
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
         return 1;
     }
 #elif defined(TARGET_S390X)
-    if (infzero) {
-        return 3;
-    }
-
     if (is_snan(a_cls)) {
         return 0;
     } else if (is_snan(b_cls)) {
--
2.34.1

Explicitly set a rule in the softfloat tests for the inf-zero-nan
muladd special case. In meson.build we put -DTARGET_ARM in fpcflags,
and so we should select here the Arm rule of
float_infzeronan_dnan_if_qnan.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20241202131347.498124-5-peter.maydell@linaro.org
---
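
For reference, a minimal sketch of what this rule selects, using the
existing float32_muladd() helper from include/fpu/softfloat.h (an
illustration only, not part of the patch):

    float_status st = { };
    set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &st);
    /* Inf * 0 plus a quiet NaN: the result is the default NaN */
    float32 r = float32_muladd(float32_infinity, float32_zero,
                               float32_default_nan(&st), 0, &st);

With float_infzeronan_dnan_never the same call would instead return the
input NaN passed as the third operand.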
 tests/fp/fp-bench.c | 5 +++++
 tests/fp/fp-test.c  | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/fp/fp-bench.c
+++ b/tests/fp/fp-bench.c
@@ -XXX,XX +XXX,XX @@ static void run_bench(void)
 {
     bench_func_t f;
 
+    /*
+     * These implementation-defined choices for various things IEEE
+     * doesn't specify match those used by the Arm architecture.
+     */
     set_float_2nan_prop_rule(float_2nan_prop_s_ab, &soft_status);
+    set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &soft_status);
 
     f = bench_funcs[operation][precision];
     g_assert(f);
diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/fp/fp-test.c
+++ b/tests/fp/fp-test.c
@@ -XXX,XX +XXX,XX @@ void run_test(void)
 {
     unsigned int i;
 
+    /*
+     * These implementation-defined choices for various things IEEE
+     * doesn't specify match those used by the Arm architecture.
+     */
     set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf);
+    set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &qsf);
 
     genCases_setLevel(test_level);
     verCases_maxErrorCount = n_max_errors;
--
2.34.1

Set the FloatInfZeroNaNRule explicitly for the Arm target,
so we can remove the ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-6-peter.maydell@linaro.org
---
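
A minimal sketch of the Arm behaviour this selects, using the existing
make_float32() and float32_muladd() helpers (illustration only, not part
of the patch; st is a float_status configured by
arm_set_default_fp_behaviours() as in the diff below):

    float32 snan = make_float32(0x7f800001);    /* a signalling NaN */
    float32 r = float32_muladd(float32_infinity, float32_zero, snan, 0, &st);
    /* r is the (quietened) input NaN, because the rule is dnan_if_qnan */

A quiet NaN third operand would have produced the default NaN instead.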
 target/arm/cpu.c               | 3 +++
 fpu/softfloat-specialize.c.inc | 8 +-------
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
  * * tininess-before-rounding
  * * 2-input NaN propagation prefers SNaN over QNaN, and then
  *   operand A over operand B (see FPProcessNaNs() pseudocode)
+ * * 0 * Inf + NaN returns the default NaN if the input NaN is quiet,
+ *   and the input NaN if it is signalling
  */
 static void arm_set_default_fp_behaviours(float_status *s)
 {
     set_float_detect_tininess(float_tininess_before_rounding, s);
     set_float_2nan_prop_rule(float_2nan_prop_s_ab, s);
+    set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s);
 }
 
 static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
         /*
          * Temporarily fall back to ifdef ladder
          */
-#if defined(TARGET_ARM)
-        /*
-         * For ARM, the (inf,zero,qnan) case returns the default NaN,
-         * but (inf,zero,snan) returns the input NaN.
-         */
-        rule = float_infzeronan_dnan_if_qnan;
-#elif defined(TARGET_MIPS)
+#if defined(TARGET_MIPS)
         if (snan_bit_is_one(status)) {
             /*
              * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan)
--
2.34.1

Set the FloatInfZeroNaNRule explicitly for s390, so we
can remove the ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-7-peter.maydell@linaro.org
---
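
For reference (not part of the patch): float_infzeronan_dnan_always means
every Inf * 0 + NaN case yields the default NaN, which matches what the
removed TARGET_S390X leg of the ifdef ladder did by returning 3, the
"use the default NaN" case, from pickNaNMulAdd(). In sketch form:

    set_float_infzeronan_rule(float_infzeronan_dnan_always, &env->fpu_status);
    /* any NaN addend to inf * 0 now produces the default NaN, */
    /* whether that input NaN is quiet or signalling */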
 target/s390x/cpu.c             | 2 ++
 fpu/softfloat-specialize.c.inc | 2 --
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_hold(Object *obj, ResetType type)
         set_float_detect_tininess(float_tininess_before_rounding,
                                   &env->fpu_status);
         set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fpu_status);
+        set_float_infzeronan_rule(float_infzeronan_dnan_always,
+                                  &env->fpu_status);
         /* fall through */
     case RESET_TYPE_S390_CPU_NORMAL:
         env->psw.mask &= ~PSW_MASK_RI;
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
          * a default NaN
          */
         rule = float_infzeronan_dnan_never;
-#elif defined(TARGET_S390X)
-        rule = float_infzeronan_dnan_always;
 #endif
     }
--
2.34.1

Set the FloatInfZeroNaNRule explicitly for the PPC target,
so we can remove the ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-8-peter.maydell@linaro.org
---
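
Note (not part of the commit): PPC keeps separate float_status structs
for scalar and vector FP, so the rule must be set on both, as the diff
below does; a target with a single status would need only the one call,
e.g.

    set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);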
 target/ppc/cpu_init.c          | 7 +++++++
 fpu/softfloat-specialize.c.inc | 7 +------
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type)
      */
     set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status);
     set_float_2nan_prop_rule(float_2nan_prop_ab, &env->vec_status);
+    /*
+     * For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer
+     * to return an input NaN if we have one (ie c) rather than generating
+     * a default NaN
+     */
+    set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
+    set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->vec_status);
 
     for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
         ppc_spr_t *spr = &env->spr_cb[i];
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
          */
         rule = float_infzeronan_dnan_never;
     }
-#elif defined(TARGET_PPC) || defined(TARGET_SPARC) || \
+#elif defined(TARGET_SPARC) || \
     defined(TARGET_XTENSA) || defined(TARGET_HPPA) || \
     defined(TARGET_I386) || defined(TARGET_LOONGARCH)
     /*
      * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
      * case sets InvalidOp and returns the input value 'c'
      */
-    /*
-     * For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer
-     * to return an input NaN if we have one (ie c) rather than generating
-     * a default NaN
-     */
     rule = float_infzeronan_dnan_never;
 #endif
 }
--
2.34.1

Set the FloatInfZeroNaNRule explicitly for the MIPS target,
so we can remove the ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-9-peter.maydell@linaro.org
---
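
Illustration (not part of the commit): the rule lives in float_status,
so the scalar FPU and MSA are configured independently, and the scalar
rule is re-selected whenever FCR31.NAN2008 changes, via the
restore_snan_bit_mode() path in the diff below:

    set_float_infzeronan_rule(nan2008 ? float_infzeronan_dnan_never
                                      : float_infzeronan_dnan_always,
                              &env->active_fpu.fp_status);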
 target/mips/fpu_helper.h       |  9 +++++++++
 target/mips/msa.c              |  4 ++++
 fpu/softfloat-specialize.c.inc | 16 +---------------
 3 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/fpu_helper.h
+++ b/target/mips/fpu_helper.h
@@ -XXX,XX +XXX,XX @@ static inline void restore_flush_mode(CPUMIPSState *env)
 static inline void restore_snan_bit_mode(CPUMIPSState *env)
 {
     bool nan2008 = env->active_fpu.fcr31 & (1 << FCR31_NAN2008);
+    FloatInfZeroNaNRule izn_rule;
 
     /*
      * With nan2008, SNaNs are silenced in the usual way.
@@ -XXX,XX +XXX,XX @@ static inline void restore_snan_bit_mode(CPUMIPSState *env)
      */
     set_snan_bit_is_one(!nan2008, &env->active_fpu.fp_status);
     set_default_nan_mode(!nan2008, &env->active_fpu.fp_status);
+    /*
+     * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan)
+     * case sets InvalidOp and returns the default NaN.
+     * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan)
+     * case sets InvalidOp and returns the input value 'c'.
+     */
+    izn_rule = nan2008 ? float_infzeronan_dnan_never : float_infzeronan_dnan_always;
+    set_float_infzeronan_rule(izn_rule, &env->active_fpu.fp_status);
 }
 
 static inline void restore_fp_status(CPUMIPSState *env)
diff --git a/target/mips/msa.c b/target/mips/msa.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/msa.c
+++ b/target/mips/msa.c
@@ -XXX,XX +XXX,XX @@ void msa_reset(CPUMIPSState *env)
 
     /* set proper signanling bit meaning ("1" means "quiet") */
     set_snan_bit_is_one(0, &env->active_tc.msa_fp_status);
+
+    /* Inf * 0 + NaN returns the input NaN */
+    set_float_infzeronan_rule(float_infzeronan_dnan_never,
+                              &env->active_tc.msa_fp_status);
 }
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
     /*
      * Temporarily fall back to ifdef ladder
      */
-#if defined(TARGET_MIPS)
-    if (snan_bit_is_one(status)) {
-        /*
-         * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan)
-         * case sets InvalidOp and returns the default NaN
-         */
-        rule = float_infzeronan_dnan_always;
-    } else {
-        /*
-         * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan)
-         * case sets InvalidOp and returns the input value 'c'
-         */
-        rule = float_infzeronan_dnan_never;
-    }
-#elif defined(TARGET_SPARC) || \
+#if defined(TARGET_SPARC) || \
     defined(TARGET_XTENSA) || defined(TARGET_HPPA) || \
     defined(TARGET_I386) || defined(TARGET_LOONGARCH)
     /*
--
2.34.1

Set the FloatInfZeroNaNRule explicitly for the SPARC target,
so we can remove the ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-10-peter.maydell@linaro.org
---
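
Sketch (not part of the patch) of the behaviour float_infzeronan_dnan_never
selects, using the existing make_float32()/float32_muladd() helpers:

    float32 qnan = make_float32(0x7fc00123);    /* quiet NaN, payload 0x123 */
    float32 r = float32_muladd(float32_infinity, float32_zero, qnan, 0, &st);
    /* r == qnan: the input NaN (payload included) comes back, though
     * float_flag_invalid is still raised for the inf * 0 */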
 target/sparc/cpu.c             | 2 ++
 fpu/softfloat-specialize.c.inc | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
      * the CPU state struct so it won't get zeroed on reset.
      */
     set_float_2nan_prop_rule(float_2nan_prop_s_ba, &env->fp_status);
+    /* For inf * 0 + NaN, return the input NaN */
+    set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
 
     cpu_exec_realizefn(cs, &local_err);
     if (local_err != NULL) {
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
     /*
      * Temporarily fall back to ifdef ladder
      */
-#if defined(TARGET_SPARC) || \
-    defined(TARGET_XTENSA) || defined(TARGET_HPPA) || \
+#if defined(TARGET_XTENSA) || defined(TARGET_HPPA) || \
     defined(TARGET_I386) || defined(TARGET_LOONGARCH)
     /*
      * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
--
2.34.1

Set the FloatInfZeroNaNRule explicitly for the xtensa target,
so we can remove the ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-11-peter.maydell@linaro.org
---
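
Sketch (not part of the patch), the float64 analogue of the behaviour
selected here, using the existing make_float64()/float64_muladd() helpers:

    float64 qnan = make_float64(0x7ff8000000000042ULL);
    float64 r = float64_muladd(float64_infinity, float64_zero, qnan, 0,
                               &env->fp_status);
    /* r is the input NaN: xtensa returns operand c for inf * 0 + NaN */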
 target/xtensa/cpu.c            | 2 ++
 fpu/softfloat-specialize.c.inc | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_reset_hold(Object *obj, ResetType type)
     reset_mmu(env);
     cs->halted = env->runstall;
 #endif
+    /* For inf * 0 + NaN, return the input NaN */
+    set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
     set_no_signaling_nans(!dfpu, &env->fp_status);
     xtensa_use_first_nan(env, !dfpu);
 }
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
     /*
      * Temporarily fall back to ifdef ladder
      */
-#if defined(TARGET_XTENSA) || defined(TARGET_HPPA) || \
+#if defined(TARGET_HPPA) || \
     defined(TARGET_I386) || defined(TARGET_LOONGARCH)
     /*
      * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
--
2.34.1

Set the FloatInfZeroNaNRule explicitly for the x86 target.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-12-peter.maydell@linaro.org
---
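
Sketch (not part of the commit) of the SDM behaviour this encodes: the
input NaN is selected for 0 * inf + NaN, payload preserved, e.g.

    float32 r = float32_muladd(float32_zero, float32_infinity,
                               make_float32(0x7fc00042), 0, &env->sse_status);
    /* r keeps the 0x7fc00042 pattern: the input NaN is returned */

Only env->sse_status gets the rule since, as the comment in the diff
below notes, only SSE code paths reach muladd.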
8
target/arm/cpu.h | 5 +++++
7
target/i386/tcg/fpu_helper.c | 7 +++++++
9
target/arm/helper.h | 4 ++++
8
fpu/softfloat-specialize.c.inc | 2 +-
10
target/arm/sve.decode | 4 ++++
9
2 files changed, 8 insertions(+), 1 deletion(-)
11
target/arm/translate-sve.c | 16 ++++++++++++++++
12
target/arm/vec_helper.c | 2 ++
13
5 files changed, 31 insertions(+)
14
10
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
11
diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
13
--- a/target/i386/tcg/fpu_helper.c
18
+++ b/target/arm/cpu.h
14
+++ b/target/i386/tcg/fpu_helper.c
19
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
15
@@ -XXX,XX +XXX,XX @@ void cpu_init_fp_statuses(CPUX86State *env)
20
return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
16
*/
17
set_float_2nan_prop_rule(float_2nan_prop_x87, &env->mmx_status);
18
set_float_2nan_prop_rule(float_2nan_prop_x87, &env->sse_status);
19
+ /*
20
+ * Only SSE has multiply-add instructions. In the SDM Section 14.5.2
21
+ * "Fused-Multiply-ADD (FMA) Numeric Behavior" the NaN handling is
22
+ * specified -- for 0 * inf + NaN the input NaN is selected, and if
23
+ * there are multiple input NaNs they are selected in the order a, b, c.
24
+ */
25
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->sse_status);
21
}
26
}
22
27
23
+static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id)
28
static inline uint8_t save_exception_flags(CPUX86State *env)
24
+{
29
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
25
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0;
26
+}
27
+
28
static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id)
29
{
30
return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0;
31
diff --git a/target/arm/helper.h b/target/arm/helper.h
32
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
33
--- a/target/arm/helper.h
31
--- a/fpu/softfloat-specialize.c.inc
34
+++ b/target/arm/helper.h
32
+++ b/fpu/softfloat-specialize.c.inc
35
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
33
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
36
void, ptr, ptr, ptr, ptr, i32)
34
* Temporarily fall back to ifdef ladder
37
DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
35
*/
38
void, ptr, ptr, ptr, ptr, i32)
36
#if defined(TARGET_HPPA) || \
39
+DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
37
- defined(TARGET_I386) || defined(TARGET_LOONGARCH)
40
+ void, ptr, ptr, ptr, ptr, i32)
38
+ defined(TARGET_LOONGARCH)
41
+DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
39
/*
42
+ void, ptr, ptr, ptr, ptr, i32)
40
* For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
43
41
* case sets InvalidOp and returns the input value 'c'
44
DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
45
void, ptr, ptr, ptr, ptr, i32)
46
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/sve.decode
49
+++ b/target/arm/sve.decode
50
@@ -XXX,XX +XXX,XX @@ SQRDMLSH_zzxz_h 01000100 0. 1 ..... 000101 ..... ..... @rrxr_3 esz=1
51
SQRDMLSH_zzxz_s 01000100 10 1 ..... 000101 ..... ..... @rrxr_2 esz=2
52
SQRDMLSH_zzxz_d 01000100 11 1 ..... 000101 ..... ..... @rrxr_1 esz=3
53
54
+# SVE mixed sign dot product (indexed)
55
+USDOT_zzxw_s 01000100 10 1 ..... 000110 ..... ..... @rrxr_2 esz=2
56
+SUDOT_zzxw_s 01000100 10 1 ..... 000111 ..... ..... @rrxr_2 esz=2
57
+
58
# SVE2 saturating multiply-add (indexed)
59
SQDMLALB_zzxw_s 01000100 10 1 ..... 0010.0 ..... ..... @rrxr_3a esz=2
60
SQDMLALB_zzxw_d 01000100 11 1 ..... 0010.0 ..... ..... @rrxr_2a esz=3
61
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/arm/translate-sve.c
64
+++ b/target/arm/translate-sve.c
65
@@ -XXX,XX +XXX,XX @@ DO_RRXR(trans_SDOT_zzxw_d, gen_helper_gvec_sdot_idx_h)
66
DO_RRXR(trans_UDOT_zzxw_s, gen_helper_gvec_udot_idx_b)
67
DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h)
68
69
+static bool trans_SUDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
70
+{
71
+ if (!dc_isar_feature(aa64_sve_i8mm, s)) {
72
+ return false;
73
+ }
74
+ return do_zzxz_ool(s, a, gen_helper_gvec_sudot_idx_b);
75
+}
76
+
77
+static bool trans_USDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
78
+{
79
+ if (!dc_isar_feature(aa64_sve_i8mm, s)) {
80
+ return false;
81
+ }
82
+ return do_zzxz_ool(s, a, gen_helper_gvec_usdot_idx_b);
83
+}
84
+
85
#undef DO_RRXR
86
87
static bool do_sve2_zzz_data(DisasContext *s, int rd, int rn, int rm, int data,
88
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/arm/vec_helper.c
91
+++ b/target/arm/vec_helper.c
92
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
93
94
DO_DOT_IDX(gvec_sdot_idx_b, int32_t, int8_t, int8_t, H4)
95
DO_DOT_IDX(gvec_udot_idx_b, uint32_t, uint8_t, uint8_t, H4)
96
+DO_DOT_IDX(gvec_sudot_idx_b, int32_t, int8_t, uint8_t, H4)
97
+DO_DOT_IDX(gvec_usdot_idx_b, int32_t, uint8_t, int8_t, H4)
98
DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, )
99
DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, )
100
101
--
42
--
102
2.20.1
43
2.34.1
103
104
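For readers following along, here is a minimal standalone sketch (not part of
the series; it assumes the usual QEMU softfloat headers) of what selecting
float_infzeronan_dnan_never means in practice: a 0 * Inf + QNaN muladd raises
Invalid but propagates the input NaN instead of returning the default NaN.

#include "qemu/osdep.h"
#include "fpu/softfloat.h"
#include "fpu/softfloat-helpers.h"

static float32 demo_infzeronan(void)
{
    float_status st = { };

    set_float_infzeronan_rule(float_infzeronan_dnan_never, &st);
    /* 0 * Inf + QNaN: Invalid is raised, and the input NaN (operand c)
     * is returned rather than the default NaN. */
    return float32_muladd(float32_zero, float32_infinity,
                          float32_default_nan(&st), 0, &st);
}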
From: Richard Henderson <richard.henderson@linaro.org>

We're about to add more variations on this theme.
Accept the inner loop for the _h variants, rather
than keeping it unrolled.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-66-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/vec_helper.c | 160 ++++++++--------------------------------
1 file changed, 29 insertions(+), 131 deletions(-)

diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t)
DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t)
DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t)

-void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm,
- void *va, uint32_t desc)
-{
- intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
- intptr_t index = simd_data(desc);
- int32_t *d = vd, *a = va;
- int8_t *n = vn;
- int8_t *m_indexed = (int8_t *)vm + H4(index) * 4;
-
- /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
- * Otherwise opr_sz is a multiple of 16.
- */
- segend = MIN(4, opr_sz_4);
- i = 0;
- do {
- int8_t m0 = m_indexed[i * 4 + 0];
- int8_t m1 = m_indexed[i * 4 + 1];
- int8_t m2 = m_indexed[i * 4 + 2];
- int8_t m3 = m_indexed[i * 4 + 3];
-
- do {
- d[i] = (a[i] +
- n[i * 4 + 0] * m0 +
- n[i * 4 + 1] * m1 +
- n[i * 4 + 2] * m2 +
- n[i * 4 + 3] * m3);
- } while (++i < segend);
- segend = i + 4;
- } while (i < opr_sz_4);
-
- clear_tail(d, opr_sz, simd_maxsz(desc));
+#define DO_DOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i = 0, opr_sz = simd_oprsz(desc); \
+ intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \
+ intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \
+ intptr_t index = simd_data(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m_indexed = (TYPEM *)vm + HD(index) * 4; \
+ do { \
+ TYPED m0 = m_indexed[i * 4 + 0]; \
+ TYPED m1 = m_indexed[i * 4 + 1]; \
+ TYPED m2 = m_indexed[i * 4 + 2]; \
+ TYPED m3 = m_indexed[i * 4 + 3]; \
+ do { \
+ d[i] = (a[i] + \
+ n[i * 4 + 0] * m0 + \
+ n[i * 4 + 1] * m1 + \
+ n[i * 4 + 2] * m2 + \
+ n[i * 4 + 3] * m3); \
+ } while (++i < segend); \
+ segend = i + 4; \
+ } while (i < opr_sz_n); \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
}

-void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm,
- void *va, uint32_t desc)
-{
- intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
- intptr_t index = simd_data(desc);
- uint32_t *d = vd, *a = va;
- uint8_t *n = vn;
- uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4;
-
- /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
- * Otherwise opr_sz is a multiple of 16.
- */
- segend = MIN(4, opr_sz_4);
- i = 0;
- do {
- uint8_t m0 = m_indexed[i * 4 + 0];
- uint8_t m1 = m_indexed[i * 4 + 1];
- uint8_t m2 = m_indexed[i * 4 + 2];
- uint8_t m3 = m_indexed[i * 4 + 3];
-
- do {
- d[i] = (a[i] +
- n[i * 4 + 0] * m0 +
- n[i * 4 + 1] * m1 +
- n[i * 4 + 2] * m2 +
- n[i * 4 + 3] * m3);
- } while (++i < segend);
- segend = i + 4;
- } while (i < opr_sz_4);
-
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
-
-void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm,
- void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
- intptr_t index = simd_data(desc);
- int64_t *d = vd, *a = va;
- int16_t *n = vn;
- int16_t *m_indexed = (int16_t *)vm + index * 4;
-
- /* This is supported by SVE only, so opr_sz is always a multiple of 16.
- * Process the entire segment all at once, writing back the results
- * only after we've consumed all of the inputs.
- */
- for (i = 0; i < opr_sz_8; i += 2) {
- int64_t d0, d1;
-
- d0 = a[i + 0];
- d0 += n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
- d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
- d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
- d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
-
- d1 = a[i + 1];
- d1 += n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
- d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
- d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
- d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];
-
- d[i + 0] = d0;
- d[i + 1] = d1;
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
-
-void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm,
- void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
- intptr_t index = simd_data(desc);
- uint64_t *d = vd, *a = va;
- uint16_t *n = vn;
- uint16_t *m_indexed = (uint16_t *)vm + index * 4;
-
- /* This is supported by SVE only, so opr_sz is always a multiple of 16.
- * Process the entire segment all at once, writing back the results
- * only after we've consumed all of the inputs.
- */
- for (i = 0; i < opr_sz_8; i += 2) {
- uint64_t d0, d1;
-
- d0 = a[i + 0];
- d0 += n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
- d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
- d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
- d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
-
- d1 = a[i + 1];
- d1 += n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
- d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
- d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
- d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];
-
- d[i + 0] = d0;
- d[i + 1] = d1;
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
+DO_DOT_IDX(gvec_sdot_idx_b, int32_t, int8_t, int8_t, H4)
+DO_DOT_IDX(gvec_udot_idx_b, uint32_t, uint8_t, uint8_t, H4)
+DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, )
+DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, )

void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
void *vfpst, uint32_t desc)
--
2.20.1

Set the FloatInfZeroNaNRule explicitly for the loongarch target.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-13-peter.maydell@linaro.org
---
target/loongarch/tcg/fpu_helper.c | 5 +++++
fpu/softfloat-specialize.c.inc | 7 +------
2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/target/loongarch/tcg/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/fpu_helper.c
+++ b/target/loongarch/tcg/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void restore_fp_status(CPULoongArchState *env)
&env->fp_status);
set_flush_to_zero(0, &env->fp_status);
set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status);
+ /*
+ * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
+ * case sets InvalidOp and returns the input value 'c'
+ */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
}

int ieee_ex_to_loongarch(int xcpt)
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
/*
* Temporarily fall back to ifdef ladder
*/
-#if defined(TARGET_HPPA) || \
- defined(TARGET_LOONGARCH)
- /*
- * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan)
- * case sets InvalidOp and returns the input value 'c'
- */
+#if defined(TARGET_HPPA)
rule = float_infzeronan_dnan_never;
#endif
}
--
2.34.1
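To make the segmented semantics of the DO_DOT_IDX macro above easier to see,
here is a rough standalone restatement (sketch only; the H4 byte-swizzle and
the opr_sz == 8 AdvSIMD special case are deliberately elided) of what the
8-bit signed expansion computes: every 128-bit segment uses the one group of
four bytes of m selected by the index from that same segment.

#include <stdint.h>
#include <stddef.h>

static void sdot_idx_b_sketch(int32_t *d, const int8_t *n, const int8_t *m,
                              const int32_t *a, size_t oprsz, int index)
{
    for (size_t seg = 0; seg < oprsz / 16; seg++) {
        /* the indexed 4-byte group, taken from this segment of m */
        const int8_t *mseg = m + seg * 16 + index * 4;
        for (size_t e = 0; e < 4; e++) {
            size_t i = seg * 4 + e;     /* result element */
            int32_t sum = a[i];
            for (size_t k = 0; k < 4; k++) {
                sum += n[i * 4 + k] * mseg[k];
            }
            d[i] = sum;
        }
    }
}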
From: Richard Henderson <richard.henderson@linaro.org>

Will be used for SVE2 isa subset enablement.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 16 ++++++++++++++++
target/arm/helper.c | 3 +--
target/arm/kvm64.c | 21 +++++++++++++++------
3 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
uint64_t id_aa64mmfr2;
uint64_t id_aa64dfr0;
uint64_t id_aa64dfr1;
+ uint64_t id_aa64zfr0;
} isar;
uint64_t midr;
uint32_t revidr;
@@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
FIELD(ID_AA64DFR0, MTPMU, 48, 4)

+FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
+FIELD(ID_AA64ZFR0, AES, 4, 4)
+FIELD(ID_AA64ZFR0, BITPERM, 16, 4)
+FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4)
+FIELD(ID_AA64ZFR0, SHA3, 32, 4)
+FIELD(ID_AA64ZFR0, SM4, 40, 4)
+FIELD(ID_AA64ZFR0, I8MM, 44, 4)
+FIELD(ID_AA64ZFR0, F32MM, 52, 4)
+FIELD(ID_AA64ZFR0, F64MM, 56, 4)
+
FIELD(ID_DFR0, COPDBG, 0, 4)
FIELD(ID_DFR0, COPSDBG, 4, 4)
FIELD(ID_DFR0, MMAPDBG, 8, 4)
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
}

+static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
+}
+
/*
* Feature tests for "does this exist in either 32-bit or 64-bit?"
*/
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- /* At present, only SVEver == 0 is defined anyway. */
- .resetvalue = 0 },
+ .resetvalue = cpu->isar.id_aa64zfr0 },
{ .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)

sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;

- kvm_arm_destroy_scratch_host_vcpu(fdarray);
-
- if (err < 0) {
- return false;
- }
-
/* Add feature bits that can't appear until after VCPU init. */
if (sve_supported) {
t = ahcf->isar.id_aa64pfr0;
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
ahcf->isar.id_aa64pfr0 = t;
+
+ /*
+ * Before v5.1, KVM did not support SVE and did not expose
+ * ID_AA64ZFR0_EL1 even as RAZ. After v5.1, KVM still does
+ * not expose the register to "user" requests like this
+ * unless the host supports SVE.
+ */
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
+ ARM64_SYS_REG(3, 0, 0, 4, 4));
+ }
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+
+ if (err < 0) {
+ return false;
}

/*
--
2.20.1

Set the FloatInfZeroNaNRule explicitly for the HPPA target,
so we can remove the ifdef from pickNaNMulAdd().

As this is the last target to be converted to explicitly setting
the rule, we can remove the fallback code in pickNaNMulAdd()
entirely.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-14-peter.maydell@linaro.org
---
target/hppa/fpu_helper.c | 2 ++
fpu/softfloat-specialize.c.inc | 13 +------------
2 files changed, 3 insertions(+), 12 deletions(-)

diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/fpu_helper.c
+++ b/target/hppa/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(loaded_fr0)(CPUHPPAState *env)
* HPPA does note implement a CPU reset method at all...
*/
set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status);
+ /* For inf * 0 + NaN, return the input NaN */
+ set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
}

void cpu_hppa_loaded_fr0(CPUHPPAState *env)
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaN(FloatClass a_cls, FloatClass b_cls,
static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
bool infzero, float_status *status)
{
- FloatInfZeroNaNRule rule = status->float_infzeronan_rule;
-
/*
* We guarantee not to require the target to tell us how to
* pick a NaN if we're always returning the default NaN.
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
*/
assert(!status->default_nan_mode);

- if (rule == float_infzeronan_none) {
- /*
- * Temporarily fall back to ifdef ladder
- */
-#if defined(TARGET_HPPA)
- rule = float_infzeronan_dnan_never;
-#endif
- }
-
if (infzero) {
/*
* Inf * 0 + NaN -- some implementations return the default NaN here,
* and some return the input NaN.
*/
- switch (rule) {
+ switch (status->float_infzeronan_rule) {
case float_infzeronan_dnan_never:
return 2;
case float_infzeronan_dnan_always:
--
2.34.1
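As a rough illustration (standalone sketch, not QEMU code) of what the
FIELD()/FIELD_EX64() machinery in the patch above does: each FIELD defines a
shift and length for a bitfield, and FIELD_EX64 extracts that field from the
64-bit ID register. The constants below mirror two of the ID_AA64ZFR0 fields.

#include <stdint.h>

#define ZFR0_SVEVER_SHIFT 0
#define ZFR0_SVEVER_LENGTH 4
#define ZFR0_I8MM_SHIFT 44
#define ZFR0_I8MM_LENGTH 4

static inline uint64_t extract_field(uint64_t reg, int shift, int len)
{
    return (reg >> shift) & ((1ull << len) - 1);
}

/* e.g. SVE2 is present exactly when the SVEver field is nonzero: */
static inline int has_sve2(uint64_t id_aa64zfr0)
{
    return extract_field(id_aa64zfr0, ZFR0_SVEVER_SHIFT,
                         ZFR0_SVEVER_LENGTH) != 0;
}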
In icc_eoir_write() we assume that we can identify the group of the
IRQ being completed based purely on which register is being written
to and the current CPU state, and that "CPU state matches group
indicated by register" is the only necessary access check.

This isn't correct: if the CPU is not in Secure state then EOIR1 will
only complete Group 1 NS IRQs, but if the CPU is in EL3 it can
complete both Group 1 S and Group 1 NS IRQs. (The pseudocode for
ICC_EOIR1_EL1 makes this clear.) We were also missing the logic to
prevent EOIR0 writes completing G0 IRQs when they should not.

Rearrange the logic to first identify the group of the current
highest priority interrupt and then look at whether we should
complete it or ignore the access based on which register was accessed
and the state of the CPU. The resulting behavioural change is:
* EL3 can now complete G1NS interrupts
* G0 interrupt completion is now ignored if the GIC
and the CPU have the security extension enabled and
the CPU is not secure

Reported-by: Chan Kim <ckim@etri.re.kr>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210510150016.24910-1-peter.maydell@linaro.org
---
hw/intc/arm_gicv3_cpuif.c | 48 ++++++++++++++++++++++++++-------------
1 file changed, 32 insertions(+), 16 deletions(-)

diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int irq = value & 0xffffff;
int grp;
+ bool is_eoir0 = ri->crm == 8;

- if (icv_access(env, ri->crm == 8 ? HCR_FMO : HCR_IMO)) {
+ if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
icv_eoir_write(env, ri, value);
return;
}

- trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
+ trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
gicv3_redist_affid(cs), value);

- if (ri->crm == 8) {
- /* EOIR0 */
- grp = GICV3_G0;
- } else {
- /* EOIR1 */
- if (arm_is_secure(env)) {
- grp = GICV3_G1;
- } else {
- grp = GICV3_G1NS;
- }
- }
-
if (irq >= cs->gic->num_irq) {
/* This handles two cases:
* 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
@@ -XXX,XX +XXX,XX @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}

- if (icc_highest_active_group(cs) != grp) {
- return;
+ grp = icc_highest_active_group(cs);
+ switch (grp) {
+ case GICV3_G0:
+ if (!is_eoir0) {
+ return;
+ }
+ if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
+ && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
+ return;
+ }
+ break;
+ case GICV3_G1:
+ if (is_eoir0) {
+ return;
+ }
+ if (!arm_is_secure(env)) {
+ return;
+ }
+ break;
+ case GICV3_G1NS:
+ if (is_eoir0) {
+ return;
+ }
+ if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
}

icc_drop_prio(cs, grp);
--
2.20.1

The new implementation of pickNaNMulAdd() will find it convenient
to know whether at least one of the three arguments to the muladd
was a signaling NaN. We already calculate that in the caller,
so pass it in as a new bool have_snan.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-15-peter.maydell@linaro.org
---
fpu/softfloat-parts.c.inc | 5 +++--
fpu/softfloat-specialize.c.inc | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
{
int which;
bool infzero = (ab_mask == float_cmask_infzero);
+ bool have_snan = (abc_mask & float_cmask_snan);

- if (unlikely(abc_mask & float_cmask_snan)) {
+ if (unlikely(have_snan)) {
float_raise(float_flag_invalid | float_flag_invalid_snan, s);
}

@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
if (s->default_nan_mode) {
which = 3;
} else {
- which = pickNaNMulAdd(a->cls, b->cls, c->cls, infzero, s);
+ which = pickNaNMulAdd(a->cls, b->cls, c->cls, infzero, have_snan, s);
}

if (which == 3) {
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaN(FloatClass a_cls, FloatClass b_cls,
| Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN
*----------------------------------------------------------------------------*/
static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
- bool infzero, float_status *status)
+ bool infzero, bool have_snan, float_status *status)
{
/*
* We guarantee not to require the target to tell us how to
--
2.34.1
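The completion rules that the GICv3 patch above implements can be condensed
into a small decision function. The following is a standalone restatement
(sketch only, with hypothetical parameter names; it is not the QEMU code and
ignores virtualisation): given which EOIR register was written and the group
of the current highest-priority active interrupt, decide whether the write
completes it or is ignored.

#include <stdbool.h>

typedef enum { G0, G1S, G1NS } IrqGroup;

static bool eoir_completes(bool is_eoir0, IrqGroup grp, bool cpu_secure,
                           bool at_el3_or_mon, bool have_el3, bool gic_ds)
{
    switch (grp) {
    case G0:
        /* Only EOIR0 completes G0, and not from a non-secure CPU
         * when both GIC and CPU implement the security extension. */
        return is_eoir0 && !(!gic_ds && have_el3 && !cpu_secure);
    case G1S:
        /* Only EOIR1 from a Secure context completes G1S. */
        return !is_eoir0 && cpu_secure;
    case G1NS:
        /* EOIR1 completes G1NS unless the CPU is Secure below EL3;
         * EL3 itself can complete G1NS. */
        return !is_eoir0 && (at_el3_or_mon || !cpu_secure);
    }
    return false;
}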
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 10 ++++++++++
target/arm/helper-sve.h | 1 +
target/arm/sve.decode | 2 ++
target/arm/translate-sve.c | 22 ++++++++++++++++++++++
target/arm/vec_helper.c | 24 ++++++++++++++++++++++++
5 files changed, 59 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
}

+static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2;
+}
+
/*
* Feature tests for "does this exist in either 32-bit or 64-bit?"
*/
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_umull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_pmull_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ USUBWT 01000101 .. 0 ..... 010 111 ..... ..... @rd_rn_rm

SQDMULLB_zzz 01000101 .. 0 ..... 011 000 ..... ..... @rd_rn_rm
SQDMULLT_zzz 01000101 .. 0 ..... 011 001 ..... ..... @rd_rn_rm
+PMULLB 01000101 .. 0 ..... 011 010 ..... ..... @rd_rn_rm
+PMULLT 01000101 .. 0 ..... 011 011 ..... ..... @rd_rn_rm
SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm
SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm
UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true)
DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false)
DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true)

+static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
+ NULL, gen_helper_sve2_pmull_d,
+ };
+ if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_PMULLB(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_trans_pmull(s, a, false);
+}
+
+static bool trans_PMULLT(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_trans_pmull(s, a, true);
+}
+
#define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \
static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
{ \
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
d[i] = pmull_h(nn, mm);
}
}
+
+static uint64_t pmull_d(uint64_t op1, uint64_t op2)
+{
+ uint64_t result = 0;
+ int i;
+
+ for (i = 0; i < 32; ++i) {
+ uint64_t mask = -((op1 >> i) & 1);
+ result ^= (op2 << i) & mask;
+ }
+ return result;
+}
+
+void HELPER(sve2_pmull_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t sel = H4(simd_data(desc));
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint32_t *n = vn, *m = vm;
+ uint64_t *d = vd;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = pmull_d(n[2 * i + sel], m[2 * i + sel]);
+ }
+}
#endif

#define DO_CMP0(NAME, TYPE, OP) \
--
2.20.1

IEEE 754 does not define a fixed rule for which NaN to pick as the
result if both operands of a 3-operand fused multiply-add operation
are NaNs. As a result different architectures have ended up with
different rules for propagating NaNs.

QEMU currently hardcodes the NaN propagation logic into the binary
because pickNaNMulAdd() has an ifdef ladder for different targets.
We want to make the propagation rule instead be selectable at
runtime, because:
* this will let us have multiple targets in one QEMU binary
* the Arm FEAT_AFP architectural feature includes letting
the guest select a NaN propagation rule at runtime

In this commit we add an enum for the propagation rule, the field in
float_status, and the corresponding getters and setters. We change
pickNaNMulAdd to honour this, but because all targets still leave
this field at its default 0 value, the fallback logic will pick the
rule type with the old ifdef ladder.

It's valid not to set a propagation rule if default_nan_mode is
enabled, because in that case there's no need to pick a NaN; all the
callers of pickNaNMulAdd() catch this case and skip calling it.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-16-peter.maydell@linaro.org
---
include/fpu/softfloat-helpers.h | 11 +++
include/fpu/softfloat-types.h | 55 +++++++++++
fpu/softfloat-specialize.c.inc | 167 ++++++++------------------------
3 files changed, 107 insertions(+), 126 deletions(-)

diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-helpers.h
+++ b/include/fpu/softfloat-helpers.h
@@ -XXX,XX +XXX,XX @@ static inline void set_float_2nan_prop_rule(Float2NaNPropRule rule,
status->float_2nan_prop_rule = rule;
}

+static inline void set_float_3nan_prop_rule(Float3NaNPropRule rule,
+ float_status *status)
+{
+ status->float_3nan_prop_rule = rule;
+}
+
static inline void set_float_infzeronan_rule(FloatInfZeroNaNRule rule,
float_status *status)
{
@@ -XXX,XX +XXX,XX @@ static inline Float2NaNPropRule get_float_2nan_prop_rule(float_status *status)
return status->float_2nan_prop_rule;
}

+static inline Float3NaNPropRule get_float_3nan_prop_rule(float_status *status)
+{
+ return status->float_3nan_prop_rule;
+}
+
static inline FloatInfZeroNaNRule get_float_infzeronan_rule(float_status *status)
{
return status->float_infzeronan_rule;
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-types.h
+++ b/include/fpu/softfloat-types.h
@@ -XXX,XX +XXX,XX @@ this code that are retained.
#ifndef SOFTFLOAT_TYPES_H
#define SOFTFLOAT_TYPES_H

+#include "hw/registerfields.h"
+
/*
* Software IEC/IEEE floating-point types.
*/
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
float_2nan_prop_x87,
} Float2NaNPropRule;

+/*
+ * 3-input NaN propagation rule, for fused multiply-add. Individual
+ * architectures have different rules for which input NaN is
+ * propagated to the output when there is more than one NaN on the
+ * input.
+ *
+ * If default_nan_mode is enabled then it is valid not to set a NaN
+ * propagation rule, because the softfloat code guarantees not to try
+ * to pick a NaN to propagate in default NaN mode. When not in
+ * default-NaN mode, it is an error for the target not to set the rule
+ * in float_status if it uses a muladd, and we will assert if we need
+ * to handle an input NaN and no rule was selected.
+ *
+ * The naming scheme for Float3NaNPropRule values is:
+ * float_3nan_prop_s_abc:
+ * = "Prefer SNaN over QNaN, then operand A over B over C"
+ * float_3nan_prop_abc:
+ * = "Prefer A over B over C regardless of SNaN vs QNAN"
+ *
+ * For QEMU, the multiply-add operation is A * B + C.
+ */
+
+/*
+ * We set the Float3NaNPropRule enum values up so we can select the
+ * right value in pickNaNMulAdd in a data driven way.
+ */
+FIELD(3NAN, 1ST, 0, 2) /* which operand is most preferred ? */
+FIELD(3NAN, 2ND, 2, 2) /* which operand is next most preferred ? */
+FIELD(3NAN, 3RD, 4, 2) /* which operand is least preferred ? */
+FIELD(3NAN, SNAN, 6, 1) /* do we prefer SNaN over QNaN ? */
+
+#define PROPRULE(X, Y, Z) \
+ ((X << R_3NAN_1ST_SHIFT) | (Y << R_3NAN_2ND_SHIFT) | (Z << R_3NAN_3RD_SHIFT))
+
+typedef enum __attribute__((__packed__)) {
+ float_3nan_prop_none = 0, /* No propagation rule specified */
+ float_3nan_prop_abc = PROPRULE(0, 1, 2),
+ float_3nan_prop_acb = PROPRULE(0, 2, 1),
+ float_3nan_prop_bac = PROPRULE(1, 0, 2),
+ float_3nan_prop_bca = PROPRULE(1, 2, 0),
+ float_3nan_prop_cab = PROPRULE(2, 0, 1),
+ float_3nan_prop_cba = PROPRULE(2, 1, 0),
+ float_3nan_prop_s_abc = float_3nan_prop_abc | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_acb = float_3nan_prop_acb | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_bac = float_3nan_prop_bac | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_bca = float_3nan_prop_bca | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_cab = float_3nan_prop_cab | R_3NAN_SNAN_MASK,
+ float_3nan_prop_s_cba = float_3nan_prop_cba | R_3NAN_SNAN_MASK,
+} Float3NaNPropRule;
+
+#undef PROPRULE
+
/*
* Rule for result of fused multiply-add 0 * Inf + NaN.
* This must be a NaN, but implementations differ on whether this
@@ -XXX,XX +XXX,XX @@ typedef struct float_status {
FloatRoundMode float_rounding_mode;
FloatX80RoundPrec floatx80_rounding_precision;
Float2NaNPropRule float_2nan_prop_rule;
+ Float3NaNPropRule float_3nan_prop_rule;
FloatInfZeroNaNRule float_infzeronan_rule;
bool tininess_before_rounding;
/* should denormalised results go to zero and set the inexact flag? */
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaN(FloatClass a_cls, FloatClass b_cls,
static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
bool infzero, bool have_snan, float_status *status)
{
+ FloatClass cls[3] = { a_cls, b_cls, c_cls };
+ Float3NaNPropRule rule = status->float_3nan_prop_rule;
+ int which;
+
/*
* We guarantee not to require the target to tell us how to
* pick a NaN if we're always returning the default NaN.
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
}
}

+ if (rule == float_3nan_prop_none) {
#if defined(TARGET_ARM)
-
- /* This looks different from the ARM ARM pseudocode, because the ARM ARM
- * puts the operands to a fused mac operation (a*b)+c in the order c,a,b.
- */
- if (is_snan(c_cls)) {
- return 2;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_qnan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
+ /*
+ * This looks different from the ARM ARM pseudocode, because the ARM ARM
+ * puts the operands to a fused mac operation (a*b)+c in the order c,a,b
+ */
+ rule = float_3nan_prop_s_cab;
#elif defined(TARGET_MIPS)
- if (snan_bit_is_one(status)) {
- /* Prefer sNaN over qNaN, in the a, b, c order. */
- if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_snan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else if (is_qnan(b_cls)) {
- return 1;
+ if (snan_bit_is_one(status)) {
+ rule = float_3nan_prop_s_abc;
} else {
- return 2;
+ rule = float_3nan_prop_s_cab;
}
- } else {
- /* Prefer sNaN over qNaN, in the c, a, b order. */
- if (is_snan(c_cls)) {
- return 2;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_qnan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
- }
#elif defined(TARGET_LOONGARCH64)
- /* Prefer sNaN over qNaN, in the c, a, b order. */
- if (is_snan(c_cls)) {
- return 2;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_qnan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else {
- return 1;
- }
+ rule = float_3nan_prop_s_cab;
#elif defined(TARGET_PPC)
- /* If fRA is a NaN return it; otherwise if fRB is a NaN return it;
- * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB
- */
- if (is_nan(a_cls)) {
- return 0;
- } else if (is_nan(c_cls)) {
- return 2;
- } else {
- return 1;
- }
+ /*
+ * If fRA is a NaN return it; otherwise if fRB is a NaN return it;
+ * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB
+ */
+ rule = float_3nan_prop_acb;
#elif defined(TARGET_S390X)
- if (is_snan(a_cls)) {
- return 0;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_snan(c_cls)) {
- return 2;
- } else if (is_qnan(a_cls)) {
- return 0;
- } else if (is_qnan(b_cls)) {
- return 1;
- } else {
- return 2;
- }
+ rule = float_3nan_prop_s_abc;
#elif defined(TARGET_SPARC)
- /* Prefer SNaN over QNaN, order C, B, A. */
- if (is_snan(c_cls)) {
- return 2;
- } else if (is_snan(b_cls)) {
- return 1;
- } else if (is_snan(a_cls)) {
- return 0;
- } else if (is_qnan(c_cls)) {
- return 2;
- } else if (is_qnan(b_cls)) {
- return 1;
- } else {
- return 0;
- }
+ rule = float_3nan_prop_s_cba;
#elif defined(TARGET_XTENSA)
- /*
- * For Xtensa, the (inf,zero,nan) case sets InvalidOp and returns
- * an input NaN if we have one (ie c).
- */
- if (status->use_first_nan) {
- if (is_nan(a_cls)) {
- return 0;
- } else if (is_nan(b_cls)) {
- return 1;
+ if (status->use_first_nan) {
+ rule = float_3nan_prop_abc;
} else {
- return 2;
+ rule = float_3nan_prop_cba;
}
- } else {
- if (is_nan(c_cls)) {
- return 2;
- } else if (is_nan(b_cls)) {
- return 1;
- } else {
- return 0;
- }
- }
#else
- /* A default implementation: prefer a to b to c.
- * This is unlikely to actually match any real implementation.
- */
- if (is_nan(a_cls)) {
- return 0;
- } else if (is_nan(b_cls)) {
- return 1;
- } else {
- return 2;
- }
+ rule = float_3nan_prop_abc;
#endif
+ }
+
+ assert(rule != float_3nan_prop_none);
+ if (have_snan && (rule & R_3NAN_SNAN_MASK)) {
+ /* We have at least one SNaN input and should prefer it */
+ do {
+ which = rule & R_3NAN_1ST_MASK;
+ rule >>= R_3NAN_1ST_LENGTH;
+ } while (!is_snan(cls[which]));
+ } else {
+ do {
+ which = rule & R_3NAN_1ST_MASK;
+ rule >>= R_3NAN_1ST_LENGTH;
+ } while (!is_nan(cls[which]));
+ }
+ return which;
}

/*----------------------------------------------------------------------------
--
2.34.1
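The data-driven selection in the patch above is easiest to see in isolation.
Here is a standalone sketch (not the QEMU code; the SNaN-preference flag is
dropped for brevity) of how the packed rule is walked: the rule holds three
2-bit operand preferences, and we shift through them until we hit an operand
that is actually a NaN.

#include <assert.h>
#include <stdbool.h>

enum { OP_A, OP_B, OP_C };
#define RULE(x, y, z) ((x) | ((y) << 2) | ((z) << 4))

static int pick(unsigned rule, const bool is_nan[3])
{
    int which;
    do {
        which = rule & 3;   /* next most-preferred operand */
        rule >>= 2;
    } while (!is_nan[which]);
    return which;
}

/* Arm's c, a, b preference: with NaNs in b and c, c wins. */
static void example(void)
{
    const bool is_nan[3] = { false, true, true };
    assert(pick(RULE(OP_C, OP_A, OP_B), is_nan) == OP_C);
}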
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-60-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.h | 14 ++++++
target/arm/sve.decode | 8 ++++
target/arm/translate-sve.c | 8 ++++
target/arm/vec_helper.c | 88 ++++++++++++++++++++++++++++++++++++++
4 files changed, 118 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

#ifdef TARGET_AARCH64
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SQDMULLB_zzx_d 01000100 11 1 ..... 1110.0 ..... ..... @rrx_2a esz=3
SQDMULLT_zzx_s 01000100 10 1 ..... 1110.1 ..... ..... @rrx_3a esz=2
SQDMULLT_zzx_d 01000100 11 1 ..... 1110.1 ..... ..... @rrx_2a esz=3

+# SVE2 saturating multiply high (indexed)
+SQDMULH_zzx_h 01000100 0. 1 ..... 111100 ..... ..... @rrx_3 esz=1
+SQDMULH_zzx_s 01000100 10 1 ..... 111100 ..... ..... @rrx_2 esz=2
+SQDMULH_zzx_d 01000100 11 1 ..... 111100 ..... ..... @rrx_1 esz=3
+SQRDMULH_zzx_h 01000100 0. 1 ..... 111101 ..... ..... @rrx_3 esz=1
+SQRDMULH_zzx_s 01000100 10 1 ..... 111101 ..... ..... @rrx_2 esz=2
+SQRDMULH_zzx_d 01000100 11 1 ..... 111101 ..... ..... @rrx_1 esz=3
+
# SVE2 integer multiply (indexed)
MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1
MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h)
DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s)
DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d)

+DO_SVE2_RRX(trans_SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
+DO_SVE2_RRX(trans_SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
+DO_SVE2_RRX(trans_SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)
+
+DO_SVE2_RRX(trans_SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
+DO_SVE2_RRX(trans_SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
+DO_SVE2_RRX(trans_SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)
+
#undef DO_SVE2_RRX

#define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
}
}

+void HELPER(sve2_sqdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+ int16_t mm = m[i];
+ for (j = 0; j < 16 / 2; ++j) {
+ d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, false, &discard);
+ }
+ }
+}
+
+void HELPER(sve2_sqrdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+ int16_t mm = m[i];
+ for (j = 0; j < 16 / 2; ++j) {
+ d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, true, &discard);
+ }
+ }
+}
+
/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
bool neg, bool round, uint32_t *sat)
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
}
}

+void HELPER(sve2_sqdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+ int32_t mm = m[i];
+ for (j = 0; j < 16 / 4; ++j) {
+ d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, false, &discard);
+ }
+ }
+}
+
+void HELPER(sve2_sqrdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+ int32_t mm = m[i];
+ for (j = 0; j < 16 / 4; ++j) {
+ d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, true, &discard);
+ }
+ }
+}
+
/* Signed saturating rounding doubling multiply-accumulate high half, 64-bit */
static int64_t do_sat128_d(Int128 r)
{
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
}
}

+void HELPER(sve2_sqdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx;
+
+ for (i = 0; i < opr_sz / 8; i += 16 / 8) {
+ int64_t mm = m[i];
+ for (j = 0; j < 16 / 8; ++j) {
+ d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, false);
+ }
+ }
+}
+
+void HELPER(sve2_sqrdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx;
+
+ for (i = 0; i < opr_sz / 8; i += 16 / 8) {
+ int64_t mm = m[i];
+ for (j = 0; j < 16 / 8; ++j) {
+ d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, true);
+ }
+ }
+}
+
/* Integer 8 and 16-bit dot-product.
*
* Note that for the loops herein, host endianness does not matter
--
2.20.1

Explicitly set a rule in the softfloat tests for propagating NaNs in
the muladd case. In meson.build we put -DTARGET_ARM in fpcflags, and
so we should select here the Arm rule of float_3nan_prop_s_cab.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-17-peter.maydell@linaro.org
---
tests/fp/fp-bench.c | 1 +
tests/fp/fp-test.c | 1 +
2 files changed, 2 insertions(+)

diff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/fp/fp-bench.c
+++ b/tests/fp/fp-bench.c
@@ -XXX,XX +XXX,XX @@ static void run_bench(void)
* doesn't specify match those used by the Arm architecture.
*/
set_float_2nan_prop_rule(float_2nan_prop_s_ab, &soft_status);
+ set_float_3nan_prop_rule(float_3nan_prop_s_cab, &soft_status);
set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &soft_status);

f = bench_funcs[operation][precision];
diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/fp/fp-test.c
+++ b/tests/fp/fp-test.c
@@ -XXX,XX +XXX,XX @@ void run_test(void)
* doesn't specify match those used by the Arm architecture.
*/
set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf);
+ set_float_3nan_prop_rule(float_3nan_prop_s_cab, &qsf);
set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &qsf);

genCases_setLevel(test_level);
--
2.34.1
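For readers unfamiliar with the SQRDMULH family used above, this is a
reference sketch (not the QEMU helper, but arithmetically equivalent to
do_sqrdmlah_h() with a zero accumulator) of the saturating rounding doubling
multiply-high step for one 16-bit element:

#include <stdint.h>
#include <stdbool.h>

static int16_t sqrdmulh16(int16_t a, int16_t b, bool round)
{
    int64_t p = 2 * (int64_t)a * b;      /* doubling multiply */
    if (round) {
        p += 1 << 15;                    /* round to nearest */
    }
    p >>= 16;                            /* take the high half */
    if (p != (int16_t)p) {               /* saturate on overflow */
        p = p < 0 ? INT16_MIN : INT16_MAX;
    }
    return (int16_t)p;
}

For example, sqrdmulh16(INT16_MIN, INT16_MIN, true) would mathematically be
+1.0, which does not fit in Q15, so it saturates to INT16_MAX (0x7fff).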
From: Richard Henderson <richard.henderson@linaro.org>

We're about to add more variations on this theme.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-65-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/vec_helper.c | 82 ++++++++++-------------------------------
1 file changed, 20 insertions(+), 62 deletions(-)

diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
/* Integer 8 and 16-bit dot-product.
*
* Note that for the loops herein, host endianness does not matter
- * with respect to the ordering of data within the 64-bit lanes.
+ * with respect to the ordering of data within the quad-width lanes.
* All elements are treated equally, no matter where they are.
*/

-void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc);
- int32_t *d = vd, *a = va;
- int8_t *n = vn, *m = vm;
-
- for (i = 0; i < opr_sz / 4; ++i) {
- d[i] = (a[i] +
- n[i * 4 + 0] * m[i * 4 + 0] +
- n[i * 4 + 1] * m[i * 4 + 1] +
- n[i * 4 + 2] * m[i * 4 + 2] +
- n[i * 4 + 3] * m[i * 4 + 3]);
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
+#define DO_DOT(NAME, TYPED, TYPEN, TYPEM) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m = vm; \
+ for (i = 0; i < opr_sz / sizeof(TYPED); ++i) { \
+ d[i] = (a[i] + \
+ (TYPED)n[i * 4 + 0] * m[i * 4 + 0] + \
+ (TYPED)n[i * 4 + 1] * m[i * 4 + 1] + \
+ (TYPED)n[i * 4 + 2] * m[i * 4 + 2] + \
+ (TYPED)n[i * 4 + 3] * m[i * 4 + 3]); \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
}

-void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc);
- uint32_t *d = vd, *a = va;
- uint8_t *n = vn, *m = vm;
-
- for (i = 0; i < opr_sz / 4; ++i) {
- d[i] = (a[i] +
- n[i * 4 + 0] * m[i * 4 + 0] +
- n[i * 4 + 1] * m[i * 4 + 1] +
- n[i * 4 + 2] * m[i * 4 + 2] +
- n[i * 4 + 3] * m[i * 4 + 3]);
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
-
-void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc);
- int64_t *d = vd, *a = va;
- int16_t *n = vn, *m = vm;
-
- for (i = 0; i < opr_sz / 8; ++i) {
- d[i] = (a[i] +
- (int64_t)n[i * 4 + 0] * m[i * 4 + 0] +
- (int64_t)n[i * 4 + 1] * m[i * 4 + 1] +
- (int64_t)n[i * 4 + 2] * m[i * 4 + 2] +
- (int64_t)n[i * 4 + 3] * m[i * 4 + 3]);
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
-
-void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc);
- uint64_t *d = vd, *a = va;
- uint16_t *n = vn, *m = vm;
-
- for (i = 0; i < opr_sz / 8; ++i) {
- d[i] = (a[i] +
- (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] +
- (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] +
- (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] +
- (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]);
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
+DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t)
+DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t)
+DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t)
+DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t)

void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm,
void *va, uint32_t desc)
--
2.20.1

Set the Float3NaNPropRule explicitly for Arm, and remove the
ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-18-peter.maydell@linaro.org
---
target/arm/cpu.c | 5 +++++
fpu/softfloat-specialize.c.inc | 8 +-------
2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
* * tininess-before-rounding
* * 2-input NaN propagation prefers SNaN over QNaN, and then
* operand A over operand B (see FPProcessNaNs() pseudocode)
+ * * 3-input NaN propagation prefers SNaN over QNaN, and then
+ * operand C over A over B (see FPProcessNaNs3() pseudocode,
+ * but note that for QEMU muladd is a * b + c, whereas for
+ * the pseudocode function the arguments are in the order c, a, b.
* * 0 * Inf + NaN returns the default NaN if the input NaN is quiet,
* and the input NaN if it is signalling
*/
@@ -XXX,XX +XXX,XX @@ static void arm_set_default_fp_behaviours(float_status *s)
{
set_float_detect_tininess(float_tininess_before_rounding, s);
set_float_2nan_prop_rule(float_2nan_prop_s_ab, s);
+ set_float_3nan_prop_rule(float_3nan_prop_s_cab, s);
set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s);
}

diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
}

if (rule == float_3nan_prop_none) {
-#if defined(TARGET_ARM)
- /*
- * This looks different from the ARM ARM pseudocode, because the ARM ARM
- * puts the operands to a fused mac operation (a*b)+c in the order c,a,b
- */
- rule = float_3nan_prop_s_cab;
-#elif defined(TARGET_MIPS)
+#if defined(TARGET_MIPS)
if (snan_bit_is_one(status)) {
rule = float_3nan_prop_s_abc;
} else {
--
2.34.1
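One detail of the DO_DOT macro above that is easy to miss is the (TYPED) cast
on the first factor: for the 16-bit inputs each product must be widened to 64
bits before it is accumulated, or it would overflow. A minimal standalone
illustration of one four-element group:

#include <stdint.h>

static int64_t dot4_s16(const int16_t *n, const int16_t *m, int64_t acc)
{
    for (int k = 0; k < 4; k++) {
        acc += (int64_t)n[k] * m[k];   /* widen before multiplying */
    }
    return acc;
}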
Set the Float3NaNPropRule explicitly for loongarch, and remove the
ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-19-peter.maydell@linaro.org
---
 target/loongarch/tcg/fpu_helper.c | 1 +
 fpu/softfloat-specialize.c.inc    | 2 --
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/target/loongarch/tcg/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/fpu_helper.c
+++ b/target/loongarch/tcg/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void restore_fp_status(CPULoongArchState *env)
      * case sets InvalidOp and returns the input value 'c'
      */
     set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
+    set_float_3nan_prop_rule(float_3nan_prop_s_cab, &env->fp_status);
 }
 
 int ieee_ex_to_loongarch(int xcpt)
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
         } else {
             rule = float_3nan_prop_s_cab;
         }
-#elif defined(TARGET_LOONGARCH64)
-        rule = float_3nan_prop_s_cab;
 #elif defined(TARGET_PPC)
         /*
          * If fRA is a NaN return it; otherwise if fRB is a NaN return it;
--
2.34.1
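A note on what these rule values mean: a Float3NaNPropRule such as
float_3nan_prop_s_cab is just data naming an operand-preference order
for muladd's three inputs (the "s_" prefix means signaling NaNs are
preferred before that order is applied). The sketch below is
illustrative only; the enum and function names are hypothetical
stand-ins, not QEMU's actual API, but it shows the shape of the
table-driven pick that replaces the old ifdef ladder.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the rule enum: each value names an
 * operand preference order for the (a * b) + c inputs. */
typedef enum {
    PROP_ABC, /* prefer a, then b, then c */
    PROP_CAB, /* prefer c, then a, then b */
} PropOrder;

/* Return the index (0=a, 1=b, 2=c) of the first NaN operand in the
 * configured preference order, or -1 if no operand is a NaN. */
static int pick_nan_3(const bool is_nan[3], PropOrder order)
{
    static const int orders[][3] = {
        [PROP_ABC] = {0, 1, 2},
        [PROP_CAB] = {2, 0, 1},
    };
    for (int i = 0; i < 3; i++) {
        if (is_nan[orders[order][i]]) {
            return orders[order][i];
        }
    }
    return -1;
}

int main(void)
{
    bool nans[3] = {true, false, true}; /* a and c are NaNs */
    /* the cab order picks c even though a is also a NaN */
    printf("cab order picks operand %d\n", pick_nan_3(nans, PROP_CAB));
    return 0;
}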
diff view generated by jsdifflib
Set the Float3NaNPropRule explicitly for PPC, and remove the
ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-20-peter.maydell@linaro.org
---
 target/ppc/cpu_init.c          | 8 ++++++++
 fpu/softfloat-specialize.c.inc | 6 ------
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type)
      */
     set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status);
     set_float_2nan_prop_rule(float_2nan_prop_ab, &env->vec_status);
+    /*
+     * NaN propagation for fused multiply-add:
+     * if fRA is a NaN return it; otherwise if fRB is a NaN return it;
+     * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB
+     * whereas QEMU labels the operands as (a * b) + c.
+     */
+    set_float_3nan_prop_rule(float_3nan_prop_acb, &env->fp_status);
+    set_float_3nan_prop_rule(float_3nan_prop_acb, &env->vec_status);
     /*
      * For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer
      * to return an input NaN if we have one (ie c) rather than generating
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
         } else {
             rule = float_3nan_prop_s_cab;
         }
-#elif defined(TARGET_PPC)
-        /*
-         * If fRA is a NaN return it; otherwise if fRB is a NaN return it;
-         * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB
-         */
-        rule = float_3nan_prop_acb;
 #elif defined(TARGET_S390X)
         rule = float_3nan_prop_s_abc;
 #elif defined(TARGET_SPARC)
--
2.34.1
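The operand relabelling above is easy to get backwards, so here it is
worked through once in illustrative code (not QEMU's): PPC computes
(fRA * fRC) + fRB while QEMU's muladd is labelled (a * b) + c, so the
architectural preference fRA, fRB, fRC scans QEMU's operands as a,
then c, then b, which is exactly float_3nan_prop_acb.

#include <assert.h>
#include <stdbool.h>

/* Which QEMU operand (0=a, 1=b, 2=c) does PPC's rule select?
 * fRA maps to a, fRC to b, and fRB to c, so the architectural
 * preference fRA, fRB, fRC scans a, then c, then b. */
static int ppc_pick(const bool is_nan[3])
{
    static const int pref[3] = {0, 2, 1}; /* a, c, b */
    for (int i = 0; i < 3; i++) {
        if (is_nan[pref[i]]) {
            return pref[i];
        }
    }
    return -1; /* no NaN input */
}

int main(void)
{
    /* only the addend (fRB, QEMU's c) is a NaN: it is picked */
    assert(ppc_pick((const bool[3]){false, false, true}) == 2);
    /* fRA (QEMU's a) outranks fRB (QEMU's c) when both are NaNs */
    assert(ppc_pick((const bool[3]){true, false, true}) == 0);
    return 0;
}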
diff view generated by jsdifflib
Set the Float3NaNPropRule explicitly for s390x, and remove the
ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-21-peter.maydell@linaro.org
---
 target/s390x/cpu.c             | 1 +
 fpu/softfloat-specialize.c.inc | 2 --
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_hold(Object *obj, ResetType type)
     set_float_detect_tininess(float_tininess_before_rounding,
                               &env->fpu_status);
     set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fpu_status);
+    set_float_3nan_prop_rule(float_3nan_prop_s_abc, &env->fpu_status);
     set_float_infzeronan_rule(float_infzeronan_dnan_always,
                               &env->fpu_status);
     /* fall through */
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
         } else {
             rule = float_3nan_prop_s_cab;
         }
-#elif defined(TARGET_S390X)
-        rule = float_3nan_prop_s_abc;
 #elif defined(TARGET_SPARC)
         rule = float_3nan_prop_s_cba;
 #elif defined(TARGET_XTENSA)
--
2.34.1
diff view generated by jsdifflib
Set the Float3NaNPropRule explicitly for SPARC, and remove the
ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-22-peter.maydell@linaro.org
---
 target/sparc/cpu.c             | 2 ++
 fpu/softfloat-specialize.c.inc | 2 --
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
      * the CPU state struct so it won't get zeroed on reset.
      */
     set_float_2nan_prop_rule(float_2nan_prop_s_ba, &env->fp_status);
+    /* For fused-multiply add, prefer SNaN over QNaN, then C->B->A */
+    set_float_3nan_prop_rule(float_3nan_prop_s_cba, &env->fp_status);
     /* For inf * 0 + NaN, return the input NaN */
     set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
 
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
         } else {
             rule = float_3nan_prop_s_cab;
         }
-#elif defined(TARGET_SPARC)
-        rule = float_3nan_prop_s_cba;
 #elif defined(TARGET_XTENSA)
         if (status->use_first_nan) {
             rule = float_3nan_prop_abc;
--
2.34.1
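The "prefer SNaN over QNaN, then C->B->A" wording corresponds to a
two-pass scan, sketched below with hypothetical names; in QEMU the
signaling-NaN preference is the R_3NAN_SNAN_MASK handling visible in
pickNaNMulAdd() later in this series.

#include <assert.h>
#include <stdbool.h>

/* Two-pass pick for an "s_" rule: any signaling NaN wins over any
 * quiet NaN, and ties are broken in c, b, a order (s_cba). */
static int pick_s_cba(const bool is_snan[3], const bool is_qnan[3])
{
    static const int order[3] = {2, 1, 0}; /* c, b, a */
    for (int pass = 0; pass < 2; pass++) {
        const bool *set = (pass == 0) ? is_snan : is_qnan;
        for (int i = 0; i < 3; i++) {
            if (set[order[i]]) {
                return order[i];
            }
        }
    }
    return -1; /* no NaN inputs */
}

int main(void)
{
    /* a signaling NaN in a beats a quiet NaN in c, despite cba order */
    assert(pick_s_cba((const bool[3]){true, false, false},
                      (const bool[3]){false, false, true}) == 0);
    return 0;
}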
diff view generated by jsdifflib
Set the Float3NaNPropRule explicitly for MIPS, and remove the
ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-23-peter.maydell@linaro.org
---
 target/mips/fpu_helper.h       | 4 ++++
 target/mips/msa.c              | 3 +++
 fpu/softfloat-specialize.c.inc | 8 +-------
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/fpu_helper.h
+++ b/target/mips/fpu_helper.h
@@ -XXX,XX +XXX,XX @@ static inline void restore_snan_bit_mode(CPUMIPSState *env)
 {
     bool nan2008 = env->active_fpu.fcr31 & (1 << FCR31_NAN2008);
     FloatInfZeroNaNRule izn_rule;
+    Float3NaNPropRule nan3_rule;
 
     /*
      * With nan2008, SNaNs are silenced in the usual way.
@@ -XXX,XX +XXX,XX @@ static inline void restore_snan_bit_mode(CPUMIPSState *env)
      */
     izn_rule = nan2008 ? float_infzeronan_dnan_never : float_infzeronan_dnan_always;
     set_float_infzeronan_rule(izn_rule, &env->active_fpu.fp_status);
+    nan3_rule = nan2008 ? float_3nan_prop_s_cab : float_3nan_prop_s_abc;
+    set_float_3nan_prop_rule(nan3_rule, &env->active_fpu.fp_status);
+
 }
 
 static inline void restore_fp_status(CPUMIPSState *env)
diff --git a/target/mips/msa.c b/target/mips/msa.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/msa.c
+++ b/target/mips/msa.c
@@ -XXX,XX +XXX,XX @@ void msa_reset(CPUMIPSState *env)
     set_float_2nan_prop_rule(float_2nan_prop_s_ab,
                              &env->active_tc.msa_fp_status);
 
+    set_float_3nan_prop_rule(float_3nan_prop_s_cab,
+                             &env->active_tc.msa_fp_status);
+
     /* clear float_status exception flags */
     set_float_exception_flags(0, &env->active_tc.msa_fp_status);
 
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
     }
 
     if (rule == float_3nan_prop_none) {
-#if defined(TARGET_MIPS)
-        if (snan_bit_is_one(status)) {
-            rule = float_3nan_prop_s_abc;
-        } else {
-            rule = float_3nan_prop_s_cab;
-        }
-#elif defined(TARGET_XTENSA)
+#if defined(TARGET_XTENSA)
         if (status->use_first_nan) {
             rule = float_3nan_prop_abc;
         } else {
--
2.34.1
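Unlike the targets above, MIPS cannot pick one rule at reset: the
guest can change the NAN2008 mode in FCR31 at runtime, so the rule
has to be re-derived whenever the mode changes, which is why the
patch hooks restore_snan_bit_mode(). A stripped-down sketch of that
pattern follows; the struct and helper names are stand-ins, not
QEMU's, though FCR31_NAN2008 is the real bit name from the patch.

#include <stdbool.h>

typedef enum { PROP3_S_ABC, PROP3_S_CAB } Rule3;

struct fpu_state {
    unsigned fcr31;
    Rule3 nan3_rule;
};

#define FCR31_NAN2008 18

/* Called whenever the guest rewrites FCR31: the muladd NaN rule is
 * a function of the current NAN2008 mode, not a one-time constant. */
static void restore_nan_mode(struct fpu_state *f)
{
    bool nan2008 = f->fcr31 & (1u << FCR31_NAN2008);
    f->nan3_rule = nan2008 ? PROP3_S_CAB : PROP3_S_ABC;
}

int main(void)
{
    struct fpu_state f = { .fcr31 = 1u << FCR31_NAN2008 };
    restore_nan_mode(&f); /* f.nan3_rule is now PROP3_S_CAB */
    return 0;
}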
diff view generated by jsdifflib
Set the Float3NaNPropRule explicitly for xtensa, and remove the
ifdef from pickNaNMulAdd().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-24-peter.maydell@linaro.org
---
 target/xtensa/fpu_helper.c     | 2 ++
 fpu/softfloat-specialize.c.inc | 8 --------
 2 files changed, 2 insertions(+), 8 deletions(-)

diff --git a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/fpu_helper.c
+++ b/target/xtensa/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void xtensa_use_first_nan(CPUXtensaState *env, bool use_first)
     set_use_first_nan(use_first, &env->fp_status);
     set_float_2nan_prop_rule(use_first ? float_2nan_prop_ab : float_2nan_prop_ba,
                              &env->fp_status);
+    set_float_3nan_prop_rule(use_first ? float_3nan_prop_abc : float_3nan_prop_cba,
+                             &env->fp_status);
 }
 
 void HELPER(wur_fpu2k_fcr)(CPUXtensaState *env, uint32_t v)
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
     }
 
     if (rule == float_3nan_prop_none) {
-#if defined(TARGET_XTENSA)
-        if (status->use_first_nan) {
-            rule = float_3nan_prop_abc;
-        } else {
-            rule = float_3nan_prop_cba;
-        }
-#else
         rule = float_3nan_prop_abc;
-#endif
     }
 
     assert(rule != float_3nan_prop_none);
--
2.34.1
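This was the last user of the ifdef ladder that needed a runtime
choice: one guest-visible xtensa flag now fans out into both the
2-NaN and the 3-NaN rule. A minimal sketch of the mapping, with
hypothetical enum names standing in for QEMU's rule enums:

#include <stdbool.h>

typedef enum { PROP2_AB, PROP2_BA } Rule2;
typedef enum { PROP3_ABC, PROP3_CBA } Rule3;

struct nan_rules {
    Rule2 two;
    Rule3 three;
};

/* One mode flag drives both propagation rules consistently. */
static struct nan_rules rules_for(bool use_first)
{
    return (struct nan_rules){
        .two   = use_first ? PROP2_AB  : PROP2_BA,
        .three = use_first ? PROP3_ABC : PROP3_CBA,
    };
}

int main(void)
{
    struct nan_rules r = rules_for(true);
    return (r.two == PROP2_AB && r.three == PROP3_ABC) ? 0 : 1;
}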
diff view generated by jsdifflib
Set the Float3NaNPropRule explicitly for i386. We had no
i386-specific behaviour in the old ifdef ladder, so we were using the
default "prefer a then b then c" fallback; this is actually the
correct per-the-spec handling for i386.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-25-peter.maydell@linaro.org
---
 target/i386/tcg/fpu_helper.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/fpu_helper.c
+++ b/target/i386/tcg/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_init_fp_statuses(CPUX86State *env)
      * there are multiple input NaNs they are selected in the order a, b, c.
      */
     set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->sse_status);
+    set_float_3nan_prop_rule(float_3nan_prop_abc, &env->sse_status);
 }
 
 static inline uint8_t save_exception_flags(CPUX86State *env)
--
2.34.1
diff view generated by jsdifflib
Set the Float3NaNPropRule explicitly for HPPA, and remove the
ifdef from pickNaNMulAdd().

HPPA is the only target that was using the default branch of the
ifdef ladder (other targets either do not use muladd or set
default_nan_mode), so we can remove the ifdef fallback entirely now
(allowing the "rule not set" case to fall into the default of the
switch statement and assert).

We add a TODO note that the HPPA rule is probably wrong; this is
not a behavioural change for this refactoring.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-26-peter.maydell@linaro.org
---
 target/hppa/fpu_helper.c       | 8 ++++++++
 fpu/softfloat-specialize.c.inc | 4 ----
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/fpu_helper.c
+++ b/target/hppa/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(loaded_fr0)(CPUHPPAState *env)
      * HPPA does note implement a CPU reset method at all...
      */
     set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status);
+    /*
+     * TODO: The HPPA architecture reference only documents its NaN
+     * propagation rule for 2-operand operations. Testing on real hardware
+     * might be necessary to confirm whether this order for muladd is correct.
+     * Not preferring the SNaN is almost certainly incorrect as it diverges
+     * from the documented rules for 2-operand operations.
+     */
+    set_float_3nan_prop_rule(float_3nan_prop_abc, &env->fp_status);
     /* For inf * 0 + NaN, return the input NaN */
     set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
 }
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
     }
 
-    if (rule == float_3nan_prop_none) {
-        rule = float_3nan_prop_abc;
-    }
-
     assert(rule != float_3nan_prop_none);
     if (have_snan && (rule & R_3NAN_SNAN_MASK)) {
         /* We have at least one SNaN input and should prefer it */
--
2.34.1
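Removing the fallback turns "forgot to set a rule" from a silent
default into a hard failure, which is the point: with every target
now setting its rule explicitly, an unset rule is a bug. The pattern
in miniature, as an illustrative sketch rather than the QEMU source:

#include <assert.h>

typedef enum { RULE_NONE, RULE_ABC, RULE_ACB } Rule;

/* With the fallback gone, an unset rule trips the assertion instead
 * of being silently treated as "prefer a, then b, then c". */
static int first_preferred(Rule rule)
{
    switch (rule) {
    case RULE_ABC:
        return 0; /* operand a */
    case RULE_ACB:
        return 0; /* also a first; the orders differ on later picks */
    default:
        assert(!"target forgot to set its 3-NaN rule");
        return -1;
    }
}

int main(void)
{
    return first_preferred(RULE_ABC);
}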
diff view generated by jsdifflib
The use_first_nan field in float_status was an xtensa-specific way to
select at runtime from two different NaN propagation rules. Now that
xtensa is using the target-agnostic NaN propagation rule selection
that we've just added, we can remove use_first_nan, because there is
no longer any code that reads it.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-27-peter.maydell@linaro.org
---
 include/fpu/softfloat-helpers.h | 5 -----
 include/fpu/softfloat-types.h   | 1 -
 target/xtensa/fpu_helper.c      | 1 -
 3 files changed, 7 deletions(-)

diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-helpers.h
+++ b/include/fpu/softfloat-helpers.h
@@ -XXX,XX +XXX,XX @@ static inline void set_snan_bit_is_one(bool val, float_status *status)
     status->snan_bit_is_one = val;
 }
 
-static inline void set_use_first_nan(bool val, float_status *status)
-{
-    status->use_first_nan = val;
-}
-
 static inline void set_no_signaling_nans(bool val, float_status *status)
 {
     status->no_signaling_nans = val;
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-types.h
+++ b/include/fpu/softfloat-types.h
@@ -XXX,XX +XXX,XX @@ typedef struct float_status {
      * softfloat-specialize.inc.c)
      */
     bool snan_bit_is_one;
-    bool use_first_nan;
     bool no_signaling_nans;
     /* should overflowed results subtract re_bias to its exponent? */
     bool rebias_overflow;
diff --git a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/fpu_helper.c
+++ b/target/xtensa/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ static const struct {
 
 void xtensa_use_first_nan(CPUXtensaState *env, bool use_first)
 {
-    set_use_first_nan(use_first, &env->fp_status);
     set_float_2nan_prop_rule(use_first ? float_2nan_prop_ab : float_2nan_prop_ba,
                              &env->fp_status);
     set_float_3nan_prop_rule(use_first ? float_3nan_prop_abc : float_3nan_prop_cba,
--
2.34.1
diff view generated by jsdifflib
1
From: Stephen Long <steplong@quicinc.com>

Add decoding logic for SVE2 64-bit/32-bit gather non-temporal
load insns.

64-bit
* LDNT1SB
* LDNT1B (vector plus scalar)
* LDNT1SH
* LDNT1H (vector plus scalar)
* LDNT1SW
* LDNT1W (vector plus scalar)
* LDNT1D (vector plus scalar)

32-bit
* LDNT1SB
* LDNT1B (vector plus scalar)
* LDNT1SH
* LDNT1H (vector plus scalar)
* LDNT1W (vector plus scalar)

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Stephen Long <steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-46-richard.henderson@linaro.org
Message-Id: <20200422152343.12493-1-steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sve.decode      | 11 +++++++++++
 target/arm/translate-sve.c |  8 ++++++++
 2 files changed, 19 insertions(+)

diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ UMLSLT_zzzw     01000100 .. 0 ..... 010 111 ..... .....  @rda_rn_rm
 CMLA_zzzz       01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5  ra=%reg_movprfx
 SQRDCMLAH_zzzz  01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5  ra=%reg_movprfx
 
+### SVE2 Memory Gather Load Group
+
+# SVE2 64-bit gather non-temporal load
+#   (scalar plus unpacked 32-bit unscaled offsets)
+LDNT1_zprz      1100010 msz:2 00 rm:5 1 u:1 0 pg:3 rn:5 rd:5 \
+                &rprr_gather_load xs=0 esz=3 scale=0 ff=0
+
+# SVE2 32-bit gather non-temporal load (scalar plus 32-bit unscaled offsets)
+LDNT1_zprz      1000010 msz:2 00 rm:5 10 u:1 pg:3 rn:5 rd:5 \
+                &rprr_gather_load xs=0 esz=2 scale=0 ff=0
+
 ### SVE2 Memory Store Group
 
 # SVE2 64-bit scatter non-temporal store (vector plus scalar)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
     return true;
 }
 
+static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    return trans_LD1_zprz(s, a);
+}
+
 /* Indexed by [mte][be][xs][msz].  */
 static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
     { /* MTE Inactive */
-- 
2.20.1

Currently m68k_cpu_reset_hold() calls floatx80_default_nan(NULL)
to get the NaN bit pattern to reset the FPU registers.  This
works because it happens that our implementation of
floatx80_default_nan() doesn't actually look at the float_status
pointer except for TARGET_MIPS.  However, this isn't guaranteed,
and to be able to remove the ifdef in floatx80_default_nan()
we're going to need a real float_status here.

Rearrange m68k_cpu_reset_hold() so that we initialize env->fp_status
earlier, and thus can pass it to floatx80_default_nan().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-28-peter.maydell@linaro.org
---
 target/m68k/cpu.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type)
     CPUState *cs = CPU(obj);
     M68kCPUClass *mcc = M68K_CPU_GET_CLASS(obj);
     CPUM68KState *env = cpu_env(cs);
-    floatx80 nan = floatx80_default_nan(NULL);
+    floatx80 nan;
     int i;
 
     if (mcc->parent_phases.hold) {
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type)
 #else
     cpu_m68k_set_sr(env, SR_S | SR_I);
 #endif
-    for (i = 0; i < 8; i++) {
-        env->fregs[i].d = nan;
-    }
-    cpu_m68k_set_fpcr(env, 0);
     /*
      * M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL
      * 3.4 FLOATING-POINT INSTRUCTION DETAILS
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type)
      * preceding paragraph for nonsignaling NaNs.
      */
     set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status);
+
+    nan = floatx80_default_nan(&env->fp_status);
+    for (i = 0; i < 8; i++) {
+        env->fregs[i].d = nan;
+    }
+    cpu_m68k_set_fpcr(env, 0);
     env->fpsr = 0;
 
     /* TODO: We should set PC from the interrupt vector. */
-- 
2.34.1
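
A note on why the reordering in m68k_cpu_reset_hold() matters: once
the default NaN pattern is derived from per-CPU configuration, that
configuration has to be in place before the pattern is queried.  A toy
model of the ordering dependency (invented types, not the QEMU API):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_status {
        int default_nan_sign;           /* stands in for a config knob */
    };

    static uint64_t toy_default_nan64(const struct toy_status *st)
    {
        /* quiet NaN: all-ones exponent, MSB of the fraction set */
        return ((uint64_t)st->default_nan_sign << 63) | 0x7ff8000000000000ULL;
    }

    int main(void)
    {
        struct toy_status st = { .default_nan_sign = 1 };
        uint64_t fregs[8];

        /* The configuration above must exist before this query: */
        for (int i = 0; i < 8; i++) {
            fregs[i] = toy_default_nan64(&st);
        }
        printf("0x%016llx\n", (unsigned long long)fregs[0]);
        return 0;
    }

Querying before the knob is set would bake the wrong bit pattern into
every register, which is exactly the hazard the patch removes.
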
From: Stephen Long <steplong@quicinc.com>

Add decoding logic for SVE2 64-bit/32-bit scatter non-temporal
store insns.

64-bit
* STNT1B (vector plus scalar)
* STNT1H (vector plus scalar)
* STNT1W (vector plus scalar)
* STNT1D (vector plus scalar)

32-bit
* STNT1B (vector plus scalar)
* STNT1H (vector plus scalar)
* STNT1W (vector plus scalar)

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Stephen Long <steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-45-richard.henderson@linaro.org
Message-Id: <20200422141553.8037-1-steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sve.decode      | 10 ++++++++++
 target/arm/translate-sve.c |  8 ++++++++
 2 files changed, 18 insertions(+)

diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ UMLSLT_zzzw     01000100 .. 0 ..... 010 111 ..... .....  @rda_rn_rm
 CMLA_zzzz       01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5  ra=%reg_movprfx
 SQRDCMLAH_zzzz  01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5  ra=%reg_movprfx
+
+### SVE2 Memory Store Group
+
+# SVE2 64-bit scatter non-temporal store (vector plus scalar)
+STNT1_zprz      1110010 .. 00 ..... 001 ... ..... ..... \
+                @rprr_scatter_store xs=2 esz=3 scale=0
+
+# SVE2 32-bit scatter non-temporal store (vector plus scalar)
+STNT1_zprz      1110010 .. 10 ..... 001 ... ..... ..... \
+                @rprr_scatter_store xs=0 esz=2 scale=0
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
     return true;
 }
 
+static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    return trans_ST1_zprz(s, a);
+}
+
 /*
  * Prefetches
  */
-- 
2.20.1

We create our 128-bit default NaN by calling parts64_default_nan()
and then adjusting the result.  We can do the same trick for creating
the floatx80 default NaN, which lets us drop a target ifdef.

floatx80 is used only by:
 i386
 m68k
 arm nwfpe old floating-point emulation emulation support
  (which is essentially dead, especially the parts involving floatx80)
 PPC (only in the xsrqpxp instruction, which just rounds an input
  value by converting to floatx80 and back, so will never generate
  the default NaN)

The floatx80 default NaN as currently implemented is:
 m68k: sign = 0, exp = 1...1, int = 1, frac = 1....1
 i386: sign = 1, exp = 1...1, int = 1, frac = 10...0

These are the same as the parts64_default_nan for these architectures.

This is technically a possible behaviour change for arm linux-user
nwfpe emulation emulation, because the default NaN will now have the
sign bit clear.  But we were already generating a different floatx80
default NaN from the real kernel emulation we are supposedly
following, which appears to use an all-bits-1 value:
https://elixir.bootlin.com/linux/v6.12/source/arch/arm/nwfpe/softfloat-specialize#L267

This won't affect the only "real" use of the nwfpe emulation, which
is ancient binaries that used it as part of the old floating point
calling convention; that only uses loads and stores of 32 and 64 bit
floats, not any of the floatx80 behaviour the original hardware had.
We also get the nwfpe float64 default NaN value wrong:
https://elixir.bootlin.com/linux/v6.12/source/arch/arm/nwfpe/softfloat-specialize#L166
so if we ever cared about this obscure corner the right fix would be
to correct that so nwfpe used its own default-NaN setting rather
than the Arm VFP one.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-29-peter.maydell@linaro.org
---
 fpu/softfloat-specialize.c.inc | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static void parts128_silence_nan(FloatParts128 *p, float_status *status)
 floatx80 floatx80_default_nan(float_status *status)
 {
     floatx80 r;
+    /*
+     * Extrapolate from the choices made by parts64_default_nan to fill
+     * in the floatx80 format. We assume that floatx80's explicit
+     * integer bit is always set (this is true for i386 and m68k,
+     * which are the only real users of this format).
+     */
+    FloatParts64 p64;
+    parts64_default_nan(&p64, status);
 
-    /* None of the targets that have snan_bit_is_one use floatx80.  */
-    assert(!snan_bit_is_one(status));
-#if defined(TARGET_M68K)
-    r.low = UINT64_C(0xFFFFFFFFFFFFFFFF);
-    r.high = 0x7FFF;
-#else
-    /* X86 */
-    r.low = UINT64_C(0xC000000000000000);
-    r.high = 0xFFFF;
-#endif
+    r.high = 0x7FFF | (p64.sign << 15);
+    r.low = (1ULL << DECOMPOSED_BINARY_POINT) | p64.frac;
     return r;
 }
 
-- 
2.34.1
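
To see that the new construction reproduces the old patterns, this
standalone check (assuming DECOMPOSED_BINARY_POINT is 63, i.e. the
fraction sits below the explicit integer bit) plugs in the two sets of
parts64-style choices quoted in the commit message:

    #include <stdint.h>
    #include <stdio.h>

    /* r.high = 0x7FFF | (sign << 15); r.low = (1 << 63) | frac */
    static void show(const char *name, unsigned sign, uint64_t frac)
    {
        uint16_t high = 0x7FFF | (sign << 15);
        uint64_t low = (1ULL << 63) | frac;
        printf("%s: high=0x%04X low=0x%016llX\n",
               name, high, (unsigned long long)low);
    }

    int main(void)
    {
        show("m68k", 0, 0x7FFFFFFFFFFFFFFFULL); /* 0x7FFF, 0xFFFFFFFFFFFFFFFF */
        show("i386", 1, 0x4000000000000000ULL); /* 0xFFFF, 0xC000000000000000 */
        return 0;
    }

Both outputs match the values the removed #ifdef used to hard-code.
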
From: Richard Henderson <richard.henderson@linaro.org>

In addition, use the same vector generator interface for AdvSIMD.
This fixes a bug in which the AdvSIMD insn failed to clear the
high bits of the SVE register.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-44-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |   4 ++
 target/arm/helper.h        |   2 +
 target/arm/translate-a64.h |   3 ++
 target/arm/sve.decode      |   4 ++
 target/arm/sve_helper.c    |  39 ++++++++++++++
 target/arm/translate-a64.c |  25 ++-------
 target/arm/translate-sve.c | 104 +++++++++++++++++++++++++++++++++++++
 target/arm/vec_helper.c    |  12 +++++
 8 files changed, 172 insertions(+), 21 deletions(-)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_histcnt_d, TCG_CALL_NO_RWG,
 
 DEF_HELPER_FLAGS_4(sve2_histseg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve2_xar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_xar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_xar_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ bool disas_sve(DisasContext *, uint32_t);
 
 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                  uint32_t rm_ofs, int64_t shift,
+                  uint32_t opr_sz, uint32_t max_sz);
 
 #endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@
 &rr_dbm         rd rn dbm
 &rrri           rd rn rm imm
 &rri_esz        rd rn imm esz
+&rrri_esz       rd rn rm imm esz
 &rrr_esz        rd rn rm esz
 &rpr_esz        rd pg rn esz
 &rpr_s          rd pg rn s
@@ -XXX,XX +XXX,XX @@ ORR_zzz         00000100 01 1 ..... 001 100 ..... .....  @rd_rn_rm_e0
 EOR_zzz         00000100 10 1 ..... 001 100 ..... .....  @rd_rn_rm_e0
 BIC_zzz         00000100 11 1 ..... 001 100 ..... .....  @rd_rn_rm_e0
 
+XAR             00000100 .. 1 ..... 001 101 rm:5 rd:5    &rrri_esz \
+                rn=%reg_movprfx esz=%tszimm16_esz imm=%tszimm16_shr
+
 # SVE2 bitwise ternary operations
 EOR3            00000100 00 1 ..... 001 110 ..... .....  @rdn_ra_rm_e0
 BSL             00000100 00 1 ..... 001 111 ..... .....  @rdn_ra_rm_e0
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc)
         *(uint64_t *)(vd + i + 8) = out1;
     }
 }
+
+void HELPER(sve2_xar_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    int shr = simd_data(desc);
+    int shl = 8 - shr;
+    uint64_t mask = dup_const(MO_8, 0xff >> shr);
+    uint64_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        uint64_t t = n[i] ^ m[i];
+        d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
+    }
+}
+
+void HELPER(sve2_xar_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    int shr = simd_data(desc);
+    int shl = 16 - shr;
+    uint64_t mask = dup_const(MO_16, 0xffff >> shr);
+    uint64_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        uint64_t t = n[i] ^ m[i];
+        d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
+    }
+}
+
+void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+    int shr = simd_data(desc);
+    uint32_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = ror32(n[i] ^ m[i], shr);
+    }
+}
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
     int imm6 = extract32(insn, 10, 6);
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
-    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
-    int pass;
 
     if (!dc_isar_feature(aa64_sha3, s)) {
         unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
         return;
     }
 
-    tcg_op1 = tcg_temp_new_i64();
-    tcg_op2 = tcg_temp_new_i64();
-    tcg_res[0] = tcg_temp_new_i64();
-    tcg_res[1] = tcg_temp_new_i64();
-
-    for (pass = 0; pass < 2; pass++) {
-        read_vec_element(s, tcg_op1, rn, pass, MO_64);
-        read_vec_element(s, tcg_op2, rm, pass, MO_64);
-
-        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
-        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
-    }
-    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
-    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
-
-    tcg_temp_free_i64(tcg_op1);
-    tcg_temp_free_i64(tcg_op2);
-    tcg_temp_free_i64(tcg_res[0]);
-    tcg_temp_free_i64(tcg_res[1]);
+    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
+                 vec_full_reg_offset(s, rn),
+                 vec_full_reg_offset(s, rm), imm6, 16,
+                 vec_full_reg_size(s));
 }
 
 /* Crypto three-reg imm2
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a)
     return do_zzz_fn(s, a, tcg_gen_gvec_andc);
 }
 
+static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    uint64_t mask = dup_const(MO_8, 0xff >> sh);
+
+    tcg_gen_xor_i64(t, n, m);
+    tcg_gen_shri_i64(d, t, sh);
+    tcg_gen_shli_i64(t, t, 8 - sh);
+    tcg_gen_andi_i64(d, d, mask);
+    tcg_gen_andi_i64(t, t, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    uint64_t mask = dup_const(MO_16, 0xffff >> sh);
+
+    tcg_gen_xor_i64(t, n, m);
+    tcg_gen_shri_i64(d, t, sh);
+    tcg_gen_shli_i64(t, t, 16 - sh);
+    tcg_gen_andi_i64(d, d, mask);
+    tcg_gen_andi_i64(t, t, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
+{
+    tcg_gen_xor_i32(d, n, m);
+    tcg_gen_rotri_i32(d, d, sh);
+}
+
+static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+    tcg_gen_xor_i64(d, n, m);
+    tcg_gen_rotri_i64(d, d, sh);
+}
+
+static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                        TCGv_vec m, int64_t sh)
+{
+    tcg_gen_xor_vec(vece, d, n, m);
+    tcg_gen_rotri_vec(vece, d, d, sh);
+}
+
+void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                  uint32_t rm_ofs, int64_t shift,
+                  uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
+    static const GVecGen3i ops[4] = {
+        { .fni8 = gen_xar8_i64,
+          .fniv = gen_xar_vec,
+          .fno = gen_helper_sve2_xar_b,
+          .opt_opc = vecop,
+          .vece = MO_8 },
+        { .fni8 = gen_xar16_i64,
+          .fniv = gen_xar_vec,
+          .fno = gen_helper_sve2_xar_h,
+          .opt_opc = vecop,
+          .vece = MO_16 },
+        { .fni4 = gen_xar_i32,
+          .fniv = gen_xar_vec,
+          .fno = gen_helper_sve2_xar_s,
+          .opt_opc = vecop,
+          .vece = MO_32 },
+        { .fni8 = gen_xar_i64,
+          .fniv = gen_xar_vec,
+          .fno = gen_helper_gvec_xar_d,
+          .opt_opc = vecop,
+          .vece = MO_64 }
+    };
+    int esize = 8 << vece;
+
+    /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
+    tcg_debug_assert(shift >= 0);
+    tcg_debug_assert(shift <= esize);
+    shift &= esize - 1;
+
+    if (shift == 0) {
+        /* xar with no rotate devolves to xor. */
+        tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
+    } else {
+        tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
+                        shift, &ops[vece]);
+    }
+}
+
+static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
+{
+    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
+                     vec_full_reg_offset(s, a->rn),
+                     vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
+    }
+    return true;
+}
+
 static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn)
 {
     if (!dc_isar_feature(aa64_sve2, s)) {
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_umulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
+
+void HELPER(gvec_xar_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    int shr = simd_data(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = ror64(n[i] ^ m[i], shr);
+    }
+    clear_tail(d, opr_sz * 8, simd_maxsz(desc));
+}
-- 
2.20.1

In target/loongarch's helper_fclass_s() and helper_fclass_d() we pass
a zero-initialized float_status struct to float32_is_quiet_nan() and
float64_is_quiet_nan(), with the cryptic comment "for
snan_bit_is_one".

This pattern appears to have been copied from target/riscv, where it
is used because the functions there do not have ready access to the
CPU state struct.  The comment presumably refers to the fact that the
main reason the is_quiet_nan() functions want the float_state is
because they want to know about the snan_bit_is_one config.

In the loongarch helpers, though, we have the CPU state struct
to hand.  Use the usual env->fp_status here.  This avoids our needing
to track that we need to update the initializer of the local
float_status structs when the core softfloat code adds new
options for targets to configure their behaviour.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-30-peter.maydell@linaro.org
---
 target/loongarch/tcg/fpu_helper.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/target/loongarch/tcg/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/fpu_helper.c
+++ b/target/loongarch/tcg/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_fclass_s(CPULoongArchState *env, uint64_t fj)
     } else if (float32_is_zero_or_denormal(f)) {
         return sign ? 1 << 4 : 1 << 8;
     } else if (float32_is_any_nan(f)) {
-        float_status s = { }; /* for snan_bit_is_one */
-        return float32_is_quiet_nan(f, &s) ? 1 << 1 : 1 << 0;
+        return float32_is_quiet_nan(f, &env->fp_status) ? 1 << 1 : 1 << 0;
     } else {
         return sign ? 1 << 3 : 1 << 7;
     }
 }
@@ -XXX,XX +XXX,XX @@ uint64_t helper_fclass_d(CPULoongArchState *env, uint64_t fj)
     } else if (float64_is_zero_or_denormal(f)) {
         return sign ? 1 << 4 : 1 << 8;
     } else if (float64_is_any_nan(f)) {
-        float_status s = { }; /* for snan_bit_is_one */
-        return float64_is_quiet_nan(f, &s) ? 1 << 1 : 1 << 0;
+        return float64_is_quiet_nan(f, &env->fp_status) ? 1 << 1 : 1 << 0;
     } else {
         return sign ? 1 << 3 : 1 << 7;
     }
 }
-- 
2.34.1
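
The per-element operation all of the xar helpers above implement is
just a rotate-right of an exclusive-or.  A minimal standalone sketch
(not the QEMU code; ror64 here is hand-written and assumes 0 < sh <
64, matching the "shift == 0 devolves to xor" special case that
gen_gvec_xar() handles separately):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ror64(uint64_t x, unsigned sh)
    {
        return (x >> sh) | (x << (64 - sh));
    }

    /* XAR on one 64-bit element: xor, then rotate right by sh */
    static uint64_t xar64(uint64_t n, uint64_t m, unsigned sh)
    {
        return sh ? ror64(n ^ m, sh) : (n ^ m);
    }

    int main(void)
    {
        printf("0x%016llx\n",
               (unsigned long long)xar64(0xff00ff00ff00ff00ULL,
                                         0x0f0f0f0f0f0f0f0fULL, 8));
        return 0;
    }

The byte and halfword helpers do the same thing, but emulate a
per-lane rotate with shift/mask pairs because a 64-bit rotate would
cross lane boundaries.
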
From: Stephen Long <steplong@quicinc.com>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Stephen Long <steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-43-richard.henderson@linaro.org
Message-Id: <20200416173109.8856-1-steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |   7 ++
 target/arm/sve.decode      |   6 ++
 target/arm/sve_helper.c    | 131 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c |  19 ++++++
 4 files changed, 163 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_b, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_h, TCG_CALL_NO_RWG,
                    i32, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(sve2_histcnt_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_histcnt_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_histseg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@
                 &rprrr_esz rn=%reg_movprfx
 @rdn_pg_rm_ra   ........ esz:2 . ra:5 ... pg:3 rm:5 rd:5 \
                 &rprrr_esz rn=%reg_movprfx
+@rd_pg_rn_rm    ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5   &rprr_esz
 
 # One register operand, with governing predicate, vector element size
 @rd_pg_rn       ........ esz:2 ... ... ... pg:3 rn:5 rd:5  &rpr_esz
@@ -XXX,XX +XXX,XX @@ RSUBHNT         01000101 .. 1 ..... 011 111 ..... .....  @rd_rn_rm
 MATCH           01000101 .. 1 ..... 100 ... ..... 0 ....   @pd_pg_rn_rm
 NMATCH          01000101 .. 1 ..... 100 ... ..... 1 ....   @pd_pg_rn_rm
 
+### SVE2 Histogram Computation
+
+HISTCNT         01000101 .. 1 ..... 110 ... ..... .....    @rd_pg_rn_rm
+HISTSEG         01000101 .. 1 ..... 101 000 ..... .....    @rd_rn_rm
+
 ## SVE2 floating-point pairwise operations
 
 FADDP           01100100 .. 010 00 0 100 ... ..... .....   @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_PPZZ_MATCH(sve2_nmatch_ppzz_b, MO_8, true)
 DO_PPZZ_MATCH(sve2_nmatch_ppzz_h, MO_16, true)
 
 #undef DO_PPZZ_MATCH
+
+void HELPER(sve2_histcnt_s)(void *vd, void *vn, void *vm, void *vg,
+                            uint32_t desc)
+{
+    ARMVectorReg scratch;
+    intptr_t i, j;
+    intptr_t opr_sz = simd_oprsz(desc);
+    uint32_t *d = vd, *n = vn, *m = vm;
+    uint8_t *pg = vg;
+
+    if (d == n) {
+        n = memcpy(&scratch, n, opr_sz);
+        if (d == m) {
+            m = n;
+        }
+    } else if (d == m) {
+        m = memcpy(&scratch, m, opr_sz);
+    }
+
+    for (i = 0; i < opr_sz; i += 4) {
+        uint64_t count = 0;
+        uint8_t pred;
+
+        pred = pg[H1(i >> 3)] >> (i & 7);
+        if (pred & 1) {
+            uint32_t nn = n[H4(i >> 2)];
+
+            for (j = 0; j <= i; j += 4) {
+                pred = pg[H1(j >> 3)] >> (j & 7);
+                if ((pred & 1) && nn == m[H4(j >> 2)]) {
+                    ++count;
+                }
+            }
+        }
+        d[H4(i >> 2)] = count;
+    }
+}
+
+void HELPER(sve2_histcnt_d)(void *vd, void *vn, void *vm, void *vg,
+                            uint32_t desc)
+{
+    ARMVectorReg scratch;
+    intptr_t i, j;
+    intptr_t opr_sz = simd_oprsz(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+    uint8_t *pg = vg;
+
+    if (d == n) {
+        n = memcpy(&scratch, n, opr_sz);
+        if (d == m) {
+            m = n;
+        }
+    } else if (d == m) {
+        m = memcpy(&scratch, m, opr_sz);
+    }
+
+    for (i = 0; i < opr_sz / 8; ++i) {
+        uint64_t count = 0;
+        if (pg[H1(i)] & 1) {
+            uint64_t nn = n[i];
+            for (j = 0; j <= i; ++j) {
+                if ((pg[H1(j)] & 1) && nn == m[j]) {
+                    ++count;
+                }
+            }
+        }
+        d[i] = count;
+    }
+}
+
+/*
+ * Returns the number of bytes in m0 and m1 that match n.
+ * Unlike do_match2 we don't just need true/false, we need an exact count.
+ * This requires two extra logical operations.
+ */
+static inline uint64_t do_histseg_cnt(uint8_t n, uint64_t m0, uint64_t m1)
+{
+    const uint64_t mask = dup_const(MO_8, 0x7f);
+    uint64_t cmp0, cmp1;
+
+    cmp1 = dup_const(MO_8, n);
+    cmp0 = cmp1 ^ m0;
+    cmp1 = cmp1 ^ m1;
+
+    /*
+     * 1: clear msb of each byte to avoid carry to next byte (& mask)
+     * 2: carry in to msb if byte != 0 (+ mask)
+     * 3: set msb if cmp has msb set (| cmp)
+     * 4: set ~msb to ignore them (| mask)
+     * We now have 0xff for byte != 0 or 0x7f for byte == 0.
+     * 5: invert, resulting in 0x80 if and only if byte == 0.
+     */
+    cmp0 = ~(((cmp0 & mask) + mask) | cmp0 | mask);
+    cmp1 = ~(((cmp1 & mask) + mask) | cmp1 | mask);
+
+    /*
+     * Combine the two compares in a way that the bits do
+     * not overlap, and so preserves the count of set bits.
+     * If the host has an efficient instruction for ctpop,
+     * then ctpop(x) + ctpop(y) has the same number of
+     * operations as ctpop(x | (y >> 1)).  If the host does
+     * not have an efficient ctpop, then we only want to
+     * use it once.
+     */
+    return ctpop64(cmp0 | (cmp1 >> 1));
+}
+
+void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j;
+    intptr_t opr_sz = simd_oprsz(desc);
+
+    for (i = 0; i < opr_sz; i += 16) {
+        uint64_t n0 = *(uint64_t *)(vn + i);
+        uint64_t m0 = *(uint64_t *)(vm + i);
+        uint64_t n1 = *(uint64_t *)(vn + i + 8);
+        uint64_t m1 = *(uint64_t *)(vm + i + 8);
+        uint64_t out0 = 0;
+        uint64_t out1 = 0;
+
+        for (j = 0; j < 64; j += 8) {
+            uint64_t cnt0 = do_histseg_cnt(n0 >> j, m0, m1);
+            uint64_t cnt1 = do_histseg_cnt(n1 >> j, m0, m1);
+            out0 |= cnt0 << j;
+            out1 |= cnt1 << j;
+        }
+
+        *(uint64_t *)(vd + i) = out0;
+        *(uint64_t *)(vd + i + 8) = out1;
+    }
+}
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
 DO_SVE2_PPZZ_MATCH(MATCH, match)
 DO_SVE2_PPZZ_MATCH(NMATCH, nmatch)
 
+static bool trans_HISTCNT(DisasContext *s, arg_rprr_esz *a)
+{
+    static gen_helper_gvec_4 * const fns[2] = {
+        gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
+    };
+    if (a->esz < 2) {
+        return false;
+    }
+    return do_sve2_zpzz_ool(s, a, fns[a->esz - 2]);
+}
+
+static bool trans_HISTSEG(DisasContext *s, arg_rrr_esz *a)
+{
+    if (a->esz != 0) {
+        return false;
+    }
+    return do_sve2_zzz_ool(s, a, gen_helper_sve2_histseg);
+}
+
 static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
                             gen_helper_gvec_4_ptr *fn)
 {
-- 
2.20.1

In the frem helper, we have a local float_status because we want to
execute the floatx80_div() with a custom rounding mode.  Instead of
zero-initializing the local float_status and then having to set it up
with the m68k standard behaviour (including the NaN propagation rule
and copying the rounding precision from env->fp_status), initialize
it as a complete copy of env->fp_status.  This will avoid our having
to add new code in this function for every new config knob we add
to fp_status.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-31-peter.maydell@linaro.org
---
 target/m68k/fpu_helper.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/fpu_helper.c
+++ b/target/m68k/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(frem)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
 
     fp_rem = floatx80_rem(val1->d, val0->d, &env->fp_status);
     if (!floatx80_is_any_nan(fp_rem)) {
-        float_status fp_status = { };
+        /* Use local temporary fp_status to set different rounding mode */
+        float_status fp_status = env->fp_status;
         uint32_t quotient;
         int sign;
 
         /* Calculate quotient directly using round to nearest mode */
-        set_float_2nan_prop_rule(float_2nan_prop_ab, &fp_status);
         set_float_rounding_mode(float_round_nearest_even, &fp_status);
-        set_floatx80_rounding_precision(
-            get_floatx80_rounding_precision(&env->fp_status), &fp_status);
         fp_quot.d = floatx80_div(val1->d, val0->d, &fp_status);
 
         sign = extractFloatx80Sign(fp_quot.d);
-- 
2.34.1
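
The zero-byte detection trick in do_histseg_cnt() is worth seeing in
isolation.  This standalone sketch (using the GCC/Clang builtin
__builtin_popcountll rather than QEMU's ctpop64) counts how many bytes
of a 64-bit word equal a given search byte:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned count_byte(uint64_t v, uint8_t n)
    {
        const uint64_t mask = 0x7f7f7f7f7f7f7f7fULL;
        /* xor with the splatted search byte: matching bytes become 0 */
        uint64_t cmp = v ^ (0x0101010101010101ULL * n);
        /* turn each zero byte into 0x80 and every other byte into 0x00 */
        cmp = ~(((cmp & mask) + mask) | cmp | mask);
        return __builtin_popcountll(cmp);
    }

    int main(void)
    {
        printf("%u\n", count_byte(0x11aa11bb11cc11ddULL, 0x11)); /* 4 */
        return 0;
    }

Per byte: (b & 0x7f) + 0x7f carries into bit 7 exactly when the low
seven bits are nonzero, or-ing in b covers the case where only bit 7
is set, and the final invert leaves 0x80 only for b == 0.
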
From: Richard Henderson <richard.henderson@linaro.org>

Using g_memdup is a bit more compact than g_new + memcpy.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-2-f4bug@amsat.org
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/tcg/cputlb.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
     } else if (encode_pbm_to_runon(&runon, d)) {
         async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
     } else {
-        TLBFlushPageBitsByMMUIdxData *p
-            = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-
         /* Otherwise allocate a structure, freed by the worker.  */
-        *p = d;
+        TLBFlushPageBitsByMMUIdxData *p = g_memdup(&d, sizeof(d));
         async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                          RUN_ON_CPU_HOST_PTR(p));
     }
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
         flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
     } else {
         CPUState *dst_cpu;
-        TLBFlushPageBitsByMMUIdxData *p;
 
         /* Allocate a separate data block for each destination cpu.  */
         CPU_FOREACH(dst_cpu) {
             if (dst_cpu != src_cpu) {
-                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-                *p = d;
+                TLBFlushPageBitsByMMUIdxData *p = g_memdup(&d, sizeof(d));
                 async_run_on_cpu(dst_cpu,
                                  tlb_flush_page_bits_by_mmuidx_async_2,
                                  RUN_ON_CPU_HOST_PTR(p));
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
         /* Allocate a separate data block for each destination cpu.  */
         CPU_FOREACH(dst_cpu) {
             if (dst_cpu != src_cpu) {
-                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-                *p = d;
+                p = g_memdup(&d, sizeof(d));
                 async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                                  RUN_ON_CPU_HOST_PTR(p));
             }
         }
 
-        p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-        *p = d;
+        p = g_memdup(&d, sizeof(d));
         async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                               RUN_ON_CPU_HOST_PTR(p));
     }
 }
-- 
2.20.1

In cf_fpu_gdb_get_reg() and cf_fpu_gdb_set_reg() we do the conversion
from float64 to floatx80 using a scratch float_status, because we
don't want the conversion to affect the CPU's floating point exception
status.  Currently we use a zero-initialized float_status.  This will
get steadily more awkward as we add config knobs to float_status
that the target must initialize.  Avoid having to add any of that
configuration here by instead initializing our local float_status
from the env->fp_status.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-32-peter.maydell@linaro.org
---
 target/m68k/helper.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -XXX,XX +XXX,XX @@ static int cf_fpu_gdb_get_reg(CPUState *cs, GByteArray *mem_buf, int n)
     CPUM68KState *env = &cpu->env;
 
     if (n < 8) {
-        float_status s = {};
+        /* Use scratch float_status so any exceptions don't change CPU state */
+        float_status s = env->fp_status;
         return gdb_get_reg64(mem_buf, floatx80_to_float64(env->fregs[n].d, &s));
     }
     switch (n) {
@@ -XXX,XX +XXX,XX @@ static int cf_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n)
     CPUM68KState *env = &cpu->env;
 
     if (n < 8) {
-        float_status s = {};
+        /* Use scratch float_status so any exceptions don't change CPU state */
+        float_status s = env->fp_status;
         env->fregs[n].d = float64_to_floatx80(ldq_be_p(mem_buf), &s);
         return 8;
     }
-- 
2.34.1
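
The shape of the g_memdup() simplification, shown here without the
GLib dependency (memdup below is a hand-written stand-in for the real
g_memdup call the patch uses):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* allocate-then-copy, collapsed into one call as g_memdup does */
    static void *memdup(const void *src, size_t size)
    {
        void *p = malloc(size);
        if (p) {
            memcpy(p, src, size);
        }
        return p;
    }

    int main(void)
    {
        int d = 42;
        int *p = memdup(&d, sizeof(d));
        printf("%d\n", p ? *p : -1);
        free(p);
        return 0;
    }

The worker that receives the pointer frees it, so each destination CPU
gets its own independent copy of the flush descriptor.
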
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-38-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 18 +++++++++++++
 target/arm/vec_internal.h  |  5 +++++
 target/arm/sve.decode      |  5 +++++
 target/arm/sve_helper.c    | 46 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 32 ++++++++++++++++++++++++++
 target/arm/vec_helper.c    | 15 ++++++-------
 6 files changed, 113 insertions(+), 8 deletions(-)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/vec_internal.h b/target/arm/vec_internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_internal.h
+++ b/target/arm/vec_internal.h
@@ -XXX,XX +XXX,XX @@ static inline int64_t do_suqrshl_d(int64_t src, int64_t shift,
     return do_uqrshl_d(src, shift, round, sat);
 }
 
+int8_t do_sqrdmlah_b(int8_t, int8_t, int8_t, bool, bool);
+int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *);
+int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *);
+int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);
+
 #endif /* TARGET_ARM_VEC_INTERNALS_H */
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SMLSLB_zzzw     01000100 .. 0 ..... 010 100 ..... .....  @rda_rn_rm
 SMLSLT_zzzw     01000100 .. 0 ..... 010 101 ..... .....  @rda_rn_rm
 UMLSLB_zzzw     01000100 .. 0 ..... 010 110 ..... .....  @rda_rn_rm
 UMLSLT_zzzw     01000100 .. 0 ..... 010 111 ..... .....  @rda_rn_rm
+
+## SVE2 complex integer multiply-add
+
+CMLA_zzzz       01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5  ra=%reg_movprfx
+SQRDCMLAH_zzzz  01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5  ra=%reg_movprfx
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_SQDMLAL(sve2_sqdmlsl_zzzw_d, int64_t, int32_t,     , H1_4,
 
 #undef DO_SQDMLAL
 
+#define DO_CMLA_FUNC(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{                                                               \
+    intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE);       \
+    int rot = simd_data(desc);                                  \
+    int sel_a = rot & 1, sel_b = sel_a ^ 1;                     \
+    bool sub_r = rot == 1 || rot == 2;                          \
+    bool sub_i = rot >= 2;                                      \
+    TYPE *d = vd, *n = vn, *m = vm, *a = va;                    \
+    for (i = 0; i < opr_sz; i += 2) {                           \
+        TYPE elt1_a = n[H(i + sel_a)];                          \
+        TYPE elt2_a = m[H(i + sel_a)];                          \
+        TYPE elt2_b = m[H(i + sel_b)];                          \
+        d[H(i)] = OP(elt1_a, elt2_a, a[H(i)], sub_r);           \
+        d[H(i + 1)] = OP(elt1_a, elt2_b, a[H(i + 1)], sub_i);   \
+    }                                                           \
+}
+
+#define DO_CMLA(N, M, A, S) (A + (N * M) * (S ? -1 : 1))
+
+DO_CMLA_FUNC(sve2_cmla_zzzz_b, uint8_t, H1, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_h, uint16_t, H2, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_s, uint32_t, H4, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_d, uint64_t,   , DO_CMLA)
+
+#define DO_SQRDMLAH_B(N, M, A, S) \
+    do_sqrdmlah_b(N, M, A, S, true)
+#define DO_SQRDMLAH_H(N, M, A, S) \
+    ({ uint32_t discard; do_sqrdmlah_h(N, M, A, S, true, &discard); })
+#define DO_SQRDMLAH_S(N, M, A, S) \
+    ({ uint32_t discard; do_sqrdmlah_s(N, M, A, S, true, &discard); })
+#define DO_SQRDMLAH_D(N, M, A, S) \
+    do_sqrdmlah_d(N, M, A, S, true)
+
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_b, int8_t, H1, DO_SQRDMLAH_B)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_h, int16_t, H2, DO_SQRDMLAH_H)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_s, int32_t, H4, DO_SQRDMLAH_S)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t,   , DO_SQRDMLAH_D)
+
+#undef DO_CMLA
+#undef DO_CMLA_FUNC
+#undef DO_SQRDMLAH_B
+#undef DO_SQRDMLAH_H
+#undef DO_SQRDMLAH_S
+#undef DO_SQRDMLAH_D
+
 #define DO_BITPERM(NAME, TYPE, OP) \
 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
 { \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_UMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
 {
     return do_umlsl_zzzw(s, a, true);
 }
+
+static bool trans_CMLA_zzzz(DisasContext *s, arg_CMLA_zzzz *a)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
+        gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
+    };
+
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot);
+    }
+    return true;
+}
+
+static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
+        gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
+    };
+
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot);
+    }
+    return true;
+}
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@
 #endif
 
 /* Signed saturating rounding doubling multiply-accumulate high half, 8-bit */
-static int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3,
-                            bool neg, bool round)
+int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3,
+                     bool neg, bool round)
 {
     /*
      * Simplify:
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_b)(void *vd, void *vn, void *vm,
 }
 
 /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
-static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
-                             bool neg, bool round, uint32_t *sat)
+int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
+                      bool neg, bool round, uint32_t *sat)
 {
     /* Simplify similarly to do_sqrdmlah_b above.  */
     int32_t ret = (int32_t)src1 * src2;
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_h)(void *vd, void *vn, void *vm,
 }
 
 /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
-static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
-                             bool neg, bool round, uint32_t *sat)
+int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
+                      bool neg, bool round, uint32_t *sat)
 {
     /* Simplify similarly to do_sqrdmlah_b above.  */
     int64_t ret = (int64_t)src1 * src2;
@@ -XXX,XX +XXX,XX @@ static int64_t do_sat128_d(Int128 r)
     return ls;
 }
 
-static int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a,
-                             bool neg, bool round)
+int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a, bool neg, bool round)
 {
     uint64_t l, h;
     Int128 r, t;
-- 
2.20.1

In the helper functions flcmps and flcmpd we use a scratch float_status
so that we don't change the CPU state if the comparison raises any
floating point exception flags.  Instead of zero-initializing this
scratch float_status, initialize it as a copy of env->fp_status.  This
avoids the need to explicitly initialize settings like the NaN
propagation rule or others we might add to softfloat in future.

To do this we need to pass the CPU env pointer in to the helper.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-33-peter.maydell@linaro.org
---
 target/sparc/helper.h     | 4 ++--
 target/sparc/fop_helper.c | 8 ++++----
 target/sparc/translate.c  | 4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/sparc/helper.h b/target/sparc/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/helper.h
+++ b/target/sparc/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, i32, env, f64, f64)
 DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, i32, env, f64, f64)
 DEF_HELPER_FLAGS_3(fcmpq, TCG_CALL_NO_WG, i32, env, i128, i128)
 DEF_HELPER_FLAGS_3(fcmpeq, TCG_CALL_NO_WG, i32, env, i128, i128)
-DEF_HELPER_FLAGS_2(flcmps, TCG_CALL_NO_RWG_SE, i32, f32, f32)
-DEF_HELPER_FLAGS_2(flcmpd, TCG_CALL_NO_RWG_SE, i32, f64, f64)
+DEF_HELPER_FLAGS_3(flcmps, TCG_CALL_NO_RWG_SE, i32, env, f32, f32)
+DEF_HELPER_FLAGS_3(flcmpd, TCG_CALL_NO_RWG_SE, i32, env, f64, f64)
 DEF_HELPER_2(raise_exception, noreturn, env, int)
 
 DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/fop_helper.c
+++ b/target/sparc/fop_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t helper_fcmpeq(CPUSPARCState *env, Int128 src1, Int128 src2)
     return finish_fcmp(env, r, GETPC());
 }
 
-uint32_t helper_flcmps(float32 src1, float32 src2)
+uint32_t helper_flcmps(CPUSPARCState *env, float32 src1, float32 src2)
 {
     /*
      * FLCMP never raises an exception nor modifies any FSR fields.
      * Perform the comparison with a dummy fp environment.
      */
-    float_status discard = { };
+    float_status discard = env->fp_status;
     FloatRelation r;
 
     set_float_2nan_prop_rule(float_2nan_prop_s_ba, &discard);
@@ -XXX,XX +XXX,XX @@ uint32_t helper_flcmps(float32 src1, float32 src2)
     g_assert_not_reached();
 }
 
-uint32_t helper_flcmpd(float64 src1, float64 src2)
+uint32_t helper_flcmpd(CPUSPARCState *env, float64 src1, float64 src2)
 {
-    float_status discard = { };
+    float_status discard = env->fp_status;
     FloatRelation r;
 
     set_float_2nan_prop_rule(float_2nan_prop_s_ba, &discard);
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
 
     src1 = gen_load_fpr_F(dc, a->rs1);
     src2 = gen_load_fpr_F(dc, a->rs2);
-    gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
+    gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
     return advance_pc(dc);
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
 
     src1 = gen_load_fpr_D(dc, a->rs1);
     src2 = gen_load_fpr_D(dc, a->rs2);
-    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
+    gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
     return advance_pc(dc);
 }
 
-- 
2.34.1
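
For reference, one even/odd element pair of the cmla helpers behaves
as in this standalone model (plain C, not the QEMU macros; rot encodes
the 0/90/180/270 degree rotation of the complex operand):

    #include <stdint.h>
    #include <stdio.h>

    /* One complex element pair: d[0] is the real half, d[1] the imaginary */
    static void cmla_pair(int rot, const int16_t n[2], const int16_t m[2],
                          int16_t d[2])
    {
        int sel_a = rot & 1, sel_b = sel_a ^ 1;
        int sub_r = (rot == 1 || rot == 2) ? -1 : 1;
        int sub_i = (rot >= 2) ? -1 : 1;

        d[0] += sub_r * n[sel_a] * m[sel_a];
        d[1] += sub_i * n[sel_a] * m[sel_b];
    }

    int main(void)
    {
        int16_t n[2] = { 3, 5 }, m[2] = { 7, 11 }, d[2] = { 0, 0 };
        cmla_pair(0, n, m, d);
        printf("%d %d\n", d[0], d[1]);  /* 21 33 */
        return 0;
    }

The sqrdcmlah variants use the same element selection but route each
product through the saturating rounding-doubling accumulate helpers
instead of a plain multiply-add.
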
From: Stephen Long <steplong@quicinc.com>

This completes the section 'SVE2 integer add/subtract narrow high part'

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Stephen Long <steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-42-richard.henderson@linaro.org
Message-Id: <20200417162231.10374-5-steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |  8 ++++++++
 target/arm/sve.decode      |  2 ++
 target/arm/sve_helper.c    | 10 ++++++++++
 target/arm/translate-sve.c |  2 ++
 4 files changed, 22 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_subhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_subhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_subhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG,
                    i32, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ RADDHNB         01000101 .. 1 ..... 011 010 ..... .....  @rd_rn_rm
 RADDHNT         01000101 .. 1 ..... 011 011 ..... .....  @rd_rn_rm
 SUBHNB          01000101 .. 1 ..... 011 100 ..... .....  @rd_rn_rm
 SUBHNT          01000101 .. 1 ..... 011 101 ..... .....  @rd_rn_rm
+RSUBHNB         01000101 .. 1 ..... 011 110 ..... .....  @rd_rn_rm
+RSUBHNT         01000101 .. 1 ..... 011 111 ..... .....  @rd_rn_rm
 
 ### SVE2 Character Match
 
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
 #define DO_ADDHN(N, M, SH)  ((N + M) >> SH)
 #define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH)
 #define DO_SUBHN(N, M, SH)  ((N - M) >> SH)
+#define DO_RSUBHN(N, M, SH) ((N - M + ((__typeof(N))1 << (SH - 1))) >> SH)
 
 DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN)
 DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN)
@@ -XXX,XX +XXX,XX @@ DO_BINOPNT(sve2_subhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_SUBHN)
 DO_BINOPNT(sve2_subhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_SUBHN)
 DO_BINOPNT(sve2_subhnt_d, uint64_t, uint32_t, 32,     , H1_4, DO_SUBHN)
 
+DO_BINOPNB(sve2_rsubhnb_h, uint16_t, uint8_t, 8, DO_RSUBHN)
+DO_BINOPNB(sve2_rsubhnb_s, uint32_t, uint16_t, 16, DO_RSUBHN)
+DO_BINOPNB(sve2_rsubhnb_d, uint64_t, uint32_t, 32, DO_RSUBHN)
+
+DO_BINOPNT(sve2_rsubhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RSUBHN)
+DO_BINOPNT(sve2_rsubhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RSUBHN)
+DO_BINOPNT(sve2_rsubhnt_d, uint64_t, uint32_t, 32,     , H1_4, DO_RSUBHN)
+
+#undef DO_RSUBHN
 #undef DO_SUBHN
 #undef DO_RADDHN
 #undef DO_ADDHN
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)
 
 DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
 DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
+DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb)
+DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
 
 static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
                                gen_helper_gvec_flags_4 *fn)
-- 
2.20.1

In the helper_compute_fprf functions, we pass a dummy float_status
in to the is_signaling_nan() function.  This is unnecessary, because
we have convenient access to the CPU env pointer here and that
is already set up with the correct values for the snan_bit_is_one
and no_signaling_nans config settings.  is_signaling_nan() doesn't
ever update the fp_status with any exception flags, so there is
no reason not to use env->fp_status here.

Use env->fp_status instead of the dummy fp_status.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-34-peter.maydell@linaro.org
---
 target/ppc/fpu_helper.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)         \
     } else if (tp##_is_infinity(arg)) {                         \
         fprf = neg ? 0x09 << FPSCR_FPRF : 0x05 << FPSCR_FPRF;   \
     } else {                                                    \
-        float_status dummy = { };  /* snan_bit_is_one = 0 */    \
-        if (tp##_is_signaling_nan(arg, &dummy)) {               \
+        if (tp##_is_signaling_nan(arg, &env->fp_status)) {      \
             fprf = 0x00 << FPSCR_FPRF;                          \
         } else {                                                \
             fprf = 0x11 << FPSCR_FPRF;                          \
-- 
2.34.1
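
The DO_RSUBHN arithmetic in isolation: subtract, add half an output
unit as rounding bias, keep the high half.  A standalone sketch for
the 16-to-8-bit case (SH == 8; not the QEMU macro itself):

    #include <stdint.h>
    #include <stdio.h>

    /* rounded subtract, narrowing to the high half of the result */
    static uint8_t rsubhn(uint16_t n, uint16_t m)
    {
        return (uint8_t)((uint16_t)(n - m + (1u << 7)) >> 8);
    }

    int main(void)
    {
        printf("%u\n", rsubhn(0x8000, 0x0080)); /* (0x7F80 + 0x80) >> 8 = 0x80 */
        return 0;
    }

The non-rounded SUBHN form is the same expression without the
(1 << (SH - 1)) bias term.
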
From: Richard Henderson <richard.henderson@linaro.org>

For SVE, we potentially have a 4th argument coming from the
movprfx instruction.  Currently we do not optimize movprfx,
so the problem is not visible.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-50-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h         |  20 +++---
 target/arm/sve.decode       |   7 ++-
 target/arm/translate-a64.c  |  15 ++++-
 target/arm/translate-neon.c |  10 +--
 target/arm/translate-sve.c  |  13 ++--
 target/arm/vec_helper.c     | 120 ++++++++++++++++++++----------------
 6 files changed, 109 insertions(+), 76 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Now that float_status has a bunch of fp parameters,
it is easier to copy an existing structure than create
one from scratch.  Begin by copying the structure that
corresponds to the FPSR and make only the adjustments
required for BFloat16 semantics.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20241203203949.483774-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/tcg/vec_helper.c | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)
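
The copy-then-adjust idiom the second patch applies to float_status,
in miniature (invented toy_status type, not QEMU's float_status):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_status {
        int rounding_mode;
        bool flush_to_zero;
        bool default_nan_mode;
    };

    int main(void)
    {
        struct toy_status base = { 0, false, false };
        struct toy_status bf16 = base;      /* inherit every setting ... */
        bf16.default_nan_mode = true;       /* ... then override one knob */
        printf("%d\n", bf16.default_nan_mode);
        return 0;
    }

Starting from the fully configured source struct means new config
fields added later are inherited automatically instead of needing to
be enumerated at every local-initialization site.
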
19
17
20
diff --git a/target/arm/helper.h b/target/arm/helper.h
18
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
21
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/helper.h
20
--- a/target/arm/tcg/vec_helper.c
23
+++ b/target/arm/helper.h
21
+++ b/target/arm/tcg/vec_helper.c
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
22
@@ -XXX,XX +XXX,XX @@ bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp)
25
DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
23
* no effect on AArch32 instructions.
26
void, ptr, ptr, ptr, ptr, i32)
24
*/
27
25
bool ebf = is_a64(env) && env->vfp.fpcr & FPCR_EBF;
28
-DEF_HELPER_FLAGS_4(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
26
- *statusp = (float_status){
29
-DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
27
- .tininess_before_rounding = float_tininess_before_rounding,
30
-DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
28
- .float_rounding_mode = float_round_to_odd_inf,
31
-DEF_HELPER_FLAGS_4(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
29
- .flush_to_zero = true,
32
+DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
30
- .flush_inputs_to_zero = true,
33
+DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
31
- .default_nan_mode = true,
34
+DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
32
- };
35
+DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
36
37
-DEF_HELPER_FLAGS_4(gvec_sdot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
38
-DEF_HELPER_FLAGS_4(gvec_udot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
39
-DEF_HELPER_FLAGS_4(gvec_sdot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
40
-DEF_HELPER_FLAGS_4(gvec_udot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
41
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
42
+ void, ptr, ptr, ptr, ptr, i32)
43
+DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG,
44
+ void, ptr, ptr, ptr, ptr, i32)
45
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
46
+ void, ptr, ptr, ptr, ptr, i32)
47
+DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
48
+ void, ptr, ptr, ptr, ptr, i32)
49
50
DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
51
void, ptr, ptr, ptr, ptr, i32)
52
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/sve.decode
55
+++ b/target/arm/sve.decode
56
@@ -XXX,XX +XXX,XX @@ UMIN_zzi 00100101 .. 101 011 110 ........ ..... @rdn_i8u
57
MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s
58
59
# SVE integer dot product (unpredicated)
60
-DOT_zzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 ra=%reg_movprfx
61
+DOT_zzzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 \
62
+ ra=%reg_movprfx
63
64
# SVE integer dot product (indexed)
65
-DOT_zzx 01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \
66
+DOT_zzxw 01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \
67
sz=0 ra=%reg_movprfx
68
-DOT_zzx 01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \
69
+DOT_zzxw 01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \
70
sz=1 ra=%reg_movprfx
71
72
# SVE floating-point complex add (predicated)
73
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/target/arm/translate-a64.c
76
+++ b/target/arm/translate-a64.c
77
@@ -XXX,XX +XXX,XX @@ static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
78
tcg_temp_free_ptr(qc_ptr);
79
}
80
81
+/* Expand a 4-operand operation using an out-of-line helper. */
82
+static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
83
+ int rm, int ra, int data, gen_helper_gvec_4 *fn)
84
+{
85
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
86
+ vec_full_reg_offset(s, rn),
87
+ vec_full_reg_offset(s, rm),
88
+ vec_full_reg_offset(s, ra),
89
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
90
+}
91
+
33
+
92
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
34
+ *statusp = env->vfp.fp_status;
93
* than the 32 bit equivalent.
35
+ set_default_nan_mode(true, statusp);
94
*/
36
95
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
37
if (ebf) {
96
return;
38
- float_status *fpst = &env->vfp.fp_status;
97
39
- set_flush_to_zero(get_flush_to_zero(fpst), statusp);
98
case 0x2: /* SDOT / UDOT */
40
- set_flush_inputs_to_zero(get_flush_inputs_to_zero(fpst), statusp);
99
- gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
41
- set_float_rounding_mode(get_float_rounding_mode(fpst), statusp);
100
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
42
-
101
u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
43
/* EBF=1 needs to do a step with round-to-odd semantics */
102
return;
44
*oddstatusp = *statusp;
103
45
set_float_rounding_mode(float_round_to_odd, oddstatusp);
104
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
46
+ } else {
105
switch (16 * u + opcode) {
47
+ set_flush_to_zero(true, statusp);
106
case 0x0e: /* SDOT */
48
+ set_flush_inputs_to_zero(true, statusp);
107
case 0x1e: /* UDOT */
49
+ set_float_rounding_mode(float_round_to_odd_inf, statusp);
108
- gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
109
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
110
u ? gen_helper_gvec_udot_idx_b
111
: gen_helper_gvec_sdot_idx_b);
112
return;
113
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
114
index XXXXXXX..XXXXXXX 100644
115
--- a/target/arm/translate-neon.c
116
+++ b/target/arm/translate-neon.c
117
@@ -XXX,XX +XXX,XX @@ static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
118
static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
119
{
120
int opr_sz;
121
- gen_helper_gvec_3 *fn_gvec;
122
+ gen_helper_gvec_4 *fn_gvec;
123
124
if (!dc_isar_feature(aa32_dp, s)) {
125
return false;
126
@@ -XXX,XX +XXX,XX @@ static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
127
128
opr_sz = (1 + a->q) * 8;
129
fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
130
- tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
131
+ tcg_gen_gvec_4_ool(vfp_reg_offset(1, a->vd),
132
vfp_reg_offset(1, a->vn),
133
vfp_reg_offset(1, a->vm),
134
+ vfp_reg_offset(1, a->vd),
135
opr_sz, opr_sz, 0, fn_gvec);
136
return true;
137
}
138
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
139
140
static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
141
{
142
- gen_helper_gvec_3 *fn_gvec;
143
+ gen_helper_gvec_4 *fn_gvec;
144
int opr_sz;
145
TCGv_ptr fpst;
146
147
@@ -XXX,XX +XXX,XX @@ static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
148
fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
149
opr_sz = (1 + a->q) * 8;
150
fpst = fpstatus_ptr(FPST_STD);
151
- tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
152
+ tcg_gen_gvec_4_ool(vfp_reg_offset(1, a->vd),
153
vfp_reg_offset(1, a->vn),
154
vfp_reg_offset(1, a->rm),
155
+ vfp_reg_offset(1, a->vd),
156
opr_sz, opr_sz, a->index, fn_gvec);
157
tcg_temp_free_ptr(fpst);
158
return true;
159
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/target/arm/translate-sve.c
162
+++ b/target/arm/translate-sve.c
163
@@ -XXX,XX +XXX,XX @@ DO_ZZI(UMIN, umin)
164
165
#undef DO_ZZI
166
167
-static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a)
168
+static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a)
169
{
170
- static gen_helper_gvec_3 * const fns[2][2] = {
171
+ static gen_helper_gvec_4 * const fns[2][2] = {
172
{ gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
173
{ gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
174
};
175
176
if (sve_access_check(s)) {
177
- gen_gvec_ool_zzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, 0);
178
+ gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0);
179
}
180
return true;
181
}
182
183
-static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a)
184
+static bool trans_DOT_zzxw(DisasContext *s, arg_DOT_zzxw *a)
185
{
186
- static gen_helper_gvec_3 * const fns[2][2] = {
187
+ static gen_helper_gvec_4 * const fns[2][2] = {
188
{ gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h },
189
{ gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h }
190
};
191
192
if (sve_access_check(s)) {
193
- gen_gvec_ool_zzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->index);
194
+ gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm,
195
+ a->ra, a->index);
196
}
197
return true;
198
}
199
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/arm/vec_helper.c
202
+++ b/target/arm/vec_helper.c
203
@@ -XXX,XX +XXX,XX @@ void HELPER(sve2_sqrdmlsh_d)(void *vd, void *vn, void *vm,
204
* All elements are treated equally, no matter where they are.
205
*/
206
207
-void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc)
208
+void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
209
{
210
intptr_t i, opr_sz = simd_oprsz(desc);
211
- int32_t *d = vd;
212
+ int32_t *d = vd, *a = va;
213
int8_t *n = vn, *m = vm;
214
215
for (i = 0; i < opr_sz / 4; ++i) {
216
- d[i] += n[i * 4 + 0] * m[i * 4 + 0]
217
- + n[i * 4 + 1] * m[i * 4 + 1]
218
- + n[i * 4 + 2] * m[i * 4 + 2]
219
- + n[i * 4 + 3] * m[i * 4 + 3];
220
+ d[i] = (a[i] +
221
+ n[i * 4 + 0] * m[i * 4 + 0] +
222
+ n[i * 4 + 1] * m[i * 4 + 1] +
223
+ n[i * 4 + 2] * m[i * 4 + 2] +
224
+ n[i * 4 + 3] * m[i * 4 + 3]);
225
}
226
clear_tail(d, opr_sz, simd_maxsz(desc));
227
}
228
229
-void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc)
230
+void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
231
{
232
intptr_t i, opr_sz = simd_oprsz(desc);
233
- uint32_t *d = vd;
234
+ uint32_t *d = vd, *a = va;
235
uint8_t *n = vn, *m = vm;
236
237
for (i = 0; i < opr_sz / 4; ++i) {
238
- d[i] += n[i * 4 + 0] * m[i * 4 + 0]
239
- + n[i * 4 + 1] * m[i * 4 + 1]
240
- + n[i * 4 + 2] * m[i * 4 + 2]
241
- + n[i * 4 + 3] * m[i * 4 + 3];
242
+ d[i] = (a[i] +
243
+ n[i * 4 + 0] * m[i * 4 + 0] +
244
+ n[i * 4 + 1] * m[i * 4 + 1] +
245
+ n[i * 4 + 2] * m[i * 4 + 2] +
246
+ n[i * 4 + 3] * m[i * 4 + 3]);
247
}
248
clear_tail(d, opr_sz, simd_maxsz(desc));
249
}
250
251
-void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc)
252
+void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
253
{
254
intptr_t i, opr_sz = simd_oprsz(desc);
255
- int64_t *d = vd;
256
+ int64_t *d = vd, *a = va;
257
int16_t *n = vn, *m = vm;
258
259
for (i = 0; i < opr_sz / 8; ++i) {
260
- d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0]
261
- + (int64_t)n[i * 4 + 1] * m[i * 4 + 1]
262
- + (int64_t)n[i * 4 + 2] * m[i * 4 + 2]
263
- + (int64_t)n[i * 4 + 3] * m[i * 4 + 3];
264
+ d[i] = (a[i] +
265
+ (int64_t)n[i * 4 + 0] * m[i * 4 + 0] +
266
+ (int64_t)n[i * 4 + 1] * m[i * 4 + 1] +
267
+ (int64_t)n[i * 4 + 2] * m[i * 4 + 2] +
268
+ (int64_t)n[i * 4 + 3] * m[i * 4 + 3]);
269
}
270
clear_tail(d, opr_sz, simd_maxsz(desc));
271
}
272
273
-void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
274
+void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
275
{
276
intptr_t i, opr_sz = simd_oprsz(desc);
277
- uint64_t *d = vd;
278
+ uint64_t *d = vd, *a = va;
279
uint16_t *n = vn, *m = vm;
280
281
for (i = 0; i < opr_sz / 8; ++i) {
282
- d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0]
283
- + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1]
284
- + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2]
285
- + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3];
286
+ d[i] = (a[i] +
287
+ (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] +
288
+ (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] +
289
+ (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] +
290
+ (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]);
291
}
292
clear_tail(d, opr_sz, simd_maxsz(desc));
293
}
294
295
-void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
296
+void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm,
297
+ void *va, uint32_t desc)
298
{
299
intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
300
intptr_t index = simd_data(desc);
301
- int32_t *d = vd;
302
+ int32_t *d = vd, *a = va;
303
int8_t *n = vn;
304
int8_t *m_indexed = (int8_t *)vm + H4(index) * 4;
305
306
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
307
int8_t m3 = m_indexed[i * 4 + 3];
308
309
do {
310
- d[i] += n[i * 4 + 0] * m0
311
- + n[i * 4 + 1] * m1
312
- + n[i * 4 + 2] * m2
313
- + n[i * 4 + 3] * m3;
314
+ d[i] = (a[i] +
315
+ n[i * 4 + 0] * m0 +
316
+ n[i * 4 + 1] * m1 +
317
+ n[i * 4 + 2] * m2 +
318
+ n[i * 4 + 3] * m3);
319
} while (++i < segend);
320
segend = i + 4;
321
} while (i < opr_sz_4);
322
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
323
clear_tail(d, opr_sz, simd_maxsz(desc));
324
}
325
326
-void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
327
+void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm,
328
+ void *va, uint32_t desc)
329
{
330
intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
331
intptr_t index = simd_data(desc);
332
- uint32_t *d = vd;
333
+ uint32_t *d = vd, *a = va;
334
uint8_t *n = vn;
335
uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4;
336
337
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
338
uint8_t m3 = m_indexed[i * 4 + 3];
339
340
do {
341
- d[i] += n[i * 4 + 0] * m0
342
- + n[i * 4 + 1] * m1
343
- + n[i * 4 + 2] * m2
344
- + n[i * 4 + 3] * m3;
345
+ d[i] = (a[i] +
346
+ n[i * 4 + 0] * m0 +
347
+ n[i * 4 + 1] * m1 +
348
+ n[i * 4 + 2] * m2 +
349
+ n[i * 4 + 3] * m3);
350
} while (++i < segend);
351
segend = i + 4;
352
} while (i < opr_sz_4);
353
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
354
clear_tail(d, opr_sz, simd_maxsz(desc));
355
}
356
357
-void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
358
+void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm,
359
+ void *va, uint32_t desc)
360
{
361
intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
362
intptr_t index = simd_data(desc);
363
- int64_t *d = vd;
364
+ int64_t *d = vd, *a = va;
365
int16_t *n = vn;
366
int16_t *m_indexed = (int16_t *)vm + index * 4;
367
368
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
369
* Process the entire segment all at once, writing back the results
370
* only after we've consumed all of the inputs.
371
*/
372
- for (i = 0; i < opr_sz_8 ; i += 2) {
373
- uint64_t d0, d1;
374
+ for (i = 0; i < opr_sz_8; i += 2) {
375
+ int64_t d0, d1;
376
377
- d0 = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
378
+ d0 = a[i + 0];
379
+ d0 += n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
380
d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
381
d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
382
d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
383
- d1 = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
384
+
385
+ d1 = a[i + 1];
386
+ d1 += n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
387
d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
388
d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
389
d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];
390
391
- d[i + 0] += d0;
392
- d[i + 1] += d1;
393
+ d[i + 0] = d0;
394
+ d[i + 1] = d1;
395
}
50
}
396
-
51
-
397
clear_tail(d, opr_sz, simd_maxsz(desc));
52
return ebf;
398
}
53
}
399
54
400
-void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
401
+void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm,
402
+ void *va, uint32_t desc)
403
{
404
intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
405
intptr_t index = simd_data(desc);
406
- uint64_t *d = vd;
407
+ uint64_t *d = vd, *a = va;
408
uint16_t *n = vn;
409
uint16_t *m_indexed = (uint16_t *)vm + index * 4;
410
411
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
412
* Process the entire segment all at once, writing back the results
413
* only after we've consumed all of the inputs.
414
*/
415
- for (i = 0; i < opr_sz_8 ; i += 2) {
416
+ for (i = 0; i < opr_sz_8; i += 2) {
417
uint64_t d0, d1;
418
419
- d0 = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
420
+ d0 = a[i + 0];
421
+ d0 += n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
422
d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
423
d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
424
d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
425
- d1 = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
426
+
427
+ d1 = a[i + 1];
428
+ d1 += n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
429
d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
430
d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
431
d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];
432
433
- d[i + 0] += d0;
434
- d[i + 1] += d1;
435
+ d[i + 0] = d0;
436
+ d[i + 1] = d1;
437
}
438
-
439
clear_tail(d, opr_sz, simd_maxsz(desc));
440
}
441
442
--
55
--
443
2.20.1
56
2.34.1
444
57
445
58
diff view generated by jsdifflib
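[Editorial sketch, not part of the series.] To make the 4-operand
dot-product change above concrete: the accumulator is passed to the
helper explicitly because, once movprfx is optimized, the value being
accumulated into may live in a different register than the
destination. The function name below is invented; this is
illustrative C only, not QEMU code.

    #include <stdint.h>

    /* d[i] = a[i] + dot(n, m) over groups of four signed bytes */
    static void sdot_b_sketch(int32_t *d, const int32_t *a,
                              const int8_t *n, const int8_t *m, int elems)
    {
        for (int i = 0; i < elems; i++) {
            d[i] = a[i]
                 + n[i * 4 + 0] * m[i * 4 + 0]
                 + n[i * 4 + 1] * m[i * 4 + 1]
                 + n[i * 4 + 2] * m[i * 4 + 2]
                 + n[i * 4 + 3] * m[i * 4 + 3];
        }
    }

For the existing AdvSIMD encodings the translator simply passes the
destination register as 'ra' as well, which reproduces the old
in-place d[i] += ... behaviour.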
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-28-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h | 16 +++++++
 target/arm/sve.decode | 4 ++
 target/arm/sve_helper.c | 35 ++++++++++++++
 target/arm/translate-sve.c | 98 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 153 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_rshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_rshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_rshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl
 ## SVE2 bitwise shift right narrow
 
 # Bit 23 == 0 is handled by esz > 0 in the translator.
+SQSHRUNB 01000101 .. 1 ..... 00 0000 ..... ..... @rd_rn_tszimm_shr
+SQSHRUNT 01000101 .. 1 ..... 00 0001 ..... ..... @rd_rn_tszimm_shr
+SQRSHRUNB 01000101 .. 1 ..... 00 0010 ..... ..... @rd_rn_tszimm_shr
+SQRSHRUNT 01000101 .. 1 ..... 00 0011 ..... ..... @rd_rn_tszimm_shr
 SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr
 SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr
 RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh)
     }
 }
 
+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else {
+        /* Rounding the sign bit always produces 0. */
+        return 0;
+    }
+}
+
 DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR)
 DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR)
 DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR)
@@ -XXX,XX +XXX,XX @@ DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr)
 DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr)
 DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, , H1_4, do_urshr)
 
+#define DO_SQSHRUN_H(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT8_MAX)
+#define DO_SQSHRUN_S(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT16_MAX)
+#define DO_SQSHRUN_D(x, sh) \
+    do_sat_bhs((int64_t)(x) >> (sh < 64 ? sh : 63), 0, UINT32_MAX)
+
+DO_SHRNB(sve2_sqshrunb_h, int16_t, uint8_t, DO_SQSHRUN_H)
+DO_SHRNB(sve2_sqshrunb_s, int32_t, uint16_t, DO_SQSHRUN_S)
+DO_SHRNB(sve2_sqshrunb_d, int64_t, uint32_t, DO_SQSHRUN_D)
+
+DO_SHRNT(sve2_sqshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRUN_H)
+DO_SHRNT(sve2_sqshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRUN_S)
+DO_SHRNT(sve2_sqshrunt_d, int64_t, uint32_t, , H1_4, DO_SQSHRUN_D)
+
+#define DO_SQRSHRUN_H(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT8_MAX)
+#define DO_SQRSHRUN_S(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT16_MAX)
+#define DO_SQRSHRUN_D(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT32_MAX)
+
+DO_SHRNB(sve2_sqrshrunb_h, int16_t, uint8_t, DO_SQRSHRUN_H)
+DO_SHRNB(sve2_sqrshrunb_s, int32_t, uint16_t, DO_SQRSHRUN_S)
+DO_SHRNB(sve2_sqrshrunb_d, int64_t, uint32_t, DO_SQRSHRUN_D)
+
+DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
+DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
+DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D)
+
 #undef DO_SHRNB
 #undef DO_SHRNT
 
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a)
     return do_sve2_shr_narrow(s, a, ops);
 }
 
+static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
+                             TCGv_vec n, int64_t shr)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+
+    tcg_gen_sari_vec(vece, n, n, shr);
+    tcg_gen_dupi_vec(vece, t, 0);
+    tcg_gen_smax_vec(vece, n, n, t);
+    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+    tcg_gen_umin_vec(vece, d, n, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const TCGOpcode vec_list[] = {
+        INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
+    };
+    static const GVecGen2i ops[3] = {
+        { .fniv = gen_sqshrunb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_sqshrunb_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqshrunb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_sqshrunb_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqshrunb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_sqshrunb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
+                             TCGv_vec n, int64_t shr)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+
+    tcg_gen_sari_vec(vece, n, n, shr);
+    tcg_gen_dupi_vec(vece, t, 0);
+    tcg_gen_smax_vec(vece, n, n, t);
+    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+    tcg_gen_umin_vec(vece, n, n, t);
+    tcg_gen_shli_vec(vece, n, n, halfbits);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const TCGOpcode vec_list[] = {
+        INDEX_op_shli_vec, INDEX_op_sari_vec,
+        INDEX_op_smax_vec, INDEX_op_umin_vec, 0
+    };
+    static const GVecGen2i ops[3] = {
+        { .fniv = gen_sqshrunt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqshrunt_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqshrunt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqshrunt_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqshrunt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqshrunt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2i ops[3] = {
+        { .fno = gen_helper_sve2_sqrshrunb_h },
+        { .fno = gen_helper_sve2_sqrshrunb_s },
+        { .fno = gen_helper_sve2_sqrshrunb_d },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2i ops[3] = {
+        { .fno = gen_helper_sve2_sqrshrunt_h },
+        { .fno = gen_helper_sve2_sqrshrunt_s },
+        { .fno = gen_helper_sve2_sqrshrunt_d },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
 static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
                             gen_helper_gvec_4_ptr *fn)
 {
--
2.20.1


Currently we hardcode the default NaN value in parts64_default_nan()
using a compile-time ifdef ladder. This is awkward for two cases:
 * for single-QEMU-binary we can't hard-code target-specifics like this
 * for Arm FEAT_AFP the default NaN value depends on FPCR.AH
   (specifically the sign bit is different)

Add a field to float_status to specify the default NaN value; fall
back to the old ifdef behaviour if it is not set.

The default NaN value is specified by setting a uint8_t to a
pattern corresponding to the sign and upper fraction parts of
the NaN; the lower bits of the fraction are set from bit 0 of
the pattern.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-35-peter.maydell@linaro.org
---
 include/fpu/softfloat-helpers.h | 11 +++++++
 include/fpu/softfloat-types.h | 10 ++++++
 fpu/softfloat-specialize.c.inc | 55 ++++++++++++++++++++-------------
 3 files changed, 54 insertions(+), 22 deletions(-)

diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-helpers.h
+++ b/include/fpu/softfloat-helpers.h
@@ -XXX,XX +XXX,XX @@ static inline void set_float_infzeronan_rule(FloatInfZeroNaNRule rule,
     status->float_infzeronan_rule = rule;
 }
 
+static inline void set_float_default_nan_pattern(uint8_t dnan_pattern,
+                                                 float_status *status)
+{
+    status->default_nan_pattern = dnan_pattern;
+}
+
 static inline void set_flush_to_zero(bool val, float_status *status)
 {
     status->flush_to_zero = val;
@@ -XXX,XX +XXX,XX @@ static inline FloatInfZeroNaNRule get_float_infzeronan_rule(float_status *status
     return status->float_infzeronan_rule;
 }
 
+static inline uint8_t get_float_default_nan_pattern(float_status *status)
+{
+    return status->default_nan_pattern;
+}
+
 static inline bool get_flush_to_zero(float_status *status)
 {
     return status->flush_to_zero;
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-types.h
+++ b/include/fpu/softfloat-types.h
@@ -XXX,XX +XXX,XX @@ typedef struct float_status {
     /* should denormalised inputs go to zero and set the input_denormal flag? */
     bool flush_inputs_to_zero;
     bool default_nan_mode;
+    /*
+     * The pattern to use for the default NaN. Here the high bit specifies
+     * the default NaN's sign bit, and bits 6..0 specify the high bits of the
+     * fractional part. The low bits of the fractional part are copies of bit 0.
+     * The exponent of the default NaN is (as for any NaN) always all 1s.
+     * Note that a value of 0 here is not a valid NaN. The target must set
+     * this to the correct non-zero value, or we will assert when trying to
+     * create a default NaN.
+     */
+    uint8_t default_nan_pattern;
     /*
      * The flags below are not used on all specializations and may
      * constant fold away (see snan_bit_is_one()/no_signalling_nans() in
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
 {
     bool sign = 0;
     uint64_t frac;
+    uint8_t dnan_pattern = status->default_nan_pattern;
 
+    if (dnan_pattern == 0) {
 #if defined(TARGET_SPARC) || defined(TARGET_M68K)
-    /* !snan_bit_is_one, set all bits */
-    frac = (1ULL << DECOMPOSED_BINARY_POINT) - 1;
-#elif defined(TARGET_I386) || defined(TARGET_X86_64) \
+    /* Sign bit clear, all frac bits set */
+    dnan_pattern = 0b01111111;
+#elif defined(TARGET_I386) || defined(TARGET_X86_64) \
     || defined(TARGET_MICROBLAZE)
-    /* !snan_bit_is_one, set sign and msb */
-    frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1);
-    sign = 1;
+    /* Sign bit set, most significant frac bit set */
+    dnan_pattern = 0b11000000;
 #elif defined(TARGET_HPPA)
-    /* snan_bit_is_one, set msb-1.  */
-    frac = 1ULL << (DECOMPOSED_BINARY_POINT - 2);
+    /* Sign bit clear, msb-1 frac bit set */
+    dnan_pattern = 0b00100000;
 #elif defined(TARGET_HEXAGON)
-    sign = 1;
-    frac = ~0ULL;
+    /* Sign bit set, all frac bits set. */
+    dnan_pattern = 0b11111111;
 #else
-    /*
-     * This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V,
-     * S390, SH4, TriCore, and Xtensa.  Our other supported targets
-     * do not have floating-point.
-     */
-    if (snan_bit_is_one(status)) {
-        /* set all bits other than msb */
-        frac = (1ULL << (DECOMPOSED_BINARY_POINT - 1)) - 1;
-    } else {
-        /* set msb */
-        frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1);
-    }
+    /*
+     * This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V,
+     * S390, SH4, TriCore, and Xtensa.  Our other supported targets
+     * do not have floating-point.
+     */
+    if (snan_bit_is_one(status)) {
+        /* sign bit clear, set all frac bits other than msb */
+        dnan_pattern = 0b00111111;
+    } else {
+        /* sign bit clear, set frac msb */
+        dnan_pattern = 0b01000000;
+    }
 #endif
+    }
+    assert(dnan_pattern != 0);
+
+    sign = dnan_pattern >> 7;
+    /*
+     * Place default_nan_pattern [6:0] into bits [62:56],
+     * and replicate bit [0] down into [55:0]
+     */
+    frac = deposit64(0, DECOMPOSED_BINARY_POINT - 7, 7, dnan_pattern);
+    frac = deposit64(frac, 0, DECOMPOSED_BINARY_POINT - 7, -(dnan_pattern & 1));
 
     *p = (FloatParts64) {
         .cls = float_class_qnan,
--
2.34.1
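[Editorial sketch, not part of the series.] To make the pattern
encoding in the float_status patch above concrete, here is a small
illustrative expansion of the 8-bit pattern into an IEEE float64 bit
pattern, mirroring what the deposit64() calls do. The function name
is invented; this is not QEMU code.

    #include <stdint.h>

    static uint64_t dnan_pattern_to_f64(uint8_t pattern)
    {
        uint64_t sign = (uint64_t)(pattern >> 7) << 63;
        uint64_t exp = 0x7ffULL << 52;                    /* all-ones exponent */
        uint64_t frac = (uint64_t)(pattern & 0x7f) << 45; /* frac bits 51..45 */
        if (pattern & 1) {
            frac |= (1ULL << 45) - 1;  /* replicate bit 0 into frac bits 44..0 */
        }
        return sign | exp | frac;
    }

For example, the usual Arm pattern 0b01000000 expands to
0x7ff8000000000000, and the x86 pattern 0b11000000 to
0xfff8000000000000.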
Set the default NaN pattern explicitly for the tests/fp code.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-36-peter.maydell@linaro.org
---
 tests/fp/fp-bench.c | 1 +
 tests/fp/fp-test-log2.c | 1 +
 tests/fp/fp-test.c | 1 +
 3 files changed, 3 insertions(+)

diff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/fp/fp-bench.c
+++ b/tests/fp/fp-bench.c
@@ -XXX,XX +XXX,XX @@ static void run_bench(void)
     set_float_2nan_prop_rule(float_2nan_prop_s_ab, &soft_status);
     set_float_3nan_prop_rule(float_3nan_prop_s_cab, &soft_status);
     set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &soft_status);
+    set_float_default_nan_pattern(0b01000000, &soft_status);
 
     f = bench_funcs[operation][precision];
     g_assert(f);
diff --git a/tests/fp/fp-test-log2.c b/tests/fp/fp-test-log2.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/fp/fp-test-log2.c
+++ b/tests/fp/fp-test-log2.c
@@ -XXX,XX +XXX,XX @@ int main(int ac, char **av)
     int i;
 
     set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf);
+    set_float_default_nan_pattern(0b01000000, &qsf);
     set_float_rounding_mode(float_round_nearest_even, &qsf);
 
     test.d = 0.0;
diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/fp/fp-test.c
+++ b/tests/fp/fp-test.c
@@ -XXX,XX +XXX,XX @@ void run_test(void)
      */
     set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf);
     set_float_3nan_prop_rule(float_3nan_prop_s_cab, &qsf);
+    set_float_default_nan_pattern(0b01000000, &qsf);
     set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &qsf);
 
     genCases_setLevel(test_level);
--
2.34.1


From: Stephen Long <steplong@quicinc.com>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Stephen Long <steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-41-richard.henderson@linaro.org
Message-Id: <20200417162231.10374-4-steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h | 8 ++++++++
 target/arm/sve.decode | 2 ++
 target/arm/sve_helper.c | 10 ++++++++++
 target/arm/translate-sve.c | 3 +++
 4 files changed, 23 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_raddhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_raddhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_raddhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve2_subhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_subhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG,
                    i32, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ ADDHNB 01000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm
 ADDHNT 01000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm
 RADDHNB 01000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm
 RADDHNT 01000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm
+SUBHNB 01000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm
+SUBHNT 01000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm
 
 ### SVE2 Character Match
 
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
 
 #define DO_ADDHN(N, M, SH) ((N + M) >> SH)
 #define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH)
+#define DO_SUBHN(N, M, SH) ((N - M) >> SH)
 
 DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN)
 DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN)
@@ -XXX,XX +XXX,XX @@ DO_BINOPNT(sve2_raddhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RADDHN)
 DO_BINOPNT(sve2_raddhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RADDHN)
 DO_BINOPNT(sve2_raddhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RADDHN)
 
+DO_BINOPNB(sve2_subhnb_h, uint16_t, uint8_t, 8, DO_SUBHN)
+DO_BINOPNB(sve2_subhnb_s, uint32_t, uint16_t, 16, DO_SUBHN)
+DO_BINOPNB(sve2_subhnb_d, uint64_t, uint32_t, 32, DO_SUBHN)
+
+DO_BINOPNT(sve2_subhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_SUBHN)
+DO_BINOPNT(sve2_subhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_SUBHN)
+DO_BINOPNT(sve2_subhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_SUBHN)
+
+#undef DO_SUBHN
 #undef DO_RADDHN
 #undef DO_ADDHN
 
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
 DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
 DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)
 
+DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
+DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
+
 static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
                                gen_helper_gvec_flags_4 *fn)
 {
--
2.20.1
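[Editorial sketch, not part of the series.] A worked example for the
SUBHNB/SUBHNT patch above, in plain C (illustrative only, not QEMU
code). "Narrow high part" keeps the top half of the wide difference;
the B form writes the even-numbered (bottom) narrow elements of the
destination and the T form the odd-numbered (top) ones.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t n = 0x8765, m = 0x0123;
        uint16_t diff = n - m;        /* 0x8642 */
        uint8_t narrow = diff >> 8;   /* 0x86, the retained high byte */
        assert(narrow == 0x86);
        return 0;
    }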
Set the default NaN pattern explicitly, and remove the ifdef from
parts64_default_nan().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-37-peter.maydell@linaro.org
---
 target/microblaze/cpu.c | 2 ++
 fpu/softfloat-specialize.c.inc | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_reset_hold(Object *obj, ResetType type)
      * this architecture.
      */
     set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status);
+    /* Default NaN: sign bit set, most significant frac bit set */
+    set_float_default_nan_pattern(0b11000000, &env->fp_status);
 
 #if defined(CONFIG_USER_ONLY)
     /* start in user mode with interrupts enabled. */
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
 #if defined(TARGET_SPARC) || defined(TARGET_M68K)
     /* Sign bit clear, all frac bits set */
     dnan_pattern = 0b01111111;
-#elif defined(TARGET_I386) || defined(TARGET_X86_64) \
-    || defined(TARGET_MICROBLAZE)
+#elif defined(TARGET_I386) || defined(TARGET_X86_64)
     /* Sign bit set, most significant frac bit set */
     dnan_pattern = 0b11000000;
 #elif defined(TARGET_HPPA)
--
2.34.1


From: Stephen Long <steplong@quicinc.com>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Stephen Long <steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-40-richard.henderson@linaro.org
Message-Id: <20200417162231.10374-3-steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h | 8 ++++++++
 target/arm/sve.decode | 2 ++
 target/arm/sve_helper.c | 10 ++++++++++
 target/arm/translate-sve.c | 2 ++
 4 files changed, 22 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_addhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_addhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_addhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve2_raddhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_raddhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG,
                    i32, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr
 
 ADDHNB 01000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm
 ADDHNT 01000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm
+RADDHNB 01000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm
+RADDHNT 01000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm
 
 ### SVE2 Character Match
 
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
 }
 
 #define DO_ADDHN(N, M, SH) ((N + M) >> SH)
+#define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH)
 
 DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN)
 DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN)
@@ -XXX,XX +XXX,XX @@ DO_BINOPNT(sve2_addhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_ADDHN)
 DO_BINOPNT(sve2_addhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_ADDHN)
 DO_BINOPNT(sve2_addhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_ADDHN)
 
+DO_BINOPNB(sve2_raddhnb_h, uint16_t, uint8_t, 8, DO_RADDHN)
+DO_BINOPNB(sve2_raddhnb_s, uint32_t, uint16_t, 16, DO_RADDHN)
+DO_BINOPNB(sve2_raddhnb_d, uint64_t, uint32_t, 32, DO_RADDHN)
+
+DO_BINOPNT(sve2_raddhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RADDHN)
+DO_BINOPNT(sve2_raddhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RADDHN)
+DO_BINOPNT(sve2_raddhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RADDHN)
+
+#undef DO_RADDHN
 #undef DO_ADDHN
 
 #undef DO_BINOPNB
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
 
 DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb)
 DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
+DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
+DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)
 
 static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
                                gen_helper_gvec_flags_4 *fn)
--
2.20.1
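[Editorial sketch, not part of the series.] To see what the rounding
in DO_RADDHN buys over plain DO_ADDHN: the bias of 1 << (SH - 1) is
half of one narrow element, so the retained high half is rounded
rather than truncated. Illustrative C only, not QEMU code.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t n = 0x1280, m = 0x0100;
        uint16_t sum = n + m;                          /* 0x1380 */
        uint8_t addhn = sum >> 8;                      /* 0x13: truncated */
        uint8_t raddhn = (uint16_t)(sum + 0x80) >> 8;  /* 0x14: rounded up */
        assert(addhn == 0x13 && raddhn == 0x14);
        return 0;
    }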
Set the default NaN pattern explicitly, and remove the ifdef from
parts64_default_nan().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-38-peter.maydell@linaro.org
---
 target/i386/tcg/fpu_helper.c | 4 ++++
 fpu/softfloat-specialize.c.inc | 3 ---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/fpu_helper.c
+++ b/target/i386/tcg/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_init_fp_statuses(CPUX86State *env)
      */
     set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->sse_status);
     set_float_3nan_prop_rule(float_3nan_prop_abc, &env->sse_status);
+    /* Default NaN: sign bit set, most significant frac bit set */
+    set_float_default_nan_pattern(0b11000000, &env->fp_status);
+    set_float_default_nan_pattern(0b11000000, &env->mmx_status);
+    set_float_default_nan_pattern(0b11000000, &env->sse_status);
 }
 
 static inline uint8_t save_exception_flags(CPUX86State *env)
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
 #if defined(TARGET_SPARC) || defined(TARGET_M68K)
     /* Sign bit clear, all frac bits set */
     dnan_pattern = 0b01111111;
-#elif defined(TARGET_I386) || defined(TARGET_X86_64)
-    /* Sign bit set, most significant frac bit set */
-    dnan_pattern = 0b11000000;
 #elif defined(TARGET_HPPA)
     /* Sign bit clear, msb-1 frac bit set */
     dnan_pattern = 0b00100000;
--
2.34.1


From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 5 +++
 target/arm/helper-sve.h | 15 ++++++++
 target/arm/sve.decode | 6 ++++
 target/arm/sve_helper.c | 73 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 36 +++++++++++++++++++
 5 files changed, 135 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2;
 }
 
+static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
+}
+
 /*
  * Feature tests for "does this exist in either 32-bit or 64-bit?"
  */
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_eoril_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_eoril_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_eoril_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_eoril_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bext_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bdep_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bgrp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl
 
 EORBT 01000101 .. 0 ..... 10010 0 ..... ..... @rd_rn_rm
 EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm
+
+## SVE2 bitwise permute
+
+BEXT 01000101 .. 0 ..... 1011 00 ..... ..... @rd_rn_rm
+BDEP 01000101 .. 0 ..... 1011 01 ..... ..... @rd_rn_rm
+BGRP 01000101 .. 0 ..... 1011 10 ..... ..... @rd_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZ_NTB(sve2_eoril_d, uint64_t, , DO_EOR)
 
 #undef DO_ZZZ_NTB
 
+#define DO_BITPERM(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+        TYPE nn = *(TYPE *)(vn + i); \
+        TYPE mm = *(TYPE *)(vm + i); \
+        *(TYPE *)(vd + i) = OP(nn, mm, sizeof(TYPE) * 8); \
+    } \
+}
+
+static uint64_t bitextract(uint64_t data, uint64_t mask, int n)
+{
+    uint64_t res = 0;
+    int db, rb = 0;
+
+    for (db = 0; db < n; ++db) {
+        if ((mask >> db) & 1) {
+            res |= ((data >> db) & 1) << rb;
+            ++rb;
+        }
+    }
+    return res;
+}
+
+DO_BITPERM(sve2_bext_b, uint8_t, bitextract)
+DO_BITPERM(sve2_bext_h, uint16_t, bitextract)
+DO_BITPERM(sve2_bext_s, uint32_t, bitextract)
+DO_BITPERM(sve2_bext_d, uint64_t, bitextract)
+
+static uint64_t bitdeposit(uint64_t data, uint64_t mask, int n)
+{
+    uint64_t res = 0;
+    int rb, db = 0;
+
+    for (rb = 0; rb < n; ++rb) {
+        if ((mask >> rb) & 1) {
+            res |= ((data >> db) & 1) << rb;
+            ++db;
+        }
+    }
+    return res;
+}
+
+DO_BITPERM(sve2_bdep_b, uint8_t, bitdeposit)
+DO_BITPERM(sve2_bdep_h, uint16_t, bitdeposit)
+DO_BITPERM(sve2_bdep_s, uint32_t, bitdeposit)
+DO_BITPERM(sve2_bdep_d, uint64_t, bitdeposit)
+
+static uint64_t bitgroup(uint64_t data, uint64_t mask, int n)
+{
+    uint64_t resm = 0, resu = 0;
+    int db, rbm = 0, rbu = 0;
+
+    for (db = 0; db < n; ++db) {
+        uint64_t val = (data >> db) & 1;
+        if ((mask >> db) & 1) {
+            resm |= val << rbm++;
+        } else {
+            resu |= val << rbu++;
+        }
+    }
+
+    return resm | (resu << rbm);
+}
+
+DO_BITPERM(sve2_bgrp_b, uint8_t, bitgroup)
+DO_BITPERM(sve2_bgrp_h, uint16_t, bitgroup)
+DO_BITPERM(sve2_bgrp_s, uint32_t, bitgroup)
+DO_BITPERM(sve2_bgrp_d, uint64_t, bitgroup)
+
+#undef DO_BITPERM
+
 #define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \
 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
 { \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
 {
     return do_sve2_shll_tb(s, a, true, true);
 }
+
+static bool trans_BEXT(DisasContext *s, arg_rrr_esz *a)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
+        gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
+    };
+    if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+        return false;
+    }
+    return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_BDEP(DisasContext *s, arg_rrr_esz *a)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
+        gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
+    };
+    if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+        return false;
+    }
+    return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_BGRP(DisasContext *s, arg_rrr_esz *a)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
+        gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
+    };
+    if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+        return false;
+    }
+    return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
--
2.20.1
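[Editorial sketch, not part of the series.] A worked example for the
bit-permute patch above. bext8() below simply restates the
bitextract() algorithm from the patch on one byte; it is illustrative
C, not QEMU code, and the name is invented.

    #include <assert.h>
    #include <stdint.h>

    static uint8_t bext8(uint8_t data, uint8_t mask)
    {
        uint8_t res = 0;
        int rb = 0;
        for (int db = 0; db < 8; db++) {
            if ((mask >> db) & 1) {
                res |= ((data >> db) & 1) << rb++;
            }
        }
        return res;
    }

    int main(void)
    {
        /* data = 0b10110100, mask = 0b11110000:
         * BEXT packs the masked bits 1011 at the bottom: 0b00001011.
         * BDEP would scatter the low data bits 0100 into the mask
         * positions: 0b01000000.  BGRP keeps the masked bits at the
         * bottom and the remaining bits above them: 0b01001011. */
        assert(bext8(0xb4, 0xf0) == 0x0b);
        return 0;
    }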
From: Rebecca Cran <rebecca@nuviainc.com>

ARMv8.4 adds the mandatory FEAT_TLBIOS. It provides TLBI
maintenance instructions that extend to the Outer Shareable domain.

Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210512182337.18563-3-rebecca@nuviainc.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 5 +++++
 target/arm/helper.c | 43 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
 }
 
+static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0;
+}
+
 static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
     REGINFO_SENTINEL
 };
 
+static const ARMCPRegInfo tlbios_reginfo[] = {
+    { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_vmalle1is_write },
+    { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_vmalle1is_write },
+    { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_alle2is_write },
+    { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_alle1is_write },
+    { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_alle1is_write },
+    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_alle3is_write },
+    REGINFO_SENTINEL
+};
+
 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     Error *err = NULL;
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (cpu_isar_feature(aa64_tlbirange, cpu)) {
         define_arm_cp_regs(cpu, tlbirange_reginfo);
     }
+    if (cpu_isar_feature(aa64_tlbios, cpu)) {
+        define_arm_cp_regs(cpu, tlbios_reginfo);
+    }
 #ifndef CONFIG_USER_ONLY
     /* Data Cache clean instructions up to PoP */
     if (cpu_isar_feature(aa64_dcpop, cpu)) {
--
2.20.1


Set the default NaN pattern explicitly, and remove the ifdef from
parts64_default_nan().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-39-peter.maydell@linaro.org
---
 target/hppa/fpu_helper.c | 2 ++
 fpu/softfloat-specialize.c.inc | 3 ---
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/fpu_helper.c
+++ b/target/hppa/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(loaded_fr0)(CPUHPPAState *env)
     set_float_3nan_prop_rule(float_3nan_prop_abc, &env->fp_status);
     /* For inf * 0 + NaN, return the input NaN */
     set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
+    /* Default NaN: sign bit clear, msb-1 frac bit set */
+    set_float_default_nan_pattern(0b00100000, &env->fp_status);
 }
 
 void cpu_hppa_loaded_fr0(CPUHPPAState *env)
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
 #if defined(TARGET_SPARC) || defined(TARGET_M68K)
     /* Sign bit clear, all frac bits set */
     dnan_pattern = 0b01111111;
-#elif defined(TARGET_HPPA)
-    /* Sign bit clear, msb-1 frac bit set */
-    dnan_pattern = 0b00100000;
 #elif defined(TARGET_HEXAGON)
     /* Sign bit set, all frac bits set. */
     dnan_pattern = 0b11111111;
--
2.34.1
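[Editorial sketch, not part of the series.] Expanding the hppa
pattern 0b00100000 with the same encoding as the earlier sketch
gives, if I have applied it correctly, a float32 default NaN of
0x7fa00000: sign clear, exponent all ones, fraction bit 21 (msb-1)
set. Illustrative C only, not QEMU code.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* pattern bits 6..0 land in float32 fraction bits 22..16 */
        uint32_t dnan32 = 0x7f800000u | ((uint32_t)0x20 << 16);
        assert(dnan32 == 0x7fa00000u);
        return 0;
    }

Since hppa uses the inverted signalling-NaN convention
(snan_bit_is_one, as noted in the ifdef ladder earlier in the
series), leaving the fraction msb clear is exactly what makes this
NaN quiet there.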
Set the default NaN pattern explicitly for the alpha target.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-40-peter.maydell@linaro.org
---
target/alpha/cpu.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_initfn(Object *obj)
* operand in Fa. That is float_2nan_prop_ba.
*/
set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status);
+ /* Default NaN: sign bit clear, msb frac bit set */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
#if defined(CONFIG_USER_ONLY)
env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN;
cpu_alpha_store_fpcr(env, (uint64_t)(FPCR_INVD | FPCR_DZED | FPCR_OVFD
--
2.34.1

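A note on the encoding used by every patch from here on: the 8-bit value passed
to set_float_default_nan_pattern() carries the sign in bit 7 and the most
significant seven fraction bits in bits [6:0], and (as the earlier patches in
this series define it) bit 0 is replicated into the remaining fraction bits,
which is how 0b01111111 can mean "all frac bits set". As a rough standalone
sketch of the expansion for single precision (the helper below is invented for
illustration and is not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Expand an 8-bit default-NaN pattern into a float32 bit pattern:
     * bit 7 -> sign, bits [6:0] -> fraction bits [22:16], with pattern
     * bit 0 replicated into fraction bits [15:0].
     */
    static uint32_t dnan_pattern_to_float32(uint8_t pattern)
    {
        uint32_t sign = (uint32_t)(pattern >> 7) << 31;
        uint32_t frac = (uint32_t)(pattern & 0x7f) << 16;
        if (pattern & 1) {
            frac |= 0xffff;
        }
        return sign | (0xffu << 23) | frac; /* all-ones exponent: NaN */
    }

    int main(void)
    {
        printf("0b01000000 -> 0x%08x\n", dnan_pattern_to_float32(0x40)); /* 0x7fc00000 */
        printf("0b01111111 -> 0x%08x\n", dnan_pattern_to_float32(0x7f)); /* 0x7fffffff */
        printf("0b00111111 -> 0x%08x\n", dnan_pattern_to_float32(0x3f)); /* 0x7fbfffff */
        return 0;
    }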
Set the default NaN pattern explicitly for the arm target.
This includes setting it for the old linux-user nwfpe emulation.
For nwfpe, our default doesn't match the real kernel, but we
avoid making a behaviour change in this commit.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-41-peter.maydell@linaro.org
---
linux-user/arm/nwfpe/fpa11.c | 5 +++++
target/arm/cpu.c | 2 ++
2 files changed, 7 insertions(+)

diff --git a/linux-user/arm/nwfpe/fpa11.c b/linux-user/arm/nwfpe/fpa11.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/arm/nwfpe/fpa11.c
+++ b/linux-user/arm/nwfpe/fpa11.c
@@ -XXX,XX +XXX,XX @@ void resetFPA11(void)
* this late date.
*/
set_float_2nan_prop_rule(float_2nan_prop_s_ab, &fpa11->fp_status);
+ /*
+ * Use the same default NaN value as Arm VFP. This doesn't match
+ * the Linux kernel's nwfpe emulation, which uses an all-1s value.
+ */
+ set_float_default_nan_pattern(0b01000000, &fpa11->fp_status);
}

void SetRoundingMode(const unsigned int opcode)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
* the pseudocode function the arguments are in the order c, a, b.
* * 0 * Inf + NaN returns the default NaN if the input NaN is quiet,
* and the input NaN if it is signalling
+ * * Default NaN has sign bit clear, msb frac bit set
*/
static void arm_set_default_fp_behaviours(float_status *s)
{
@@ -XXX,XX +XXX,XX @@ static void arm_set_default_fp_behaviours(float_status *s)
set_float_2nan_prop_rule(float_2nan_prop_s_ab, s);
set_float_3nan_prop_rule(float_3nan_prop_s_cab, s);
set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s);
+ set_float_default_nan_pattern(0b01000000, s);
}

static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
--
2.34.1

Set the default NaN pattern explicitly for loongarch.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-42-peter.maydell@linaro.org
---
target/loongarch/tcg/fpu_helper.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/target/loongarch/tcg/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/fpu_helper.c
+++ b/target/loongarch/tcg/fpu_helper.c
@@ -XXX,XX +XXX,XX @@ void restore_fp_status(CPULoongArchState *env)
*/
set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
set_float_3nan_prop_rule(float_3nan_prop_s_cab, &env->fp_status);
+ /* Default NaN: sign bit clear, msb frac bit set */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
}

int ieee_ex_to_loongarch(int xcpt)
--
2.34.1

Set the default NaN pattern explicitly for m68k.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-43-peter.maydell@linaro.org
---
target/m68k/cpu.c | 2 ++
fpu/softfloat-specialize.c.inc | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type)
* preceding paragraph for nonsignaling NaNs.
*/
set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status);
+ /* Default NaN: sign bit clear, all frac bits set */
+ set_float_default_nan_pattern(0b01111111, &env->fp_status);

nan = floatx80_default_nan(&env->fp_status);
for (i = 0; i < 8; i++) {
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
uint8_t dnan_pattern = status->default_nan_pattern;

if (dnan_pattern == 0) {
-#if defined(TARGET_SPARC) || defined(TARGET_M68K)
+#if defined(TARGET_SPARC)
/* Sign bit clear, all frac bits set */
dnan_pattern = 0b01111111;
#elif defined(TARGET_HEXAGON)
--
2.34.1

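The softfloat-specialize.c.inc hunk above shows the mechanism the whole series
relies on: parts64_default_nan() only consults the compile-time ifdef ladder
when status->default_nan_pattern is still zero, so once a target sets the
pattern explicitly at reset, its entry in the ladder can be deleted.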
Set the default NaN pattern explicitly for MIPS. Note that this
is our only target which currently changes the default NaN
at runtime (which it was previously doing indirectly when it
changed the snan_bit_is_one setting).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-44-peter.maydell@linaro.org
---
target/mips/fpu_helper.h | 7 +++++++
target/mips/msa.c | 3 +++
2 files changed, 10 insertions(+)

diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/fpu_helper.h
+++ b/target/mips/fpu_helper.h
@@ -XXX,XX +XXX,XX @@ static inline void restore_snan_bit_mode(CPUMIPSState *env)
set_float_infzeronan_rule(izn_rule, &env->active_fpu.fp_status);
nan3_rule = nan2008 ? float_3nan_prop_s_cab : float_3nan_prop_s_abc;
set_float_3nan_prop_rule(nan3_rule, &env->active_fpu.fp_status);
+ /*
+ * With nan2008, the default NaN value has the sign bit clear and the
+ * frac msb set; with the older mode, the sign bit is clear, and all
+ * frac bits except the msb are set.
+ */
+ set_float_default_nan_pattern(nan2008 ? 0b01000000 : 0b00111111,
+ &env->active_fpu.fp_status);

}

diff --git a/target/mips/msa.c b/target/mips/msa.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/msa.c
+++ b/target/mips/msa.c
@@ -XXX,XX +XXX,XX @@ void msa_reset(CPUMIPSState *env)
/* Inf * 0 + NaN returns the input NaN */
set_float_infzeronan_rule(float_infzeronan_dnan_never,
&env->active_tc.msa_fp_status);
+ /* Default NaN: sign bit clear, frac msb set */
+ set_float_default_nan_pattern(0b01000000,
+ &env->active_tc.msa_fp_status);
}
--
2.34.1

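For concreteness, in double precision the two MIPS patterns expand (using the
bit 0 replication described above) to 0x7FF8000000000000 for nan2008
(0b01000000: quiet bit set and the rest clear) and 0x7FF7FFFFFFFFFFFF for the
legacy mode (0b00111111: quiet bit clear and every other fraction bit set).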
Set the default NaN pattern explicitly for openrisc.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-45-peter.maydell@linaro.org
---
target/openrisc/cpu.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_reset_hold(Object *obj, ResetType type)
*/
set_float_2nan_prop_rule(float_2nan_prop_x87, &cpu->env.fp_status);

+ /* Default NaN: sign bit clear, frac msb set */
+ set_float_default_nan_pattern(0b01000000, &cpu->env.fp_status);

#ifndef CONFIG_USER_ONLY
cpu->env.picmr = 0x00000000;
--
2.34.1

Set the default NaN pattern explicitly for ppc.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-46-peter.maydell@linaro.org
---
target/ppc/cpu_init.c | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type)
set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->vec_status);

+ /* Default NaN: sign bit clear, set frac msb */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
+ set_float_default_nan_pattern(0b01000000, &env->vec_status);
+
for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
ppc_spr_t *spr = &env->spr_cb[i];

--
2.34.1

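ppc sets the pattern twice because the target keeps separate float_status
structures for scalar floating point (env->fp_status) and for the vector unit
(env->vec_status); both are expected to produce the same default NaN.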
Set the default NaN pattern explicitly for sh4. Note that sh4
is one of the only three targets (the others being HPPA and
sometimes MIPS) that has snan_bit_is_one set.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-47-peter.maydell@linaro.org
---
target/sh4/cpu.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_reset_hold(Object *obj, ResetType type)
set_flush_to_zero(1, &env->fp_status);
#endif
set_default_nan_mode(1, &env->fp_status);
+ /* sign bit clear, set all frac bits other than msb */
+ set_float_default_nan_pattern(0b00111111, &env->fp_status);
}

static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
--
2.34.1

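The inverted-looking sh4 value follows from snan_bit_is_one: when signalling
NaNs are marked by a set fraction msb, a quiet default NaN must have that bit
clear, and the other fraction bits are set instead so the value still has a
nonzero fraction and remains a NaN. That gives 0b00111111 in place of the
usual 0b01000000.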
Set the default NaN pattern explicitly for rx.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241202131347.498124-48-peter.maydell@linaro.org
---
target/rx/cpu.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_reset_hold(Object *obj, ResetType type)
* then prefer dest over source", which is float_2nan_prop_s_ab.
*/
set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status);
+ /* Default NaN value: sign bit clear, set frac msb */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
}

static ObjectClass *rx_cpu_class_by_name(const char *cpu_model)
--
2.34.1

1
Convert armsse_realize() to use ERRP_GUARD(), following
1
Set the default NaN pattern explicitly for s390x.
2
the rules in include/qapi/error.h.
3
2
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210510190844.17799-5-peter.maydell@linaro.org
5
Message-id: 20241202131347.498124-49-peter.maydell@linaro.org
7
---
6
---
8
hw/arm/armsse.c | 8 ++++----
7
target/s390x/cpu.c | 2 ++
9
1 file changed, 4 insertions(+), 4 deletions(-)
8
1 file changed, 2 insertions(+)
10
9
11
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
10
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/hw/arm/armsse.c
12
--- a/target/s390x/cpu.c
14
+++ b/hw/arm/armsse.c
13
+++ b/target/s390x/cpu.c
15
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
14
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_hold(Object *obj, ResetType type)
16
const ARMSSEDeviceInfo *devinfo;
15
set_float_3nan_prop_rule(float_3nan_prop_s_abc, &env->fpu_status);
17
int i;
16
set_float_infzeronan_rule(float_infzeronan_dnan_always,
18
MemoryRegion *mr;
17
&env->fpu_status);
19
- Error *err = NULL;
18
+ /* Default NaN value: sign bit clear, frac msb set */
20
SysBusDevice *sbd_apb_ppc0;
19
+ set_float_default_nan_pattern(0b01000000, &env->fpu_status);
21
SysBusDevice *sbd_secctl;
20
/* fall through */
22
DeviceState *dev_apb_ppc0;
21
case RESET_TYPE_S390_CPU_NORMAL:
23
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
22
env->psw.mask &= ~PSW_MASK_RI;
24
DeviceState *dev_splitter;
25
uint32_t addr_width_max;
26
27
+ ERRP_GUARD();
28
+
29
if (!s->board_memory) {
30
error_setg(errp, "memory property was not set");
31
return;
32
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
33
uint32_t sram_bank_size = 1 << s->sram_addr_width;
34
35
memory_region_init_ram(&s->sram[i], NULL, ramname,
36
- sram_bank_size, &err);
37
+ sram_bank_size, errp);
38
g_free(ramname);
39
- if (err) {
40
- error_propagate(errp, err);
41
+ if (*errp) {
42
return;
43
}
44
object_property_set_link(OBJECT(&s->mpc[i]), "downstream",
45
--
2.20.1

--
2.34.1
1
The AN547 sets the SRAM_ADDR_WIDTH for the SSE-300 to 21;
1
Set the default NaN pattern explicitly for SPARC, and remove
2
since this is not the default value for the SSE-300, model this
2
the ifdef from parts64_default_nan.
3
in mps2-tz.c as a per-board value.
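For reference, an illustrative decode of the 8-bit pattern passed to
set_float_default_nan_pattern() in these patches (assuming the
parts64_default_nan() scheme, where bit 7 gives the sign, bits 6:0 seed
the most significant fraction bits, and bit 0 is replicated into the
rest of the fraction):

    /*
     * Illustrative float32 expansions:
     *   0b01000000 -> 0x7fc00000   sign clear, frac msb set (Arm-style)
     *   0b01111111 -> 0x7fffffff   sign clear, all frac bits set (SPARC)
     *   0b11111111 -> 0xffffffff   sign set, all frac bits set (Hexagon)
     */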
4
3
5
Reported-by: Devaraj Ranganna <devaraj.ranganna@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210510190844.17799-3-peter.maydell@linaro.org
6
Message-id: 20241202131347.498124-50-peter.maydell@linaro.org
9
---
7
---
10
hw/arm/mps2-tz.c | 6 ++++++
8
target/sparc/cpu.c | 2 ++
11
1 file changed, 6 insertions(+)
9
fpu/softfloat-specialize.c.inc | 5 +----
10
2 files changed, 3 insertions(+), 4 deletions(-)
12
11
13
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
12
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
14
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/arm/mps2-tz.c
14
--- a/target/sparc/cpu.c
16
+++ b/hw/arm/mps2-tz.c
15
+++ b/target/sparc/cpu.c
17
@@ -XXX,XX +XXX,XX @@ struct MPS2TZMachineClass {
16
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
18
int numirq; /* Number of external interrupts */
17
set_float_3nan_prop_rule(float_3nan_prop_s_cba, &env->fp_status);
19
int uart_overflow_irq; /* number of the combined UART overflow IRQ */
18
/* For inf * 0 + NaN, return the input NaN */
20
uint32_t init_svtor; /* init-svtor setting for SSE */
19
set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
21
+ uint32_t sram_addr_width; /* SRAM_ADDR_WIDTH setting for SSE */
20
+ /* Default NaN value: sign bit clear, all frac bits set */
22
const RAMInfo *raminfo;
21
+ set_float_default_nan_pattern(0b01111111, &env->fp_status);
23
const char *armsse_type;
22
24
};
23
cpu_exec_realizefn(cs, &local_err);
25
@@ -XXX,XX +XXX,XX @@ static void mps2tz_common_init(MachineState *machine)
24
if (local_err != NULL) {
26
OBJECT(system_memory), &error_abort);
25
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
27
qdev_prop_set_uint32(iotkitdev, "EXP_NUMIRQ", mmc->numirq);
26
index XXXXXXX..XXXXXXX 100644
28
qdev_prop_set_uint32(iotkitdev, "init-svtor", mmc->init_svtor);
27
--- a/fpu/softfloat-specialize.c.inc
29
+ qdev_prop_set_uint32(iotkitdev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width);
28
+++ b/fpu/softfloat-specialize.c.inc
30
qdev_connect_clock_in(iotkitdev, "MAINCLK", mms->sysclk);
29
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
31
qdev_connect_clock_in(iotkitdev, "S32KCLK", mms->s32kclk);
30
uint8_t dnan_pattern = status->default_nan_pattern;
32
sysbus_realize(SYS_BUS_DEVICE(&mms->iotkit), &error_fatal);
31
33
@@ -XXX,XX +XXX,XX @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data)
32
if (dnan_pattern == 0) {
34
mmc->numirq = 92;
33
-#if defined(TARGET_SPARC)
35
mmc->uart_overflow_irq = 47;
34
- /* Sign bit clear, all frac bits set */
36
mmc->init_svtor = 0x10000000;
35
- dnan_pattern = 0b01111111;
37
+ mmc->sram_addr_width = 15;
36
-#elif defined(TARGET_HEXAGON)
38
mmc->raminfo = an505_raminfo;
37
+#if defined(TARGET_HEXAGON)
39
mmc->armsse_type = TYPE_IOTKIT;
38
/* Sign bit set, all frac bits set. */
40
mps2tz_set_default_ram_info(mmc);
39
dnan_pattern = 0b11111111;
41
@@ -XXX,XX +XXX,XX @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data)
40
#else
42
mmc->numirq = 92;
43
mmc->uart_overflow_irq = 47;
44
mmc->init_svtor = 0x10000000;
45
+ mmc->sram_addr_width = 15;
46
mmc->raminfo = an505_raminfo; /* AN521 is the same as AN505 here */
47
mmc->armsse_type = TYPE_SSE200;
48
mps2tz_set_default_ram_info(mmc);
49
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data)
50
mmc->numirq = 95;
51
mmc->uart_overflow_irq = 47;
52
mmc->init_svtor = 0x10000000;
53
+ mmc->sram_addr_width = 15;
54
mmc->raminfo = an524_raminfo;
55
mmc->armsse_type = TYPE_SSE200;
56
mps2tz_set_default_ram_info(mmc);
57
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data)
58
mmc->numirq = 96;
59
mmc->uart_overflow_irq = 48;
60
mmc->init_svtor = 0x00000000;
61
+ mmc->sram_addr_width = 21;
62
mmc->raminfo = an547_raminfo;
63
mmc->armsse_type = TYPE_SSE300;
64
mps2tz_set_default_ram_info(mmc);
65
--
2.20.1

--
2.34.1
1
From: Eric Auger <eric.auger@redhat.com>
1
Set the default NaN pattern explicitly for xtensa.
2
2
3
6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
failed to completely fix misalignment issues with range
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
invalidation. For instance, invalidation patterns like "invalidate 32
5
Message-id: 20241202131347.498124-51-peter.maydell@linaro.org
6
4kB pages starting from 0xff395000" were not correctly handled, due
6
---
7
to the fact that the previous fix only ensured that the number of invalidated
7
target/xtensa/cpu.c | 2 ++
8
pages was a power of 2, but did not properly handle the case where the start
8
1 file changed, 2 insertions(+)
9
address was not aligned with the range. This can be noticed when
10
booting a Fedora 33 guest with a protected virtio-blk-pci device.
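The fix below walks [addr, end], peeling off at each step the largest
power-of-two-sized chunk that is naturally aligned at the current
address; the patch uses QEMU's dma_aligned_pow2_mask() for this. A
standalone sketch of that mask computation, for illustration only (the
real helper also clamps the address width):

    /*
     * Return a mask (2^n - 1) such that 'start' is n-bit aligned and
     * start + mask does not run past 'end' (inclusive).
     */
    static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end)
    {
        int align = start ? __builtin_ctzll(start) : 63;

        while (align > 0 && start + (1ULL << align) - 1 > end) {
            align--;
        }
        return (1ULL << align) - 1;
    }

With these rules the example above ("32 4kB pages starting from
0xff395000") splits into invalidations of 1, 2, 8, 16, 4 and 1 pages,
each naturally aligned on its own size.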
11
9
12
Signed-off-by: Eric Auger <eric.auger@redhat.com>
10
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
13
Fixes: 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
17
hw/arm/smmuv3.c | 50 +++++++++++++++++++++++++------------------------
18
1 file changed, 26 insertions(+), 24 deletions(-)
19
20
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
21
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/arm/smmuv3.c
12
--- a/target/xtensa/cpu.c
23
+++ b/hw/arm/smmuv3.c
13
+++ b/target/xtensa/cpu.c
24
@@ -XXX,XX +XXX,XX @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
14
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_reset_hold(Object *obj, ResetType type)
25
15
/* For inf * 0 + NaN, return the input NaN */
26
static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
16
set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status);
27
{
17
set_no_signaling_nans(!dfpu, &env->fp_status);
28
- uint8_t scale = 0, num = 0, ttl = 0;
18
+ /* Default NaN value: sign bit clear, set frac msb */
29
- dma_addr_t addr = CMD_ADDR(cmd);
19
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
30
+ dma_addr_t end, addr = CMD_ADDR(cmd);
20
xtensa_use_first_nan(env, !dfpu);
31
uint8_t type = CMD_TYPE(cmd);
32
uint16_t vmid = CMD_VMID(cmd);
33
+ uint8_t scale = CMD_SCALE(cmd);
34
+ uint8_t num = CMD_NUM(cmd);
35
+ uint8_t ttl = CMD_TTL(cmd);
36
bool leaf = CMD_LEAF(cmd);
37
uint8_t tg = CMD_TG(cmd);
38
- uint64_t first_page = 0, last_page;
39
- uint64_t num_pages = 1;
40
+ uint64_t num_pages;
41
+ uint8_t granule;
42
int asid = -1;
43
44
- if (tg) {
45
- scale = CMD_SCALE(cmd);
46
- num = CMD_NUM(cmd);
47
- ttl = CMD_TTL(cmd);
48
- num_pages = (num + 1) * BIT_ULL(scale);
49
- }
50
-
51
if (type == SMMU_CMD_TLBI_NH_VA) {
52
asid = CMD_ASID(cmd);
53
}
54
55
+ if (!tg) {
56
+ trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
57
+ smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
58
+ smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
59
+ return;
60
+ }
61
+
62
+ /* RIL in use */
63
+
64
+ num_pages = (num + 1) * BIT_ULL(scale);
65
+ granule = tg * 2 + 10;
66
+
67
/* Split invalidations into ^2 range invalidations */
68
- last_page = num_pages - 1;
69
- while (num_pages) {
70
- uint8_t granule = tg * 2 + 10;
71
- uint64_t mask, count;
72
+ end = addr + (num_pages << granule) - 1;
73
74
- mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
75
- count = mask + 1;
76
+ while (addr != end + 1) {
77
+ uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
78
79
- trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
80
- smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
81
- smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
82
-
83
- num_pages -= count;
84
- first_page += count;
85
- addr += count * BIT_ULL(granule);
86
+ num_pages = (mask + 1) >> granule;
87
+ trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
88
+ smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
89
+ smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
90
+ addr += mask + 1;
91
}
92
}
21
}
93
22
94
--
2.20.1

--
2.34.1
1
Currently we model the ITCM in the AN547's RAMInfo list. This is incorrect
1
Set the default NaN pattern explicitly for hexagon.
2
because this RAM is really a part of the SSE-300. We can't just delete
2
Remove the ifdef from parts64_default_nan(); the only
3
it from the RAMInfo list, though, because this would make boot_ram_size()
3
remaining unconverted targets all use the default case.
4
assert, since it would not be able to find an entry in the list covering
5
guest address 0.
6
7
Allow a board to specify a boot RAM size manually if it doesn't have
8
any RAM itself at address 0 and is relying on the SSE for that, and
9
set the correct value for the AN547. The other boards can continue
10
to use the "look it up from the RAMInfo list" logic.
11
4
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20210510190844.17799-6-peter.maydell@linaro.org
7
Message-id: 20241202131347.498124-52-peter.maydell@linaro.org
15
---
8
---
16
hw/arm/mps2-tz.c | 13 +++++++++++++
9
target/hexagon/cpu.c | 2 ++
17
1 file changed, 13 insertions(+)
10
fpu/softfloat-specialize.c.inc | 5 -----
11
2 files changed, 2 insertions(+), 5 deletions(-)
18
12
19
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
13
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
20
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/arm/mps2-tz.c
15
--- a/target/hexagon/cpu.c
22
+++ b/hw/arm/mps2-tz.c
16
+++ b/target/hexagon/cpu.c
23
@@ -XXX,XX +XXX,XX @@ struct MPS2TZMachineClass {
17
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_reset_hold(Object *obj, ResetType type)
24
uint32_t sram_addr_width; /* SRAM_ADDR_WIDTH setting for SSE */
18
25
const RAMInfo *raminfo;
19
set_default_nan_mode(1, &env->fp_status);
26
const char *armsse_type;
20
set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status);
27
+ uint32_t boot_ram_size; /* size of ram at address 0; 0 == find in raminfo */
21
+ /* Default NaN value: sign bit set, all frac bits set */
28
};
22
+ set_float_default_nan_pattern(0b11111111, &env->fp_status);
29
30
struct MPS2TZMachineState {
31
@@ -XXX,XX +XXX,XX @@ static uint32_t boot_ram_size(MPS2TZMachineState *mms)
32
const RAMInfo *p;
33
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
34
35
+ /*
36
+ * Use a per-board specification (for when the boot RAM is in
37
+ * the SSE and so doesn't have a RAMInfo list entry)
38
+ */
39
+ if (mmc->boot_ram_size) {
40
+ return mmc->boot_ram_size;
41
+ }
42
+
43
for (p = mmc->raminfo; p->name; p++) {
44
if (p->base == boot_mem_base(mms)) {
45
return p->size;
46
@@ -XXX,XX +XXX,XX @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data)
47
mmc->sram_addr_width = 15;
48
mmc->raminfo = an505_raminfo;
49
mmc->armsse_type = TYPE_IOTKIT;
50
+ mmc->boot_ram_size = 0;
51
mps2tz_set_default_ram_info(mmc);
52
}
23
}
53
24
54
@@ -XXX,XX +XXX,XX @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data)
25
static void hexagon_cpu_disas_set_info(CPUState *s, disassemble_info *info)
55
mmc->sram_addr_width = 15;
26
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
56
mmc->raminfo = an505_raminfo; /* AN521 is the same as AN505 here */
27
index XXXXXXX..XXXXXXX 100644
57
mmc->armsse_type = TYPE_SSE200;
28
--- a/fpu/softfloat-specialize.c.inc
58
+ mmc->boot_ram_size = 0;
29
+++ b/fpu/softfloat-specialize.c.inc
59
mps2tz_set_default_ram_info(mmc);
30
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
60
}
31
uint8_t dnan_pattern = status->default_nan_pattern;
61
32
62
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data)
33
if (dnan_pattern == 0) {
63
mmc->sram_addr_width = 15;
34
-#if defined(TARGET_HEXAGON)
64
mmc->raminfo = an524_raminfo;
35
- /* Sign bit set, all frac bits set. */
65
mmc->armsse_type = TYPE_SSE200;
36
- dnan_pattern = 0b11111111;
66
+ mmc->boot_ram_size = 0;
37
-#else
67
mps2tz_set_default_ram_info(mmc);
38
/*
68
39
* This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V,
69
object_class_property_add_str(oc, "remap", mps2_get_remap, mps2_set_remap);
40
* S390, SH4, TriCore, and Xtensa. Our other supported targets
70
@@ -XXX,XX +XXX,XX @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data)
41
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
71
mmc->sram_addr_width = 21;
42
/* sign bit clear, set frac msb */
72
mmc->raminfo = an547_raminfo;
43
dnan_pattern = 0b01000000;
73
mmc->armsse_type = TYPE_SSE300;
44
}
74
+ mmc->boot_ram_size = 512 * KiB;
45
-#endif
75
mps2tz_set_default_ram_info(mmc);
46
}
76
}
47
assert(dnan_pattern != 0);
77
48
78
--
2.20.1

--
2.34.1
1
The SRAM at 0x2000_0000 is part of the SSE-200 itself, and we model
1
Set the default NaN pattern explicitly for riscv.
2
it that way in hw/arm/armsse.c (along with the associated MPCs). We
3
also incorrectly added an entry to the RAMInfo array for the AN524 in
4
hw/arm/mps2-tz.c, which was pointless because the CPU would never see
5
it. Delete it.
6
7
The bug had no guest-visible effect because devices in the SSE-200
8
take priority over those in the board model (armsse.c maps
9
s->board_memory at priority -2).
10
2
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20210510190844.17799-2-peter.maydell@linaro.org
5
Message-id: 20241202131347.498124-53-peter.maydell@linaro.org
14
---
6
---
15
hw/arm/mps2-tz.c | 8 +-------
7
target/riscv/cpu.c | 2 ++
16
1 file changed, 1 insertion(+), 7 deletions(-)
8
1 file changed, 2 insertions(+)
17
9
18
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
10
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
19
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/arm/mps2-tz.c
12
--- a/target/riscv/cpu.c
21
+++ b/hw/arm/mps2-tz.c
13
+++ b/target/riscv/cpu.c
22
@@ -XXX,XX +XXX,XX @@ static const RAMInfo an524_raminfo[] = { {
14
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
23
.size = 512 * KiB,
15
cs->exception_index = RISCV_EXCP_NONE;
24
.mpc = 0,
16
env->load_res = -1;
25
.mrindex = 0,
17
set_default_nan_mode(1, &env->fp_status);
26
- }, {
18
+ /* Default NaN value: sign bit clear, frac msb set */
27
- .name = "sram",
19
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
28
- .base = 0x20000000,
20
env->vill = true;
29
- .size = 32 * 4 * KiB,
21
30
- .mpc = -1,
22
#ifndef CONFIG_USER_ONLY
31
- .mrindex = 1,
32
}, {
33
/* We don't model QSPI flash yet; for now expose it as simple ROM */
34
.name = "QSPI",
35
.base = 0x28000000,
36
.size = 8 * MiB,
37
.mpc = 1,
38
- .mrindex = 2,
39
+ .mrindex = 1,
40
.flags = IS_ROM,
41
}, {
42
.name = "DDR",
43
--
2.20.1

--
2.34.1
1
From: Richard Henderson <richard.henderson@linaro.org>
1
Set the default NaN pattern explicitly for tricore.
2
2
3
This completes the section "SVE2 bitwise shift right narrow".
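For reference, a scalar sketch of the per-element operation the new
signed saturating shift-right-narrow helpers perform, for the
16-to-8-bit case (the real code expresses this via DO_SQSHRN_H and
do_sat_bhs):

    #include <stdint.h>

    /* Signed saturating shift-right-narrow, one element, 16 -> 8 bits. */
    static int8_t sqshrnb_elem_h(int16_t x, int sh)
    {
        int16_t v = x >> sh;        /* arithmetic shift right */

        if (v < INT8_MIN) {
            return INT8_MIN;        /* saturate to -128 */
        }
        if (v > INT8_MAX) {
            return INT8_MAX;        /* saturate to 127 */
        }
        return (int8_t)v;
    }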
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20241202131347.498124-54-peter.maydell@linaro.org
6
---
7
target/tricore/helper.c | 2 ++
8
1 file changed, 2 insertions(+)
4
9
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210525010358.152808-30-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
target/arm/helper-sve.h | 16 ++++++
11
target/arm/sve.decode | 4 ++
12
target/arm/sve_helper.c | 24 +++++++++
13
target/arm/translate-sve.c | 105 +++++++++++++++++++++++++++++++++++++
14
4 files changed, 149 insertions(+)
15
16
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper-sve.h
12
--- a/target/tricore/helper.c
19
+++ b/target/arm/helper-sve.h
13
+++ b/target/tricore/helper.c
20
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
14
@@ -XXX,XX +XXX,XX @@ void fpu_set_state(CPUTriCoreState *env)
21
DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
15
set_flush_to_zero(1, &env->fp_status);
22
DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
16
set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status);
23
17
set_default_nan_mode(1, &env->fp_status);
24
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
18
+ /* Default NaN pattern: sign bit clear, frac msb set */
25
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
19
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
26
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
27
+
28
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
29
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
30
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
31
+
32
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
33
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
34
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
35
+
36
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
37
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
39
+
40
DEF_HELPER_FLAGS_3(sve2_uqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
41
DEF_HELPER_FLAGS_3(sve2_uqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
42
DEF_HELPER_FLAGS_3(sve2_uqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
43
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/sve.decode
46
+++ b/target/arm/sve.decode
47
@@ -XXX,XX +XXX,XX @@ SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr
48
SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr
49
RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr
50
RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr
51
+SQSHRNB 01000101 .. 1 ..... 00 1000 ..... ..... @rd_rn_tszimm_shr
52
+SQSHRNT 01000101 .. 1 ..... 00 1001 ..... ..... @rd_rn_tszimm_shr
53
+SQRSHRNB 01000101 .. 1 ..... 00 1010 ..... ..... @rd_rn_tszimm_shr
54
+SQRSHRNT 01000101 .. 1 ..... 00 1011 ..... ..... @rd_rn_tszimm_shr
55
UQSHRNB 01000101 .. 1 ..... 00 1100 ..... ..... @rd_rn_tszimm_shr
56
UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr
57
UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr
58
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/sve_helper.c
61
+++ b/target/arm/sve_helper.c
62
@@ -XXX,XX +XXX,XX @@ DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
63
DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
64
DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D)
65
66
+#define DO_SQSHRN_H(x, sh) do_sat_bhs(x >> sh, INT8_MIN, INT8_MAX)
67
+#define DO_SQSHRN_S(x, sh) do_sat_bhs(x >> sh, INT16_MIN, INT16_MAX)
68
+#define DO_SQSHRN_D(x, sh) do_sat_bhs(x >> sh, INT32_MIN, INT32_MAX)
69
+
70
+DO_SHRNB(sve2_sqshrnb_h, int16_t, uint8_t, DO_SQSHRN_H)
71
+DO_SHRNB(sve2_sqshrnb_s, int32_t, uint16_t, DO_SQSHRN_S)
72
+DO_SHRNB(sve2_sqshrnb_d, int64_t, uint32_t, DO_SQSHRN_D)
73
+
74
+DO_SHRNT(sve2_sqshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRN_H)
75
+DO_SHRNT(sve2_sqshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRN_S)
76
+DO_SHRNT(sve2_sqshrnt_d, int64_t, uint32_t, , H1_4, DO_SQSHRN_D)
77
+
78
+#define DO_SQRSHRN_H(x, sh) do_sat_bhs(do_srshr(x, sh), INT8_MIN, INT8_MAX)
79
+#define DO_SQRSHRN_S(x, sh) do_sat_bhs(do_srshr(x, sh), INT16_MIN, INT16_MAX)
80
+#define DO_SQRSHRN_D(x, sh) do_sat_bhs(do_srshr(x, sh), INT32_MIN, INT32_MAX)
81
+
82
+DO_SHRNB(sve2_sqrshrnb_h, int16_t, uint8_t, DO_SQRSHRN_H)
83
+DO_SHRNB(sve2_sqrshrnb_s, int32_t, uint16_t, DO_SQRSHRN_S)
84
+DO_SHRNB(sve2_sqrshrnb_d, int64_t, uint32_t, DO_SQRSHRN_D)
85
+
86
+DO_SHRNT(sve2_sqrshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRN_H)
87
+DO_SHRNT(sve2_sqrshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRN_S)
88
+DO_SHRNT(sve2_sqrshrnt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRN_D)
89
+
90
#define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX)
91
#define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX)
92
#define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX)
93
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate-sve.c
96
+++ b/target/arm/translate-sve.c
97
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
98
return do_sve2_shr_narrow(s, a, ops);
99
}
20
}
100
21
101
+static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
22
uint32_t psw_read(CPUTriCoreState *env)
102
+ TCGv_vec n, int64_t shr)
103
+{
104
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
105
+ int halfbits = 4 << vece;
106
+ int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
107
+ int64_t min = -max - 1;
108
+
109
+ tcg_gen_sari_vec(vece, n, n, shr);
110
+ tcg_gen_dupi_vec(vece, t, min);
111
+ tcg_gen_smax_vec(vece, n, n, t);
112
+ tcg_gen_dupi_vec(vece, t, max);
113
+ tcg_gen_smin_vec(vece, n, n, t);
114
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
115
+ tcg_gen_and_vec(vece, d, n, t);
116
+ tcg_temp_free_vec(t);
117
+}
118
+
119
+static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a)
120
+{
121
+ static const TCGOpcode vec_list[] = {
122
+ INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
123
+ };
124
+ static const GVecGen2i ops[3] = {
125
+ { .fniv = gen_sqshrnb_vec,
126
+ .opt_opc = vec_list,
127
+ .fno = gen_helper_sve2_sqshrnb_h,
128
+ .vece = MO_16 },
129
+ { .fniv = gen_sqshrnb_vec,
130
+ .opt_opc = vec_list,
131
+ .fno = gen_helper_sve2_sqshrnb_s,
132
+ .vece = MO_32 },
133
+ { .fniv = gen_sqshrnb_vec,
134
+ .opt_opc = vec_list,
135
+ .fno = gen_helper_sve2_sqshrnb_d,
136
+ .vece = MO_64 },
137
+ };
138
+ return do_sve2_shr_narrow(s, a, ops);
139
+}
140
+
141
+static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
142
+ TCGv_vec n, int64_t shr)
143
+{
144
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
145
+ int halfbits = 4 << vece;
146
+ int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
147
+ int64_t min = -max - 1;
148
+
149
+ tcg_gen_sari_vec(vece, n, n, shr);
150
+ tcg_gen_dupi_vec(vece, t, min);
151
+ tcg_gen_smax_vec(vece, n, n, t);
152
+ tcg_gen_dupi_vec(vece, t, max);
153
+ tcg_gen_smin_vec(vece, n, n, t);
154
+ tcg_gen_shli_vec(vece, n, n, halfbits);
155
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
156
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
157
+ tcg_temp_free_vec(t);
158
+}
159
+
160
+static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a)
161
+{
162
+ static const TCGOpcode vec_list[] = {
163
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
164
+ INDEX_op_smax_vec, INDEX_op_smin_vec, 0
165
+ };
166
+ static const GVecGen2i ops[3] = {
167
+ { .fniv = gen_sqshrnt_vec,
168
+ .opt_opc = vec_list,
169
+ .load_dest = true,
170
+ .fno = gen_helper_sve2_sqshrnt_h,
171
+ .vece = MO_16 },
172
+ { .fniv = gen_sqshrnt_vec,
173
+ .opt_opc = vec_list,
174
+ .load_dest = true,
175
+ .fno = gen_helper_sve2_sqshrnt_s,
176
+ .vece = MO_32 },
177
+ { .fniv = gen_sqshrnt_vec,
178
+ .opt_opc = vec_list,
179
+ .load_dest = true,
180
+ .fno = gen_helper_sve2_sqshrnt_d,
181
+ .vece = MO_64 },
182
+ };
183
+ return do_sve2_shr_narrow(s, a, ops);
184
+}
185
+
186
+static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a)
187
+{
188
+ static const GVecGen2i ops[3] = {
189
+ { .fno = gen_helper_sve2_sqrshrnb_h },
190
+ { .fno = gen_helper_sve2_sqrshrnb_s },
191
+ { .fno = gen_helper_sve2_sqrshrnb_d },
192
+ };
193
+ return do_sve2_shr_narrow(s, a, ops);
194
+}
195
+
196
+static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a)
197
+{
198
+ static const GVecGen2i ops[3] = {
199
+ { .fno = gen_helper_sve2_sqrshrnt_h },
200
+ { .fno = gen_helper_sve2_sqrshrnt_s },
201
+ { .fno = gen_helper_sve2_sqrshrnt_d },
202
+ };
203
+ return do_sve2_shr_narrow(s, a, ops);
204
+}
205
+
206
static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
207
TCGv_vec n, int64_t shr)
208
{
209
--
2.20.1

--
2.34.1
1
From: Richard Henderson <richard.henderson@linaro.org>
1
Now that all our targets have been converted to explicitly specify
2
their pattern for the default NaN value, we can remove the remaining
3
fallback code in parts64_default_nan().
2
4
3
Split these operations out into a header that can be shared
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
between neon and sve. The "sat" pointer acts both as a boolean
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
that selects saturating behavior and as the hook that captures the
7
Message-id: 20241202131347.498124-55-peter.maydell@linaro.org
6
difference between neon and sve -- QC bit or no QC bit.
8
---
9
fpu/softfloat-specialize.c.inc | 14 --------------
10
1 file changed, 14 deletions(-)
7
11
8
Widen the shift operand in the new helpers, as the SVE2 insns treat
12
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
9
the whole input element as significant. For the neon uses, truncate
10
the shift to int8_t while passing the parameter.
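As an illustrative aside, using names from the patch: one helper then
covers all four variants, depending on the "round" flag and the "sat"
pointer, e.g. for the 16-bit element size:

    dest = do_sqrshl_bhs(src, (int8_t)shift, 16, false, NULL);         /* shl_s16 */
    dest = do_sqrshl_bhs(src, (int8_t)shift, 16, true,  NULL);         /* rshl_s16 */
    dest = do_sqrshl_bhs(src, (int8_t)shift, 16, false, env->vfp.qc);  /* qshl_s16 */
    dest = do_sqrshl_bhs(src, (int8_t)shift, 16, true,  env->vfp.qc);  /* qrshl_s16 */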
11
12
Implement right-shift rounding as
13
14
tmp = src >> (shift - 1);
15
dst = (tmp >> 1) + (tmp & 1);
16
17
This is the same number of instructions as the current
18
19
tmp = 1 << (shift - 1);
20
dst = (src + tmp) >> shift;
21
22
without any possibility of intermediate overflow.
23
24
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
25
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
26
Message-id: 20210525010358.152808-6-richard.henderson@linaro.org
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
---
29
target/arm/vec_internal.h | 138 +++++++++++
30
target/arm/neon_helper.c | 507 +++++++-------------------------------
31
2 files changed, 221 insertions(+), 424 deletions(-)
32
33
diff --git a/target/arm/vec_internal.h b/target/arm/vec_internal.h
34
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/vec_internal.h
14
--- a/fpu/softfloat-specialize.c.inc
36
+++ b/target/arm/vec_internal.h
15
+++ b/fpu/softfloat-specialize.c.inc
37
@@ -XXX,XX +XXX,XX @@ static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
16
@@ -XXX,XX +XXX,XX @@ static void parts64_default_nan(FloatParts64 *p, float_status *status)
38
}
17
uint64_t frac;
39
}
18
uint8_t dnan_pattern = status->default_nan_pattern;
40
19
41
+static inline int32_t do_sqrshl_bhs(int32_t src, int32_t shift, int bits,
20
- if (dnan_pattern == 0) {
42
+ bool round, uint32_t *sat)
21
- /*
43
+{
22
- * This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V,
44
+ if (shift <= -bits) {
23
- * S390, SH4, TriCore, and Xtensa. Our other supported targets
45
+ /* Rounding the sign bit always produces 0. */
24
- * do not have floating-point.
46
+ if (round) {
25
- */
47
+ return 0;
26
- if (snan_bit_is_one(status)) {
48
+ }
27
- /* sign bit clear, set all frac bits other than msb */
49
+ return src >> 31;
28
- dnan_pattern = 0b00111111;
50
+ } else if (shift < 0) {
51
+ if (round) {
52
+ src >>= -shift - 1;
53
+ return (src >> 1) + (src & 1);
54
+ }
55
+ return src >> -shift;
56
+ } else if (shift < bits) {
57
+ int32_t val = src << shift;
58
+ if (bits == 32) {
59
+ if (!sat || val >> shift == src) {
60
+ return val;
61
+ }
62
+ } else {
63
+ int32_t extval = sextract32(val, 0, bits);
64
+ if (!sat || val == extval) {
65
+ return extval;
66
+ }
67
+ }
68
+ } else if (!sat || src == 0) {
69
+ return 0;
70
+ }
71
+
72
+ *sat = 1;
73
+ return (1u << (bits - 1)) - (src >= 0);
74
+}
75
+
76
+static inline uint32_t do_uqrshl_bhs(uint32_t src, int32_t shift, int bits,
77
+ bool round, uint32_t *sat)
78
+{
79
+ if (shift <= -(bits + round)) {
80
+ return 0;
81
+ } else if (shift < 0) {
82
+ if (round) {
83
+ src >>= -shift - 1;
84
+ return (src >> 1) + (src & 1);
85
+ }
86
+ return src >> -shift;
87
+ } else if (shift < bits) {
88
+ uint32_t val = src << shift;
89
+ if (bits == 32) {
90
+ if (!sat || val >> shift == src) {
91
+ return val;
92
+ }
93
+ } else {
94
+ uint32_t extval = extract32(val, 0, bits);
95
+ if (!sat || val == extval) {
96
+ return extval;
97
+ }
98
+ }
99
+ } else if (!sat || src == 0) {
100
+ return 0;
101
+ }
102
+
103
+ *sat = 1;
104
+ return MAKE_64BIT_MASK(0, bits);
105
+}
106
+
107
+static inline int32_t do_suqrshl_bhs(int32_t src, int32_t shift, int bits,
108
+ bool round, uint32_t *sat)
109
+{
110
+ if (sat && src < 0) {
111
+ *sat = 1;
112
+ return 0;
113
+ }
114
+ return do_uqrshl_bhs(src, shift, bits, round, sat);
115
+}
116
+
117
+static inline int64_t do_sqrshl_d(int64_t src, int64_t shift,
118
+ bool round, uint32_t *sat)
119
+{
120
+ if (shift <= -64) {
121
+ /* Rounding the sign bit always produces 0. */
122
+ if (round) {
123
+ return 0;
124
+ }
125
+ return src >> 63;
126
+ } else if (shift < 0) {
127
+ if (round) {
128
+ src >>= -shift - 1;
129
+ return (src >> 1) + (src & 1);
130
+ }
131
+ return src >> -shift;
132
+ } else if (shift < 64) {
133
+ int64_t val = src << shift;
134
+ if (!sat || val >> shift == src) {
135
+ return val;
136
+ }
137
+ } else if (!sat || src == 0) {
138
+ return 0;
139
+ }
140
+
141
+ *sat = 1;
142
+ return src < 0 ? INT64_MIN : INT64_MAX;
143
+}
144
+
145
+static inline uint64_t do_uqrshl_d(uint64_t src, int64_t shift,
146
+ bool round, uint32_t *sat)
147
+{
148
+ if (shift <= -(64 + round)) {
149
+ return 0;
150
+ } else if (shift < 0) {
151
+ if (round) {
152
+ src >>= -shift - 1;
153
+ return (src >> 1) + (src & 1);
154
+ }
155
+ return src >> -shift;
156
+ } else if (shift < 64) {
157
+ uint64_t val = src << shift;
158
+ if (!sat || val >> shift == src) {
159
+ return val;
160
+ }
161
+ } else if (!sat || src == 0) {
162
+ return 0;
163
+ }
164
+
165
+ *sat = 1;
166
+ return UINT64_MAX;
167
+}
168
+
169
+static inline int64_t do_suqrshl_d(int64_t src, int64_t shift,
170
+ bool round, uint32_t *sat)
171
+{
172
+ if (sat && src < 0) {
173
+ *sat = 1;
174
+ return 0;
175
+ }
176
+ return do_uqrshl_d(src, shift, round, sat);
177
+}
178
+
179
#endif /* TARGET_ARM_VEC_INTERNALS_H */
180
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
181
index XXXXXXX..XXXXXXX 100644
182
--- a/target/arm/neon_helper.c
183
+++ b/target/arm/neon_helper.c
184
@@ -XXX,XX +XXX,XX @@
185
#include "cpu.h"
186
#include "exec/helper-proto.h"
187
#include "fpu/softfloat.h"
188
+#include "vec_internal.h"
189
190
#define SIGNBIT (uint32_t)0x80000000
191
#define SIGNBIT64 ((uint64_t)1 << 63)
192
@@ -XXX,XX +XXX,XX @@ NEON_POP(pmax_s16, neon_s16, 2)
193
NEON_POP(pmax_u16, neon_u16, 2)
194
#undef NEON_FN
195
196
-#define NEON_FN(dest, src1, src2) do { \
197
- int8_t tmp; \
198
- tmp = (int8_t)src2; \
199
- if (tmp >= (ssize_t)sizeof(src1) * 8 || \
200
- tmp <= -(ssize_t)sizeof(src1) * 8) { \
201
- dest = 0; \
202
- } else if (tmp < 0) { \
203
- dest = src1 >> -tmp; \
204
- } else { \
205
- dest = src1 << tmp; \
206
- }} while (0)
207
+#define NEON_FN(dest, src1, src2) \
208
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, NULL))
209
NEON_VOP(shl_u16, neon_u16, 2)
210
#undef NEON_FN
211
212
-#define NEON_FN(dest, src1, src2) do { \
213
- int8_t tmp; \
214
- tmp = (int8_t)src2; \
215
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
216
- dest = 0; \
217
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
218
- dest = src1 >> (sizeof(src1) * 8 - 1); \
219
- } else if (tmp < 0) { \
220
- dest = src1 >> -tmp; \
221
- } else { \
222
- dest = src1 << tmp; \
223
- }} while (0)
224
+#define NEON_FN(dest, src1, src2) \
225
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, NULL))
226
NEON_VOP(shl_s16, neon_s16, 2)
227
#undef NEON_FN
228
229
-#define NEON_FN(dest, src1, src2) do { \
230
- int8_t tmp; \
231
- tmp = (int8_t)src2; \
232
- if ((tmp >= (ssize_t)sizeof(src1) * 8) \
233
- || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
234
- dest = 0; \
235
- } else if (tmp < 0) { \
236
- dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
237
- } else { \
238
- dest = src1 << tmp; \
239
- }} while (0)
240
+#define NEON_FN(dest, src1, src2) \
241
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
242
NEON_VOP(rshl_s8, neon_s8, 4)
243
+#undef NEON_FN
244
+
245
+#define NEON_FN(dest, src1, src2) \
246
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
247
NEON_VOP(rshl_s16, neon_s16, 2)
248
#undef NEON_FN
249
250
-/* The addition of the rounding constant may overflow, so we use an
251
- * intermediate 64 bit accumulator. */
252
-uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
253
+uint32_t HELPER(neon_rshl_s32)(uint32_t val, uint32_t shift)
254
{
255
- int32_t dest;
256
- int32_t val = (int32_t)valop;
257
- int8_t shift = (int8_t)shiftop;
258
- if ((shift >= 32) || (shift <= -32)) {
259
- dest = 0;
260
- } else if (shift < 0) {
261
- int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
262
- dest = big_dest >> -shift;
263
- } else {
264
- dest = val << shift;
265
- }
266
- return dest;
267
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
268
}
269
270
-/* Handling addition overflow with 64 bit input values is more
271
- * tricky than with 32 bit values. */
272
-uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
273
+uint64_t HELPER(neon_rshl_s64)(uint64_t val, uint64_t shift)
274
{
275
- int8_t shift = (int8_t)shiftop;
276
- int64_t val = valop;
277
- if ((shift >= 64) || (shift <= -64)) {
278
- val = 0;
279
- } else if (shift < 0) {
280
- val >>= (-shift - 1);
281
- if (val == INT64_MAX) {
282
- /* In this case, it means that the rounding constant is 1,
283
- * and the addition would overflow. Return the actual
284
- * result directly. */
285
- val = 0x4000000000000000LL;
286
- } else {
29
- } else {
287
- val++;
30
- /* sign bit clear, set frac msb */
288
- val >>= 1;
31
- dnan_pattern = 0b01000000;
289
- }
290
- } else {
291
- val <<= shift;
292
- }
293
- return val;
294
+ return do_sqrshl_d(val, (int8_t)shift, true, NULL);
295
}
296
297
-#define NEON_FN(dest, src1, src2) do { \
298
- int8_t tmp; \
299
- tmp = (int8_t)src2; \
300
- if (tmp >= (ssize_t)sizeof(src1) * 8 || \
301
- tmp < -(ssize_t)sizeof(src1) * 8) { \
302
- dest = 0; \
303
- } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
304
- dest = src1 >> (-tmp - 1); \
305
- } else if (tmp < 0) { \
306
- dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
307
- } else { \
308
- dest = src1 << tmp; \
309
- }} while (0)
310
+#define NEON_FN(dest, src1, src2) \
311
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
312
NEON_VOP(rshl_u8, neon_u8, 4)
313
+#undef NEON_FN
314
+
315
+#define NEON_FN(dest, src1, src2) \
316
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
317
NEON_VOP(rshl_u16, neon_u16, 2)
318
#undef NEON_FN
319
320
-/* The addition of the rounding constant may overflow, so we use an
321
- * intermediate 64 bit accumulator. */
322
-uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
323
+uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shift)
324
{
325
- uint32_t dest;
326
- int8_t shift = (int8_t)shiftop;
327
- if (shift >= 32 || shift < -32) {
328
- dest = 0;
329
- } else if (shift == -32) {
330
- dest = val >> 31;
331
- } else if (shift < 0) {
332
- uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
333
- dest = big_dest >> -shift;
334
- } else {
335
- dest = val << shift;
336
- }
337
- return dest;
338
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
339
}
340
341
-/* Handling addition overflow with 64 bit input values is more
342
- * tricky than with 32 bit values. */
343
-uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
344
+uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shift)
345
{
346
- int8_t shift = (uint8_t)shiftop;
347
- if (shift >= 64 || shift < -64) {
348
- val = 0;
349
- } else if (shift == -64) {
350
- /* Rounding a 1-bit result just preserves that bit. */
351
- val >>= 63;
352
- } else if (shift < 0) {
353
- val >>= (-shift - 1);
354
- if (val == UINT64_MAX) {
355
- /* In this case, it means that the rounding constant is 1,
356
- * and the addition would overflow. Return the actual
357
- * result directly. */
358
- val = 0x8000000000000000ULL;
359
- } else {
360
- val++;
361
- val >>= 1;
362
- }
363
- } else {
364
- val <<= shift;
365
- }
366
- return val;
367
+ return do_uqrshl_d(val, (int8_t)shift, true, NULL);
368
}
369
370
-#define NEON_FN(dest, src1, src2) do { \
371
- int8_t tmp; \
372
- tmp = (int8_t)src2; \
373
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
374
- if (src1) { \
375
- SET_QC(); \
376
- dest = ~0; \
377
- } else { \
378
- dest = 0; \
379
- } \
380
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
381
- dest = 0; \
382
- } else if (tmp < 0) { \
383
- dest = src1 >> -tmp; \
384
- } else { \
385
- dest = src1 << tmp; \
386
- if ((dest >> tmp) != src1) { \
387
- SET_QC(); \
388
- dest = ~0; \
389
- } \
390
- }} while (0)
391
+#define NEON_FN(dest, src1, src2) \
392
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
393
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
394
+#undef NEON_FN
395
+
396
+#define NEON_FN(dest, src1, src2) \
397
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
398
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
399
-NEON_VOP_ENV(qshl_u32, neon_u32, 1)
400
#undef NEON_FN
401
402
-uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
403
+uint32_t HELPER(neon_qshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
404
{
405
- int8_t shift = (int8_t)shiftop;
406
- if (shift >= 64) {
407
- if (val) {
408
- val = ~(uint64_t)0;
409
- SET_QC();
410
- }
411
- } else if (shift <= -64) {
412
- val = 0;
413
- } else if (shift < 0) {
414
- val >>= -shift;
415
- } else {
416
- uint64_t tmp = val;
417
- val <<= shift;
418
- if ((val >> shift) != tmp) {
419
- SET_QC();
420
- val = ~(uint64_t)0;
421
- }
32
- }
422
- }
33
- }
423
- return val;
34
assert(dnan_pattern != 0);
424
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
35
425
}
36
sign = dnan_pattern >> 7;
426
427
-#define NEON_FN(dest, src1, src2) do { \
428
- int8_t tmp; \
429
- tmp = (int8_t)src2; \
430
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
431
- if (src1) { \
432
- SET_QC(); \
433
- dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
434
- if (src1 > 0) { \
435
- dest--; \
436
- } \
437
- } else { \
438
- dest = src1; \
439
- } \
440
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
441
- dest = src1 >> 31; \
442
- } else if (tmp < 0) { \
443
- dest = src1 >> -tmp; \
444
- } else { \
445
- dest = src1 << tmp; \
446
- if ((dest >> tmp) != src1) { \
447
- SET_QC(); \
448
- dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
449
- if (src1 > 0) { \
450
- dest--; \
451
- } \
452
- } \
453
- }} while (0)
454
+uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
455
+{
456
+ return do_uqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
457
+}
458
+
459
+#define NEON_FN(dest, src1, src2) \
460
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
461
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
462
+#undef NEON_FN
463
+
464
+#define NEON_FN(dest, src1, src2) \
465
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
466
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
467
-NEON_VOP_ENV(qshl_s32, neon_s32, 1)
468
#undef NEON_FN
469
470
-uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
471
+uint32_t HELPER(neon_qshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
472
{
473
- int8_t shift = (uint8_t)shiftop;
474
- int64_t val = valop;
475
- if (shift >= 64) {
476
- if (val) {
477
- SET_QC();
478
- val = (val >> 63) ^ ~SIGNBIT64;
479
- }
480
- } else if (shift <= -64) {
481
- val >>= 63;
482
- } else if (shift < 0) {
483
- val >>= -shift;
484
- } else {
485
- int64_t tmp = val;
486
- val <<= shift;
487
- if ((val >> shift) != tmp) {
488
- SET_QC();
489
- val = (tmp >> 63) ^ ~SIGNBIT64;
490
- }
491
- }
492
- return val;
493
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
494
}
495
496
-#define NEON_FN(dest, src1, src2) do { \
497
- if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
498
- SET_QC(); \
499
- dest = 0; \
500
- } else { \
501
- int8_t tmp; \
502
- tmp = (int8_t)src2; \
503
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
504
- if (src1) { \
505
- SET_QC(); \
506
- dest = ~0; \
507
- } else { \
508
- dest = 0; \
509
- } \
510
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
511
- dest = 0; \
512
- } else if (tmp < 0) { \
513
- dest = src1 >> -tmp; \
514
- } else { \
515
- dest = src1 << tmp; \
516
- if ((dest >> tmp) != src1) { \
517
- SET_QC(); \
518
- dest = ~0; \
519
- } \
520
- } \
521
- }} while (0)
522
-NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
523
-NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
524
+uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
525
+{
526
+ return do_sqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
527
+}
528
+
529
+#define NEON_FN(dest, src1, src2) \
530
+ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
531
+NEON_VOP_ENV(qshlu_s8, neon_s8, 4)
532
#undef NEON_FN
533
534
-uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
535
+#define NEON_FN(dest, src1, src2) \
536
+ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
537
+NEON_VOP_ENV(qshlu_s16, neon_s16, 2)
538
+#undef NEON_FN
539
+
540
+uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
541
{
542
- if ((int32_t)valop < 0) {
543
- SET_QC();
544
- return 0;
545
- }
546
- return helper_neon_qshl_u32(env, valop, shiftop);
547
+ return do_suqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
548
}
549
550
-uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
551
+uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
552
{
553
- if ((int64_t)valop < 0) {
554
- SET_QC();
555
- return 0;
556
- }
557
- return helper_neon_qshl_u64(env, valop, shiftop);
558
+ return do_suqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
559
}
560
561
-#define NEON_FN(dest, src1, src2) do { \
562
- int8_t tmp; \
563
- tmp = (int8_t)src2; \
564
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
565
- if (src1) { \
566
- SET_QC(); \
567
- dest = ~0; \
568
- } else { \
569
- dest = 0; \
570
- } \
571
- } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
572
- dest = 0; \
573
- } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
574
- dest = src1 >> (sizeof(src1) * 8 - 1); \
575
- } else if (tmp < 0) { \
576
- dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
577
- } else { \
578
- dest = src1 << tmp; \
579
- if ((dest >> tmp) != src1) { \
580
- SET_QC(); \
581
- dest = ~0; \
582
- } \
583
- }} while (0)
584
+#define NEON_FN(dest, src1, src2) \
585
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
586
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
587
+#undef NEON_FN
588
+
589
+#define NEON_FN(dest, src1, src2) \
590
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
591
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
592
#undef NEON_FN
593
594
-/* The addition of the rounding constant may overflow, so we use an
595
- * intermediate 64 bit accumulator. */
596
-uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
597
+uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
598
{
599
- uint32_t dest;
600
- int8_t shift = (int8_t)shiftop;
601
- if (shift >= 32) {
602
- if (val) {
603
- SET_QC();
604
- dest = ~0;
605
- } else {
606
- dest = 0;
607
- }
608
- } else if (shift < -32) {
609
- dest = 0;
610
- } else if (shift == -32) {
611
- dest = val >> 31;
612
- } else if (shift < 0) {
613
- uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
614
- dest = big_dest >> -shift;
615
- } else {
616
- dest = val << shift;
617
- if ((dest >> shift) != val) {
618
- SET_QC();
619
- dest = ~0;
620
- }
621
- }
622
- return dest;
623
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc);
624
}
625
626
-/* Handling addition overflow with 64 bit input values is more
627
- * tricky than with 32 bit values. */
628
-uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
629
+uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
630
{
631
- int8_t shift = (int8_t)shiftop;
632
- if (shift >= 64) {
633
- if (val) {
634
- SET_QC();
635
- val = ~0;
636
- }
637
- } else if (shift < -64) {
638
- val = 0;
639
- } else if (shift == -64) {
640
- val >>= 63;
641
- } else if (shift < 0) {
642
- val >>= (-shift - 1);
643
- if (val == UINT64_MAX) {
644
- /* In this case, it means that the rounding constant is 1,
645
- * and the addition would overflow. Return the actual
646
- * result directly. */
647
- val = 0x8000000000000000ULL;
648
- } else {
649
- val++;
650
- val >>= 1;
651
- }
652
- } else { \
653
- uint64_t tmp = val;
654
- val <<= shift;
655
- if ((val >> shift) != tmp) {
656
- SET_QC();
657
- val = ~0;
658
- }
659
- }
660
- return val;
661
+ return do_uqrshl_d(val, (int8_t)shift, true, env->vfp.qc);
662
}
663
664
-#define NEON_FN(dest, src1, src2) do { \
665
- int8_t tmp; \
666
- tmp = (int8_t)src2; \
667
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
668
- if (src1) { \
669
- SET_QC(); \
670
- dest = (typeof(dest))(1 << (sizeof(src1) * 8 - 1)); \
671
- if (src1 > 0) { \
672
- dest--; \
673
- } \
674
- } else { \
675
- dest = 0; \
676
- } \
677
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
678
- dest = 0; \
679
- } else if (tmp < 0) { \
680
- dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
681
- } else { \
682
- dest = src1 << tmp; \
683
- if ((dest >> tmp) != src1) { \
684
- SET_QC(); \
685
- dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
686
- if (src1 > 0) { \
687
- dest--; \
688
- } \
689
- } \
690
- }} while (0)
691
+#define NEON_FN(dest, src1, src2) \
692
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
693
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
694
+#undef NEON_FN
695
+
696
+#define NEON_FN(dest, src1, src2) \
697
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
698
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
699
#undef NEON_FN
700
701
-/* The addition of the rounding constant may overflow, so we use an
702
- * intermediate 64 bit accumulator. */
703
-uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
704
+uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
705
{
706
- int32_t dest;
707
- int32_t val = (int32_t)valop;
708
- int8_t shift = (int8_t)shiftop;
709
- if (shift >= 32) {
710
- if (val) {
711
- SET_QC();
712
- dest = (val >> 31) ^ ~SIGNBIT;
713
- } else {
714
- dest = 0;
715
- }
716
- } else if (shift <= -32) {
717
- dest = 0;
718
- } else if (shift < 0) {
719
- int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
720
- dest = big_dest >> -shift;
721
- } else {
722
- dest = val << shift;
723
- if ((dest >> shift) != val) {
724
- SET_QC();
725
- dest = (val >> 31) ^ ~SIGNBIT;
726
- }
727
- }
728
- return dest;
729
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc);
730
}
731
732
-/* Handling addition overflow with 64 bit input values is more
733
- * tricky than with 32 bit values. */
734
-uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
735
+uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
736
{
737
- int8_t shift = (uint8_t)shiftop;
738
- int64_t val = valop;
739
-
740
- if (shift >= 64) {
741
- if (val) {
742
- SET_QC();
743
- val = (val >> 63) ^ ~SIGNBIT64;
744
- }
745
- } else if (shift <= -64) {
746
- val = 0;
747
- } else if (shift < 0) {
748
- val >>= (-shift - 1);
749
- if (val == INT64_MAX) {
750
- /* In this case, it means that the rounding constant is 1,
751
- * and the addition would overflow. Return the actual
752
- * result directly. */
753
- val = 0x4000000000000000ULL;
754
- } else {
755
- val++;
756
- val >>= 1;
757
- }
758
- } else {
759
- int64_t tmp = val;
760
- val <<= shift;
761
- if ((val >> shift) != tmp) {
762
- SET_QC();
763
- val = (tmp >> 63) ^ ~SIGNBIT64;
764
- }
765
- }
766
- return val;
767
+ return do_sqrshl_d(val, (int8_t)shift, true, env->vfp.qc);
768
}
769
770
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
771
--
2.20.1

--
2.34.1
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Forward tlb_flush_page_bits_by_mmuidx to tlb_flush_range_by_mmuidx,
3
Inline pickNaNMulAdd into its only caller. This makes
4
passing TARGET_PAGE_SIZE.
4
one assert redundant with the immediately preceding "if" statement.
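A hypothetical call site, to show how the new range interface reads
(the 64-page length and mmu index are made up for illustration; the
signature is taken from the patch below):

    /*
     * Drop translations for 64 contiguous pages in mmu index 0,
     * comparing all virtual address bits.
     */
    tlb_flush_range_by_mmuidx(cpu, addr, 64 * TARGET_PAGE_SIZE,
                              1 << 0, TARGET_LONG_BITS);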
5
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Message-id: 20210509151618.2331764-5-f4bug@amsat.org
8
Message-id: 20241203203949.483774-3-richard.henderson@linaro.org
9
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
9
[PMM: keep comment from old code in new location]
10
[PMD: Split from bigger patch]
11
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
11
---
15
include/exec/exec-all.h | 19 +++++++++++++++++++
12
fpu/softfloat-parts.c.inc | 41 +++++++++++++++++++++++++-
16
 fpu/softfloat-specialize.c.inc | 54 ----------------------------------
 2 files changed, 40 insertions(+), 55 deletions(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
     }
 
     if (s->default_nan_mode) {
+        /*
+         * We guarantee not to require the target to tell us how to
+         * pick a NaN if we're always returning the default NaN.
+         * But if we're not in default-NaN mode then the target must
+         * specify.
+         */
         which = 3;
+    } else if (infzero) {
+        /*
+         * Inf * 0 + NaN -- some implementations return the
+         * default NaN here, and some return the input NaN.
+         */
+        switch (s->float_infzeronan_rule) {
+        case float_infzeronan_dnan_never:
+            which = 2;
+            break;
+        case float_infzeronan_dnan_always:
+            which = 3;
+            break;
+        case float_infzeronan_dnan_if_qnan:
+            which = is_qnan(c->cls) ? 3 : 2;
+            break;
+        default:
+            g_assert_not_reached();
+        }
     } else {
-        which = pickNaNMulAdd(a->cls, b->cls, c->cls, infzero, have_snan, s);
+        FloatClass cls[3] = { a->cls, b->cls, c->cls };
+        Float3NaNPropRule rule = s->float_3nan_prop_rule;
+
+        assert(rule != float_3nan_prop_none);
+        if (have_snan && (rule & R_3NAN_SNAN_MASK)) {
+            /* We have at least one SNaN input and should prefer it */
+            do {
+                which = rule & R_3NAN_1ST_MASK;
+                rule >>= R_3NAN_1ST_LENGTH;
+            } while (!is_snan(cls[which]));
+        } else {
+            do {
+                which = rule & R_3NAN_1ST_MASK;
+                rule >>= R_3NAN_1ST_LENGTH;
+            } while (!is_nan(cls[which]));
+        }
     }
 
     if (which == 3) {
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ static int pickNaN(FloatClass a_cls, FloatClass b_cls,
     }
 }
 
-/*----------------------------------------------------------------------------
-| Select which NaN to propagate for a three-input operation.
-| For the moment we assume that no CPU needs the 'larger significand'
-| information.
-| Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN
-*----------------------------------------------------------------------------*/
-static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
-                         bool infzero, bool have_snan, float_status *status)
-{
-    FloatClass cls[3] = { a_cls, b_cls, c_cls };
-    Float3NaNPropRule rule = status->float_3nan_prop_rule;
-    int which;
-
-    /*
-     * We guarantee not to require the target to tell us how to
-     * pick a NaN if we're always returning the default NaN.
-     * But if we're not in default-NaN mode then the target must
-     * specify.
-     */
-    assert(!status->default_nan_mode);
-
-    if (infzero) {
-        /*
-         * Inf * 0 + NaN -- some implementations return the default NaN here,
-         * and some return the input NaN.
-         */
-        switch (status->float_infzeronan_rule) {
-        case float_infzeronan_dnan_never:
-            return 2;
-        case float_infzeronan_dnan_always:
-            return 3;
-        case float_infzeronan_dnan_if_qnan:
-            return is_qnan(c_cls) ? 3 : 2;
-        default:
-            g_assert_not_reached();
-        }
-    }
-
-    assert(rule != float_3nan_prop_none);
-    if (have_snan && (rule & R_3NAN_SNAN_MASK)) {
-        /* We have at least one SNaN input and should prefer it */
-        do {
-            which = rule & R_3NAN_1ST_MASK;
-            rule >>= R_3NAN_1ST_LENGTH;
-        } while (!is_snan(cls[which]));
-    } else {
-        do {
-            which = rule & R_3NAN_1ST_MASK;
-            rule >>= R_3NAN_1ST_LENGTH;
-        } while (!is_nan(cls[which]));
-    }
-    return which;
-}
-
 /*----------------------------------------------------------------------------
 | Returns 1 if the double-precision floating-point value `a' is a quiet
 | NaN; otherwise returns 0.
--
2.34.1
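For reference, the packed-rule decode that the hunks above rely on can be read in isolation. The sketch below assumes the R_3NAN_1ST_* fields are two bits wide and uses a made-up rule value; it illustrates the loop's logic and is not code from the tree:

/* Decode a packed 3-NaN propagation rule the way the loop above does:
 * each 2-bit field names operand 0 (a), 1 (b) or 2 (c), most preferred
 * first; keep consuming fields until the named operand is a NaN.
 * Assumes at least one operand is a NaN, as the callers guarantee. */
#include <stdbool.h>
#include <stdio.h>

enum { R_3NAN_1ST_MASK = 3, R_3NAN_1ST_LENGTH = 2 };   /* assumed widths */

static int pick(unsigned rule, const bool is_nan[3])
{
    int which;
    do {
        which = rule & R_3NAN_1ST_MASK;   /* most-preferred remaining field */
        rule >>= R_3NAN_1ST_LENGTH;       /* advance to the next choice */
    } while (!is_nan[which]);
    return which;
}

int main(void)
{
    /* Hypothetical rule preferring c, then a, then b: fields 2, 0, 1. */
    unsigned rule = 2 | (0 << 2) | (1 << 4);
    bool nans[3] = { true, false, false };            /* only 'a' is a NaN */
    printf("picked operand %d\n", pick(rule, nans));  /* prints 0 */
    return 0;
}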
From: Richard Henderson <richard.henderson@linaro.org>

Remove "3" as a special case for which and simply
branch to return the desired value.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20241203203949.483774-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat-parts.c.inc | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
          * But if we're not in default-NaN mode then the target must
          * specify.
          */
-        which = 3;
+        goto default_nan;
     } else if (infzero) {
         /*
          * Inf * 0 + NaN -- some implementations return the
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
          */
         switch (s->float_infzeronan_rule) {
         case float_infzeronan_dnan_never:
-            which = 2;
             break;
         case float_infzeronan_dnan_always:
-            which = 3;
-            break;
+            goto default_nan;
         case float_infzeronan_dnan_if_qnan:
-            which = is_qnan(c->cls) ? 3 : 2;
+            if (is_qnan(c->cls)) {
+                goto default_nan;
+            }
             break;
         default:
             g_assert_not_reached();
         }
+        which = 2;
     } else {
         FloatClass cls[3] = { a->cls, b->cls, c->cls };
         Float3NaNPropRule rule = s->float_3nan_prop_rule;
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
         }
     }
 
-    if (which == 3) {
-        parts_default_nan(a, s);
-        return a;
-    }
-
     switch (which) {
     case 0:
         break;
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
         parts_silence_nan(a, s);
     }
     return a;
+
+ default_nan:
+    parts_default_nan(a, s);
+    return a;
 }
 
 /*
--
2.34.1
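The shape this patch moves to is the standard C idiom of funnelling every exceptional exit through one label at the end of the function; a minimal sketch of the pattern (illustrative only, not code from the tree):

#include <stdbool.h>

/* All "produce the default NaN" exits branch to one label, so the
 * mainline code no longer needs a sentinel value for 'which'. */
static int select_or_default(bool always_default, bool input_is_quiet)
{
    if (always_default) {
        goto default_nan;
    }
    if (input_is_quiet) {
        goto default_nan;
    }
    return 2;               /* normal selection */

default_nan:
    return -1;              /* single exit for the default-NaN path */
}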
From: Richard Henderson <richard.henderson@linaro.org>

Assign the pointer return value to 'a' directly,
rather than going through an intermediary index.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20241203203949.483774-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat-parts.c.inc | 32 ++++++++++----------------------
 1 file changed, 10 insertions(+), 22 deletions(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
                                             FloatPartsN *c, float_status *s,
                                             int ab_mask, int abc_mask)
 {
-    int which;
     bool infzero = (ab_mask == float_cmask_infzero);
     bool have_snan = (abc_mask & float_cmask_snan);
+    FloatPartsN *ret;
 
     if (unlikely(have_snan)) {
         float_raise(float_flag_invalid | float_flag_invalid_snan, s);
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
         default:
             g_assert_not_reached();
         }
-        which = 2;
+        ret = c;
     } else {
-        FloatClass cls[3] = { a->cls, b->cls, c->cls };
+        FloatPartsN *val[3] = { a, b, c };
         Float3NaNPropRule rule = s->float_3nan_prop_rule;
 
         assert(rule != float_3nan_prop_none);
         if (have_snan && (rule & R_3NAN_SNAN_MASK)) {
             /* We have at least one SNaN input and should prefer it */
             do {
-                which = rule & R_3NAN_1ST_MASK;
+                ret = val[rule & R_3NAN_1ST_MASK];
                 rule >>= R_3NAN_1ST_LENGTH;
-            } while (!is_snan(cls[which]));
+            } while (!is_snan(ret->cls));
         } else {
             do {
-                which = rule & R_3NAN_1ST_MASK;
+                ret = val[rule & R_3NAN_1ST_MASK];
                 rule >>= R_3NAN_1ST_LENGTH;
-            } while (!is_nan(cls[which]));
+            } while (!is_nan(ret->cls));
         }
     }
 
-    switch (which) {
-    case 0:
-        break;
-    case 1:
-        a = b;
-        break;
-    case 2:
-        a = c;
-        break;
-    default:
-        g_assert_not_reached();
+    if (is_snan(ret->cls)) {
+        parts_silence_nan(ret, s);
     }
-    if (is_snan(a->cls)) {
-        parts_silence_nan(a, s);
-    }
-    return a;
+    return ret;
 
 default_nan:
     parts_default_nan(a, s);
--
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

While all indices into val[] should be in [0-2], the mask
applied is two bits.  To help static analysis see there is
no possibility of read beyond the end of the array, pad the
array to 4 entries, with the final being (implicitly) NULL.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20241203203949.483774-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat-parts.c.inc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
         }
         ret = c;
     } else {
-        FloatPartsN *val[3] = { a, b, c };
+        FloatPartsN *val[R_3NAN_1ST_MASK + 1] = { a, b, c };
         Float3NaNPropRule rule = s->float_3nan_prop_rule;
 
         assert(rule != float_3nan_prop_none);
--
2.34.1
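Spelling out the bounds argument: a value masked with a two-bit mask ranges over 0..3 even though well-formed rules only ever name operands 0..2, so sizing the array by the mask makes every masked index in-bounds by construction. A standalone sketch, with the mask width assumed as above:

#include <stddef.h>

enum { R_3NAN_1ST_MASK = 3 };               /* assumed: two-bit field */

static int *pick_slot(int *a, int *b, int *c, unsigned rule)
{
    /* R_3NAN_1ST_MASK + 1 == 4 entries; entry 3 is implicitly NULL and
     * unreachable for well-formed rules, but provably in-bounds. */
    int *val[R_3NAN_1ST_MASK + 1] = { a, b, c };
    return val[rule & R_3NAN_1ST_MASK];
}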
From: Richard Henderson <richard.henderson@linaro.org>

This function is part of the public interface and
is not "specialized" to any target in any way.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241203203949.483774-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat.c                | 52 ++++++++++++++++++++++++++++++++++
 fpu/softfloat-specialize.c.inc | 52 ----------------------------------
 2 files changed, 52 insertions(+), 52 deletions(-)

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ void normalizeFloatx80Subnormal(uint64_t aSig, int32_t *zExpPtr,
     *zExpPtr = 1 - shiftCount;
 }
 
+/*----------------------------------------------------------------------------
+| Takes two extended double-precision floating-point values `a' and `b', one
+| of which is a NaN, and returns the appropriate NaN result.  If either `a' or
+| `b' is a signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status)
+{
+    bool aIsLargerSignificand;
+    FloatClass a_cls, b_cls;
+
+    /* This is not complete, but is good enough for pickNaN. */
+    a_cls = (!floatx80_is_any_nan(a)
+             ? float_class_normal
+             : floatx80_is_signaling_nan(a, status)
+             ? float_class_snan
+             : float_class_qnan);
+    b_cls = (!floatx80_is_any_nan(b)
+             ? float_class_normal
+             : floatx80_is_signaling_nan(b, status)
+             ? float_class_snan
+             : float_class_qnan);
+
+    if (is_snan(a_cls) || is_snan(b_cls)) {
+        float_raise(float_flag_invalid, status);
+    }
+
+    if (status->default_nan_mode) {
+        return floatx80_default_nan(status);
+    }
+
+    if (a.low < b.low) {
+        aIsLargerSignificand = 0;
+    } else if (b.low < a.low) {
+        aIsLargerSignificand = 1;
+    } else {
+        aIsLargerSignificand = (a.high < b.high) ? 1 : 0;
+    }
+
+    if (pickNaN(a_cls, b_cls, aIsLargerSignificand, status)) {
+        if (is_snan(b_cls)) {
+            return floatx80_silence_nan(b, status);
+        }
+        return b;
+    } else {
+        if (is_snan(a_cls)) {
+            return floatx80_silence_nan(a, status);
+        }
+        return a;
+    }
+}
+
 /*----------------------------------------------------------------------------
 | Takes an abstract floating-point value having sign `zSign', exponent `zExp',
 | and extended significand formed by the concatenation of `zSig0' and `zSig1',
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ floatx80 floatx80_silence_nan(floatx80 a, float_status *status)
     return a;
 }
 
-/*----------------------------------------------------------------------------
-| Takes two extended double-precision floating-point values `a' and `b', one
-| of which is a NaN, and returns the appropriate NaN result.  If either `a' or
-| `b' is a signaling NaN, the invalid exception is raised.
-*----------------------------------------------------------------------------*/
-
-floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status)
-{
-    bool aIsLargerSignificand;
-    FloatClass a_cls, b_cls;
-
-    /* This is not complete, but is good enough for pickNaN. */
-    a_cls = (!floatx80_is_any_nan(a)
-             ? float_class_normal
-             : floatx80_is_signaling_nan(a, status)
-             ? float_class_snan
-             : float_class_qnan);
-    b_cls = (!floatx80_is_any_nan(b)
-             ? float_class_normal
-             : floatx80_is_signaling_nan(b, status)
-             ? float_class_snan
-             : float_class_qnan);
-
-    if (is_snan(a_cls) || is_snan(b_cls)) {
-        float_raise(float_flag_invalid, status);
-    }
-
-    if (status->default_nan_mode) {
-        return floatx80_default_nan(status);
-    }
-
-    if (a.low < b.low) {
-        aIsLargerSignificand = 0;
-    } else if (b.low < a.low) {
-        aIsLargerSignificand = 1;
-    } else {
-        aIsLargerSignificand = (a.high < b.high) ? 1 : 0;
-    }
-
-    if (pickNaN(a_cls, b_cls, aIsLargerSignificand, status)) {
-        if (is_snan(b_cls)) {
-            return floatx80_silence_nan(b, status);
-        }
-        return b;
-    } else {
-        if (is_snan(a_cls)) {
-            return floatx80_silence_nan(a, status);
-        }
-        return a;
-    }
-}
-
 /*----------------------------------------------------------------------------
 | Returns 1 if the quadruple-precision floating-point value `a' is a quiet
 | NaN; otherwise returns 0.
--
2.34.1
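On the tie-break logic being moved: floatx80 keeps the full 64-bit significand in 'low' and the sign/exponent word in 'high', so "larger significand" is a plain integer compare, with the sign word breaking exact ties in favour of the positive operand. A standalone restatement, with the struct abbreviated rather than taken from the tree:

#include <stdbool.h>
#include <stdint.h>

/* Abbreviated stand-in for floatx80: 64-bit significand in 'low',
 * sign and exponent packed in 'high'. */
typedef struct { uint64_t low; uint16_t high; } fx80;

static bool a_is_larger_significand(fx80 a, fx80 b)
{
    if (a.low != b.low) {
        return a.low > b.low;
    }
    /* Equal significands: prefer the operand with the positive sign,
     * i.e. the smaller 'high' word, exactly as the moved code does. */
    return a.high < b.high;
}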
From: Richard Henderson <richard.henderson@linaro.org>

Unpacking and repacking the parts may be slightly more work
than we did before, but we get to reuse more code.  For a
code path handling exceptional values, this is an improvement.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20241203203949.483774-8-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat.c | 43 +++++--------------------------------------
 1 file changed, 5 insertions(+), 38 deletions(-)

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ void normalizeFloatx80Subnormal(uint64_t aSig, int32_t *zExpPtr,
 
 floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status)
 {
-    bool aIsLargerSignificand;
-    FloatClass a_cls, b_cls;
+    FloatParts128 pa, pb, *pr;
 
-    /* This is not complete, but is good enough for pickNaN. */
-    a_cls = (!floatx80_is_any_nan(a)
-             ? float_class_normal
-             : floatx80_is_signaling_nan(a, status)
-             ? float_class_snan
-             : float_class_qnan);
-    b_cls = (!floatx80_is_any_nan(b)
-             ? float_class_normal
-             : floatx80_is_signaling_nan(b, status)
-             ? float_class_snan
-             : float_class_qnan);
-
-    if (is_snan(a_cls) || is_snan(b_cls)) {
-        float_raise(float_flag_invalid, status);
-    }
-
-    if (status->default_nan_mode) {
+    if (!floatx80_unpack_canonical(&pa, a, status) ||
+        !floatx80_unpack_canonical(&pb, b, status)) {
         return floatx80_default_nan(status);
     }
 
-    if (a.low < b.low) {
-        aIsLargerSignificand = 0;
-    } else if (b.low < a.low) {
-        aIsLargerSignificand = 1;
-    } else {
-        aIsLargerSignificand = (a.high < b.high) ? 1 : 0;
-    }
-
-    if (pickNaN(a_cls, b_cls, aIsLargerSignificand, status)) {
-        if (is_snan(b_cls)) {
-            return floatx80_silence_nan(b, status);
-        }
-        return b;
-    } else {
-        if (is_snan(a_cls)) {
-            return floatx80_silence_nan(a, status);
-        }
-        return a;
-    }
+    pr = parts_pick_nan(&pa, &pb, status);
+    return floatx80_round_pack_canonical(pr, status);
 }
 
 /*----------------------------------------------------------------------------
--
2.34.1
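With propagateFloatx80NaN now routed through the generic code, every NaN decision is driven by fields in float_status that targets fill in at runtime. As a usage note, target FP init code selects its rules along these lines (set_float_2nan_prop_rule is the setter named by the old pickNaN comment; the surrounding function and the infzeronan setter name are assumptions here):

/* Illustrative target init; assumes QEMU's "fpu/softfloat.h" declarations. */
static void my_target_fp_init(float_status *s)
{
    /* x87-style two-operand NaN propagation, as described above. */
    set_float_2nan_prop_rule(float_2nan_prop_x87, s);
    /* Inf * 0 + NaN returns the default NaN (assumed setter name). */
    set_float_infzeronan_rule(float_infzeronan_dnan_always, s);
}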
From: Richard Henderson <richard.henderson@linaro.org>

Inline pickNaN into its only caller.  This makes one assert
redundant with the immediately preceding IF.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20241203203949.483774-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat-parts.c.inc      | 82 +++++++++++++++++++++++++----
 fpu/softfloat-specialize.c.inc | 96 ----------------------------------
 2 files changed, 73 insertions(+), 105 deletions(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static void partsN(return_nan)(FloatPartsN *a, float_status *s)
 static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
                                      float_status *s)
 {
+    int cmp, which;
+
     if (is_snan(a->cls) || is_snan(b->cls)) {
         float_raise(float_flag_invalid | float_flag_invalid_snan, s);
     }
 
     if (s->default_nan_mode) {
         parts_default_nan(a, s);
-    } else {
-        int cmp = frac_cmp(a, b);
-        if (cmp == 0) {
-            cmp = a->sign < b->sign;
-        }
+        return a;
+    }
 
-        if (pickNaN(a->cls, b->cls, cmp > 0, s)) {
-            a = b;
-        }
+    cmp = frac_cmp(a, b);
+    if (cmp == 0) {
+        cmp = a->sign < b->sign;
+    }
+
+    switch (s->float_2nan_prop_rule) {
+    case float_2nan_prop_s_ab:
         if (is_snan(a->cls)) {
-            parts_silence_nan(a, s);
+            which = 0;
+        } else if (is_snan(b->cls)) {
+            which = 1;
+        } else if (is_qnan(a->cls)) {
+            which = 0;
+        } else {
+            which = 1;
         }
+        break;
+    case float_2nan_prop_s_ba:
+        if (is_snan(b->cls)) {
+            which = 1;
+        } else if (is_snan(a->cls)) {
+            which = 0;
+        } else if (is_qnan(b->cls)) {
+            which = 1;
+        } else {
+            which = 0;
+        }
+        break;
+    case float_2nan_prop_ab:
+        which = is_nan(a->cls) ? 0 : 1;
+        break;
+    case float_2nan_prop_ba:
+        which = is_nan(b->cls) ? 1 : 0;
+        break;
+    case float_2nan_prop_x87:
+        /*
+         * This implements x87 NaN propagation rules:
+         * SNaN + QNaN => return the QNaN
+         * two SNaNs => return the one with the larger significand, silenced
+         * two QNaNs => return the one with the larger significand
+         * SNaN and a non-NaN => return the SNaN, silenced
+         * QNaN and a non-NaN => return the QNaN
+         *
+         * If we get down to comparing significands and they are the same,
+         * return the NaN with the positive sign bit (if any).
+         */
+        if (is_snan(a->cls)) {
+            if (is_snan(b->cls)) {
+                which = cmp > 0 ? 0 : 1;
+            } else {
+                which = is_qnan(b->cls) ? 1 : 0;
+            }
+        } else if (is_qnan(a->cls)) {
+            if (is_snan(b->cls) || !is_qnan(b->cls)) {
+                which = 0;
+            } else {
+                which = cmp > 0 ? 0 : 1;
+            }
+        } else {
+            which = 1;
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    if (which) {
+        a = b;
+    }
+    if (is_snan(a->cls)) {
+        parts_silence_nan(a, s);
     }
     return a;
 }
diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-specialize.c.inc
+++ b/fpu/softfloat-specialize.c.inc
@@ -XXX,XX +XXX,XX @@ bool float32_is_signaling_nan(float32 a_, float_status *status)
     }
 }
 
-/*----------------------------------------------------------------------------
-| Select which NaN to propagate for a two-input operation.
-| IEEE754 doesn't specify all the details of this, so the
-| algorithm is target-specific.
-| The routine is passed various bits of information about the
-| two NaNs and should return 0 to select NaN a and 1 for NaN b.
-| Note that signalling NaNs are always squashed to quiet NaNs
-| by the caller, by calling floatXX_silence_nan() before
-| returning them.
-|
-| aIsLargerSignificand is only valid if both a and b are NaNs
-| of some kind, and is true if a has the larger significand,
-| or if both a and b have the same significand but a is
-| positive but b is negative. It is only needed for the x87
-| tie-break rule.
-*----------------------------------------------------------------------------*/
-
-static int pickNaN(FloatClass a_cls, FloatClass b_cls,
-                   bool aIsLargerSignificand, float_status *status)
-{
-    /*
-     * We guarantee not to require the target to tell us how to
-     * pick a NaN if we're always returning the default NaN.
-     * But if we're not in default-NaN mode then the target must
-     * specify via set_float_2nan_prop_rule().
-     */
-    assert(!status->default_nan_mode);
-
-    switch (status->float_2nan_prop_rule) {
-    case float_2nan_prop_s_ab:
-        if (is_snan(a_cls)) {
-            return 0;
-        } else if (is_snan(b_cls)) {
-            return 1;
-        } else if (is_qnan(a_cls)) {
-            return 0;
-        } else {
-            return 1;
-        }
-        break;
-    case float_2nan_prop_s_ba:
-        if (is_snan(b_cls)) {
-            return 1;
-        } else if (is_snan(a_cls)) {
-            return 0;
-        } else if (is_qnan(b_cls)) {
-            return 1;
-        } else {
-            return 0;
-        }
-        break;
-    case float_2nan_prop_ab:
-        if (is_nan(a_cls)) {
-            return 0;
-        } else {
-            return 1;
-        }
-        break;
-    case float_2nan_prop_ba:
-        if (is_nan(b_cls)) {
-            return 1;
-        } else {
-            return 0;
-        }
-        break;
-    case float_2nan_prop_x87:
-        /*
-         * This implements x87 NaN propagation rules:
-         * SNaN + QNaN => return the QNaN
-         * two SNaNs => return the one with the larger significand, silenced
-         * two QNaNs => return the one with the larger significand
-         * SNaN and a non-NaN => return the SNaN, silenced
-         * QNaN and a non-NaN => return the QNaN
-         *
-         * If we get down to comparing significands and they are the same,
-         * return the NaN with the positive sign bit (if any).
-         */
-        if (is_snan(a_cls)) {
-            if (is_snan(b_cls)) {
-                return aIsLargerSignificand ? 0 : 1;
-            }
-            return is_qnan(b_cls) ? 1 : 0;
-        } else if (is_qnan(a_cls)) {
-            if (is_snan(b_cls) || !is_qnan(b_cls)) {
-                return 0;
-            } else {
-                return aIsLargerSignificand ? 0 : 1;
-            }
-        } else {
-            return 1;
-        }
-    default:
-        g_assert_not_reached();
-    }
-}
-
 /*----------------------------------------------------------------------------
 | Returns 1 if the double-precision floating-point value `a' is a quiet
 | NaN; otherwise returns 0.
--
2.34.1


From: Richard Henderson <richard.henderson@linaro.org>

Remember if there was an SNaN, and use that to simplify
float_2nan_prop_s_{ab,ba} to only the snan component.
Then, fall through to the corresponding
float_2nan_prop_{ab,ba} case to handle any remaining
nans, which must be quiet.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20241203203949.483774-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat-parts.c.inc | 32 ++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static void partsN(return_nan)(FloatPartsN *a, float_status *s)
 static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
                                      float_status *s)
 {
+    bool have_snan = false;
     int cmp, which;
 
     if (is_snan(a->cls) || is_snan(b->cls)) {
         float_raise(float_flag_invalid | float_flag_invalid_snan, s);
+        have_snan = true;
     }
 
     if (s->default_nan_mode) {
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
 
     switch (s->float_2nan_prop_rule) {
     case float_2nan_prop_s_ab:
-        if (is_snan(a->cls)) {
-            which = 0;
-        } else if (is_snan(b->cls)) {
-            which = 1;
-        } else if (is_qnan(a->cls)) {
-            which = 0;
-        } else {
-            which = 1;
+        if (have_snan) {
+            which = is_snan(a->cls) ? 0 : 1;
+            break;
         }
-        break;
-    case float_2nan_prop_s_ba:
-        if (is_snan(b->cls)) {
- if (is_snan(b->cls)) {
101
+
53
- which = 1;
102
/* update the TLBs so that writes to code in the virtual page 'addr'
54
- } else if (is_snan(a->cls)) {
103
can be detected */
55
- which = 0;
104
void tlb_protect_code(ram_addr_t ram_addr)
56
- } else if (is_qnan(b->cls)) {
57
- which = 1;
58
- } else {
59
- which = 0;
60
- }
61
- break;
62
+ /* fall through */
63
case float_2nan_prop_ab:
64
which = is_nan(a->cls) ? 0 : 1;
65
break;
66
+ case float_2nan_prop_s_ba:
67
+ if (have_snan) {
68
+ which = is_snan(b->cls) ? 1 : 0;
69
+ break;
70
+ }
71
+ /* fall through */
72
case float_2nan_prop_ba:
73
which = is_nan(b->cls) ? 1 : 0;
74
break;
105
--
75
--
106
2.20.1
76
2.34.1
107
108
diff view generated by jsdifflib
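
For readers new to the softfloat NaN machinery, the shape of the rewritten
rule is easier to see in isolation. A hedged sketch of the s_ab selection
after this patch, with plain flags standing in for is_snan()/is_nan() on
FloatPartsN (all names here are illustrative):

    /* Return 0 to pick operand a, 1 to pick operand b. */
    static int pick_2nan_s_ab(int snan_a, int snan_b, int nan_a)
    {
        int have_snan = snan_a || snan_b;

        if (have_snan) {
            /* A signalling NaN is present: prefer a's SNaN, else b's. */
            return snan_a ? 0 : 1;
        }
        /* Only quiet NaNs remain: identical to float_2nan_prop_ab. */
        return nan_a ? 0 : 1;
    }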

From: Richard Henderson <richard.henderson@linaro.org>

Forward tlb_flush_page_bits_by_mmuidx_all_cpus to
tlb_flush_range_by_mmuidx_all_cpus passing TARGET_PAGE_SIZE.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-6-f4bug@amsat.org
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/exec/exec-all.h | 13 +++++++++++++
 accel/tcg/cputlb.c      | 24 +++++++++++++++++-------
 2 files changed, 30 insertions(+), 7 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
 void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                target_ulong len, uint16_t idxmap,
                                unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+                                        target_ulong len, uint16_t idxmap,
+                                        unsigned bits);
+
 /**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
@@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                              unsigned bits)
 {
 }
+static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
+                                                      target_ulong addr,
+                                                      target_ulong len,
+                                                      uint16_t idxmap,
+                                                      unsigned bits)
+{
+}
 #endif
 /**
  * probe_access:
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
 }
 
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
-                                            target_ulong addr,
-                                            uint16_t idxmap,
-                                            unsigned bits)
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
+                                        target_ulong addr, target_ulong len,
+                                        uint16_t idxmap, unsigned bits)
 {
     TLBFlushRangeData d;
     CPUState *dst_cpu;
 
-    /* If all bits are significant, this devolves to tlb_flush_page. */
-    if (bits >= TARGET_LONG_BITS) {
+    /*
+     * If all bits are significant, and len is small,
+     * this devolves to tlb_flush_page.
+     */
+    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
         return;
     }
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
-    d.len = TARGET_PAGE_SIZE;
+    d.len = len;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
     tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
 }
 
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
+                                            target_ulong addr,
+                                            uint16_t idxmap, unsigned bits)
+{
+    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
+                                       idxmap, bits);
+}
+
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                    target_ulong addr,
                                                    uint16_t idxmap,
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Move the fractional comparison to the end of the
float_2nan_prop_x87 case. This is not required for
any other 2nan propagation rule. Reorganize the
x87 case itself to break out of the switch when the
fractional comparison is not required.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20241203203949.483774-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat-parts.c.inc | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
         return a;
     }
 
-    cmp = frac_cmp(a, b);
-    if (cmp == 0) {
-        cmp = a->sign < b->sign;
-    }
-
     switch (s->float_2nan_prop_rule) {
     case float_2nan_prop_s_ab:
         if (have_snan) {
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
      * return the NaN with the positive sign bit (if any).
      */
     if (is_snan(a->cls)) {
-        if (is_snan(b->cls)) {
-            which = cmp > 0 ? 0 : 1;
-        } else {
+        if (!is_snan(b->cls)) {
             which = is_qnan(b->cls) ? 1 : 0;
+            break;
         }
     } else if (is_qnan(a->cls)) {
         if (is_snan(b->cls) || !is_qnan(b->cls)) {
             which = 0;
-        } else {
-            which = cmp > 0 ? 0 : 1;
+            break;
         }
     } else {
         which = 1;
+        break;
     }
+    cmp = frac_cmp(a, b);
+    if (cmp == 0) {
+        cmp = a->sign < b->sign;
+    }
+    which = cmp > 0 ? 0 : 1;
     break;
 default:
     g_assert_not_reached();
--
2.34.1
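
The x87 rule is the reason frac_cmp() is needed at all: when both operands
are the same kind of NaN, the one with the larger significand wins, and the
positive one wins a tie. A self-contained sketch of just that tie-break, on
raw fraction bits rather than QEMU's FloatPartsN (illustrative only):

    #include <stdint.h>

    /* Return 0 to pick a, 1 to pick b, x87-style. */
    static int pick_by_magnitude(uint64_t frac_a, int sign_a,
                                 uint64_t frac_b, int sign_b)
    {
        int cmp;

        if (frac_a != frac_b) {
            cmp = frac_a > frac_b ? 1 : -1;
        } else {
            /* Equal significands: a wins only if it is the positive one. */
            cmp = sign_a < sign_b;
        }
        return cmp > 0 ? 0 : 1;
    }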

From: Richard Henderson <richard.henderson@linaro.org>

Rename the structure to match the rename of tlb_flush_range_locked.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-4-f4bug@amsat.org
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/tcg/cputlb.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
     target_ulong len;
     uint16_t idxmap;
     uint16_t bits;
-} TLBFlushPageBitsByMMUIdxData;
+} TLBFlushRangeData;
 
 static void
 tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
-                                      TLBFlushPageBitsByMMUIdxData d)
+                                      TLBFlushRangeData d)
 {
     CPUArchState *env = cpu->env_ptr;
     int mmu_idx;
@@ -XXX,XX +XXX,XX @@ tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
 
 static bool encode_pbm_to_runon(run_on_cpu_data *out,
-                                TLBFlushPageBitsByMMUIdxData d)
+                                TLBFlushRangeData d)
 {
     /* We need 6 bits to hold @bits up to 63. */
     if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
@@ -XXX,XX +XXX,XX @@ static bool encode_pbm_to_runon(run_on_cpu_data *out,
     return false;
 }
 
-static TLBFlushPageBitsByMMUIdxData
+static TLBFlushRangeData
 decode_runon_to_pbm(run_on_cpu_data data)
 {
     target_ulong addr_map_bits = (target_ulong) data.target_ptr;
-    return (TLBFlushPageBitsByMMUIdxData){
+    return (TLBFlushRangeData){
         .addr = addr_map_bits & TARGET_PAGE_MASK,
         .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
         .bits = addr_map_bits & 0x3f
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
 static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
                                                   run_on_cpu_data data)
 {
-    TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;
+    TLBFlushRangeData *d = data.host_ptr;
     tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
     g_free(d);
 }
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                    uint16_t idxmap, unsigned bits)
 {
-    TLBFlushPageBitsByMMUIdxData d;
+    TLBFlushRangeData d;
     run_on_cpu_data runon;
 
     /* If all bits are significant, this devolves to tlb_flush_page. */
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
         async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
     } else {
         /* Otherwise allocate a structure, freed by the worker. */
-        TLBFlushPageBitsByMMUIdxData *p = g_memdup(&d, sizeof(d));
+        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
         async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                          RUN_ON_CPU_HOST_PTR(p));
     }
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                             uint16_t idxmap,
                                             unsigned bits)
 {
-    TLBFlushPageBitsByMMUIdxData d;
+    TLBFlushRangeData d;
     run_on_cpu_data runon;
 
     /* If all bits are significant, this devolves to tlb_flush_page. */
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
     /* Allocate a separate data block for each destination cpu. */
     CPU_FOREACH(dst_cpu) {
         if (dst_cpu != src_cpu) {
-            TLBFlushPageBitsByMMUIdxData *p = g_memdup(&d, sizeof(d));
+            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
             async_run_on_cpu(dst_cpu,
                              tlb_flush_page_bits_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(p));
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                    uint16_t idxmap,
                                                    unsigned bits)
 {
-    TLBFlushPageBitsByMMUIdxData d;
+    TLBFlushRangeData d;
     run_on_cpu_data runon;
 
     /* If all bits are significant, this devolves to tlb_flush_page. */
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                           runon);
     } else {
         CPUState *dst_cpu;
-        TLBFlushPageBitsByMMUIdxData *p;
+        TLBFlushRangeData *p;
 
         /* Allocate a separate data block for each destination cpu. */
         CPU_FOREACH(dst_cpu) {
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Replace the "index" selecting between A and B with a result variable
of the proper type. This improves clarity within the function.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20241203203949.483774-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 fpu/softfloat-parts.c.inc | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
                                      float_status *s)
 {
     bool have_snan = false;
-    int cmp, which;
+    FloatPartsN *ret;
+    int cmp;
 
     if (is_snan(a->cls) || is_snan(b->cls)) {
         float_raise(float_flag_invalid | float_flag_invalid_snan, s);
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
     switch (s->float_2nan_prop_rule) {
     case float_2nan_prop_s_ab:
         if (have_snan) {
-            which = is_snan(a->cls) ? 0 : 1;
+            ret = is_snan(a->cls) ? a : b;
             break;
         }
         /* fall through */
     case float_2nan_prop_ab:
-        which = is_nan(a->cls) ? 0 : 1;
+        ret = is_nan(a->cls) ? a : b;
         break;
     case float_2nan_prop_s_ba:
         if (have_snan) {
-            which = is_snan(b->cls) ? 1 : 0;
+            ret = is_snan(b->cls) ? b : a;
             break;
         }
         /* fall through */
     case float_2nan_prop_ba:
-        which = is_nan(b->cls) ? 1 : 0;
+        ret = is_nan(b->cls) ? b : a;
         break;
     case float_2nan_prop_x87:
         /*
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
          */
         if (is_snan(a->cls)) {
             if (!is_snan(b->cls)) {
-                which = is_qnan(b->cls) ? 1 : 0;
+                ret = is_qnan(b->cls) ? b : a;
                 break;
             }
         } else if (is_qnan(a->cls)) {
             if (is_snan(b->cls) || !is_qnan(b->cls)) {
-                which = 0;
+                ret = a;
                 break;
             }
         } else {
-            which = 1;
+            ret = b;
             break;
         }
         cmp = frac_cmp(a, b);
         if (cmp == 0) {
             cmp = a->sign < b->sign;
         }
-        which = cmp > 0 ? 0 : 1;
+        ret = cmp > 0 ? a : b;
         break;
     default:
         g_assert_not_reached();
     }
 
-    if (which) {
-        a = b;
+    if (is_snan(ret->cls)) {
+        parts_silence_nan(ret, s);
     }
-    if (is_snan(a->cls)) {
-        parts_silence_nan(a, s);
-    }
-    return a;
+    return ret;
 }
 
 static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
--
2.34.1
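
The refactoring pattern in the second patch is general: an integer that
encodes "which operand" forces a decode step later, while a variable of the
result's own type does not. A hedged before/after sketch, detached from
softfloat:

    typedef struct Operand { int cls; } Operand;

    /* Before: a 0/1 index, decoded at the end of the function. */
    static Operand *select_by_index(Operand *a, Operand *b, int use_b)
    {
        if (use_b) {
            a = b;
        }
        return a;
    }

    /* After: the result has the proper type from the start. */
    static Operand *select_directly(Operand *a, Operand *b, int use_b)
    {
        return use_b ? b : a;
    }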

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

When selecting an ARM target on Debian unstable, we get:

  Compiling C++ object libcommon.fa.p/disas_libvixl_vixl_utils.cc.o
  FAILED: libcommon.fa.p/disas_libvixl_vixl_utils.cc.o
  c++ -Ilibcommon.fa.p -I. -I.. [...] -o libcommon.fa.p/disas_libvixl_vixl_utils.cc.o -c ../disas/libvixl/vixl/utils.cc
  In file included from /home/philmd/qemu/disas/libvixl/vixl/utils.h:30,
                   from ../disas/libvixl/vixl/utils.cc:27:
  /usr/include/string.h:36:43: error: missing binary operator before token "("
     36 | #if defined __cplusplus && (__GNUC_PREREQ (4, 4) \
        |                                           ^
  /usr/include/string.h:53:62: error: missing binary operator before token "("
     53 | #if defined __USE_MISC || defined __USE_XOPEN || __GLIBC_USE (ISOC2X)
        |                                                              ^
  /usr/include/string.h:165:21: error: missing binary operator before token "("
    165 |     || __GLIBC_USE (LIB_EXT2) || __GLIBC_USE (ISOC2X))
        |                     ^
  /usr/include/string.h:174:43: error: missing binary operator before token "("
    174 | #if defined __USE_XOPEN2K8 || __GLIBC_USE (LIB_EXT2) || __GLIBC_USE (ISOC2X)
        |                                           ^
  /usr/include/string.h:492:19: error: missing binary operator before token "("
    492 | #if __GNUC_PREREQ (3,4)
        |                   ^

Relevant information from the host:

  $ lsb_release -d
  Description:    Debian GNU/Linux 11 (bullseye)
  $ gcc --version
  gcc (Debian 10.2.1-6) 10.2.1 20210110
  $ dpkg -S /usr/include/string.h
  libc6-dev: /usr/include/string.h
  $ apt-cache show libc6-dev
  Package: libc6-dev
  Version: 2.31-11

Partially cherry-pick vixl commit 78973f258039f6e96 [*]:

  Refactor VIXL to use `extern` block when including C header
  that do not have a C++ counterpart.

which is similar to commit 875df03b221 ('osdep: protect qemu/osdep.h
with extern "C"').

[*] https://git.linaro.org/arm/vixl.git/commit/?id=78973f258039f6e96

Buglink: https://bugs.launchpad.net/qemu/+bug/1914870
Suggested-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Message-id: 20210516171023.510778-1-f4bug@amsat.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 disas/libvixl/vixl/code-buffer.h |  2 +-
 disas/libvixl/vixl/globals.h     | 16 +++++++++-------
 disas/libvixl/vixl/invalset.h    |  2 +-
 disas/libvixl/vixl/platform.h    |  2 ++
 disas/libvixl/vixl/utils.h       |  2 +-
 disas/libvixl/vixl/utils.cc      |  2 +-
 6 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/disas/libvixl/vixl/code-buffer.h b/disas/libvixl/vixl/code-buffer.h
index XXXXXXX..XXXXXXX 100644
--- a/disas/libvixl/vixl/code-buffer.h
+++ b/disas/libvixl/vixl/code-buffer.h
@@ -XXX,XX +XXX,XX @@
 #ifndef VIXL_CODE_BUFFER_H
 #define VIXL_CODE_BUFFER_H
 
-#include <string.h>
+#include <cstring>
 #include "vixl/globals.h"
 
 namespace vixl {
diff --git a/disas/libvixl/vixl/globals.h b/disas/libvixl/vixl/globals.h
index XXXXXXX..XXXXXXX 100644
--- a/disas/libvixl/vixl/globals.h
+++ b/disas/libvixl/vixl/globals.h
@@ -XXX,XX +XXX,XX @@
 #define __STDC_FORMAT_MACROS
 #endif
 
-#include <stdint.h>
+extern "C" {
 #include <inttypes.h>
-
-#include <assert.h>
-#include <stdarg.h>
-#include <stdio.h>
 #include <stdint.h>
-#include <stdlib.h>
-#include <stddef.h>
+}
+
+#include <cassert>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+
 #include "vixl/platform.h"
 
 
diff --git a/disas/libvixl/vixl/invalset.h b/disas/libvixl/vixl/invalset.h
index XXXXXXX..XXXXXXX 100644
--- a/disas/libvixl/vixl/invalset.h
+++ b/disas/libvixl/vixl/invalset.h
@@ -XXX,XX +XXX,XX @@
 #ifndef VIXL_INVALSET_H_
 #define VIXL_INVALSET_H_
 
-#include <string.h>
+#include <cstring>
 
 #include <algorithm>
 #include <vector>
diff --git a/disas/libvixl/vixl/platform.h b/disas/libvixl/vixl/platform.h
index XXXXXXX..XXXXXXX 100644
--- a/disas/libvixl/vixl/platform.h
+++ b/disas/libvixl/vixl/platform.h
@@ -XXX,XX +XXX,XX @@
 #define PLATFORM_H
 
 // Define platform specific functionalities.
+extern "C" {
 #include <signal.h>
+}
 
 namespace vixl {
 inline void HostBreakpoint() { raise(SIGINT); }
diff --git a/disas/libvixl/vixl/utils.h b/disas/libvixl/vixl/utils.h
index XXXXXXX..XXXXXXX 100644
--- a/disas/libvixl/vixl/utils.h
+++ b/disas/libvixl/vixl/utils.h
@@ -XXX,XX +XXX,XX @@
 #ifndef VIXL_UTILS_H
 #define VIXL_UTILS_H
 
-#include <string.h>
 #include <cmath>
+#include <cstring>
 #include "vixl/globals.h"
 #include "vixl/compiler-intrinsics.h"
 
diff --git a/disas/libvixl/vixl/utils.cc b/disas/libvixl/vixl/utils.cc
index XXXXXXX..XXXXXXX 100644
--- a/disas/libvixl/vixl/utils.cc
+++ b/disas/libvixl/vixl/utils.cc
@@ -XXX,XX +XXX,XX @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "vixl/utils.h"
-#include <stdio.h>
+#include <cstdio>
 
 namespace vixl {
 
--
2.20.1
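
The same two options exist for any C++ translation unit that pulls in a
C-only system header: switch to the C++ counterpart header where one exists,
or wrap the include in an extern "C" block as the vixl refactor does. A
minimal C++ sketch (not from the patch):

    // Counterpart headers such as <cstring> replace <string.h> directly:
    #include <cstring>

    // Headers with no C++ counterpart get C linkage via a wrapper block:
    extern "C" {
    #include <signal.h>
    }

    int main()
    {
        return std::strlen("vixl") == 4 ? 0 : 1;
    }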

From: Richard Henderson <richard.henderson@linaro.org>

For MUL, we can rely on generic support. For SMULH and UMULH,
create some trivial helpers. For PMUL, back in a21bb78e5817,
we organized helper_gvec_pmul_b in preparation for this use.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h        | 10 ++++
 target/arm/sve.decode      | 10 ++++
 target/arm/translate-sve.c | 50 ++++++++++++++++++++
 target/arm/vec_helper.c    | 96 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 166 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ ST1_zprz 1110010 .. 00 ..... 100 ... ..... ..... \
                     @rprr_scatter_store xs=0 esz=3 scale=0
 ST1_zprz        1110010 .. 00 ..... 110 ... ..... ..... \
                     @rprr_scatter_store xs=1 esz=3 scale=0
+
+#### SVE2 Support
+
+### SVE2 Integer Multiply - Unpredicated
+
+# SVE2 integer multiply vectors (unpredicated)
+MUL_zzz         00000100 .. 1 ..... 0110 00 ..... .....  @rd_rn_rm
+SMULH_zzz       00000100 .. 1 ..... 0110 10 ..... .....  @rd_rn_rm
+UMULH_zzz       00000100 .. 1 ..... 0110 11 ..... .....  @rd_rn_rm
+PMUL_zzz        00000100 00 1 ..... 0110 01 ..... .....  @rd_rn_rm_e0
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a)
 {
     return do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz, false);
 }
+
+/*
+ * SVE2 Integer Multiply - Unpredicated
+ */
+
+static bool trans_MUL_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_fn_zzz(s, tcg_gen_gvec_mul, a->esz, a->rd, a->rn, a->rm);
+    }
+    return true;
+}
+
+static bool do_sve2_zzz_ool(DisasContext *s, arg_rrr_esz *a,
+                            gen_helper_gvec_3 *fn)
+{
+    if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
+    }
+    return true;
+}
+
+static bool trans_SMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
+        gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d,
+    };
+    return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h,
+        gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d,
+    };
+    return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b);
+}
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(simd_tblx)(void *vd, void *vm, void *venv, uint32_t desc)
     clear_tail(vd, oprsz, simd_maxsz(desc));
 }
 #endif
+
+/*
+ * NxN -> N highpart multiply
+ *
+ * TODO: expose this as a generic vector operation.
+ */
+
+void HELPER(gvec_smulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int8_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = ((int32_t)n[i] * m[i]) >> 8;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int16_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz / 2; ++i) {
+        d[i] = ((int32_t)n[i] * m[i]) >> 16;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int32_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz / 4; ++i) {
+        d[i] = ((int64_t)n[i] * m[i]) >> 32;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+    uint64_t discard;
+
+    for (i = 0; i < opr_sz / 8; ++i) {
+        muls64(&discard, &d[i], n[i], m[i]);
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    uint8_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = ((uint32_t)n[i] * m[i]) >> 8;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    uint16_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz / 2; ++i) {
+        d[i] = ((uint32_t)n[i] * m[i]) >> 16;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    uint32_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz / 4; ++i) {
+        d[i] = ((uint64_t)n[i] * m[i]) >> 32;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+    uint64_t discard;
+
+    for (i = 0; i < opr_sz / 8; ++i) {
+        mulu64(&discard, &d[i], n[i], m[i]);
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
--
2.20.1
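
All eight helpers follow one scalar recipe: widen, multiply, keep the high
half; at 64 bits, where no wider standard type is guaranteed, muls64() and
mulu64() split the product instead. A hedged standalone equivalent (the
__int128 variant assumes a GCC/Clang-style host compiler):

    #include <stdint.h>

    /* High half of a signed 32x32->64 product, one lane of gvec_smulh_s. */
    static int32_t smulh32(int32_t n, int32_t m)
    {
        return (int32_t)(((int64_t)n * m) >> 32);
    }

    /* High half of an unsigned 64x64 product, what mulu64() computes. */
    static uint64_t umulh64(uint64_t n, uint64_t m)
    {
        return (uint64_t)(((unsigned __int128)n * m) >> 64);
    }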

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 14 ++++++++++++
 target/arm/sve.decode      |  5 +++++
 target/arm/sve_helper.c    | 44 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 39 +++++++++++++++++++++++++++++++++
 4 files changed, 102 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_umulh_zpzz_s, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(sve_umulh_zpzz_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ MUL_zzz 00000100 .. 1 ..... 0110 00 ..... ..... @rd_rn_rm
 SMULH_zzz       00000100 .. 1 ..... 0110 10 ..... .....  @rd_rn_rm
 UMULH_zzz       00000100 .. 1 ..... 0110 11 ..... .....  @rd_rn_rm
 PMUL_zzz        00000100 00 1 ..... 0110 01 ..... .....  @rd_rn_rm_e0
+
+### SVE2 Integer - Predicated
+
+SADALP_zpzz     01000100 .. 000 100 101 ... ..... .....  @rdm_pg_rn
+UADALP_zpzz     01000100 .. 000 101 101 ... ..... .....  @rdm_pg_rn
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ_D(sve_asr_zpzz_d, int64_t, DO_ASR)
 DO_ZPZZ_D(sve_lsr_zpzz_d, uint64_t, DO_LSR)
 DO_ZPZZ_D(sve_lsl_zpzz_d, uint64_t, DO_LSL)
 
+static inline uint16_t do_sadalp_h(int16_t n, int16_t m)
+{
+    int8_t n1 = n, n2 = n >> 8;
+    return m + n1 + n2;
+}
+
+static inline uint32_t do_sadalp_s(int32_t n, int32_t m)
+{
+    int16_t n1 = n, n2 = n >> 16;
+    return m + n1 + n2;
+}
+
+static inline uint64_t do_sadalp_d(int64_t n, int64_t m)
+{
+    int32_t n1 = n, n2 = n >> 32;
+    return m + n1 + n2;
+}
+
+DO_ZPZZ(sve2_sadalp_zpzz_h, int16_t, H1_2, do_sadalp_h)
+DO_ZPZZ(sve2_sadalp_zpzz_s, int32_t, H1_4, do_sadalp_s)
+DO_ZPZZ_D(sve2_sadalp_zpzz_d, int64_t, do_sadalp_d)
+
+static inline uint16_t do_uadalp_h(uint16_t n, uint16_t m)
+{
+    uint8_t n1 = n, n2 = n >> 8;
+    return m + n1 + n2;
+}
+
+static inline uint32_t do_uadalp_s(uint32_t n, uint32_t m)
+{
+    uint16_t n1 = n, n2 = n >> 16;
+    return m + n1 + n2;
+}
+
+static inline uint64_t do_uadalp_d(uint64_t n, uint64_t m)
+{
+    uint32_t n1 = n, n2 = n >> 32;
+    return m + n1 + n2;
+}
+
+DO_ZPZZ(sve2_uadalp_zpzz_h, uint16_t, H1_2, do_uadalp_h)
+DO_ZPZZ(sve2_uadalp_zpzz_s, uint32_t, H1_4, do_uadalp_s)
+DO_ZPZZ_D(sve2_uadalp_zpzz_d, uint64_t, do_uadalp_d)
+
 #undef DO_ZPZZ
 #undef DO_ZPZZ_D
 
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a)
 {
     return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b);
 }
+
+/*
+ * SVE2 Integer - Predicated
+ */
+
+static bool do_sve2_zpzz_ool(DisasContext *s, arg_rprr_esz *a,
+                             gen_helper_gvec_4 *fn)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    return do_zpzz_ool(s, a, fn);
+}
+
+static bool trans_SADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+    static gen_helper_gvec_4 * const fns[3] = {
+        gen_helper_sve2_sadalp_zpzz_h,
+        gen_helper_sve2_sadalp_zpzz_s,
+        gen_helper_sve2_sadalp_zpzz_d,
+    };
+    if (a->esz == 0) {
+        return false;
+    }
+    return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
+}
+
+static bool trans_UADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+    static gen_helper_gvec_4 * const fns[3] = {
+        gen_helper_sve2_uadalp_zpzz_h,
+        gen_helper_sve2_uadalp_zpzz_s,
+        gen_helper_sve2_uadalp_zpzz_d,
+    };
+    if (a->esz == 0) {
+        return false;
+    }
+    return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
+}
--
2.20.1
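
SADALP/UADALP widen each pair of adjacent sub-elements, add them, and
accumulate into the destination lane. A hedged scalar model of one 16-bit
SADALP lane, mirroring do_sadalp_h above (illustrative, outside QEMU):

    #include <stdint.h>

    /* n carries two signed bytes; m is the accumulator lane. */
    static uint16_t sadalp_h(int16_t n, int16_t m)
    {
        int8_t lo = (int8_t)n;            /* low byte, sign-extended  */
        int8_t hi = (int8_t)(n >> 8);     /* high byte, sign-extended */
        return (uint16_t)(m + lo + hi);   /* n = 0xFF01 (-1, +1), m = 5 -> 5 */
    }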

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 13 +++++++++++
 target/arm/sve.decode      |  7 ++++++
 target/arm/sve_helper.c    | 21 +++++++++++++++++
 target/arm/translate-sve.c | 47 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 88 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve_rbit_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve2_sqabs_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqneg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_splice, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_b, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ PMUL_zzz 00000100 00 1 ..... 0110 01 ..... ..... @rd_rn_rm_e0
 
 SADALP_zpzz     01000100 .. 000 100 101 ... ..... .....  @rdm_pg_rn
 UADALP_zpzz     01000100 .. 000 101 101 ... ..... .....  @rdm_pg_rn
+
+### SVE2 integer unary operations (predicated)
+
+URECPE          01000100 .. 000 000 101 ... ..... .....  @rd_pg_rn
+URSQRTE         01000100 .. 000 001 101 ... ..... .....  @rd_pg_rn
+SQABS           01000100 .. 001 000 101 ... ..... .....  @rd_pg_rn
+SQNEG           01000100 .. 001 001 101 ... ..... .....  @rd_pg_rn
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
 DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
 DO_ZPZ_D(sve_rbit_d, uint64_t, revbit64)
 
+#define DO_SQABS(X) \
+    ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \
+       x_ >= 0 ? x_ : x_ == min_ ? -min_ - 1 : -x_; })
+
+DO_ZPZ(sve2_sqabs_b, int8_t, H1, DO_SQABS)
+DO_ZPZ(sve2_sqabs_h, int16_t, H1_2, DO_SQABS)
+DO_ZPZ(sve2_sqabs_s, int32_t, H1_4, DO_SQABS)
+DO_ZPZ_D(sve2_sqabs_d, int64_t, DO_SQABS)
+
+#define DO_SQNEG(X) \
+    ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \
+       x_ == min_ ? -min_ - 1 : -x_; })
+
+DO_ZPZ(sve2_sqneg_b, uint8_t, H1, DO_SQNEG)
+DO_ZPZ(sve2_sqneg_h, uint16_t, H1_2, DO_SQNEG)
+DO_ZPZ(sve2_sqneg_s, uint32_t, H1_4, DO_SQNEG)
+DO_ZPZ_D(sve2_sqneg_d, uint64_t, DO_SQNEG)
+
+DO_ZPZ(sve2_urecpe_s, uint32_t, H1_4, helper_recpe_u32)
+DO_ZPZ(sve2_ursqrte_s, uint32_t, H1_4, helper_rsqrte_u32)
+
 /* Three-operand expander, unpredicated, in which the third operand is "wide".
  */
 #define DO_ZZW(NAME, TYPE, TYPEW, H, OP) \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_UADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
     }
     return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
 }
+
+/*
+ * SVE2 integer unary operations (predicated)
+ */
+
+static bool do_sve2_zpz_ool(DisasContext *s, arg_rpr_esz *a,
+                            gen_helper_gvec_3 *fn)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    return do_zpz_ool(s, a, fn);
+}
+
+static bool trans_URECPE(DisasContext *s, arg_rpr_esz *a)
+{
+    if (a->esz != 2) {
+        return false;
+    }
+    return do_sve2_zpz_ool(s, a, gen_helper_sve2_urecpe_s);
+}
+
+static bool trans_URSQRTE(DisasContext *s, arg_rpr_esz *a)
+{
+    if (a->esz != 2) {
+        return false;
+    }
+    return do_sve2_zpz_ool(s, a, gen_helper_sve2_ursqrte_s);
+}
+
+static bool trans_SQABS(DisasContext *s, arg_rpr_esz *a)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h,
+        gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d,
+    };
+    return do_sve2_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SQNEG(DisasContext *s, arg_rpr_esz *a)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h,
+        gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d,
+    };
+    return do_sve2_zpz_ool(s, a, fns[a->esz]);
+}
--
2.20.1
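
The only non-trivial case for SQABS/SQNEG is the most negative value, which
has no positive counterpart in two's complement and must saturate; that is
exactly what the -min_ - 1 arm of DO_SQABS computes. A hedged scalar
rendering for int8_t:

    #include <stdint.h>

    static int8_t sqabs8(int8_t x)
    {
        if (x == INT8_MIN) {
            return INT8_MAX;    /* -128 has no +128: saturate to +127 */
        }
        return x < 0 ? -x : x;
    }

    static int8_t sqneg8(int8_t x)
    {
        return x == INT8_MIN ? INT8_MAX : -x;
    }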

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-7-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 54 +++++++++++++++++++++++
 target/arm/sve.decode      | 17 ++++++++
 target/arm/sve_helper.c    | 87 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 18 ++++++++
 4 files changed, 176 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_s, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ URECPE 01000100 .. 000 000 101 ... ..... ..... @rd_pg_rn
 URSQRTE         01000100 .. 000 001 101 ... ..... .....  @rd_pg_rn
 SQABS           01000100 .. 001 000 101 ... ..... .....  @rd_pg_rn
 SQNEG           01000100 .. 001 001 101 ... ..... .....  @rd_pg_rn
+
+### SVE2 saturating/rounding bitwise shift left (predicated)
+
+SRSHL           01000100 .. 000 010 100 ... ..... .....  @rdn_pg_rm
+URSHL           01000100 .. 000 011 100 ... ..... .....  @rdn_pg_rm
+SRSHL           01000100 .. 000 110 100 ... ..... .....  @rdm_pg_rn # SRSHLR
+URSHL           01000100 .. 000 111 100 ... ..... .....  @rdm_pg_rn # URSHLR
+
+SQSHL           01000100 .. 001 000 100 ... ..... .....  @rdn_pg_rm
+UQSHL           01000100 .. 001 001 100 ... ..... .....  @rdn_pg_rm
+SQSHL           01000100 .. 001 100 100 ... ..... .....  @rdm_pg_rn # SQSHLR
+UQSHL           01000100 .. 001 101 100 ... ..... .....  @rdm_pg_rn # UQSHLR
+
+SQRSHL          01000100 .. 001 010 100 ... ..... .....  @rdn_pg_rm
+UQRSHL          01000100 .. 001 011 100 ... ..... .....  @rdn_pg_rm
+SQRSHL          01000100 .. 001 110 100 ... ..... .....  @rdm_pg_rn # SQRSHLR
+UQRSHL          01000100 .. 001 111 100 ... ..... .....  @rdm_pg_rn # UQRSHLR
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg/tcg-gvec-desc.h"
 #include "fpu/softfloat.h"
 #include "tcg/tcg.h"
+#include "vec_internal.h"
 
 
 /* Note that vector data is stored in host-endian 64-bit chunks,
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ(sve2_uadalp_zpzz_h, uint16_t, H1_2, do_uadalp_h)
 DO_ZPZZ(sve2_uadalp_zpzz_s, uint32_t, H1_4, do_uadalp_s)
 DO_ZPZZ_D(sve2_uadalp_zpzz_d, uint64_t, do_uadalp_d)
 
+#define do_srshl_b(n, m)  do_sqrshl_bhs(n, m, 8, true, NULL)
+#define do_srshl_h(n, m)  do_sqrshl_bhs(n, m, 16, true, NULL)
+#define do_srshl_s(n, m)  do_sqrshl_bhs(n, m, 32, true, NULL)
+#define do_srshl_d(n, m)  do_sqrshl_d(n, m, true, NULL)
+
+DO_ZPZZ(sve2_srshl_zpzz_b, int8_t, H1, do_srshl_b)
+DO_ZPZZ(sve2_srshl_zpzz_h, int16_t, H1_2, do_srshl_h)
+DO_ZPZZ(sve2_srshl_zpzz_s, int32_t, H1_4, do_srshl_s)
+DO_ZPZZ_D(sve2_srshl_zpzz_d, int64_t, do_srshl_d)
+
+#define do_urshl_b(n, m)  do_uqrshl_bhs(n, (int8_t)m, 8, true, NULL)
+#define do_urshl_h(n, m)  do_uqrshl_bhs(n, (int16_t)m, 16, true, NULL)
+#define do_urshl_s(n, m)  do_uqrshl_bhs(n, m, 32, true, NULL)
+#define do_urshl_d(n, m)  do_uqrshl_d(n, m, true, NULL)
+
+DO_ZPZZ(sve2_urshl_zpzz_b, uint8_t, H1, do_urshl_b)
+DO_ZPZZ(sve2_urshl_zpzz_h, uint16_t, H1_2, do_urshl_h)
+DO_ZPZZ(sve2_urshl_zpzz_s, uint32_t, H1_4, do_urshl_s)
+DO_ZPZZ_D(sve2_urshl_zpzz_d, uint64_t, do_urshl_d)
+
+/*
+ * Unlike the NEON and AdvSIMD versions, there is no QC bit to set.
+ * We pass in a pointer to a dummy saturation field to trigger
+ * the saturating arithmetic but discard the information about
+ * whether it has occurred.
+ */
+#define do_sqshl_b(n, m) \
+    ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, false, &discard); })
+#define do_sqshl_h(n, m) \
+    ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, false, &discard); })
+#define do_sqshl_s(n, m) \
+    ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, false, &discard); })
+#define do_sqshl_d(n, m) \
+    ({ uint32_t discard; do_sqrshl_d(n, m, false, &discard); })
+
+DO_ZPZZ(sve2_sqshl_zpzz_b, int8_t, H1_2, do_sqshl_b)
+DO_ZPZZ(sve2_sqshl_zpzz_h, int16_t, H1_2, do_sqshl_h)
+DO_ZPZZ(sve2_sqshl_zpzz_s, int32_t, H1_4, do_sqshl_s)
+DO_ZPZZ_D(sve2_sqshl_zpzz_d, int64_t, do_sqshl_d)
+
+#define do_uqshl_b(n, m) \
+    ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, false, &discard); })
+#define do_uqshl_h(n, m) \
+    ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, false, &discard); })
+#define do_uqshl_s(n, m) \
+    ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, false, &discard); })
+#define do_uqshl_d(n, m) \
+    ({ uint32_t discard; do_uqrshl_d(n, m, false, &discard); })
+
+DO_ZPZZ(sve2_uqshl_zpzz_b, uint8_t, H1_2, do_uqshl_b)
+DO_ZPZZ(sve2_uqshl_zpzz_h, uint16_t, H1_2, do_uqshl_h)
+DO_ZPZZ(sve2_uqshl_zpzz_s, uint32_t, H1_4, do_uqshl_s)
+DO_ZPZZ_D(sve2_uqshl_zpzz_d, uint64_t, do_uqshl_d)
+
+#define do_sqrshl_b(n, m) \
+    ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, true, &discard); })
+#define do_sqrshl_h(n, m) \
+    ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, true, &discard); })
+#define do_sqrshl_s(n, m) \
+    ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, true, &discard); })
+#define do_sqrshl_d(n, m) \
+    ({ uint32_t discard; do_sqrshl_d(n, m, true, &discard); })
+
+DO_ZPZZ(sve2_sqrshl_zpzz_b, int8_t, H1_2, do_sqrshl_b)
+DO_ZPZZ(sve2_sqrshl_zpzz_h, int16_t, H1_2, do_sqrshl_h)
+DO_ZPZZ(sve2_sqrshl_zpzz_s, int32_t, H1_4, do_sqrshl_s)
+DO_ZPZZ_D(sve2_sqrshl_zpzz_d, int64_t, do_sqrshl_d)
+
+#undef do_sqrshl_d
+
+#define do_uqrshl_b(n, m) \
+    ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, true, &discard); })
+#define do_uqrshl_h(n, m) \
+    ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, true, &discard); })
+#define do_uqrshl_s(n, m) \
+    ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, true, &discard); })
+#define do_uqrshl_d(n, m) \
+    ({ uint32_t discard; do_uqrshl_d(n, m, true, &discard); })
+
+DO_ZPZZ(sve2_uqrshl_zpzz_b, uint8_t, H1_2, do_uqrshl_b)
+DO_ZPZZ(sve2_uqrshl_zpzz_h, uint16_t, H1_2, do_uqrshl_h)
+DO_ZPZZ(sve2_uqrshl_zpzz_s, uint32_t, H1_4, do_uqrshl_s)
+DO_ZPZZ_D(sve2_uqrshl_zpzz_d, uint64_t, do_uqrshl_d)
+
+#undef do_uqrshl_d
+
 #undef DO_ZPZZ
 #undef DO_ZPZZ_D
 
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SQNEG(DisasContext *s, arg_rpr_esz *a)
     };
     return do_sve2_zpz_ool(s, a, fns[a->esz]);
 }
+
+#define DO_SVE2_ZPZZ(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a)                 \
+{                                                                          \
+    static gen_helper_gvec_4 * const fns[4] = {                            \
+        gen_helper_sve2_##name##_zpzz_b, gen_helper_sve2_##name##_zpzz_h,  \
+        gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d,  \
+    };                                                                     \
+    return do_sve2_zpzz_ool(s, a, fns[a->esz]);                            \
+}
+
+DO_SVE2_ZPZZ(SQSHL, sqshl)
+DO_SVE2_ZPZZ(SQRSHL, sqrshl)
+DO_SVE2_ZPZZ(SRSHL, srshl)
+
+DO_SVE2_ZPZZ(UQSHL, uqshl)
+DO_SVE2_ZPZZ(UQRSHL, uqrshl)
+DO_SVE2_ZPZZ(URSHL, urshl)
--
2.20.1
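
A rounding shift left by a negative count is a rounding shift right, which
is the case do_sqrshl_bhs has to get right. A hedged scalar model of one
32-bit SRSHL element, without the saturation plumbing (illustrative only):

    #include <stdint.h>

    static int32_t srshl32(int32_t n, int32_t shift)
    {
        if (shift >= 0) {
            return shift > 31 ? 0 : (int32_t)((uint32_t)n << shift);
        }
        shift = -shift;
        if (shift > 32) {
            return 0;
        }
        /* Round by adding half of the part about to be discarded. */
        return (int32_t)(((int64_t)n + ((int64_t)1 << (shift - 1))) >> shift);
    }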
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210525010358.152808-8-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/helper-sve.h | 54 ++++++++++++++++++++++++++++++++++++++
9
target/arm/sve.decode | 11 ++++++++
10
target/arm/sve_helper.c | 39 +++++++++++++++++++++++++++
11
target/arm/translate-sve.c | 8 ++++++
12
4 files changed, 112 insertions(+)
13
14
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-sve.h
17
+++ b/target/arm/helper-sve.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_s, TCG_CALL_NO_RWG,
19
DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_d, TCG_CALL_NO_RWG,
20
void, ptr, ptr, ptr, ptr, i32)
21
22
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_b, TCG_CALL_NO_RWG,
23
+ void, ptr, ptr, ptr, ptr, i32)
24
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_h, TCG_CALL_NO_RWG,
25
+ void, ptr, ptr, ptr, ptr, i32)
26
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_s, TCG_CALL_NO_RWG,
27
+ void, ptr, ptr, ptr, ptr, i32)
28
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_d, TCG_CALL_NO_RWG,
29
+ void, ptr, ptr, ptr, ptr, i32)
30
+
31
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_b, TCG_CALL_NO_RWG,
32
+ void, ptr, ptr, ptr, ptr, i32)
33
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_h, TCG_CALL_NO_RWG,
34
+ void, ptr, ptr, ptr, ptr, i32)
35
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_s, TCG_CALL_NO_RWG,
36
+ void, ptr, ptr, ptr, ptr, i32)
37
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_d, TCG_CALL_NO_RWG,
38
+ void, ptr, ptr, ptr, ptr, i32)
39
+
40
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_b, TCG_CALL_NO_RWG,
41
+ void, ptr, ptr, ptr, ptr, i32)
42
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_h, TCG_CALL_NO_RWG,
43
+ void, ptr, ptr, ptr, ptr, i32)
44
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_s, TCG_CALL_NO_RWG,
45
+ void, ptr, ptr, ptr, ptr, i32)
46
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_d, TCG_CALL_NO_RWG,
47
+ void, ptr, ptr, ptr, ptr, i32)
48
+
49
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_b, TCG_CALL_NO_RWG,
50
+ void, ptr, ptr, ptr, ptr, i32)
51
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SQRSHL 01000100 .. 001 010 100 ... ..... ..... @rdn_pg_rm
 UQRSHL 01000100 .. 001 011 100 ... ..... ..... @rdn_pg_rm
 SQRSHL 01000100 .. 001 110 100 ... ..... ..... @rdm_pg_rn # SQRSHLR
 UQRSHL 01000100 .. 001 111 100 ... ..... ..... @rdm_pg_rn # UQRSHLR
+
+### SVE2 integer halving add/subtract (predicated)
+
+SHADD 01000100 .. 010 000 100 ... ..... ..... @rdn_pg_rm
+UHADD 01000100 .. 010 001 100 ... ..... ..... @rdn_pg_rm
+SHSUB 01000100 .. 010 010 100 ... ..... ..... @rdn_pg_rm
+UHSUB 01000100 .. 010 011 100 ... ..... ..... @rdn_pg_rm
+SRHADD 01000100 .. 010 100 100 ... ..... ..... @rdn_pg_rm
+URHADD 01000100 .. 010 101 100 ... ..... ..... @rdn_pg_rm
+SHSUB 01000100 .. 010 110 100 ... ..... ..... @rdm_pg_rn # SHSUBR
+UHSUB 01000100 .. 010 111 100 ... ..... ..... @rdm_pg_rn # UHSUBR
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ_D(sve2_uqrshl_zpzz_d, uint64_t, do_uqrshl_d)
 
 #undef do_uqrshl_d
 
+#define DO_HADD_BHS(n, m) (((int64_t)n + m) >> 1)
+#define DO_HADD_D(n, m) ((n >> 1) + (m >> 1) + (n & m & 1))
+
+DO_ZPZZ(sve2_shadd_zpzz_b, int8_t, H1, DO_HADD_BHS)
+DO_ZPZZ(sve2_shadd_zpzz_h, int16_t, H1_2, DO_HADD_BHS)
+DO_ZPZZ(sve2_shadd_zpzz_s, int32_t, H1_4, DO_HADD_BHS)
+DO_ZPZZ_D(sve2_shadd_zpzz_d, int64_t, DO_HADD_D)
+
+DO_ZPZZ(sve2_uhadd_zpzz_b, uint8_t, H1, DO_HADD_BHS)
+DO_ZPZZ(sve2_uhadd_zpzz_h, uint16_t, H1_2, DO_HADD_BHS)
+DO_ZPZZ(sve2_uhadd_zpzz_s, uint32_t, H1_4, DO_HADD_BHS)
+DO_ZPZZ_D(sve2_uhadd_zpzz_d, uint64_t, DO_HADD_D)
+
+#define DO_RHADD_BHS(n, m) (((int64_t)n + m + 1) >> 1)
+#define DO_RHADD_D(n, m) ((n >> 1) + (m >> 1) + ((n | m) & 1))
+
+DO_ZPZZ(sve2_srhadd_zpzz_b, int8_t, H1, DO_RHADD_BHS)
+DO_ZPZZ(sve2_srhadd_zpzz_h, int16_t, H1_2, DO_RHADD_BHS)
+DO_ZPZZ(sve2_srhadd_zpzz_s, int32_t, H1_4, DO_RHADD_BHS)
+DO_ZPZZ_D(sve2_srhadd_zpzz_d, int64_t, DO_RHADD_D)
+
+DO_ZPZZ(sve2_urhadd_zpzz_b, uint8_t, H1, DO_RHADD_BHS)
+DO_ZPZZ(sve2_urhadd_zpzz_h, uint16_t, H1_2, DO_RHADD_BHS)
+DO_ZPZZ(sve2_urhadd_zpzz_s, uint32_t, H1_4, DO_RHADD_BHS)
+DO_ZPZZ_D(sve2_urhadd_zpzz_d, uint64_t, DO_RHADD_D)
+
+#define DO_HSUB_BHS(n, m) (((int64_t)n - m) >> 1)
+#define DO_HSUB_D(n, m) ((n >> 1) - (m >> 1) - (~n & m & 1))
+
+DO_ZPZZ(sve2_shsub_zpzz_b, int8_t, H1, DO_HSUB_BHS)
+DO_ZPZZ(sve2_shsub_zpzz_h, int16_t, H1_2, DO_HSUB_BHS)
+DO_ZPZZ(sve2_shsub_zpzz_s, int32_t, H1_4, DO_HSUB_BHS)
+DO_ZPZZ_D(sve2_shsub_zpzz_d, int64_t, DO_HSUB_D)
+
+DO_ZPZZ(sve2_uhsub_zpzz_b, uint8_t, H1, DO_HSUB_BHS)
+DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS)
+DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS)
+DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D)
+
 #undef DO_ZPZZ
 #undef DO_ZPZZ_D
 
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ(SRSHL, srshl)
 DO_SVE2_ZPZZ(UQSHL, uqshl)
 DO_SVE2_ZPZZ(UQRSHL, uqrshl)
 DO_SVE2_ZPZZ(URSHL, urshl)
+
+DO_SVE2_ZPZZ(SHADD, shadd)
+DO_SVE2_ZPZZ(SRHADD, srhadd)
+DO_SVE2_ZPZZ(SHSUB, shsub)
+
+DO_SVE2_ZPZZ(UHADD, uhadd)
+DO_SVE2_ZPZZ(URHADD, urhadd)
+DO_SVE2_ZPZZ(UHSUB, uhsub)
--
2.20.1
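
A quick standalone check of the 64-bit halving identities used above, since
they are easy to get wrong: for two's-complement values,
(n >> 1) + (m >> 1) + (n & m & 1) is floor((n + m) / 2) with no 65-bit
intermediate, and the rounding and subtracting forms follow the same
carry/borrow reasoning. This is an illustrative test, not part of the series;
the function names below (hadd64 and friends) are invented for the sketch.

#include <assert.h>
#include <stdint.h>

static int64_t hadd64(int64_t n, int64_t m)
{
    /* Average the halves, then add back the carry of the two low bits. */
    return (n >> 1) + (m >> 1) + (n & m & 1);
}

static int64_t rhadd64(int64_t n, int64_t m)
{
    /* Rounding variant: round up whenever either low bit is set. */
    return (n >> 1) + (m >> 1) + ((n | m) & 1);
}

static int64_t hsub64(int64_t n, int64_t m)
{
    /* A borrow occurs when m's low bit is set and n's is clear. */
    return (n >> 1) - (m >> 1) - (~n & m & 1);
}

int main(void)
{
    /* Compare against the widened forms, exhaustively for 8-bit inputs. */
    for (int n = -128; n < 128; n++) {
        for (int m = -128; m < 128; m++) {
            assert(hadd64(n, m) == (((int64_t)n + m) >> 1));
            assert(rhadd64(n, m) == (((int64_t)n + m + 1) >> 1));
            assert(hsub64(n, m) == (((int64_t)n - m) >> 1));
        }
    }
    return 0;
}
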
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 45 ++++++++++++++++++++++
 target/arm/sve.decode      |  8 ++++
 target/arm/sve_helper.c    | 76 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c |  6 +++
 4 files changed, 135 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SRHADD 01000100 .. 010 100 100 ... ..... ..... @rdn_pg_rm
 URHADD 01000100 .. 010 101 100 ... ..... ..... @rdn_pg_rm
 SHSUB 01000100 .. 010 110 100 ... ..... ..... @rdm_pg_rn # SHSUBR
 UHSUB 01000100 .. 010 111 100 ... ..... ..... @rdm_pg_rn # UHSUBR
+
+### SVE2 integer pairwise arithmetic
+
+ADDP 01000100 .. 010 001 101 ... ..... ..... @rdn_pg_rm
+SMAXP 01000100 .. 010 100 101 ... ..... ..... @rdn_pg_rm
+UMAXP 01000100 .. 010 101 101 ... ..... ..... @rdn_pg_rm
+SMINP 01000100 .. 010 110 101 ... ..... ..... @rdn_pg_rm
+UMINP 01000100 .. 010 111 101 ... ..... ..... @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D)
 #undef DO_ZPZZ
 #undef DO_ZPZZ_D
 
+/*
+ * Three-operand expander, operating on element pairs.
+ * If the slot I is even, the elements come from VN {I, I+1}.
+ * If the slot I is odd, the elements come from VM {I-1, I}.
+ * Load all of the input elements in each pair before overwriting output.
+ */
+#define DO_ZPZZ_PAIR(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    for (i = 0; i < opr_sz; ) { \
+        uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+        do { \
+            TYPE n0 = *(TYPE *)(vn + H(i)); \
+            TYPE m0 = *(TYPE *)(vm + H(i)); \
+            TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+            TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+            if (pg & 1) { \
+                *(TYPE *)(vd + H(i)) = OP(n0, n1); \
+            } \
+            i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+            if (pg & 1) { \
+                *(TYPE *)(vd + H(i)) = OP(m0, m1); \
+            } \
+            i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+        } while (i & 15); \
+    } \
+}
+
+/* Similarly, specialized for 64-bit operands. */
+#define DO_ZPZZ_PAIR_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+    TYPE *d = vd, *n = vn, *m = vm; \
+    uint8_t *pg = vg; \
+    for (i = 0; i < opr_sz; i += 2) { \
+        TYPE n0 = n[i], n1 = n[i + 1]; \
+        TYPE m0 = m[i], m1 = m[i + 1]; \
+        if (pg[H1(i)] & 1) { \
+            d[i] = OP(n0, n1); \
+        } \
+        if (pg[H1(i + 1)] & 1) { \
+            d[i + 1] = OP(m0, m1); \
+        } \
+    } \
+}
+
+DO_ZPZZ_PAIR(sve2_addp_zpzz_b, uint8_t, H1, DO_ADD)
+DO_ZPZZ_PAIR(sve2_addp_zpzz_h, uint16_t, H1_2, DO_ADD)
+DO_ZPZZ_PAIR(sve2_addp_zpzz_s, uint32_t, H1_4, DO_ADD)
+DO_ZPZZ_PAIR_D(sve2_addp_zpzz_d, uint64_t, DO_ADD)
+
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_b, uint8_t, H1, DO_MAX)
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_h, uint16_t, H1_2, DO_MAX)
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_s, uint32_t, H1_4, DO_MAX)
+DO_ZPZZ_PAIR_D(sve2_umaxp_zpzz_d, uint64_t, DO_MAX)
+
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_b, uint8_t, H1, DO_MIN)
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_h, uint16_t, H1_2, DO_MIN)
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_s, uint32_t, H1_4, DO_MIN)
+DO_ZPZZ_PAIR_D(sve2_uminp_zpzz_d, uint64_t, DO_MIN)
+
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_b, int8_t, H1, DO_MAX)
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_h, int16_t, H1_2, DO_MAX)
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_s, int32_t, H1_4, DO_MAX)
+DO_ZPZZ_PAIR_D(sve2_smaxp_zpzz_d, int64_t, DO_MAX)
+
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_b, int8_t, H1, DO_MIN)
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_h, int16_t, H1_2, DO_MIN)
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_s, int32_t, H1_4, DO_MIN)
+DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN)
+
+#undef DO_ZPZZ_PAIR
+#undef DO_ZPZZ_PAIR_D
+
 /* Three-operand expander, controlled by a predicate, in which the
  * third operand is "wide". That is, for D = N op M, the same 64-bit
  * value of M is used with all of the narrower values of N.
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ(SHSUB, shsub)
 DO_SVE2_ZPZZ(UHADD, uhadd)
 DO_SVE2_ZPZZ(URHADD, urhadd)
 DO_SVE2_ZPZZ(UHSUB, uhsub)
+
+DO_SVE2_ZPZZ(ADDP, addp)
+DO_SVE2_ZPZZ(SMAXP, smaxp)
+DO_SVE2_ZPZZ(UMAXP, umaxp)
+DO_SVE2_ZPZZ(SMINP, sminp)
+DO_SVE2_ZPZZ(UMINP, uminp)
--
2.20.1
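
As a reading aid for the pairwise expander above: destination slot I takes
its operand pair from Zn when I is even and from Zm when I is odd, and both
elements of a pair are loaded before anything is stored, so Zd may overlap
the sources. A scalar model of unpredicated ADDP on eight uint16_t lanes
(illustrative only; addp_u16 is an invented name, not a QEMU helper):

#include <stdint.h>
#include <stdio.h>

static void addp_u16(uint16_t *d, const uint16_t *n, const uint16_t *m)
{
    uint16_t tmp[8];

    /* Even slots pair up elements of n, odd slots elements of m. */
    for (int i = 0; i < 8; i += 2) {
        tmp[i] = n[i] + n[i + 1];
        tmp[i + 1] = m[i] + m[i + 1];
    }
    /* Copy out only after all pairs are read, so d may alias n or m. */
    for (int i = 0; i < 8; i++) {
        d[i] = tmp[i];
    }
}

int main(void)
{
    uint16_t n[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint16_t m[8] = {10, 20, 30, 40, 50, 60, 70, 80};
    uint16_t d[8];

    addp_u16(d, n, m);
    for (int i = 0; i < 8; i++) {
        printf("%u ", d[i]);    /* prints: 3 30 7 70 11 110 15 150 */
    }
    printf("\n");
    return 0;
}
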
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 24 ++++++++++++++++++++
 target/arm/sve.decode      | 19 ++++++++++++++++
 target/arm/sve_helper.c    | 43 +++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 46 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 132 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve2_saddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_ssubl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uaddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_usubl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
 DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
 DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SUQADD 01000100 .. 011 100 100 ... ..... ..... @rdn_pg_rm
 USQADD 01000100 .. 011 101 100 ... ..... ..... @rdn_pg_rm
 SQSUB_zpzz 01000100 .. 011 110 100 ... ..... ..... @rdm_pg_rn # SQSUBR
 UQSUB_zpzz 01000100 .. 011 111 100 ... ..... ..... @rdm_pg_rn # UQSUBR
+
+#### SVE2 Widening Integer Arithmetic
+
+## SVE2 integer add/subtract long
+
+SADDLB 01000101 .. 0 ..... 00 0000 ..... ..... @rd_rn_rm
+SADDLT 01000101 .. 0 ..... 00 0001 ..... ..... @rd_rn_rm
+UADDLB 01000101 .. 0 ..... 00 0010 ..... ..... @rd_rn_rm
+UADDLT 01000101 .. 0 ..... 00 0011 ..... ..... @rd_rn_rm
+
+SSUBLB 01000101 .. 0 ..... 00 0100 ..... ..... @rd_rn_rm
+SSUBLT 01000101 .. 0 ..... 00 0101 ..... ..... @rd_rn_rm
+USUBLB 01000101 .. 0 ..... 00 0110 ..... ..... @rd_rn_rm
+USUBLT 01000101 .. 0 ..... 00 0111 ..... ..... @rd_rn_rm
+
+SABDLB 01000101 .. 0 ..... 00 1100 ..... ..... @rd_rn_rm
+SABDLT 01000101 .. 0 ..... 00 1101 ..... ..... @rd_rn_rm
+UABDLB 01000101 .. 0 ..... 00 1110 ..... ..... @rd_rn_rm
+UABDLT 01000101 .. 0 ..... 00 1111 ..... ..... @rd_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZW(sve_lsl_zzw_s, uint32_t, uint64_t, H1_4, DO_LSL)
 #undef DO_ZPZ
 #undef DO_ZPZ_D
 
+/*
+ * Three-operand expander, unpredicated, in which the two inputs are
+ * selected from the top or bottom half of the wide column.
+ */
+#define DO_ZZZ_TB(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+    int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \
+    for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+        TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
+        TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
+        *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \
+    } \
+}
+
+DO_ZZZ_TB(sve2_saddl_h, int16_t, int8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_TB(sve2_saddl_s, int32_t, int16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_TB(sve2_saddl_d, int64_t, int32_t, , H1_4, DO_ADD)
+
+DO_ZZZ_TB(sve2_ssubl_h, int16_t, int8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_TB(sve2_ssubl_s, int32_t, int16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_TB(sve2_ssubl_d, int64_t, int32_t, , H1_4, DO_SUB)
+
+DO_ZZZ_TB(sve2_sabdl_h, int16_t, int8_t, H1_2, H1, DO_ABD)
+DO_ZZZ_TB(sve2_sabdl_s, int32_t, int16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZ_TB(sve2_sabdl_d, int64_t, int32_t, , H1_4, DO_ABD)
+
+DO_ZZZ_TB(sve2_uaddl_h, uint16_t, uint8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_TB(sve2_uaddl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_TB(sve2_uaddl_d, uint64_t, uint32_t, , H1_4, DO_ADD)
+
+DO_ZZZ_TB(sve2_usubl_h, uint16_t, uint8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_TB(sve2_usubl_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_TB(sve2_usubl_d, uint64_t, uint32_t, , H1_4, DO_SUB)
+
+DO_ZZZ_TB(sve2_uabdl_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
+DO_ZZZ_TB(sve2_uabdl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, , H1_4, DO_ABD)
+
+#undef DO_ZZZ_TB
+
 /* Two-operand reduction expander, controlled by a predicate.
  * The difference between TYPERED and TYPERET has to do with
  * sign-extension. E.g. for SMAX, TYPERED must be signed,
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub)
 DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub)
 DO_SVE2_ZPZZ(SUQADD, suqadd)
 DO_SVE2_ZPZZ(USQADD, usqadd)
+
+/*
+ * SVE2 Widening Integer Arithmetic
+ */
+
+static bool do_sve2_zzw_ool(DisasContext *s, arg_rrr_esz *a,
+                            gen_helper_gvec_3 *fn, int data)
+{
+    if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           vsz, vsz, data, fn);
+    }
+    return true;
+}
+
+#define DO_SVE2_ZZZ_TB(NAME, name, SEL1, SEL2) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+    static gen_helper_gvec_3 * const fns[4] = { \
+        NULL, gen_helper_sve2_##name##_h, \
+        gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
+    }; \
+    return do_sve2_zzw_ool(s, a, fns[a->esz], (SEL2 << 1) | SEL1); \
+}
+
+DO_SVE2_ZZZ_TB(SADDLB, saddl, false, false)
+DO_SVE2_ZZZ_TB(SSUBLB, ssubl, false, false)
+DO_SVE2_ZZZ_TB(SABDLB, sabdl, false, false)
+
+DO_SVE2_ZZZ_TB(UADDLB, uaddl, false, false)
+DO_SVE2_ZZZ_TB(USUBLB, usubl, false, false)
+DO_SVE2_ZZZ_TB(UABDLB, uabdl, false, false)
+
+DO_SVE2_ZZZ_TB(SADDLT, saddl, true, true)
+DO_SVE2_ZZZ_TB(SSUBLT, ssubl, true, true)
+DO_SVE2_ZZZ_TB(SABDLT, sabdl, true, true)
+
+DO_SVE2_ZZZ_TB(UADDLT, uaddl, true, true)
+DO_SVE2_ZZZ_TB(USUBLT, usubl, true, true)
+DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true)
--
2.20.1
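
For the top/bottom selection in DO_ZZZ_TB above: wide result lane w reads
narrow element 2*w + sel from each source, so the B forms consume the even
(bottom) elements and the T forms the odd (top) ones. A scalar model of
SADDLB/SADDLT for int8_t to int16_t (illustrative only; saddl_tb is an
invented name, not a QEMU helper):

#include <stdint.h>
#include <stdio.h>

/* sel = 0 selects the bottom (even) half, sel = 1 the top (odd) half. */
static void saddl_tb(int16_t *d, const int8_t *n, const int8_t *m,
                     int wide_lanes, int sel)
{
    for (int w = 0; w < wide_lanes; w++) {
        d[w] = (int16_t)n[2 * w + sel] + m[2 * w + sel];
    }
}

int main(void)
{
    int8_t n[8] = {1, -1, 2, -2, 3, -3, 4, -4};
    int8_t m[8] = {100, 100, 100, 100, 100, 100, 100, 100};
    int16_t lo[4], hi[4];

    saddl_tb(lo, n, m, 4, 0);   /* SADDLB: 101 102 103 104 */
    saddl_tb(hi, n, m, 4, 1);   /* SADDLT:  99  98  97  96 */
    for (int w = 0; w < 4; w++) {
        printf("%d %d\n", lo[w], hi[w]);
    }
    return 0;
}
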
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 16 ++++++++++++++++
 target/arm/sve.decode      | 12 ++++++++++++
 target/arm/sve_helper.c    | 30 ++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 20 ++++++++++++++++++++
 4 files changed, 78 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_uabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_uabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_uabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve2_saddw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_ssubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uaddw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_usubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
 DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
 DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ UABDLT 01000101 .. 0 ..... 00 1111 ..... ..... @rd_rn_rm
 SADDLBT 01000101 .. 0 ..... 1000 00 ..... ..... @rd_rn_rm
 SSUBLBT 01000101 .. 0 ..... 1000 10 ..... ..... @rd_rn_rm
 SSUBLTB 01000101 .. 0 ..... 1000 11 ..... ..... @rd_rn_rm
+
+## SVE2 integer add/subtract wide
+
+SADDWB 01000101 .. 0 ..... 010 000 ..... ..... @rd_rn_rm
+SADDWT 01000101 .. 0 ..... 010 001 ..... ..... @rd_rn_rm
+UADDWB 01000101 .. 0 ..... 010 010 ..... ..... @rd_rn_rm
+UADDWT 01000101 .. 0 ..... 010 011 ..... ..... @rd_rn_rm
+
+SSUBWB 01000101 .. 0 ..... 010 100 ..... ..... @rd_rn_rm
+SSUBWT 01000101 .. 0 ..... 010 101 ..... ..... @rd_rn_rm
+USUBWB 01000101 .. 0 ..... 010 110 ..... ..... @rd_rn_rm
+USUBWT 01000101 .. 0 ..... 010 111 ..... ..... @rd_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, , H1_4, DO_ABD)
 
 #undef DO_ZZZ_TB
 
+#define DO_ZZZ_WTB(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    int sel2 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+    for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+        TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+        TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
+        *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \
+    } \
+}
+
+DO_ZZZ_WTB(sve2_saddw_h, int16_t, int8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_WTB(sve2_saddw_s, int32_t, int16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_WTB(sve2_saddw_d, int64_t, int32_t, , H1_4, DO_ADD)
+
+DO_ZZZ_WTB(sve2_ssubw_h, int16_t, int8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_WTB(sve2_ssubw_s, int32_t, int16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_WTB(sve2_ssubw_d, int64_t, int32_t, , H1_4, DO_SUB)
+
+DO_ZZZ_WTB(sve2_uaddw_h, uint16_t, uint8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_WTB(sve2_uaddw_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_WTB(sve2_uaddw_d, uint64_t, uint32_t, , H1_4, DO_ADD)
+
+DO_ZZZ_WTB(sve2_usubw_h, uint16_t, uint8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_WTB(sve2_usubw_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB)
+
+#undef DO_ZZZ_WTB
+
 /* Two-operand reduction expander, controlled by a predicate.
  * The difference between TYPERED and TYPERET has to do with
  * sign-extension. E.g. for SMAX, TYPERED must be signed,
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true)
 DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true)
 DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true)
 DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false)
+
+#define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+    static gen_helper_gvec_3 * const fns[4] = { \
+        NULL, gen_helper_sve2_##name##_h, \
+        gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
+    }; \
+    return do_sve2_zzw_ool(s, a, fns[a->esz], SEL2); \
+}
+
+DO_SVE2_ZZZ_WTB(SADDWB, saddw, false)
+DO_SVE2_ZZZ_WTB(SADDWT, saddw, true)
+DO_SVE2_ZZZ_WTB(SSUBWB, ssubw, false)
+DO_SVE2_ZZZ_WTB(SSUBWT, ssubw, true)
+
+DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false)
+DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true)
+DO_SVE2_ZZZ_WTB(USUBWB, usubw, false)
+DO_SVE2_ZZZ_WTB(USUBWT, usubw, true)
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

Exclude PMULL from this category for the moment.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 15 +++++++++++++++
 target/arm/sve.decode      |  9 +++++++++
 target/arm/sve_helper.c    | 31 +++++++++++++++++++++++++++++++
 target/arm/translate-sve.c |  9 +++++++++
 4 files changed, 64 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(sve_stdd_le_zd_mte, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_6(sve_stdd_be_zd_mte, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
 
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SSUBWB 01000101 .. 0 ..... 010 100 ..... ..... @rd_rn_rm
 SSUBWT 01000101 .. 0 ..... 010 101 ..... ..... @rd_rn_rm
 USUBWB 01000101 .. 0 ..... 010 110 ..... ..... @rd_rn_rm
 USUBWT 01000101 .. 0 ..... 010 111 ..... ..... @rd_rn_rm
+
+## SVE2 integer multiply long
+
+SQDMULLB_zzz 01000101 .. 0 ..... 011 000 ..... ..... @rd_rn_rm
+SQDMULLT_zzz 01000101 .. 0 ..... 011 001 ..... ..... @rd_rn_rm
+SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm
+SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm
+UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm
+UMULLT_zzz 01000101 .. 0 ..... 011 111 ..... ..... @rd_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZ_TB(sve2_uabdl_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
 DO_ZZZ_TB(sve2_uabdl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
 DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, , H1_4, DO_ABD)
 
+DO_ZZZ_TB(sve2_smull_zzz_h, int16_t, int8_t, H1_2, H1, DO_MUL)
+DO_ZZZ_TB(sve2_smull_zzz_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZ_TB(sve2_smull_zzz_d, int64_t, int32_t, , H1_4, DO_MUL)
+
+DO_ZZZ_TB(sve2_umull_zzz_h, uint16_t, uint8_t, H1_2, H1, DO_MUL)
+DO_ZZZ_TB(sve2_umull_zzz_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZ_TB(sve2_umull_zzz_d, uint64_t, uint32_t, , H1_4, DO_MUL)
+
+/* Note that the multiply cannot overflow, but the doubling can. */
+static inline int16_t do_sqdmull_h(int16_t n, int16_t m)
+{
+    int16_t val = n * m;
+    return DO_SQADD_H(val, val);
+}
+
+static inline int32_t do_sqdmull_s(int32_t n, int32_t m)
+{
+    int32_t val = n * m;
+    return DO_SQADD_S(val, val);
+}
+
+static inline int64_t do_sqdmull_d(int64_t n, int64_t m)
+{
+    int64_t val = n * m;
+    return do_sqadd_d(val, val);
+}
+
+DO_ZZZ_TB(sve2_sqdmull_zzz_h, int16_t, int8_t, H1_2, H1, do_sqdmull_h)
+DO_ZZZ_TB(sve2_sqdmull_zzz_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s)
+DO_ZZZ_TB(sve2_sqdmull_zzz_d, int64_t, int32_t, , H1_4, do_sqdmull_d)
+
 #undef DO_ZZZ_TB
 
 #define DO_ZZZ_WTB(NAME, TYPEW, TYPEN, HW, HN, OP) \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true)
 DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true)
 DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false)
 
+DO_SVE2_ZZZ_TB(SQDMULLB_zzz, sqdmull_zzz, false, false)
+DO_SVE2_ZZZ_TB(SQDMULLT_zzz, sqdmull_zzz, true, true)
+
+DO_SVE2_ZZZ_TB(SMULLB_zzz, smull_zzz, false, false)
+DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true)
+
+DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false)
+DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true)
+
 #define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \
 static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
 { \
--
2.20.1
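
On the "multiply cannot overflow, but the doubling can" comment: with h-sized
lanes the inputs are sign-extended int8_t, so |n * m| is at most 16384 and
always fits in int16_t, but doubling 16384 (from -128 * -128) does not, and
must saturate to INT16_MAX. A standalone check, with QEMU's DO_SQADD_H
modelled here by a plain clamp (sqadd16 and sqdmull_h below are invented
names for the sketch):

#include <assert.h>
#include <stdint.h>

static int16_t sqadd16(int32_t a, int32_t b)
{
    /* Saturating 16-bit add, standing in for DO_SQADD_H. */
    int32_t r = a + b;
    if (r > INT16_MAX) {
        r = INT16_MAX;
    } else if (r < INT16_MIN) {
        r = INT16_MIN;
    }
    return r;
}

static int16_t sqdmull_h(int8_t n, int8_t m)
{
    int16_t prod = (int16_t)n * m;   /* at most 128 * 128 = 16384 */
    return sqadd16(prod, prod);      /* only the doubling can saturate */
}

int main(void)
{
    assert(sqdmull_h(-128, -128) == INT16_MAX);  /* 2 * 16384 saturates */
    assert(sqdmull_h(-128, 127) == -32512);      /* 2 * -16256 fits */
    assert(sqdmull_h(100, 100) == 20000);
    return 0;
}
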
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |   8 ++
 target/arm/sve.decode      |   8 ++
 target/arm/sve_helper.c    |  22 +++++
 target/arm/translate-sve.c | 159 +++++++++++++++++++++++++++++++++++++
 4 files changed, 197 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_pmull_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sshll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sshll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sshll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_ushll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_ushll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_ushll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm
 SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm
 UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm
 UMULLT_zzz 01000101 .. 0 ..... 011 111 ..... ..... @rd_rn_rm
+
+## SVE2 bitwise shift left long
+
+# Note bit23 == 0 is handled by esz > 0 in do_sve2_shll_tb.
+SSHLLB 01000101 .. 0 ..... 1010 00 ..... ..... @rd_rn_tszimm_shl
+SSHLLT 01000101 .. 0 ..... 1010 01 ..... ..... @rd_rn_tszimm_shl
+USHLLB 01000101 .. 0 ..... 1010 10 ..... ..... @rd_rn_tszimm_shl
+USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB)
 
 #undef DO_ZZZ_WTB
 
+#define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    intptr_t sel = (simd_data(desc) & 1) * sizeof(TYPEN); \
+    int shift = simd_data(desc) >> 1; \
+    for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+        TYPEW nn = *(TYPEN *)(vn + HN(i + sel)); \
+        *(TYPEW *)(vd + HW(i)) = nn << shift; \
+    } \
+}
+
+DO_ZZI_SHLL(sve2_sshll_h, int16_t, int8_t, H1_2, H1)
+DO_ZZI_SHLL(sve2_sshll_s, int32_t, int16_t, H1_4, H1_2)
+DO_ZZI_SHLL(sve2_sshll_d, int64_t, int32_t, , H1_4)
+
+DO_ZZI_SHLL(sve2_ushll_h, uint16_t, uint8_t, H1_2, H1)
+DO_ZZI_SHLL(sve2_ushll_s, uint32_t, uint16_t, H1_4, H1_2)
+DO_ZZI_SHLL(sve2_ushll_d, uint64_t, uint32_t, , H1_4)
+
+#undef DO_ZZI_SHLL
+
 /* Two-operand reduction expander, controlled by a predicate.
  * The difference between TYPERED and TYPERET has to do with
  * sign-extension. E.g. for SMAX, TYPERED must be signed,
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false)
 DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true)
 DO_SVE2_ZZZ_WTB(USUBWB, usubw, false)
 DO_SVE2_ZZZ_WTB(USUBWT, usubw, true)
+
+static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
+{
+    int top = imm & 1;
+    int shl = imm >> 1;
+    int halfbits = 4 << vece;
+
+    if (top) {
+        if (shl == halfbits) {
+            TCGv_vec t = tcg_temp_new_vec_matching(d);
+            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
+            tcg_gen_and_vec(vece, d, n, t);
+            tcg_temp_free_vec(t);
+        } else {
+            tcg_gen_sari_vec(vece, d, n, halfbits);
+            tcg_gen_shli_vec(vece, d, d, shl);
+        }
+    } else {
+        tcg_gen_shli_vec(vece, d, n, halfbits);
+        tcg_gen_sari_vec(vece, d, d, halfbits - shl);
+    }
+}
+
+static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
+{
+    int halfbits = 4 << vece;
+    int top = imm & 1;
+    int shl = (imm >> 1);
+    int shift;
+    uint64_t mask;
+
+    mask = MAKE_64BIT_MASK(0, halfbits);
+    mask <<= shl;
+    mask = dup_const(vece, mask);
+
+    shift = shl - top * halfbits;
+    if (shift < 0) {
+        tcg_gen_shri_i64(d, n, -shift);
+    } else {
+        tcg_gen_shli_i64(d, n, shift);
+    }
+    tcg_gen_andi_i64(d, d, mask);
+}
+
+static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+    gen_ushll_i64(MO_16, d, n, imm);
+}
+
+static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+    gen_ushll_i64(MO_32, d, n, imm);
+}
+
+static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+    gen_ushll_i64(MO_64, d, n, imm);
+}
+
+static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
+{
+    int halfbits = 4 << vece;
+    int top = imm & 1;
+    int shl = imm >> 1;
+
+    if (top) {
+        if (shl == halfbits) {
+            TCGv_vec t = tcg_temp_new_vec_matching(d);
+            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
+            tcg_gen_and_vec(vece, d, n, t);
+            tcg_temp_free_vec(t);
+        } else {
+            tcg_gen_shri_vec(vece, d, n, halfbits);
+            tcg_gen_shli_vec(vece, d, d, shl);
+        }
+    } else {
+        if (shl == 0) {
+            TCGv_vec t = tcg_temp_new_vec_matching(d);
+            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+            tcg_gen_and_vec(vece, d, n, t);
+            tcg_temp_free_vec(t);
+        } else {
+            tcg_gen_shli_vec(vece, d, n, halfbits);
+            tcg_gen_shri_vec(vece, d, d, halfbits - shl);
+        }
+    }
+}
+
+static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a,
+                            bool sel, bool uns)
+{
+    static const TCGOpcode sshll_list[] = {
+        INDEX_op_shli_vec, INDEX_op_sari_vec, 0
+    };
+    static const TCGOpcode ushll_list[] = {
+        INDEX_op_shli_vec, INDEX_op_shri_vec, 0
+    };
+    static const GVecGen2i ops[2][3] = {
+        { { .fniv = gen_sshll_vec,
+            .opt_opc = sshll_list,
+            .fno = gen_helper_sve2_sshll_h,
+            .vece = MO_16 },
+          { .fniv = gen_sshll_vec,
+            .opt_opc = sshll_list,
+            .fno = gen_helper_sve2_sshll_s,
+            .vece = MO_32 },
+          { .fniv = gen_sshll_vec,
+            .opt_opc = sshll_list,
+            .fno = gen_helper_sve2_sshll_d,
+            .vece = MO_64 } },
+        { { .fni8 = gen_ushll16_i64,
+            .fniv = gen_ushll_vec,
+            .opt_opc = ushll_list,
+            .fno = gen_helper_sve2_ushll_h,
+            .vece = MO_16 },
+          { .fni8 = gen_ushll32_i64,
+            .fniv = gen_ushll_vec,
+            .opt_opc = ushll_list,
+            .fno = gen_helper_sve2_ushll_s,
+            .vece = MO_32 },
+          { .fni8 = gen_ushll64_i64,
+            .fniv = gen_ushll_vec,
+            .opt_opc = ushll_list,
+            .fno = gen_helper_sve2_ushll_d,
+            .vece = MO_64 } },
+    };
+
+    if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
+                        vec_full_reg_offset(s, a->rn),
+                        vsz, vsz, (a->imm << 1) | sel,
+                        &ops[uns][a->esz]);
+    }
+    return true;
+}
+
+static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_shll_tb(s, a, false, false);
+}
+
+static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_shll_tb(s, a, true, false);
+}
+
+static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_shll_tb(s, a, false, true);
+}
+
+static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_shll_tb(s, a, true, true);
+}
--
2.20.1
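
The gen_ushll_i64 path above is worth a worked example: a single shift
positions either the even (bottom) or odd (top) narrow elements, and a
replicated mask then clears everything outside each widened, shifted field.
A scalar model for 8-bit to 16-bit lanes packed in one uint64_t
(illustrative only; ushll16 is an invented name, not a QEMU helper):

#include <assert.h>
#include <stdint.h>

static uint64_t ushll16(uint64_t n, int top, int shl)
{
    /* 0x00ff replicated per 16-bit lane, pre-shifted by the immediate. */
    uint64_t mask = 0x00ff00ff00ff00ffull << shl;
    /* One net shift: left by shl for B, right by (8 - shl) for T. */
    int shift = shl - top * 8;
    return (shift < 0 ? n >> -shift : n << shift) & mask;
}

int main(void)
{
    /* Bytes 0x11, 0x22, ..., 0x88 from low to high. */
    uint64_t n = 0x8877665544332211ull;

    /* USHLLB #0: even bytes widened in place. */
    assert(ushll16(n, 0, 0) == 0x0077005500330011ull);
    /* USHLLT #0: odd bytes moved down into the 16-bit lanes. */
    assert(ushll16(n, 1, 0) == 0x0088006600440022ull);
    /* USHLLB #4: bottom bytes widened, then shifted left by 4. */
    assert(ushll16(n, 0, 4) == 0x0770055003300110ull);
    return 0;
}
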
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |  5 +++++
 target/arm/sve.decode      |  5 +++++
 target/arm/sve_helper.c    | 20 ++++++++++++++++++++
 target/arm/translate-sve.c | 19 +++++++++++++++++++
 4 files changed, 49 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_sshll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_ushll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_ushll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_ushll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_eoril_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SSHLLB 01000101 .. 0 ..... 1010 00 ..... ..... @rd_rn_tszimm_shl
 SSHLLT 01000101 .. 0 ..... 1010 01 ..... ..... @rd_rn_tszimm_shl
 USHLLB 01000101 .. 0 ..... 1010 10 ..... ..... @rd_rn_tszimm_shl
 USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 bitwise exclusive-or interleaved
+
+EORBT 01000101 .. 0 ..... 10010 0 ..... ..... @rd_rn_rm
+EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB)
 
 #undef DO_ZZZ_WTB
 
+#define DO_ZZZ_NTB(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    intptr_t sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPE); \
+    intptr_t sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPE); \
+    for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+        TYPE nn = *(TYPE *)(vn + H(i + sel1)); \
+        TYPE mm = *(TYPE *)(vm + H(i + sel2)); \
+        *(TYPE *)(vd + H(i + sel1)) = OP(nn, mm); \
+    } \
+}
+
+DO_ZZZ_NTB(sve2_eoril_b, uint8_t, H1, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_h, uint16_t, H1_2, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_s, uint32_t, H1_4, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_d, uint64_t, , DO_EOR)
+
+#undef DO_ZZZ_NTB
+
 #define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \
 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
 { \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true)
 DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false)
 DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true)
 
+static bool do_eor_tb(DisasContext *s, arg_rrr_esz *a, bool sel1)
+{
+    static gen_helper_gvec_3 * const fns[4] = {
+        gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
+        gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
+    };
+    return do_sve2_zzw_ool(s, a, fns[a->esz], (!sel1 << 1) | sel1);
+}
+
+static bool trans_EORBT(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_eor_tb(s, a, false);
+}
+
+static bool trans_EORTB(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_eor_tb(s, a, true);
+}
+
 static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
 {
     static gen_helper_gvec_3 * const fns[4] = {
--
2.20.1
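
The interleave in DO_ZZZ_NTB reads as follows for EORBT/EORTB: only the
selected half of Zd is written, with the bottom form combining Zn's even
elements with Zm's odd ones, and the top form the reverse. A scalar model on
uint8_t lanes (illustrative only; eor_tb below is an invented name, not the
QEMU helper of the same shape):

#include <stdint.h>
#include <stdio.h>

static void eor_tb(uint8_t *d, const uint8_t *n, const uint8_t *m,
                   int lanes, int sel1)
{
    /* Write only slots of parity sel1; the other half is untouched. */
    for (int i = 0; i < lanes; i += 2) {
        d[i + sel1] = n[i + sel1] ^ m[i + !sel1];
    }
}

int main(void)
{
    uint8_t n[4] = {0x0f, 0xf0, 0x0f, 0xf0};
    uint8_t m[4] = {0xaa, 0x55, 0xaa, 0x55};
    uint8_t d[4] = {0, 0, 0, 0};

    eor_tb(d, n, m, 4, 0);    /* EORBT: d[0] = n[0]^m[1], d[2] = n[2]^m[3] */
    eor_tb(d, n, m, 4, 1);    /* EORTB: d[1] = n[1]^m[0], d[3] = n[3]^m[2] */
    printf("%02x %02x %02x %02x\n", d[0], d[1], d[2], d[3]);
    /* expected: 5a 5a 5a 5a */
    return 0;
}
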
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 10 +++++++++
 target/arm/sve.decode      |  9 ++++++++
 target/arm/sve_helper.c    | 42 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 31 ++++++++++++++++++++++++++++
 4 files changed, 92 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_bgrp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_bgrp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_bgrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_bgrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_cadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqcadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm
 BEXT 01000101 .. 0 ..... 1011 00 ..... ..... @rd_rn_rm
 BDEP 01000101 .. 0 ..... 1011 01 ..... ..... @rd_rn_rm
 BGRP 01000101 .. 0 ..... 1011 10 ..... ..... @rd_rn_rm
+
+#### SVE2 Accumulate
+
+## SVE2 complex integer add
+
+CADD_rot90 01000101 .. 00000 0 11011 0 ..... ..... @rdn_rm
+CADD_rot270 01000101 .. 00000 0 11011 1 ..... ..... @rdn_rm
+SQCADD_rot90 01000101 .. 00000 1 11011 0 ..... ..... @rdn_rm
+SQCADD_rot270 01000101 .. 00000 1 11011 1 ..... ..... @rdn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_BITPERM(sve2_bgrp_d, uint64_t, bitgroup)
 
 #undef DO_BITPERM
 
+#define DO_CADD(NAME, TYPE, H, ADD_OP, SUB_OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    int sub_r = simd_data(desc); \
+    if (sub_r) { \
+        for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+            TYPE acc_r = *(TYPE *)(vn + H(i)); \
+            TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+            TYPE el2_r = *(TYPE *)(vm + H(i)); \
+            TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+            acc_r = ADD_OP(acc_r, el2_i); \
+            acc_i = SUB_OP(acc_i, el2_r); \
+            *(TYPE *)(vd + H(i)) = acc_r; \
+            *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \
+        } \
+    } else { \
+        for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+            TYPE acc_r = *(TYPE *)(vn + H(i)); \
+            TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+            TYPE el2_r = *(TYPE *)(vm + H(i)); \
+            TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+            acc_r = SUB_OP(acc_r, el2_i); \
+            acc_i = ADD_OP(acc_i, el2_r); \
+            *(TYPE *)(vd + H(i)) = acc_r; \
+            *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \
+        } \
+    } \
+}
+
+DO_CADD(sve2_cadd_b, int8_t, H1, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_h, int16_t, H1_2, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_s, int32_t, H1_4, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_d, int64_t, , DO_ADD, DO_SUB)
+
+DO_CADD(sve2_sqcadd_b, int8_t, H1, DO_SQADD_B, DO_SQSUB_B)
+DO_CADD(sve2_sqcadd_h, int16_t, H1_2, DO_SQADD_H, DO_SQSUB_H)
+DO_CADD(sve2_sqcadd_s, int32_t, H1_4, DO_SQADD_S, DO_SQSUB_S)
+DO_CADD(sve2_sqcadd_d, int64_t, , do_sqadd_d, do_sqsub_d)
+
+#undef DO_CADD
+
 #define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \
 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
 { \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_BGRP(DisasContext *s, arg_rrr_esz *a)
     }
     return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
 }
+
+static bool do_cadd(DisasContext *s, arg_rrr_esz *a, bool sq, bool rot)
+{
+    static gen_helper_gvec_3 * const fns[2][4] = {
+        { gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
+          gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d },
+        { gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
+          gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d },
+    };
+    return do_sve2_zzw_ool(s, a, fns[sq][a->esz], rot);
+}
+
+static bool trans_CADD_rot90(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_cadd(s, a, false, false);
+}
+
+static bool trans_CADD_rot270(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_cadd(s, a, false, true);
+}
+
+static bool trans_SQCADD_rot90(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_cadd(s, a, true, false);
+}
+
+static bool trans_SQCADD_rot270(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_cadd(s, a, true, true);
+}
--
2.20.1
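
For the two DO_CADD branches above: treating consecutive (even, odd) elements
as (real, imaginary) pairs, rot90 computes d = n + i*m and rot270 computes
d = n - i*m, which is where the swapped add/subtract of el2_i and el2_r comes
from. A scalar model, ignoring saturation (illustrative only; cadd below is
an invented name, not the QEMU helper):

#include <stdint.h>
#include <stdio.h>

static void cadd(int8_t *d, const int8_t *n, const int8_t *m,
                 int lanes, int rot270)
{
    for (int i = 0; i < lanes; i += 2) {
        int8_t mr = m[i], mi = m[i + 1];
        if (rot270) {           /* -i*m = (mi, -mr) */
            d[i] = n[i] + mi;
            d[i + 1] = n[i + 1] - mr;
        } else {                /* i*m = (-mi, mr) */
            d[i] = n[i] - mi;
            d[i + 1] = n[i + 1] + mr;
        }
    }
}

int main(void)
{
    int8_t n[2] = {10, 20};     /* 10 + 20i */
    int8_t m[2] = {3, 4};       /*  3 +  4i */
    int8_t d[2];

    cadd(d, n, m, 2, 0);
    printf("rot90:  %d%+di\n", d[0], d[1]);   /* (10+20i) + i(3+4i) = 6+23i */
    cadd(d, n, m, 2, 1);
    printf("rot270: %d%+di\n", d[0], d[1]);   /* (10+20i) - i(3+4i) = 14+17i */
    return 0;
}
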
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-20-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 14 ++++++++++
 target/arm/sve.decode      | 12 +++++++++
 target/arm/sve_helper.c    | 23 ++++++++++++++++
 target/arm/translate-sve.c | 55 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 104 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve2_sqcadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_sqcadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_sqcadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve2_sqcadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sabal_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sabal_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sabal_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uabal_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uabal_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@
 &rpr_s rd pg rn s
 &rprr_s rd pg rn rm s
 &rprr_esz rd pg rn rm esz
+&rrrr_esz rd ra rn rm esz
 &rprrr_esz rd pg rn rm ra esz
 &rpri_esz rd pg rn imm esz
 &ptrue rd esz pat s
@@ -XXX,XX +XXX,XX @@
 @rdn_i8s ........ esz:2 ...... ... imm:s8 rd:5 \
          &rri_esz rn=%reg_movprfx
 
+# Four operand, vector element size
+@rda_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 \
+           &rrrr_esz ra=%reg_movprfx
+
 # Three operand with "memory" size, aka immediate left shift
 @rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri
 
@@ -XXX,XX +XXX,XX @@ CADD_rot90 01000101 .. 00000 0 11011 0 ..... ..... @rdn_rm
 CADD_rot270 01000101 .. 00000 0 11011 1 ..... ..... @rdn_rm
 SQCADD_rot90 01000101 .. 00000 1 11011 0 ..... ..... @rdn_rm
 SQCADD_rot270 01000101 .. 00000 1 11011 1 ..... ..... @rdn_rm
+
+## SVE2 integer absolute difference and accumulate long
+
+SABALB 01000101 .. 0 ..... 1100 00 ..... ..... @rda_rn_rm
+SABALT 01000101 .. 0 ..... 1100 01 ..... ..... @rda_rn_rm
+UABALB 01000101 .. 0 ..... 1100 10 ..... ..... @rda_rn_rm
+UABALT 01000101 .. 0 ..... 1100 11 ..... ..... @rda_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZ_NTB(sve2_eoril_d, uint64_t, , DO_EOR)
 
 #undef DO_ZZZ_NTB
 
+#define DO_ZZZW_ACC(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    intptr_t sel1 = simd_data(desc) * sizeof(TYPEN); \
+    for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+        TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
+        TYPEW mm = *(TYPEN *)(vm + HN(i + sel1)); \
+        TYPEW aa = *(TYPEW *)(va + HW(i)); \
+        *(TYPEW *)(vd + HW(i)) = OP(nn, mm) + aa; \
+    } \
+}
+
+DO_ZZZW_ACC(sve2_sabal_h, int16_t, int8_t, H1_2, H1, DO_ABD)
+DO_ZZZW_ACC(sve2_sabal_s, int32_t, int16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZW_ACC(sve2_sabal_d, int64_t, int32_t, , H1_4, DO_ABD)
+
+DO_ZZZW_ACC(sve2_uabal_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
+DO_ZZZW_ACC(sve2_uabal_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD)
+
+#undef DO_ZZZW_ACC
+
 #define DO_BITPERM(NAME, TYPE, OP) \
 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
 { \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static void gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                        vsz, vsz, data, fn);
 }
 
+/* Invoke an out-of-line helper on 4 Zregs. */
+static void gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
+                              int rd, int rn, int rm, int ra, int data)
+{
+    unsigned vsz = vec_full_reg_size(s);
+    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+                       vec_full_reg_offset(s, rn),
+                       vec_full_reg_offset(s, rm),
+                       vec_full_reg_offset(s, ra),
+                       vsz, vsz, data, fn);
+}
+
 /* Invoke an out-of-line helper on 2 Zregs and a predicate. */
 static void gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
                              int rd, int rn, int pg, int data)
@@ -XXX,XX +XXX,XX @@ static bool trans_SQCADD_rot270(DisasContext *s, arg_rrr_esz *a)
 {
     return do_cadd(s, a, true, true);
 }
+
+static bool do_sve2_zzzz_ool(DisasContext *s, arg_rrrr_esz *a,
+                             gen_helper_gvec_4 *fn, int data)
+{
+    if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
+    }
+    return true;
+}
+
+static bool do_abal(DisasContext *s, arg_rrrr_esz *a, bool uns, bool sel)
+{
+    static gen_helper_gvec_4 * const fns[2][4] = {
+        { NULL, gen_helper_sve2_sabal_h,
+          gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d },
+        { NULL, gen_helper_sve2_uabal_h,
+          gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d },
+    };
+    return do_sve2_zzzz_ool(s, a, fns[uns][a->esz], sel);
+}
+
+static bool trans_SABALB(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_abal(s, a, false, false);
+}
+
+static bool trans_SABALT(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_abal(s, a, false, true);
+}
+
+static bool trans_UABALB(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_abal(s, a, true, false);
+}
+
+static bool trans_UABALT(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_abal(s, a, true, true);
+}
--
2.20.1
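
DO_ZZZW_ACC in short: widen the absolute difference of the selected bottom or
top narrow elements and add it into the wide accumulator, which is also the
destination. A scalar model of SABALB/SABALT for int8_t into int16_t
(illustrative only; sabal below is an invented name, not a QEMU helper):

#include <assert.h>
#include <stdint.h>

static void sabal(int16_t *d, const int16_t *a, const int8_t *n,
                  const int8_t *m, int wide_lanes, int sel)
{
    for (int w = 0; w < wide_lanes; w++) {
        int16_t nn = n[2 * w + sel];
        int16_t mm = m[2 * w + sel];
        /* Absolute difference, widened, plus the accumulator. */
        d[w] = a[w] + (nn > mm ? nn - mm : mm - nn);
    }
}

int main(void)
{
    int8_t n[4] = {10, -10, 30, -30};
    int8_t m[4] = {-5, 5, -15, 15};
    int16_t acc[2] = {1000, 2000};
    int16_t d[2];

    sabal(d, acc, n, m, 2, 0);          /* SABALB: |10-(-5)|, |30-(-15)| */
    assert(d[0] == 1015 && d[1] == 2045);
    sabal(d, acc, n, m, 2, 1);          /* SABALT: |-10-5|, |-30-15| */
    assert(d[0] == 1015 && d[1] == 2045);
    return 0;
}
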
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |  3 +++
 target/arm/sve.decode      |  6 ++++++
 target/arm/sve_helper.c    | 34 ++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 23 +++++++++++++++++++++++
 4 files changed, 66 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uabal_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_adcl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_adcl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SABALB 01000101 .. 0 ..... 1100 00 ..... ..... @rda_rn_rm
 SABALT 01000101 .. 0 ..... 1100 01 ..... ..... @rda_rn_rm
 UABALB 01000101 .. 0 ..... 1100 10 ..... ..... @rda_rn_rm
 UABALT 01000101 .. 0 ..... 1100 11 ..... ..... @rda_rn_rm
+
+## SVE2 integer add/subtract long with carry
+
+# ADC and SBC decoded via size in helper dispatch.
+ADCLB 01000101 .. 0 ..... 11010 0 ..... ..... @rda_rn_rm
+ADCLT 01000101 .. 0 ..... 11010 1 ..... ..... @rda_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD)

 #undef DO_ZZZW_ACC

+void HELPER(sve2_adcl_s)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int sel = H4(extract32(desc, SIMD_DATA_SHIFT, 1));
+    uint32_t inv = -extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+    uint32_t *a = va, *n = vn;
+    uint64_t *d = vd, *m = vm;
+
+    for (i = 0; i < opr_sz / 8; ++i) {
+        uint32_t e1 = a[2 * i + H4(0)];
+        uint32_t e2 = n[2 * i + sel] ^ inv;
+        uint64_t c = extract64(m[i], 32, 1);
+        /* Compute and store the entire 33-bit result at once. */
+        d[i] = c + e1 + e2;
+    }
+}
+
+void HELPER(sve2_adcl_d)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int sel = extract32(desc, SIMD_DATA_SHIFT, 1);
+    uint64_t inv = -(uint64_t)extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+    uint64_t *d = vd, *a = va, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz / 8; i += 2) {
+        Int128 e1 = int128_make64(a[i]);
+        Int128 e2 = int128_make64(n[i + sel] ^ inv);
+        Int128 c = int128_make64(m[i + 1] & 1);
+        Int128 r = int128_add(int128_add(e1, e2), c);
+        d[i + 0] = int128_getlo(r);
+        d[i + 1] = int128_gethi(r);
+    }
+}
+
 #define DO_BITPERM(NAME, TYPE, OP) \
 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
 { \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_UABALT(DisasContext *s, arg_rrrr_esz *a)
 {
     return do_abal(s, a, true, true);
 }
+
+static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+    static gen_helper_gvec_4 * const fns[2] = {
+        gen_helper_sve2_adcl_s,
+        gen_helper_sve2_adcl_d,
+    };
+    /*
+     * Note that in this case the ESZ field encodes both size and sign.
+     * Split out 'subtract' into bit 1 of the data field for the helper.
+     */
+    return do_sve2_zzzz_ool(s, a, fns[a->esz & 1], (a->esz & 2) | sel);
+}
+
+static bool trans_ADCLB(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_adcl(s, a, false);
+}
+
+static bool trans_ADCLT(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_adcl(s, a, true);
+}
--
2.20.1

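A note on the carry chain in sve2_adcl_s above: because each pair of 32-bit
elements is summed in a 64-bit container, the carry-out of the addition lands
in bit 32 of the stored lane, which is exactly where the helper reads the next
carry-in from. A minimal standalone C sketch of one step (illustrative only,
not QEMU code; the name adcl32_step is invented):

#include <stdint.h>

/* One 32-bit ADCLB/ADCLT step: carry-in is bit 32 of the incoming
 * 64-bit lane; the 33-bit sum leaves carry-out in bit 32. */
static uint64_t adcl32_step(uint32_t e1, uint32_t e2, uint64_t m_lane)
{
    uint64_t c = (m_lane >> 32) & 1;
    return (uint64_t)e1 + e2 + c;   /* max is 2^33 - 1, so it fits */
}
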
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-22-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sve.decode      |  8 ++++++++
 target/arm/translate-sve.c | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ UABALT 01000101 .. 0 ..... 1100 11 ..... ..... @rda_rn_rm
 # ADC and SBC decoded via size in helper dispatch.
 ADCLB 01000101 .. 0 ..... 11010 0 ..... ..... @rda_rn_rm
 ADCLT 01000101 .. 0 ..... 11010 1 ..... ..... @rda_rn_rm
+
+## SVE2 bitwise shift right and accumulate
+
+# TODO: Use @rda and %reg_movprfx here.
+SSRA 01000101 .. 0 ..... 1110 00 ..... ..... @rd_rn_tszimm_shr
+USRA 01000101 .. 0 ..... 1110 01 ..... ..... @rd_rn_tszimm_shr
+SRSRA 01000101 .. 0 ..... 1110 10 ..... ..... @rd_rn_tszimm_shr
+URSRA 01000101 .. 0 ..... 1110 11 ..... ..... @rd_rn_tszimm_shr
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_ADCLT(DisasContext *s, arg_rrrr_esz *a)
 {
     return do_adcl(s, a, true);
 }
+
+static bool do_sve2_fn2i(DisasContext *s, arg_rri_esz *a, GVecGen2iFn *fn)
+{
+    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        unsigned rd_ofs = vec_full_reg_offset(s, a->rd);
+        unsigned rn_ofs = vec_full_reg_offset(s, a->rn);
+        fn(a->esz, rd_ofs, rn_ofs, a->imm, vsz, vsz);
+    }
+    return true;
+}
+
+static bool trans_SSRA(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_fn2i(s, a, gen_gvec_ssra);
+}
+
+static bool trans_USRA(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_fn2i(s, a, gen_gvec_usra);
+}
+
+static bool trans_SRSRA(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_fn2i(s, a, gen_gvec_srsra);
+}
+
+static bool trans_URSRA(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_fn2i(s, a, gen_gvec_ursra);
+}
--
2.20.1

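The four gvec expanders dispatched above share one shape: shift each element
of Zn right by an immediate, optionally rounding, then accumulate into Zd.
A scalar sketch for a single int8_t lane (illustrative only; shift range 1..8,
as for these instructions):

#include <stdint.h>

static int8_t ssra_lane(int8_t d, int8_t n, int sh)
{
    return d + (n >> sh);   /* SSRA: plain arithmetic shift, then accumulate */
}

static int8_t srsra_lane(int8_t d, int8_t n, int sh)
{
    /* SRSRA: add the rounding constant before shifting */
    return d + (int8_t)(((int16_t)n + (1 << (sh - 1))) >> sh);
}
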
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-23-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sve.decode      |  5 +++++
 target/arm/translate-sve.c | 10 ++++++++++
 2 files changed, 15 insertions(+)

diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SSRA 01000101 .. 0 ..... 1110 00 ..... ..... @rd_rn_tszimm_shr
 USRA 01000101 .. 0 ..... 1110 01 ..... ..... @rd_rn_tszimm_shr
 SRSRA 01000101 .. 0 ..... 1110 10 ..... ..... @rd_rn_tszimm_shr
 URSRA 01000101 .. 0 ..... 1110 11 ..... ..... @rd_rn_tszimm_shr
+
+## SVE2 bitwise shift and insert
+
+SRI 01000101 .. 0 ..... 11110 0 ..... ..... @rd_rn_tszimm_shr
+SLI 01000101 .. 0 ..... 11110 1 ..... ..... @rd_rn_tszimm_shl
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_URSRA(DisasContext *s, arg_rri_esz *a)
 {
     return do_sve2_fn2i(s, a, gen_gvec_ursra);
 }
+
+static bool trans_SRI(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_fn2i(s, a, gen_gvec_sri);
+}
+
+static bool trans_SLI(DisasContext *s, arg_rri_esz *a)
+{
+    return do_sve2_fn2i(s, a, gen_gvec_sli);
+}
--
2.20.1

Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sve.decode      |  6 ++++++
 target/arm/translate-sve.c | 21 +++++++++++++++++++++
 2 files changed, 27 insertions(+)

diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ URSRA 01000101 .. 0 ..... 1110 11 ..... ..... @rd_rn_tszimm_shr

 SRI 01000101 .. 0 ..... 11110 0 ..... ..... @rd_rn_tszimm_shr
 SLI 01000101 .. 0 ..... 11110 1 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 integer absolute difference and accumulate
+
+# TODO: Use @rda and %reg_movprfx here.
+SABA 01000101 .. 0 ..... 11111 0 ..... ..... @rd_rn_rm
+UABA 01000101 .. 0 ..... 11111 1 ..... ..... @rd_rn_rm
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SLI(DisasContext *s, arg_rri_esz *a)
 {
     return do_sve2_fn2i(s, a, gen_gvec_sli);
 }
+
+static bool do_sve2_fn_zzz(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *fn)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
+    }
+    return true;
+}
+
+static bool trans_SABA(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_sve2_fn_zzz(s, a, gen_gvec_saba);
+}
+
+static bool trans_UABA(DisasContext *s, arg_rrr_esz *a)
+{
+    return do_sve2_fn_zzz(s, a, gen_gvec_uaba);
+}
--
2.20.1

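SABA and UABA accumulate a per-lane absolute difference into the destination,
with ordinary modular wraparound rather than saturation. One uint8_t lane as
a sketch (illustrative only):

#include <stdint.h>

static uint8_t uaba_lane(uint8_t d, uint8_t n, uint8_t m)
{
    return d + (n > m ? n - m : m - n);   /* wraps mod 256; no saturation */
}
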
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |  24 ++++
 target/arm/sve.decode      |  12 ++
 target/arm/sve_helper.c    |  56 +++++++++
 target/arm/translate-sve.c | 238 +++++++++++++++++++++++++++++++++++++
 4 files changed, 330 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG,

 DEF_HELPER_FLAGS_5(sve2_adcl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_adcl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SLI 01000101 .. 0 ..... 11110 1 ..... ..... @rd_rn_tszimm_shl
 # TODO: Use @rda and %reg_movprfx here.
 SABA 01000101 .. 0 ..... 11111 0 ..... ..... @rd_rn_rm
 UABA 01000101 .. 0 ..... 11111 1 ..... ..... @rd_rn_rm
+
+#### SVE2 Narrowing
+
+## SVE2 saturating extract narrow
+
+# Bits 23, 18-16 are zero, limited in the translator via esz < 3 & imm == 0.
+SQXTNB 01000101 .. 1 ..... 010 000 ..... ..... @rd_rn_tszimm_shl
+SQXTNT 01000101 .. 1 ..... 010 001 ..... ..... @rd_rn_tszimm_shl
+UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl
+UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl
+SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl
+SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD)

 #undef DO_ZZZW_ACC

+#define DO_XTNB(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+        TYPE nn = *(TYPE *)(vn + i); \
+        nn = OP(nn) & MAKE_64BIT_MASK(0, sizeof(TYPE) * 4); \
+        *(TYPE *)(vd + i) = nn; \
+    } \
+}
+
+#define DO_XTNT(NAME, TYPE, TYPEN, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc), odd = H(sizeof(TYPEN)); \
+    for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+        TYPE nn = *(TYPE *)(vn + i); \
+        *(TYPEN *)(vd + i + odd) = OP(nn); \
+    } \
+}
+
+#define DO_SQXTN_H(n) do_sat_bhs(n, INT8_MIN, INT8_MAX)
+#define DO_SQXTN_S(n) do_sat_bhs(n, INT16_MIN, INT16_MAX)
+#define DO_SQXTN_D(n) do_sat_bhs(n, INT32_MIN, INT32_MAX)
+
+DO_XTNB(sve2_sqxtnb_h, int16_t, DO_SQXTN_H)
+DO_XTNB(sve2_sqxtnb_s, int32_t, DO_SQXTN_S)
+DO_XTNB(sve2_sqxtnb_d, int64_t, DO_SQXTN_D)
+
+DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, DO_SQXTN_H)
+DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, DO_SQXTN_S)
+DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, DO_SQXTN_D)
+
+#define DO_UQXTN_H(n) do_sat_bhs(n, 0, UINT8_MAX)
+#define DO_UQXTN_S(n) do_sat_bhs(n, 0, UINT16_MAX)
+#define DO_UQXTN_D(n) do_sat_bhs(n, 0, UINT32_MAX)
+
+DO_XTNB(sve2_uqxtnb_h, uint16_t, DO_UQXTN_H)
+DO_XTNB(sve2_uqxtnb_s, uint32_t, DO_UQXTN_S)
+DO_XTNB(sve2_uqxtnb_d, uint64_t, DO_UQXTN_D)
+
+DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, DO_UQXTN_H)
+DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, DO_UQXTN_S)
+DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, DO_UQXTN_D)
+
+DO_XTNB(sve2_sqxtunb_h, int16_t, DO_UQXTN_H)
+DO_XTNB(sve2_sqxtunb_s, int32_t, DO_UQXTN_S)
+DO_XTNB(sve2_sqxtunb_d, int64_t, DO_UQXTN_D)
+
+DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, DO_UQXTN_H)
+DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, DO_UQXTN_S)
+DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, DO_UQXTN_D)
+
+#undef DO_XTNB
+#undef DO_XTNT
+
 void HELPER(sve2_adcl_s)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
 {
     intptr_t i, opr_sz = simd_oprsz(desc);
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_UABA(DisasContext *s, arg_rrr_esz *a)
 {
     return do_sve2_fn_zzz(s, a, gen_gvec_uaba);
 }
+
+static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a,
+                                   const GVecGen2 ops[3])
+{
+    if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 ||
+        !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
+                       vec_full_reg_offset(s, a->rn),
+                       vsz, vsz, &ops[a->esz]);
+    }
+    return true;
+}
+
+static const TCGOpcode sqxtn_list[] = {
+    INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+};
+
+static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t mask = (1ull << halfbits) - 1;
+    int64_t min = -1ull << (halfbits - 1);
+    int64_t max = -min - 1;
+
+    tcg_gen_dupi_vec(vece, t, min);
+    tcg_gen_smax_vec(vece, d, n, t);
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_smin_vec(vece, d, d, t);
+    tcg_gen_dupi_vec(vece, t, mask);
+    tcg_gen_and_vec(vece, d, d, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_sqxtnb_vec,
+          .opt_opc = sqxtn_list,
+          .fno = gen_helper_sve2_sqxtnb_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqxtnb_vec,
+          .opt_opc = sqxtn_list,
+          .fno = gen_helper_sve2_sqxtnb_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqxtnb_vec,
+          .opt_opc = sqxtn_list,
+          .fno = gen_helper_sve2_sqxtnb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t mask = (1ull << halfbits) - 1;
+    int64_t min = -1ull << (halfbits - 1);
+    int64_t max = -min - 1;
+
+    tcg_gen_dupi_vec(vece, t, min);
+    tcg_gen_smax_vec(vece, n, n, t);
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_smin_vec(vece, n, n, t);
+    tcg_gen_shli_vec(vece, n, n, halfbits);
+    tcg_gen_dupi_vec(vece, t, mask);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_sqxtnt_vec,
+          .opt_opc = sqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtnt_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqxtnt_vec,
+          .opt_opc = sqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtnt_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqxtnt_vec,
+          .opt_opc = sqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtnt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static const TCGOpcode uqxtn_list[] = {
+    INDEX_op_shli_vec, INDEX_op_umin_vec, 0
+};
+
+static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t max = (1ull << halfbits) - 1;
+
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_umin_vec(vece, d, n, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_uqxtnb_vec,
+          .opt_opc = uqxtn_list,
+          .fno = gen_helper_sve2_uqxtnb_h,
+          .vece = MO_16 },
+        { .fniv = gen_uqxtnb_vec,
+          .opt_opc = uqxtn_list,
+          .fno = gen_helper_sve2_uqxtnb_s,
+          .vece = MO_32 },
+        { .fniv = gen_uqxtnb_vec,
+          .opt_opc = uqxtn_list,
+          .fno = gen_helper_sve2_uqxtnb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t max = (1ull << halfbits) - 1;
+
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_umin_vec(vece, n, n, t);
+    tcg_gen_shli_vec(vece, n, n, halfbits);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_uqxtnt_vec,
+          .opt_opc = uqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqxtnt_h,
+          .vece = MO_16 },
+        { .fniv = gen_uqxtnt_vec,
+          .opt_opc = uqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqxtnt_s,
+          .vece = MO_32 },
+        { .fniv = gen_uqxtnt_vec,
+          .opt_opc = uqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqxtnt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static const TCGOpcode sqxtun_list[] = {
+    INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
+};
+
+static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t max = (1ull << halfbits) - 1;
+
+    tcg_gen_dupi_vec(vece, t, 0);
+    tcg_gen_smax_vec(vece, d, n, t);
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_umin_vec(vece, d, d, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_sqxtunb_vec,
+          .opt_opc = sqxtun_list,
+          .fno = gen_helper_sve2_sqxtunb_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqxtunb_vec,
+          .opt_opc = sqxtun_list,
+          .fno = gen_helper_sve2_sqxtunb_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqxtunb_vec,
+          .opt_opc = sqxtun_list,
+          .fno = gen_helper_sve2_sqxtunb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t max = (1ull << halfbits) - 1;
+
+    tcg_gen_dupi_vec(vece, t, 0);
+    tcg_gen_smax_vec(vece, n, n, t);
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_umin_vec(vece, n, n, t);
+    tcg_gen_shli_vec(vece, n, n, halfbits);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_sqxtunt_vec,
+          .opt_opc = sqxtun_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtunt_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqxtunt_vec,
+          .opt_opc = sqxtun_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtunt_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqxtunt_vec,
+          .opt_opc = sqxtun_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtunt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
--
2.20.1

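The vector expansion of SQXTNB above is clamp-then-mask: smax/smin pin the
wide value into the narrow signed range and the AND keeps it in the low half
of each wide lane (the T variants instead shift the result into the high half
and merge with bitsel). The same operation for one 16-to-8-bit lane in scalar
C (illustrative only):

#include <stdint.h>

static uint16_t sqxtnb_lane(int16_t n)
{
    int16_t v = n < INT8_MIN ? INT8_MIN : n > INT8_MAX ? INT8_MAX : n;
    return (uint16_t)v & 0xff;   /* narrowed result sits in the low half */
}
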
Deleted patch
From: Stephen Long <steplong@quicinc.com>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Stephen Long <steplong@quicinc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-26-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 35 +++++++++++++++++++++++++++++
 target/arm/sve.decode      |  8 +++++++
 target/arm/sve_helper.c    | 46 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 25 +++++++++++++++++++++
 4 files changed, 114 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_uqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl
 UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl
 SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl
 SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 floating-point pairwise operations
+
+FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm
+FMAXNMP 01100100 .. 010 10 0 100 ... ..... ..... @rdn_pg_rm
+FMINNMP 01100100 .. 010 10 1 100 ... ..... ..... @rdn_pg_rm
+FMAXP 01100100 .. 010 11 0 100 ... ..... ..... @rdn_pg_rm
+FMINP 01100100 .. 010 11 1 100 ... ..... ..... @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN)
 #undef DO_ZPZZ_PAIR
 #undef DO_ZPZZ_PAIR_D

+#define DO_ZPZZ_PAIR_FP(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
+                  void *status, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    for (i = 0; i < opr_sz; ) { \
+        uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+        do { \
+            TYPE n0 = *(TYPE *)(vn + H(i)); \
+            TYPE m0 = *(TYPE *)(vm + H(i)); \
+            TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+            TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+            if (pg & 1) { \
+                *(TYPE *)(vd + H(i)) = OP(n0, n1, status); \
+            } \
+            i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+            if (pg & 1) { \
+                *(TYPE *)(vd + H(i)) = OP(m0, m1, status); \
+            } \
+            i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+        } while (i & 15); \
+    } \
+}
+
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_h, float16, H1_2, float16_add)
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_s, float32, H1_4, float32_add)
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_d, float64, , float64_add)
+
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_h, float16, H1_2, float16_maxnum)
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_s, float32, H1_4, float32_maxnum)
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_d, float64, , float64_maxnum)
+
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_h, float16, H1_2, float16_minnum)
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_s, float32, H1_4, float32_minnum)
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_d, float64, , float64_minnum)
+
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_h, float16, H1_2, float16_max)
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_s, float32, H1_4, float32_max)
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_d, float64, , float64_max)
+
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_h, float16, H1_2, float16_min)
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_s, float32, H1_4, float32_min)
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_d, float64, , float64_min)
+
+#undef DO_ZPZZ_PAIR_FP
+
 /* Three-operand expander, controlled by a predicate, in which the
  * third operand is "wide". That is, for D = N op M, the same 64-bit
  * value of M is used with all of the narrower values of N.
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
     };
     return do_sve2_narrow_extract(s, a, ops);
 }
+
+static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
+                            gen_helper_gvec_4_ptr *fn)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    return do_zpzz_fp(s, a, fn);
+}
+
+#define DO_SVE2_ZPZZ_FP(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+    static gen_helper_gvec_4_ptr * const fns[4] = { \
+        NULL, gen_helper_sve2_##name##_zpzz_h, \
+        gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
+    }; \
+    return do_sve2_zpzz_fp(s, a, fns[a->esz]); \
+}
+
+DO_SVE2_ZPZZ_FP(FADDP, faddp)
+DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp)
+DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp)
+DO_SVE2_ZPZZ_FP(FMAXP, fmaxp)
+DO_SVE2_ZPZZ_FP(FMINP, fminp)
--
2.20.1

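The pairwise layout in these helpers: adjacent elements of Zn reduce into the
even-numbered result lanes and adjacent elements of Zm into the odd ones,
under predicate control. Ignoring predication and softfloat status (both of
which the real helpers thread through), the access pattern is (illustrative
only):

static void faddp_sketch(float *d, const float *n, const float *m, int pairs)
{
    for (int i = 0; i < pairs; i++) {
        d[2 * i]     = n[2 * i] + n[2 * i + 1];   /* pair from Zn */
        d[2 * i + 1] = m[2 * i] + m[2 * i + 1];   /* pair from Zm */
    }
}
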
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-27-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |  16 ++++
 target/arm/sve.decode      |   8 ++
 target/arm/sve_helper.c    |  54 ++++++++++++-
 target/arm/translate-sve.c | 160 +++++++++++++++++++++++++++++++++++++
 4 files changed, 236 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)

+DEF_HELPER_FLAGS_3(sve2_shrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_shrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_rshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_rshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl
 SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl
 SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl

+## SVE2 bitwise shift right narrow
+
+# Bit 23 == 0 is handled by esz > 0 in the translator.
+SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr
+SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr
+RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr
+RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr
+
 ## SVE2 floating-point pairwise operations

 FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
    when N is negative, add 2**M-1. */
 #define DO_ASRD(N, M) ((N + (N < 0 ? ((__typeof(N))1 << M) - 1 : 0)) >> M)

+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else if (sh == 64) {
+        return x >> 63;
+    } else {
+        return 0;
+    }
+}
+
 DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR)
 DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR)
 DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR)
@@ -XXX,XX +XXX,XX @@ DO_ZPZI(sve_asrd_h, int16_t, H1_2, DO_ASRD)
 DO_ZPZI(sve_asrd_s, int32_t, H1_4, DO_ASRD)
 DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD)

-#undef DO_SHR
-#undef DO_SHL
 #undef DO_ASRD
 #undef DO_ZPZI
 #undef DO_ZPZI_D

+#define DO_SHRNB(NAME, TYPEW, TYPEN, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    int shift = simd_data(desc); \
+    for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+        TYPEW nn = *(TYPEW *)(vn + i); \
+        *(TYPEW *)(vd + i) = (TYPEN)OP(nn, shift); \
+    } \
+}
+
+#define DO_SHRNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    int shift = simd_data(desc); \
+    for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+        TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+        *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, shift); \
+    } \
+}
+
+DO_SHRNB(sve2_shrnb_h, uint16_t, uint8_t, DO_SHR)
+DO_SHRNB(sve2_shrnb_s, uint32_t, uint16_t, DO_SHR)
+DO_SHRNB(sve2_shrnb_d, uint64_t, uint32_t, DO_SHR)
+
+DO_SHRNT(sve2_shrnt_h, uint16_t, uint8_t, H1_2, H1, DO_SHR)
+DO_SHRNT(sve2_shrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_SHR)
+DO_SHRNT(sve2_shrnt_d, uint64_t, uint32_t, , H1_4, DO_SHR)
+
+DO_SHRNB(sve2_rshrnb_h, uint16_t, uint8_t, do_urshr)
+DO_SHRNB(sve2_rshrnb_s, uint32_t, uint16_t, do_urshr)
+DO_SHRNB(sve2_rshrnb_d, uint64_t, uint32_t, do_urshr)
+
+DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr)
+DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr)
+DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, , H1_4, do_urshr)
+
+#undef DO_SHRNB
+#undef DO_SHRNT
+
 /* Fully general four-operand expander, controlled by a predicate.
  */
 #define DO_ZPZZZ(NAME, TYPE, H, OP) \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
     return do_sve2_narrow_extract(s, a, ops);
 }

+static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a,
+                               const GVecGen2i ops[3])
+{
+    if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    assert(a->imm > 0 && a->imm <= (8 << a->esz));
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
+                        vec_full_reg_offset(s, a->rn),
+                        vsz, vsz, a->imm, &ops[a->esz]);
+    }
+    return true;
+}
+
+static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
+{
+    int halfbits = 4 << vece;
+    uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
+
+    tcg_gen_shri_i64(d, n, shr);
+    tcg_gen_andi_i64(d, d, mask);
+}
+
+static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+    gen_shrnb_i64(MO_16, d, n, shr);
+}
+
+static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+    gen_shrnb_i64(MO_32, d, n, shr);
+}
+
+static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+    gen_shrnb_i64(MO_64, d, n, shr);
+}
+
+static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
+
+    tcg_gen_shri_vec(vece, n, n, shr);
+    tcg_gen_dupi_vec(vece, t, mask);
+    tcg_gen_and_vec(vece, d, n, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 };
+    static const GVecGen2i ops[3] = {
+        { .fni8 = gen_shrnb16_i64,
+          .fniv = gen_shrnb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_shrnb_h,
+          .vece = MO_16 },
+        { .fni8 = gen_shrnb32_i64,
+          .fniv = gen_shrnb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_shrnb_s,
+          .vece = MO_32 },
+        { .fni8 = gen_shrnb64_i64,
+          .fniv = gen_shrnb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_shrnb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
+{
+    int halfbits = 4 << vece;
+    uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
+
+    tcg_gen_shli_i64(n, n, halfbits - shr);
+    tcg_gen_andi_i64(n, n, ~mask);
+    tcg_gen_andi_i64(d, d, mask);
+    tcg_gen_or_i64(d, d, n);
+}
+
+static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+    gen_shrnt_i64(MO_16, d, n, shr);
+}
+
+static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+    gen_shrnt_i64(MO_32, d, n, shr);
+}
+
+static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+    tcg_gen_shri_i64(n, n, shr);
+    tcg_gen_deposit_i64(d, d, n, 32, 32);
+}
+
+static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
+
+    tcg_gen_shli_vec(vece, n, n, halfbits - shr);
+    tcg_gen_dupi_vec(vece, t, mask);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 };
+    static const GVecGen2i ops[3] = {
+        { .fni8 = gen_shrnt16_i64,
+          .fniv = gen_shrnt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_shrnt_h,
+          .vece = MO_16 },
+        { .fni8 = gen_shrnt32_i64,
+          .fniv = gen_shrnt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_shrnt_s,
+          .vece = MO_32 },
+        { .fni8 = gen_shrnt64_i64,
+          .fniv = gen_shrnt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_shrnt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2i ops[3] = {
+        { .fno = gen_helper_sve2_rshrnb_h },
+        { .fno = gen_helper_sve2_rshrnb_s },
+        { .fno = gen_helper_sve2_rshrnb_d },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2i ops[3] = {
+        { .fno = gen_helper_sve2_rshrnt_h },
+        { .fno = gen_helper_sve2_rshrnt_s },
+        { .fno = gen_helper_sve2_rshrnt_d },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
 static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
                             gen_helper_gvec_4_ptr *fn)
 {
--
2.20.1

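do_urshr above is the rounding primitive shared by all of the rounding
narrowing helpers: shift right, then add back the last bit shifted out, so
for example urshr(7, 1) yields 4 where a plain shift would truncate to 3.
Restated standalone (illustrative only):

#include <stdint.h>

static uint64_t urshr(uint64_t x, unsigned sh)   /* valid for 1 <= sh <= 64 */
{
    if (sh < 64) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    }
    return x >> 63;   /* sh == 64: only the rounding bit survives */
}
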
Deleted patch
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-29-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 16 +++++++
 target/arm/sve.decode      |  4 ++
 target/arm/sve_helper.c    | 24 ++++++++++
 target/arm/translate-sve.c | 93 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 137 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)

+DEF_HELPER_FLAGS_3(sve2_uqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr
 SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr
 RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr
 RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr
+UQSHRNB 01000101 .. 1 ..... 00 1100 ..... ..... @rd_rn_tszimm_shr
+UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr
+UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr
+UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr

 ## SVE2 floating-point pairwise operations

diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
 DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
 DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D)

+#define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX)
+#define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX)
+#define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX)
+
+DO_SHRNB(sve2_uqshrnb_h, uint16_t, uint8_t, DO_UQSHRN_H)
+DO_SHRNB(sve2_uqshrnb_s, uint32_t, uint16_t, DO_UQSHRN_S)
+DO_SHRNB(sve2_uqshrnb_d, uint64_t, uint32_t, DO_UQSHRN_D)
+
+DO_SHRNT(sve2_uqshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQSHRN_H)
+DO_SHRNT(sve2_uqshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQSHRN_S)
+DO_SHRNT(sve2_uqshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQSHRN_D)
+
+#define DO_UQRSHRN_H(x, sh) MIN(do_urshr(x, sh), UINT8_MAX)
+#define DO_UQRSHRN_S(x, sh) MIN(do_urshr(x, sh), UINT16_MAX)
+#define DO_UQRSHRN_D(x, sh) MIN(do_urshr(x, sh), UINT32_MAX)
+
+DO_SHRNB(sve2_uqrshrnb_h, uint16_t, uint8_t, DO_UQRSHRN_H)
+DO_SHRNB(sve2_uqrshrnb_s, uint32_t, uint16_t, DO_UQRSHRN_S)
+DO_SHRNB(sve2_uqrshrnb_d, uint64_t, uint32_t, DO_UQRSHRN_D)
+
+DO_SHRNT(sve2_uqrshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQRSHRN_H)
+DO_SHRNT(sve2_uqrshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQRSHRN_S)
+DO_SHRNT(sve2_uqrshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQRSHRN_D)
+
 #undef DO_SHRNB
 #undef DO_SHRNT

diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
     return do_sve2_shr_narrow(s, a, ops);
 }

+static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
+                            TCGv_vec n, int64_t shr)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+
+    tcg_gen_shri_vec(vece, n, n, shr);
+    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+    tcg_gen_umin_vec(vece, d, n, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const TCGOpcode vec_list[] = {
+        INDEX_op_shri_vec, INDEX_op_umin_vec, 0
+    };
+    static const GVecGen2i ops[3] = {
+        { .fniv = gen_uqshrnb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_uqshrnb_h,
+          .vece = MO_16 },
+        { .fniv = gen_uqshrnb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_uqshrnb_s,
+          .vece = MO_32 },
+        { .fniv = gen_uqshrnb_vec,
+          .opt_opc = vec_list,
+          .fno = gen_helper_sve2_uqshrnb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
+                            TCGv_vec n, int64_t shr)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+
+    tcg_gen_shri_vec(vece, n, n, shr);
+    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+    tcg_gen_umin_vec(vece, n, n, t);
+    tcg_gen_shli_vec(vece, n, n, halfbits);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const TCGOpcode vec_list[] = {
+        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
+    };
+    static const GVecGen2i ops[3] = {
+        { .fniv = gen_uqshrnt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqshrnt_h,
+          .vece = MO_16 },
+        { .fniv = gen_uqshrnt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqshrnt_s,
+          .vece = MO_32 },
+        { .fniv = gen_uqshrnt_vec,
+          .opt_opc = vec_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqshrnt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2i ops[3] = {
+        { .fno = gen_helper_sve2_uqrshrnb_h },
+        { .fno = gen_helper_sve2_uqrshrnb_s },
+        { .fno = gen_helper_sve2_uqrshrnb_d },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2i ops[3] = {
+        { .fno = gen_helper_sve2_uqrshrnt_h },
+        { .fno = gen_helper_sve2_uqrshrnt_s },
+        { .fno = gen_helper_sve2_uqrshrnt_d },
+    };
+    return do_sve2_shr_narrow(s, a, ops);
+}
+
 static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
                             gen_helper_gvec_4_ptr *fn)
 {
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-37-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    | 28 ++++++++++++++
 target/arm/sve.decode      | 11 ++++++
 target/arm/sve_helper.c    | 18 +++++++++
 target/arm/translate-sve.c | 76 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 133 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SQDMLSLBT 01000100 .. 0 ..... 00001 1 ..... ..... @rda_rn_rm

 SQRDMLAH_zzzz 01000100 .. 0 ..... 01110 0 ..... ..... @rda_rn_rm
 SQRDMLSH_zzzz 01000100 .. 0 ..... 01110 1 ..... ..... @rda_rn_rm
+
+## SVE2 integer multiply-add long
+
+SMLALB_zzzw 01000100 .. 0 ..... 010 000 ..... ..... @rda_rn_rm
+SMLALT_zzzw 01000100 .. 0 ..... 010 001 ..... ..... @rda_rn_rm
+UMLALB_zzzw 01000100 .. 0 ..... 010 010 ..... ..... @rda_rn_rm
+UMLALT_zzzw 01000100 .. 0 ..... 010 011 ..... ..... @rda_rn_rm
+SMLSLB_zzzw 01000100 .. 0 ..... 010 100 ..... ..... @rda_rn_rm
+SMLSLT_zzzw 01000100 .. 0 ..... 010 101 ..... ..... @rda_rn_rm
+UMLSLB_zzzw 01000100 .. 0 ..... 010 110 ..... ..... @rda_rn_rm
+UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZZZW_ACC(sve2_uabal_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
 DO_ZZZW_ACC(sve2_uabal_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
 DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD)

+DO_ZZZW_ACC(sve2_smlal_zzzw_h, int16_t, int8_t, H1_2, H1, DO_MUL)
+DO_ZZZW_ACC(sve2_smlal_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZW_ACC(sve2_smlal_zzzw_d, int64_t, int32_t, , H1_4, DO_MUL)
+
+DO_ZZZW_ACC(sve2_umlal_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_MUL)
+DO_ZZZW_ACC(sve2_umlal_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZW_ACC(sve2_umlal_zzzw_d, uint64_t, uint32_t, , H1_4, DO_MUL)
+
+#define DO_NMUL(N, M) -(N * M)
+
+DO_ZZZW_ACC(sve2_smlsl_zzzw_h, int16_t, int8_t, H1_2, H1, DO_NMUL)
+DO_ZZZW_ACC(sve2_smlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_NMUL)
+DO_ZZZW_ACC(sve2_smlsl_zzzw_d, int64_t, int32_t, , H1_4, DO_NMUL)
+
+DO_ZZZW_ACC(sve2_umlsl_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_NMUL)
+DO_ZZZW_ACC(sve2_umlsl_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_NMUL)
+DO_ZZZW_ACC(sve2_umlsl_zzzw_d, uint64_t, uint32_t, , H1_4, DO_NMUL)
+
 #undef DO_ZZZW_ACC

 #define DO_XTNB(NAME, TYPE, OP) \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRDMLSH_zzzz(DisasContext *s, arg_rrrr_esz *a)
     };
     return do_sve2_zzzz_ool(s, a, fns[a->esz], 0);
 }
+
+static bool do_smlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        NULL, gen_helper_sve2_smlal_zzzw_h,
+        gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d,
+    };
+    return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_SMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_smlal_zzzw(s, a, false);
+}
+
+static bool trans_SMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_smlal_zzzw(s, a, true);
+}
+
+static bool do_umlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        NULL, gen_helper_sve2_umlal_zzzw_h,
+        gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d,
+    };
+    return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_UMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_umlal_zzzw(s, a, false);
+}
+
+static bool trans_UMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_umlal_zzzw(s, a, true);
+}
+
+static bool do_smlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        NULL, gen_helper_sve2_smlsl_zzzw_h,
+        gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d,
+    };
+    return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_SMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_smlsl_zzzw(s, a, false);
+}
+
+static bool trans_SMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_smlsl_zzzw(s, a, true);
+}
+
+static bool do_umlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        NULL, gen_helper_sve2_umlsl_zzzw_h,
+        gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d,
+    };
+    return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_UMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_umlsl_zzzw(s, a, false);
+}
+
+static bool trans_UMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_umlsl_zzzw(s, a, true);
+}
--
2.20.1

From: Leif Lindholm <quic_llindhol@quicinc.com>

I'm migrating to Qualcomm's new open source email infrastructure, so
update my email address, and update the mailmap to match.

Signed-off-by: Leif Lindholm <leif.lindholm@oss.qualcomm.com>
Reviewed-by: Leif Lindholm <quic_llindhol@quicinc.com>
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20241205114047.1125842-1-leif.lindholm@oss.qualcomm.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 MAINTAINERS | 2 +-
 .mailmap    | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: include/hw/ssi/imx_spi.h
 SBSA-REF
 M: Radoslaw Biernacki <rad@semihalf.com>
 M: Peter Maydell <peter.maydell@linaro.org>
-R: Leif Lindholm <quic_llindhol@quicinc.com>
+R: Leif Lindholm <leif.lindholm@oss.qualcomm.com>
 R: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
 L: qemu-arm@nongnu.org
 S: Maintained
diff --git a/.mailmap b/.mailmap
index XXXXXXX..XXXXXXX 100644
--- a/.mailmap
+++ b/.mailmap
@@ -XXX,XX +XXX,XX @@ Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
 Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
 James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
 Juan Quintela <quintela@trasno.org> <quintela@redhat.com>
-Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
-Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
+Leif Lindholm <leif.lindholm@oss.qualcomm.com> <quic_llindhol@quicinc.com>
+Leif Lindholm <leif.lindholm@oss.qualcomm.com> <leif.lindholm@linaro.org>
+Leif Lindholm <leif.lindholm@oss.qualcomm.com> <leif@nuviainc.com>
 Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
 Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
 Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
--
2.34.1

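The DO_ZZZW_ACC expansions in the multiply-add-long patch above all reduce to:
take the selected (bottom or top) half-width elements of Zn and Zm, widen,
multiply (negated via DO_NMUL for the subtract forms), and accumulate into the
wide lanes of Zda. One SMLALB lane as scalar C (illustrative only; modular
arithmetic, as in the helpers):

#include <stdint.h>

static int16_t smlalb_lane(int16_t da, int8_t n_bot, int8_t m_bot)
{
    return da + (int16_t)n_bot * (int16_t)m_bot;
}
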
From: Richard Henderson <richard.henderson@linaro.org>

SVE2 has two additional sizes of the operation and unlike NEON,
there is no saturation flag. Create new entry points for SVE2
that do not set QC.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-36-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h        |  17 ++++
 target/arm/sve.decode      |   5 ++
 target/arm/translate-sve.c |  18 +++++
 target/arm/vec_helper.c    | 161 +++++++++++++++++++++++++++++++++++--
 4 files changed, 195 insertions(+), 6 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ SQDMLSLT_zzzw   01000100 .. 0 ..... 0110 11 ..... .....  @rda_rn_rm

 SQDMLALBT       01000100 .. 0 ..... 00001 0 ..... .....  @rda_rn_rm
 SQDMLSLBT       01000100 .. 0 ..... 00001 1 ..... .....  @rda_rn_rm
+
+## SVE2 saturating multiply-add high
+
+SQRDMLAH_zzzz   01000100 .. 0 ..... 01110 0 ..... .....  @rda_rn_rm
+SQRDMLSH_zzzz   01000100 .. 0 ..... 01110 1 ..... .....  @rda_rn_rm
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SQDMLSLBT(DisasContext *s, arg_rrrr_esz *a)
 {
     return do_sqdmlsl_zzzw(s, a, false, true);
 }
+
+static bool trans_SQRDMLAH_zzzz(DisasContext *s, arg_rrrr_esz *a)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h,
+        gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d,
+    };
+    return do_sve2_zzzz_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_SQRDMLSH_zzzz(DisasContext *s, arg_rrrr_esz *a)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h,
+        gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d,
+    };
+    return do_sve2_zzzz_ool(s, a, fns[a->esz], 0);
+}
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/helper-proto.h"
 #include "tcg/tcg-gvec-desc.h"
 #include "fpu/softfloat.h"
+#include "qemu/int128.h"
 #include "vec_internal.h"

 /* Note that vector data is stored in host-endian 64-bit chunks,
@@ -XXX,XX +XXX,XX @@
 #define H4(x) (x)
 #endif

+/* Signed saturating rounding doubling multiply-accumulate high half, 8-bit */
+static int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3,
+                            bool neg, bool round)
+{
+    /*
+     * Simplify:
+     * = ((a3 << 8) + ((e1 * e2) << 1) + (round << 7)) >> 8
+     * = ((a3 << 7) + (e1 * e2) + (round << 6)) >> 7
+     */
+    int32_t ret = (int32_t)src1 * src2;
+    if (neg) {
+        ret = -ret;
+    }
+    ret += ((int32_t)src3 << 7) + (round << 6);
+    ret >>= 7;
+
+    if (ret != (int8_t)ret) {
+        ret = (ret < 0 ? INT8_MIN : INT8_MAX);
+    }
+    return ret;
+}
+
+void HELPER(sve2_sqrdmlah_b)(void *vd, void *vn, void *vm,
+                             void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int8_t *d = vd, *n = vn, *m = vm, *a = va;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = do_sqrdmlah_b(n[i], m[i], a[i], false, true);
+    }
+}
+
+void HELPER(sve2_sqrdmlsh_b)(void *vd, void *vn, void *vm,
+                             void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int8_t *d = vd, *n = vn, *m = vm, *a = va;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = do_sqrdmlah_b(n[i], m[i], a[i], true, true);
+    }
+}
+
 /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
 static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
                              bool neg, bool round, uint32_t *sat)
 {
-    /*
-     * Simplify:
-     * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16
-     * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15
-     */
+    /* Simplify similarly to do_sqrdmlah_b above. */
     int32_t ret = (int32_t)src1 * src2;
     if (neg) {
         ret = -ret;
@@ -XXX,XX +XXX,XX @@ void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm,
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }

+void HELPER(sve2_sqrdmlah_h)(void *vd, void *vn, void *vm,
+                             void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int16_t *d = vd, *n = vn, *m = vm, *a = va;
+    uint32_t discard;
+
+    for (i = 0; i < opr_sz / 2; ++i) {
+        d[i] = do_sqrdmlah_h(n[i], m[i], a[i], false, true, &discard);
+    }
+}
+
+void HELPER(sve2_sqrdmlsh_h)(void *vd, void *vn, void *vm,
+                             void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int16_t *d = vd, *n = vn, *m = vm, *a = va;
+    uint32_t discard;
+
+    for (i = 0; i < opr_sz / 2; ++i) {
+        d[i] = do_sqrdmlah_h(n[i], m[i], a[i], true, true, &discard);
+    }
+}
+
 /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
 static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
                              bool neg, bool round, uint32_t *sat)
 {
-    /* Simplify similarly to int_qrdmlah_s16 above. */
+    /* Simplify similarly to do_sqrdmlah_b above. */
     int64_t ret = (int64_t)src1 * src2;
     if (neg) {
         ret = -ret;
@@ -XXX,XX +XXX,XX @@ void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm,
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }

+void HELPER(sve2_sqrdmlah_s)(void *vd, void *vn, void *vm,
+                             void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int32_t *d = vd, *n = vn, *m = vm, *a = va;
+    uint32_t discard;
+
+    for (i = 0; i < opr_sz / 4; ++i) {
+        d[i] = do_sqrdmlah_s(n[i], m[i], a[i], false, true, &discard);
+    }
+}
+
+void HELPER(sve2_sqrdmlsh_s)(void *vd, void *vn, void *vm,
+                             void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int32_t *d = vd, *n = vn, *m = vm, *a = va;
+    uint32_t discard;
+
+    for (i = 0; i < opr_sz / 4; ++i) {
+        d[i] = do_sqrdmlah_s(n[i], m[i], a[i], true, true, &discard);
+    }
+}
+
+/* Signed saturating rounding doubling multiply-accumulate high half, 64-bit */
+static int64_t do_sat128_d(Int128 r)
+{
+    int64_t ls = int128_getlo(r);
+    int64_t hs = int128_gethi(r);
+
+    if (unlikely(hs != (ls >> 63))) {
+        return hs < 0 ? INT64_MIN : INT64_MAX;
+    }
+    return ls;
+}
+
+static int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a,
+                             bool neg, bool round)
+{
+    uint64_t l, h;
+    Int128 r, t;
+
+    /* As in do_sqrdmlah_b, but with 128-bit arithmetic. */
+    muls64(&l, &h, m, n);
+    r = int128_make128(l, h);
+    if (neg) {
+        r = int128_neg(r);
+    }
+    if (a) {
+        t = int128_exts64(a);
+        t = int128_lshift(t, 63);
+        r = int128_add(r, t);
+    }
+    if (round) {
+        t = int128_exts64(1ll << 62);
+        r = int128_add(r, t);
+    }
+    r = int128_rshift(r, 63);
+
+    return do_sat128_d(r);
+}
+
+void HELPER(sve2_sqrdmlah_d)(void *vd, void *vn, void *vm,
+                             void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int64_t *d = vd, *n = vn, *m = vm, *a = va;
+
+    for (i = 0; i < opr_sz / 8; ++i) {
+        d[i] = do_sqrdmlah_d(n[i], m[i], a[i], false, true);
+    }
+}
+
+void HELPER(sve2_sqrdmlsh_d)(void *vd, void *vn, void *vm,
+                             void *va, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    int64_t *d = vd, *n = vn, *m = vm, *a = va;
+
+    for (i = 0; i < opr_sz / 8; ++i) {
+        d[i] = do_sqrdmlah_d(n[i], m[i], a[i], true, true);
+    }
+}
+
 /* Integer 8 and 16-bit dot-product.
  *
  * Note that for the loops herein, host endianness does not matter
--
2.20.1
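
For anyone checking the fixed-point details in do_sqrdmlah_b() above,
here is a throwaway reference program; it is not QEMU code, and the
name sqrdmlah_b_ref and the sample values are invented for
illustration. It shows the rounding step and the saturate-without-QC
behaviour on the one multiply that can overflow, INT8_MIN * INT8_MIN:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static int8_t sqrdmlah_b_ref(int8_t n, int8_t m, int8_t a, bool neg)
{
    /* ((a << 8) + 2*n*m + (1 << 7)) >> 8, computed at half scale */
    int32_t ret = (int32_t)n * m;
    if (neg) {
        ret = -ret;                         /* the SQRDMLSH variant */
    }
    ret += ((int32_t)a << 7) + (1 << 6);    /* accumulate and round */
    ret >>= 7;
    if (ret != (int8_t)ret) {               /* saturate; no QC flag set */
        ret = ret < 0 ? INT8_MIN : INT8_MAX;
    }
    return ret;
}

int main(void)
{
    /* Q7 fixed point: 0.5 * 0.5 = 0.25, so 64 * 64 -> 32 */
    printf("%d\n", sqrdmlah_b_ref(64, 64, 0, false));
    /* -1.0 * -1.0 = +1.0 is unrepresentable in Q7, so -128 * -128
       saturates to 127 instead of setting a flag */
    printf("%d\n", sqrdmlah_b_ref(-128, -128, 0, false));
    return 0;
}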
From: Vikram Garhwal <vikram.garhwal@bytedance.com>

Previously, the maintainer role was paused due to an inactive email
address; see commit c009d715721861984c4987bcc78b7ee183e86d75.

Signed-off-by: Vikram Garhwal <vikram.garhwal@bytedance.com>
Reviewed-by: Francisco Iglesias <francisco.iglesias@amd.com>
Message-id: 20241204184205.12952-1-vikram.garhwal@bytedance.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 MAINTAINERS | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: tests/qtest/fuzz-sb16-test.c

 Xilinx CAN
 M: Francisco Iglesias <francisco.iglesias@amd.com>
+M: Vikram Garhwal <vikram.garhwal@bytedance.com>
 S: Maintained
 F: hw/net/can/xlnx-*
 F: include/hw/net/xlnx-*
@@ -XXX,XX +XXX,XX @@ F: include/hw/rx/
 CAN bus subsystem and hardware
 M: Pavel Pisa <pisa@cmp.felk.cvut.cz>
 M: Francisco Iglesias <francisco.iglesias@amd.com>
+M: Vikram Garhwal <vikram.garhwal@bytedance.com>
 S: Maintained
 W: https://canbus.pages.fel.cvut.cz/
 F: net/can/*
--
2.34.1