Just my fp16 work, plus some small stuff for the sbsa-ref board;
but my rule of thumb is to send a pullreq once I get over about
30 patches...

-- PMM

The following changes since commit 2f4c51c0f384d7888a04b4815861e6d5fd244d75:

  Merge remote-tracking branch 'remotes/kraxel/tags/usb-20200831-pull-request' into staging (2020-08-31 19:39:13 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20200901

for you to fetch changes up to 3f462bf0f6ea6382dd1502d4eb1fcd33c8e774f5:

  hw/arm/sbsa-ref : Add embedded controller in secure memory (2020-09-01 14:01:34 +0100)

----------------------------------------------------------------
target-arm queue:
 * Implement fp16 support for AArch32 VFP and Neon
 * hw/arm/sbsa-ref: add "reg" property to DT cpu nodes
 * hw/arm/sbsa-ref : Add embedded controller in secure memory

----------------------------------------------------------------
Graeme Gregory (2):
      hw/misc/sbsa_ec : Add an embedded controller for sbsa-ref
      hw/arm/sbsa-ref : Add embedded controller in secure memory

Leif Lindholm (1):
      hw/arm/sbsa-ref: add "reg" property to DT cpu nodes

Peter Maydell (44):
      target/arm: Remove local definitions of float constants
      target/arm: Use correct ID register check for aa32_fp16_arith
      target/arm: Implement VFP fp16 for VFP_BINOP operations
      target/arm: Implement VFP fp16 VMLA, VMLS, VNMLS, VNMLA, VNMUL
      target/arm: Macroify trans functions for VFMA, VFMS, VFNMA, VFNMS
      target/arm: Implement VFP fp16 for fused-multiply-add
      target/arm: Macroify uses of do_vfp_2op_sp() and do_vfp_2op_dp()
      target/arm: Implement VFP fp16 for VABS, VNEG, VSQRT
      target/arm: Implement VFP fp16 for VMOV immediate
      target/arm: Implement VFP fp16 VCMP
      target/arm: Implement VFP fp16 VLDR and VSTR
      target/arm: Implement VFP fp16 VCVT between float and integer
      target/arm: Make VFP_CONV_FIX macros take separate float type and float size
      target/arm: Use macros instead of open-coding fp16 conversion helpers
      target/arm: Implement VFP fp16 VCVT between float and fixed-point
      target/arm: Implement VFP fp16 VCVT-with-specified-rounding-mode
      target/arm: Implement VFP fp16 VSEL
      target/arm: Implement VFP fp16 VRINT*
      target/arm: Implement new VFP fp16 insn VINS
      target/arm: Implement new VFP fp16 insn VMOVX
      target/arm: Implement VFP fp16 VMOV between gp and halfprec registers
      target/arm: Implement FP16 for Neon VADD, VSUB, VABD, VMUL
      target/arm: Implement fp16 for Neon VRECPE, VRSQRTE using gvec
      target/arm: Implement fp16 for Neon VABS, VNEG of floats
      target/arm: Implement fp16 for VCEQ, VCGE, VCGT comparisons
      target/arm: Implement fp16 for VACGE, VACGT
      target/arm: Implement fp16 for Neon VMAX, VMIN
      target/arm: Implement fp16 for Neon VMAXNM, VMINNM
      target/arm: Implement fp16 for Neon VMLA, VMLS operations
      target/arm: Implement fp16 for Neon VFMA, VFMS
      target/arm: Implement fp16 for Neon fp compare-vs-0
      target/arm: Implement fp16 for Neon VRECPS
      target/arm: Implement fp16 for Neon VRSQRTS
      target/arm: Implement fp16 for Neon pairwise fp ops
      target/arm: Implement fp16 for Neon float-integer VCVT
      target/arm: Convert Neon VCVT fixed-point to gvec
      target/arm: Implement fp16 for Neon VCVT fixed-point
      target/arm: Implement fp16 for Neon VCVT with rounding modes
      target/arm: Implement fp16 for Neon VRINT-with-specified-rounding-mode
      target/arm: Implement fp16 for Neon VRINTX
      target/arm/vec_helper: Handle oprsz less than 16 bytes in indexed operations
      target/arm/vec_helper: Add gvec fp indexed multiply-and-add operations
      target/arm: Implement fp16 for Neon VMUL, VMLA, VMLS
      target/arm: Enable FP16 in '-cpu max'

 target/arm/cpu.h                |   7 +-
 target/arm/helper.h             | 133 ++++++-
 target/arm/neon-dp.decode       |   8 +-
 target/arm/vfp-uncond.decode    |  27 +-
 target/arm/vfp.decode           |  34 +-
 hw/arm/sbsa-ref.c               |  43 ++-
 hw/misc/sbsa_ec.c               |  98 +++++
 target/arm/cpu.c                |   3 +-
 target/arm/cpu64.c              |  10 +-
 target/arm/helper-a64.c         |  11 -
 target/arm/translate-sve.c      |   4 -
 target/arm/vec_helper.c         | 431 ++++++++++++++++++++-
 target/arm/vfp_helper.c         | 244 +++++-------
 hw/misc/meson.build             |   2 +
 target/arm/translate-neon.c.inc | 755 +++++++++++++------------------------
 target/arm/translate-vfp.c.inc  | 810 ++++++++++++++++++++++++++++++++++++----
 16 files changed, 1819 insertions(+), 801 deletions(-)
 create mode 100644 hw/misc/sbsa_ec.c


Small pile of bug fixes for rc1. I've included my patches to get
our docs building with Sphinx 3, just for convenience...

-- PMM

The following changes since commit b149dea55cce97cb226683d06af61984a1c11e96:

  Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20201102' into staging (2020-11-02 10:57:48 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20201102

for you to fetch changes up to ffb4fbf90a2f63c9cb33e4bb9f854c79bf04ca4a:

  tests/qtest/npcm7xx_rng-test: Disable randomness tests (2020-11-02 16:52:18 +0000)

----------------------------------------------------------------
target-arm queue:
 * target/arm: Fix Neon emulation bugs on big-endian hosts
 * target/arm: fix handling of HCR.FB
 * target/arm: fix LORID_EL1 access check
 * disas/capstone: Fix monitor disassembly of >32 bytes
 * hw/arm/smmuv3: Fix potential integer overflow (CID 1432363)
 * hw/arm/boot: fix SVE for EL3 direct kernel boot
 * hw/display/omap_lcdc: Fix potential NULL pointer dereference
 * hw/display/exynos4210_fimd: Fix potential NULL pointer dereference
 * target/arm: Get correct MMU index for other-security-state
 * configure: Test that gio libs from pkg-config work
 * hw/intc/arm_gicv3_cpuif: Make GIC maintenance interrupts work
 * docs: Fix building with Sphinx 3
 * tests/qtest/npcm7xx_rng-test: Disable randomness tests

----------------------------------------------------------------
AlexChen (2):
      hw/display/omap_lcdc: Fix potential NULL pointer dereference
      hw/display/exynos4210_fimd: Fix potential NULL pointer dereference

Peter Maydell (9):
      target/arm: Fix float16 pairwise Neon ops on big-endian hosts
      target/arm: Fix VUDOT/VSDOT (scalar) on big-endian hosts
      disas/capstone: Fix monitor disassembly of >32 bytes
      target/arm: Get correct MMU index for other-security-state
      configure: Test that gio libs from pkg-config work
      hw/intc/arm_gicv3_cpuif: Make GIC maintenance interrupts work
      scripts/kerneldoc: For Sphinx 3 use c:macro for macros with arguments
      qemu-option-trace.rst.inc: Don't use option:: markup
      tests/qtest/npcm7xx_rng-test: Disable randomness tests

Philippe Mathieu-Daudé (1):
      hw/arm/smmuv3: Fix potential integer overflow (CID 1432363)

Richard Henderson (11):
      target/arm: Introduce neon_full_reg_offset
      target/arm: Move neon_element_offset to translate.c
      target/arm: Use neon_element_offset in neon_load/store_reg
      target/arm: Use neon_element_offset in vfp_reg_offset
      target/arm: Add read/write_neon_element32
      target/arm: Expand read/write_neon_element32 to all MemOp
      target/arm: Rename neon_load_reg32 to vfp_load_reg32
      target/arm: Add read/write_neon_element64
      target/arm: Rename neon_load_reg64 to vfp_load_reg64
      target/arm: Simplify do_long_3d and do_2scalar_long
      target/arm: Improve do_prewiden_3d

Rémi Denis-Courmont (3):
      target/arm: fix handling of HCR.FB
      target/arm: fix LORID_EL1 access check
      hw/arm/boot: fix SVE for EL3 direct kernel boot

 docs/qemu-option-trace.rst.inc     |   6 +-
 configure                          |  10 +-
 include/hw/intc/arm_gicv3_common.h |   1 -
 disas/capstone.c                   |   2 +-
 hw/arm/boot.c                      |   3 +
 hw/arm/smmuv3.c                    |   3 +-
 hw/display/exynos4210_fimd.c       |   4 +-
 hw/display/omap_lcdc.c             |  10 +-
 hw/intc/arm_gicv3_cpuif.c          |   5 +-
 target/arm/helper.c                |  24 +-
 target/arm/m_helper.c              |   3 +-
 target/arm/translate.c             | 153 +++++++++---
 target/arm/vec_helper.c            |  12 +-
 tests/qtest/npcm7xx_rng-test.c     |  14 +-
 scripts/kernel-doc                 |  18 +-
 target/arm/translate-neon.c.inc    | 472 ++++++++++++++++++++-----------------
 target/arm/translate-vfp.c.inc     | 341 +++++++++++----------------
 17 files changed, 588 insertions(+), 493 deletions(-)
In several places the target/arm code defines local float constants
for 2, 3 and 1.5, which are also provided by include/fpu/softfloat.h.
Remove the unnecessary local duplicate versions.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-2-peter.maydell@linaro.org
---
 target/arm/helper-a64.c    | 11 -----------
 target/arm/translate-sve.c |  4 ----
 target/arm/vfp_helper.c    |  4 ----
 3 files changed, 19 deletions(-)

diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
  * versions, these do a fully fused multiply-add or
  * multiply-add-and-halve.
  */
-#define float16_two make_float16(0x4000)
-#define float16_three make_float16(0x4200)
-#define float16_one_point_five make_float16(0x3e00)
-
-#define float32_two make_float32(0x40000000)
-#define float32_three make_float32(0x40400000)
-#define float32_one_point_five make_float32(0x3fc00000)
-
-#define float64_two make_float64(0x4000000000000000ULL)
-#define float64_three make_float64(0x4008000000000000ULL)
-#define float64_one_point_five make_float64(0x3FF8000000000000ULL)
 
 uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
 {
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a) \
     return true;                                                  \
 }
 
-#define float16_two make_float16(0x4000)
-#define float32_two make_float32(0x40000000)
-#define float64_two make_float64(0x4000000000000000ULL)
-
 DO_FP_IMM(FADD, fadds, half, one)
 DO_FP_IMM(FSUB, fsubs, half, one)
 DO_FP_IMM(FMUL, fmuls, half, two)
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
     return r;
 }
 
-#define float32_two make_float32(0x40000000)
-#define float32_three make_float32(0x40400000)
-#define float32_one_point_five make_float32(0x3fc00000)
-
 float32 HELPER(recps_f32)(CPUARMState *env, float32 a, float32 b)
 {
     float_status *s = &env->vfp.standard_fp_status;
--
2.20.1
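For reference, the constants this patch deduplicates live centrally in
include/fpu/softfloat.h; a minimal sketch of the equivalent shared
definitions (names and encodings taken from the deleted lines above;
the float32/float64 variants are analogous):

    /* IEEE half-precision encodings of 2.0, 3.0 and 1.5 */
    #define float16_two             make_float16(0x4000)
    #define float16_three           make_float16(0x4200)
    #define float16_one_point_five  make_float16(0x3e00)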
The aa32_fp16_arith feature check function currently looks at the
AArch64 ID_AA64PFR0 register. This is (as the comment notes) not
correct. The bogus check was put in mostly to allow testing of the
fp16 variants of the VCMLA instructions and it was something of
a mistake that we allowed them to exist in master.

Switch the feature check function to testing MVFR1.FPHP, which is
what it ought to be.

This will remove emulation of the VCMLA and VCADD insns from
AArch32 code running on an AArch64 '-cpu max' using system emulation.
(They were never enabled for aarch32 linux-user and system-emulation.)
Since we weren't advertising their existence via the AArch32 ID
register, well-behaved guests wouldn't have been using them anyway.

Once we have implemented all the AArch32 support for the FP16 extension
we will advertise it in the MVFR1 ID register field, which will reenable
these insns along with all the others.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-3-peter.maydell@linaro.org
---
 target/arm/cpu.h | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
 
 static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
 {
-    /*
-     * This is a placeholder for use by VCMA until the rest of
-     * the ARMv8.2-FP16 extension is implemented for aa32 mode.
-     * At which point we can properly set and check MVFR1.FPHP.
-     */
-    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+    return FIELD_EX32(id->mvfr1, MVFR1, FPHP) >= 3;
 }
 
 static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id)
--
2.20.1
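As background (an illustration, not code from the patch): MVFR1.FPHP is
a four-bit ID field in bits [27:24] of MVFR1; value 2 indicates support
for the fp16 conversion instructions only, and 3 adds the fp16
arithmetic instructions, which is why the new check requires >= 3. A
hand-rolled equivalent of the FIELD_EX32() test would look like:

    #include <stdbool.h>
    #include <stdint.h>

    static bool aa32_fp16_arith_supported(uint32_t mvfr1)
    {
        uint32_t fphp = (mvfr1 >> 24) & 0xf;  /* MVFR1.FPHP */
        return fphp >= 3;  /* 3 => fp16 conversion and arithmetic */
    }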
Implement VFP fp16 support for simple binary-operator VFP insns VADD,
VSUB, VMUL, VDIV, VMINNM and VMAXNM:

 * make the VFP_BINOP() macro generate float16 helpers as well as
   float32 and float64
 * implement a do_vfp_3op_hp() function similar to the existing
   do_vfp_3op_sp()
 * add decode for the half-precision insn patterns

Note that the VFP_BINOP macro use creates a couple of unused helper
functions vfp_maxh and vfp_minh, but they're small so it's not worth
splitting the BINOP operations into "needs halfprec" and "no
halfprec" groups.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-4-peter.maydell@linaro.org
---
 target/arm/helper.h            |  8 ++++
 target/arm/vfp-uncond.decode   |  3 ++
 target/arm/vfp.decode          |  4 ++
 target/arm/vfp_helper.c        |  5 ++
 target/arm/translate-vfp.c.inc | 86 ++++++++++++++++++++++++++++++++++
 5 files changed, 106 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, tl, i32, i32, i32)
 DEF_HELPER_1(vfp_get_fpscr, i32, env)
 DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
 
+DEF_HELPER_3(vfp_addh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_subh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_mulh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_divh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_minh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_minnumh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr)
 DEF_HELPER_1(vfp_negs, f32, f32)
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp-uncond.decode
+++ b/target/arm/vfp-uncond.decode
@@ -XXX,XX +XXX,XX @@ VSEL   1111 1110 0. cc:2 .... .... 1010 .0.0 ....  \
 VSEL   1111 1110 0. cc:2 .... .... 1011 .0.0 ....  \
        vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1
 
+VMAXNM_hp  1111 1110 1.00 .... .... 1001 .0.0 ....  @vfp_dnm_s
+VMINNM_hp  1111 1110 1.00 .... .... 1001 .1.0 ....  @vfp_dnm_s
+
 VMAXNM_sp  1111 1110 1.00 .... .... 1010 .0.0 ....  @vfp_dnm_s
 VMINNM_sp  1111 1110 1.00 .... .... 1010 .1.0 ....  @vfp_dnm_s
 
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VNMLS_dp  ---- 1110 0.01 .... .... 1011 .0.0 ....  @vfp_dnm_d
 VNMLA_sp  ---- 1110 0.01 .... .... 1010 .1.0 ....  @vfp_dnm_s
 VNMLA_dp  ---- 1110 0.01 .... .... 1011 .1.0 ....  @vfp_dnm_d
 
+VMUL_hp   ---- 1110 0.10 .... .... 1001 .0.0 ....  @vfp_dnm_s
 VMUL_sp   ---- 1110 0.10 .... .... 1010 .0.0 ....  @vfp_dnm_s
 VMUL_dp   ---- 1110 0.10 .... .... 1011 .0.0 ....  @vfp_dnm_d
 
 VNMUL_sp  ---- 1110 0.10 .... .... 1010 .1.0 ....  @vfp_dnm_s
 VNMUL_dp  ---- 1110 0.10 .... .... 1011 .1.0 ....  @vfp_dnm_d
 
+VADD_hp   ---- 1110 0.11 .... .... 1001 .0.0 ....  @vfp_dnm_s
 VADD_sp   ---- 1110 0.11 .... .... 1010 .0.0 ....  @vfp_dnm_s
 VADD_dp   ---- 1110 0.11 .... .... 1011 .0.0 ....  @vfp_dnm_d
 
+VSUB_hp   ---- 1110 0.11 .... .... 1001 .1.0 ....  @vfp_dnm_s
 VSUB_sp   ---- 1110 0.11 .... .... 1010 .1.0 ....  @vfp_dnm_s
 VSUB_dp   ---- 1110 0.11 .... .... 1011 .1.0 ....  @vfp_dnm_d
 
+VDIV_hp   ---- 1110 1.00 .... .... 1001 .0.0 ....  @vfp_dnm_s
 VDIV_sp   ---- 1110 1.00 .... .... 1010 .0.0 ....  @vfp_dnm_s
 VDIV_dp   ---- 1110 1.00 .... .... 1011 .0.0 ....  @vfp_dnm_d
 
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val)
 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
 
 #define VFP_BINOP(name) \
+dh_ctype_f16 VFP_HELPER(name, h)(dh_ctype_f16 a, dh_ctype_f16 b, void *fpstp) \
+{ \
+    float_status *fpst = fpstp; \
+    return float16_ ## name(a, b, fpst); \
+} \
 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
 { \
     float_status *fpst = fpstp; \
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
     return true;
 }
 
+static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
+                          int vd, int vn, int vm, bool reads_vd)
+{
+    /*
+     * Do a half-precision operation. Functionally this is
+     * the same as do_vfp_3op_sp(), except:
+     *  - it uses the FPST_FPCR_F16
+     *  - it doesn't need the VFP vector handling (fp16 is a
+     *    v8 feature, and in v8 VFP vectors don't exist)
+     *  - it does the aa32_fp16_arith feature test
+     */
+    TCGv_i32 f0, f1, fd;
+    TCGv_ptr fpst;
+
+    if (!dc_isar_feature(aa32_fp16_arith, s)) {
+        return false;
+    }
+
+    if (s->vec_len != 0 || s->vec_stride != 0) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    f0 = tcg_temp_new_i32();
+    f1 = tcg_temp_new_i32();
+    fd = tcg_temp_new_i32();
+    fpst = fpstatus_ptr(FPST_FPCR_F16);
+
+    neon_load_reg32(f0, vn);
+    neon_load_reg32(f1, vm);
+
+    if (reads_vd) {
+        neon_load_reg32(fd, vd);
+    }
+    fn(fd, f0, f1, fpst);
+    neon_store_reg32(fd, vd);
+
+    tcg_temp_free_i32(f0);
+    tcg_temp_free_i32(f1);
+    tcg_temp_free_i32(fd);
+    tcg_temp_free_ptr(fpst);
+
+    return true;
+}
+
 static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                           int vd, int vn, int vm, bool reads_vd)
 {
@@ -XXX,XX +XXX,XX @@ static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
     return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
 }
 
+static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
+}
+
 static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
 {
     return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
@@ -XXX,XX +XXX,XX @@ static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
     return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
 }
 
+static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
+}
+
 static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
 {
     return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
@@ -XXX,XX +XXX,XX @@ static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
     return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
 }
 
+static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
+}
+
 static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
 {
     return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
@@ -XXX,XX +XXX,XX @@ static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
     return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
 }
 
+static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
+}
+
 static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
 {
     return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
@@ -XXX,XX +XXX,XX @@ static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
     return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
 }
 
+static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
+{
+    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+        return false;
+    }
+    return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
+                         a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
+{
+    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+        return false;
+    }
+    return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
+                         a->vd, a->vn, a->vm, false);
+}
+
 static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
 {
     if (!dc_isar_feature(aa32_vminmaxnm, s)) {
--
2.20.1
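To make the macro change concrete, this is approximately what the new
first clause of VFP_BINOP(add) expands to (a sketch: VFP_HELPER(add, h)
resolves to helper_vfp_addh via the HELPER() glue, and dh_ctype_f16 is
the C type used for f16 helper arguments):

    dh_ctype_f16 helper_vfp_addh(dh_ctype_f16 a, dh_ctype_f16 b, void *fpstp)
    {
        float_status *fpst = fpstp;
        return float16_add(a, b, fpst);  /* softfloat half-precision add */
    }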
Implement fp16 versions of the VFP VMLA, VMLS, VNMLS, VNMLA, VNMUL
instructions. (These are all the remaining ones which we implement
via do_vfp_3op_[hsd]p().)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-5-peter.maydell@linaro.org
---
 target/arm/helper.h            |  1 +
 target/arm/vfp.decode          |  5 ++
 target/arm/vfp_helper.c        |  5 ++
 target/arm/translate-vfp.c.inc | 84 ++++++++++++++++++++++++++++++++++
 4 files changed, 95 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr)
 DEF_HELPER_3(vfp_minnumh, f16, f16, f16, ptr)
 DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr)
 DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr)
+DEF_HELPER_1(vfp_negh, f16, f16)
 DEF_HELPER_1(vfp_negs, f32, f32)
 DEF_HELPER_1(vfp_negd, f64, f64)
 DEF_HELPER_1(vfp_abss, f32, f32)
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \
              vd=%vd_dp p=1 u=0 w=1
 
 # 3-register VFP data-processing; bits [23,21:20,6] identify the operation.
+VMLA_hp   ---- 1110 0.00 .... .... 1001 .0.0 ....  @vfp_dnm_s
 VMLA_sp   ---- 1110 0.00 .... .... 1010 .0.0 ....  @vfp_dnm_s
 VMLA_dp   ---- 1110 0.00 .... .... 1011 .0.0 ....  @vfp_dnm_d
 
+VMLS_hp   ---- 1110 0.00 .... .... 1001 .1.0 ....  @vfp_dnm_s
 VMLS_sp   ---- 1110 0.00 .... .... 1010 .1.0 ....  @vfp_dnm_s
 VMLS_dp   ---- 1110 0.00 .... .... 1011 .1.0 ....  @vfp_dnm_d
 
+VNMLS_hp  ---- 1110 0.01 .... .... 1001 .0.0 ....  @vfp_dnm_s
 VNMLS_sp  ---- 1110 0.01 .... .... 1010 .0.0 ....  @vfp_dnm_s
 VNMLS_dp  ---- 1110 0.01 .... .... 1011 .0.0 ....  @vfp_dnm_d
 
+VNMLA_hp  ---- 1110 0.01 .... .... 1001 .1.0 ....  @vfp_dnm_s
 VNMLA_sp  ---- 1110 0.01 .... .... 1010 .1.0 ....  @vfp_dnm_s
 VNMLA_dp  ---- 1110 0.01 .... .... 1011 .1.0 ....  @vfp_dnm_d
 
@@ -XXX,XX +XXX,XX @@ VMUL_hp   ---- 1110 0.10 .... .... 1001 .0.0 ....  @vfp_dnm_s
 VMUL_sp   ---- 1110 0.10 .... .... 1010 .0.0 ....  @vfp_dnm_s
 VMUL_dp   ---- 1110 0.10 .... .... 1011 .0.0 ....  @vfp_dnm_d
 
+VNMUL_hp  ---- 1110 0.10 .... .... 1001 .1.0 ....  @vfp_dnm_s
 VNMUL_sp  ---- 1110 0.10 .... .... 1010 .1.0 ....  @vfp_dnm_s
 VNMUL_dp  ---- 1110 0.10 .... .... 1011 .1.0 ....  @vfp_dnm_d
 
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ VFP_BINOP(minnum)
 VFP_BINOP(maxnum)
 #undef VFP_BINOP
 
+dh_ctype_f16 VFP_HELPER(neg, h)(dh_ctype_f16 a)
+{
+    return float16_chs(a);
+}
+
 float32 VFP_HELPER(neg, s)(float32 a)
 {
     return float32_chs(a);
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
     return true;
 }
 
+static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /* Note that order of inputs to the add matters for NaNs */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
+    gen_helper_vfp_addh(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
+}
+
 static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
 {
     /* Note that order of inputs to the add matters for NaNs */
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
     return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
 }
 
+static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /*
+     * VMLS: vd = vd + -(vn * vm)
+     * Note that order of inputs to the add matters for NaNs.
+     */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
+    gen_helper_vfp_negh(tmp, tmp);
+    gen_helper_vfp_addh(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
+}
+
 static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
 {
     /*
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
     return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
 }
 
+static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /*
+     * VNMLS: -fd + (fn * fm)
+     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
+     * plausible looking simplifications because this will give wrong results
+     * for NaNs.
+     */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
+    gen_helper_vfp_negh(vd, vd);
+    gen_helper_vfp_addh(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
+}
+
 static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
 {
     /*
@@ -XXX,XX +XXX,XX @@ static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
     return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
 }
 
+static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /* VNMLA: -fd + -(fn * fm) */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
+    gen_helper_vfp_negh(tmp, tmp);
+    gen_helper_vfp_negh(vd, vd);
+    gen_helper_vfp_addh(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
+}
+
 static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
 {
     /* VNMLA: -fd + -(fn * fm) */
@@ -XXX,XX +XXX,XX @@ static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
     return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
 }
 
+static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /* VNMUL: -(fn * fm) */
+    gen_helper_vfp_mulh(vd, vn, vm, fpst);
+    gen_helper_vfp_negh(vd, vd);
+}
+
+static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
+{
+    return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
+}
+
 static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
 {
     /* VNMUL: -(fn * fm) */
--
2.20.1
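A note on the NaN-ordering caveats in the comments above (an
illustration, not part of the patch): with Arm's default NaN
propagation the result NaN comes from the first operand that is a NaN,
so assuming vd = NaN_a and (vn * vm) = NaN_b:

    /* neg(vd); add(vd, tmp)  -> propagates NaN_a (vd is operand 1)
     * sub(tmp, vd)           -> propagates NaN_b (tmp is operand 1)
     * The two sequences agree for ordinary numbers but differ for
     * NaNs, which is why -A + B must not be rewritten as B - A. */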
Implement FP16 support for the Neon insns which use the DO_3S_FP_GVEC
macro: VADD, VSUB, VABD, VMUL.

For VABD this requires us to implement a new gvec_fabd_h helper
using the machinery we have already for the other helpers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-24-peter.maydell@linaro.org
---
 target/arm/helper.h             |  1 +
 target/arm/vec_helper.c         |  6 ++++++
 target/arm/translate-neon.c.inc | 36 +++++++++++++++++----------------
 3 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat)
     return result;
 }
 
+static float16 float16_abd(float16 op1, float16 op2, float_status *stat)
+{
+    return float16_abs(float16_sub(op1, op2, stat));
+}
+
 static float32 float32_abd(float32 op1, float32 op2, float_status *stat)
 {
     return float32_abs(float32_sub(op1, op2, stat));
@@ -XXX,XX +XXX,XX @@ DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16)
 DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32)
 DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)
 
+DO_3OP(gvec_fabd_h, float16_abd, float16)
 DO_3OP(gvec_fabd_s, float32_abd, float32)
 
 #ifdef TARGET_AARCH64
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_3same_fp(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn,
     return true;
 }
 
-/*
- * For all the functions using this macro, size == 1 means fp16,
- * which is an architecture extension we don't implement yet.
- */
-#define DO_3S_FP_GVEC(INSN,FUNC)                                    \
-    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,     \
-                                uint32_t rn_ofs, uint32_t rm_ofs,   \
-                                uint32_t oprsz, uint32_t maxsz)     \
+#define WRAP_FP_GVEC(WRAPNAME, FPST, FUNC)                          \
+    static void WRAPNAME(unsigned vece, uint32_t rd_ofs,            \
+                         uint32_t rn_ofs, uint32_t rm_ofs,          \
+                         uint32_t oprsz, uint32_t maxsz)            \
     {                                                               \
-        TCGv_ptr fpst = fpstatus_ptr(FPST_STD);                     \
+        TCGv_ptr fpst = fpstatus_ptr(FPST);                         \
         tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst,            \
                            oprsz, maxsz, 0, FUNC);                  \
         tcg_temp_free_ptr(fpst);                                    \
-    }                                                               \
+    }
+
+#define DO_3S_FP_GVEC(INSN,SFUNC,HFUNC)                             \
+    WRAP_FP_GVEC(gen_##INSN##_fp32_3s, FPST_STD, SFUNC)             \
+    WRAP_FP_GVEC(gen_##INSN##_fp16_3s, FPST_STD_F16, HFUNC)         \
     static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
     {                                                               \
         if (a->size != 0) {                                         \
-            /* TODO fp16 support */                                 \
-            return false;                                           \
+            if (!dc_isar_feature(aa32_fp16_arith, s)) {             \
+                return false;                                       \
+            }                                                       \
+            return do_3same(s, a, gen_##INSN##_fp16_3s);            \
         }                                                           \
-        return do_3same(s, a, gen_##INSN##_3s);                     \
+        return do_3same(s, a, gen_##INSN##_fp32_3s);                \
     }
 
 
-DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s)
-DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s)
-DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s)
-DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s)
+DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s, gen_helper_gvec_fadd_h)
+DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s, gen_helper_gvec_fsub_h)
+DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s, gen_helper_gvec_fabd_h)
+DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s, gen_helper_gvec_fmul_h)
 
 /*
  * For all the functions using this macro, size == 1 means fp16,
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

This function makes it clear that we're talking about the whole
register, and not the 32-bit piece at index 0. This fixes a bug
when running on a big-endian host.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          |  8 ++++++
 target/arm/translate-neon.c.inc | 44 ++++++++++++++++-----------------
 target/arm/translate-vfp.c.inc  |  2 +-
 3 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
     unallocated_encoding(s);
 }
 
+/*
+ * Return the offset of a "full" NEON Dreg.
+ */
+static long neon_full_reg_offset(unsigned reg)
+{
+    return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
+}
+
 static inline long vfp_reg_offset(bool dp, unsigned reg)
 {
     if (dp) {
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ neon_element_offset(int reg, int element, MemOp size)
         ofs ^= 8 - element_size;
     }
 #endif
-    return neon_reg_offset(reg, 0) + ofs;
+    return neon_full_reg_offset(reg) + ofs;
 }
 
 static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
                  * We cannot write 16 bytes at once because the
                  * destination is unaligned.
                  */
-                tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
+                tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
                                      8, 8, tmp);
-                tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
-                                 neon_reg_offset(vd, 0), 8, 8);
+                tcg_gen_gvec_mov(0, neon_full_reg_offset(vd + 1),
+                                 neon_full_reg_offset(vd), 8, 8);
             } else {
-                tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
+                tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
                                      vec_size, vec_size, tmp);
             }
             tcg_gen_addi_i32(addr, addr, 1 << size);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
 static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
 {
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rn_ofs = neon_reg_offset(a->vn, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rn_ofs = neon_full_reg_offset(a->vn);
+    int rm_ofs = neon_full_reg_offset(a->vm);
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
 {
     /* Handle a 2-reg-shift insn which can be vectorized. */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
 {
     /* FP operations in 2-reg-and-shift group */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);
     TCGv_ptr fpst;
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
@@ -XXX,XX +XXX,XX @@ static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
         return true;
     }
 
-    reg_ofs = neon_reg_offset(a->vd, 0);
+    reg_ofs = neon_full_reg_offset(a->vd);
     vec_size = a->q ? 16 : 8;
     imm = asimd_imm_const(a->imm, a->cmode, a->op);
 
@@ -XXX,XX +XXX,XX @@ static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
         return true;
     }
 
-    tcg_gen_gvec_3_ool(neon_reg_offset(a->vd, 0),
-                       neon_reg_offset(a->vn, 0),
-                       neon_reg_offset(a->vm, 0),
+    tcg_gen_gvec_3_ool(neon_full_reg_offset(a->vd),
+                       neon_full_reg_offset(a->vn),
+                       neon_full_reg_offset(a->vm),
                        16, 16, 0, fn_gvec);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
 {
     /* Two registers and a scalar, using gvec */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rn_ofs = neon_reg_offset(a->vn, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rn_ofs = neon_full_reg_offset(a->vn);
     int rm_ofs;
     int idx;
     TCGv_ptr fpstatus;
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
     /* a->vm is M:Vm, which encodes both register and index */
     idx = extract32(a->vm, a->size + 2, 2);
     a->vm = extract32(a->vm, 0, a->size + 2);
-    rm_ofs = neon_reg_offset(a->vm, 0);
+    rm_ofs = neon_full_reg_offset(a->vm);
 
     fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD);
     tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus,
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
     return true;
 }
 
-    tcg_gen_gvec_dup_mem(a->size, neon_reg_offset(a->vd, 0),
+    tcg_gen_gvec_dup_mem(a->size, neon_full_reg_offset(a->vd),
                          neon_element_offset(a->vm, a->index, a->size),
                          a->q ? 16 : 8, a->q ? 16 : 8);
     return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
 static bool do_2misc_vec(DisasContext *s, arg_2misc *a, GVecGen2Fn *fn)
 {
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
     }
 
     tmp = load_reg(s, a->rt);
-    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
+    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                          vec_size, vec_size, tmp);
     tcg_temp_free_i32(tmp);
--
2.20.1
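A sketch of how neon_full_reg_offset() lays AArch32 D registers over
the shared register file (assumed mapping, following the offsetof()
expression in the patch): d<2n> and d<2n+1> are the two 64-bit halves
of zregs[n], so for example:

    /* d0 -> vfp.zregs[0].d[0]   (0 >> 1 == 0, 0 & 1 == 0)
     * d1 -> vfp.zregs[0].d[1]   (1 >> 1 == 0, 1 & 1 == 1)
     * d5 -> vfp.zregs[2].d[1]   (5 >> 1 == 2, 5 & 1 == 1)
     * Taking offsetof() of the whole 64-bit element, rather than of
     * the 32-bit piece at index 0, is what keeps this correct on
     * big-endian hosts. */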
Convert the Neon VRECPS insn to using a gvec helper, and
use this to implement the fp16 case.

The phrasing of the new float32_recps_nf() is slightly different from
the old recps_f32() so that it parallels the f16 version; for f16 we
can't assume that flush-to-zero is always enabled.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-34-peter.maydell@linaro.org
---
 target/arm/helper.h             |  4 +++-
 target/arm/vec_helper.c         | 31 +++++++++++++++++++++++++++++++
 target/arm/vfp_helper.c         | 13 -------------
 target/arm/translate-neon.c.inc | 21 +--------------------
 4 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
 DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
 DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, ptr)
 
-DEF_HELPER_3(recps_f32, f32, env, f32, f32)
 DEF_HELPER_3(rsqrts_f32, f32, env, f32, f32)
 DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, ptr)
 DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i3
 DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ static float32 float32_abd(float32 op1, float32 op2, float_status *stat)
     return float32_abs(float32_sub(op1, op2, stat));
 }
 
+/*
+ * Reciprocal step. These are the AArch32 version which uses a
+ * non-fused multiply-and-subtract.
+ */
+static float16 float16_recps_nf(float16 op1, float16 op2, float_status *stat)
+{
+    op1 = float16_squash_input_denormal(op1, stat);
+    op2 = float16_squash_input_denormal(op2, stat);
+
+    if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
+        (float16_is_infinity(op2) && float16_is_zero(op1))) {
+        return float16_two;
+    }
+    return float16_sub(float16_two, float16_mul(op1, op2, stat), stat);
+}
+
+static float32 float32_recps_nf(float32 op1, float32 op2, float_status *stat)
+{
+    op1 = float32_squash_input_denormal(op1, stat);
+    op2 = float32_squash_input_denormal(op2, stat);
+
+    if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
+        (float32_is_infinity(op2) && float32_is_zero(op1))) {
+        return float32_two;
+    }
+    return float32_sub(float32_two, float32_mul(op1, op2, stat), stat);
+}
+
 #define DO_3OP(NAME, FUNC, TYPE) \
 void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
 { \
@@ -XXX,XX +XXX,XX @@ DO_3OP(gvec_fmaxnum_s, float32_maxnum, float32)
 DO_3OP(gvec_fminnum_h, float16_minnum, float16)
 DO_3OP(gvec_fminnum_s, float32_minnum, float32)
 
+DO_3OP(gvec_recps_nf_h, float16_recps_nf, float16)
+DO_3OP(gvec_recps_nf_s, float32_recps_nf, float32)
+
 #ifdef TARGET_AARCH64
 
 DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
     return r;
 }
 
-float32 HELPER(recps_f32)(CPUARMState *env, float32 a, float32 b)
-{
-    float_status *s = &env->vfp.standard_fp_status;
-    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
-        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
-        if (!(float32_is_zero(a) || float32_is_zero(b))) {
-            float_raise(float_flag_input_denormal, s);
-        }
-        return float32_two;
-    }
-    return float32_sub(float32_two, float32_mul(a, b, s), s);
-}
-
 float32 HELPER(rsqrts_f32)(CPUARMState *env, float32 a, float32 b)
 {
     float_status *s = &env->vfp.standard_fp_status;
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_s, gen_helper_gvec_fmla_h)
 DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_s, gen_helper_gvec_fmls_h)
 DO_3S_FP_GVEC(VFMA, gen_helper_gvec_vfma_s, gen_helper_gvec_vfma_h)
 DO_3S_FP_GVEC(VFMS, gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_h)
+DO_3S_FP_GVEC(VRECPS, gen_helper_gvec_recps_nf_s, gen_helper_gvec_recps_nf_h)
 
 WRAP_FP_GVEC(gen_VMAXNM_fp32_3s, FPST_STD, gen_helper_gvec_fmaxnum_s)
 WRAP_FP_GVEC(gen_VMAXNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fmaxnum_h)
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
     return do_3same(s, a, gen_VMINNM_fp32_3s);
 }
 
-WRAP_ENV_FN(gen_VRECPS_tramp, gen_helper_recps_f32)
-
-static void gen_VRECPS_fp_3s(unsigned vece, uint32_t rd_ofs,
-                             uint32_t rn_ofs, uint32_t rm_ofs,
-                             uint32_t oprsz, uint32_t maxsz)
-{
-    static const GVecGen3 ops = { .fni4 = gen_VRECPS_tramp };
-    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
-}
-
-static bool trans_VRECPS_fp_3s(DisasContext *s, arg_3same *a)
-{
-    if (a->size != 0) {
-        /* TODO fp16 support */
-        return false;
-    }
-
-    return do_3same(s, a, gen_VRECPS_fp_3s);
-}
-
 WRAP_ENV_FN(gen_VRSQRTS_tramp, gen_helper_rsqrts_f32)
 
 static void gen_VRSQRTS_fp_3s(unsigned vece, uint32_t rd_ofs,
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

This will shortly have users outside of translate-neon.c.inc.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          | 20 ++++++++++++++++++++
 target/arm/translate-neon.c.inc | 19 -------------------
 2 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static long neon_full_reg_offset(unsigned reg)
     return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
 }
 
+/*
+ * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
+ * where 0 is the least significant end of the register.
+ */
+static long neon_element_offset(int reg, int element, MemOp size)
+{
+    int element_size = 1 << size;
+    int ofs = element * element_size;
+#ifdef HOST_WORDS_BIGENDIAN
+    /*
+     * Calculate the offset assuming fully little-endian,
+     * then XOR to account for the order of the 8-byte units.
+     */
+    if (element_size < 8) {
+        ofs ^= 8 - element_size;
+    }
+#endif
+    return neon_full_reg_offset(reg) + ofs;
+}
+
 static inline long vfp_reg_offset(bool dp, unsigned reg)
 {
     if (dp) {
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static inline int neon_3same_fp_size(DisasContext *s, int x)
 #include "decode-neon-ls.c.inc"
 #include "decode-neon-shared.c.inc"
 
-/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
- * where 0 is the least significant end of the register.
- */
-static inline long
-neon_element_offset(int reg, int element, MemOp size)
-{
-    int element_size = 1 << size;
-    int ofs = element * element_size;
-#ifdef HOST_WORDS_BIGENDIAN
-    /* Calculate the offset assuming fully little-endian,
-     * then XOR to account for the order of the 8-byte units.
-     */
-    if (element_size < 8) {
-        ofs ^= 8 - element_size;
-    }
-#endif
-    return neon_full_reg_offset(reg) + ofs;
-}
-
 static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
 {
     long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
--
2.20.1
Macroify the uses of do_vfp_2op_sp() and do_vfp_2op_dp(); this will
make it easier to add the halfprec support.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-8-peter.maydell@linaro.org
---
 target/arm/translate-vfp.c.inc | 49 ++++++++++------------------------
 1 file changed, 14 insertions(+), 35 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
     return true;
 }
 
-static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
-{
-    return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
-}
+#define DO_VFP_2OP(INSN, PREC, FN)                              \
+    static bool trans_##INSN##_##PREC(DisasContext *s,          \
+                                      arg_##INSN##_##PREC *a)   \
+    {                                                           \
+        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);          \
+    }
 
-static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
-{
-    return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
-}
+DO_VFP_2OP(VMOV_reg, sp, tcg_gen_mov_i32)
+DO_VFP_2OP(VMOV_reg, dp, tcg_gen_mov_i64)
 
-static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
-{
-    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
-}
+DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss)
+DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd)
 
-static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
-{
-    return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
-}
-
-static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
-{
-    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
-}
-
-static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
-{
-    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
-}
+DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs)
+DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd)
 
 static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
 {
     gen_helper_vfp_sqrts(vd, vm, cpu_env);
 }
 
-static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
-{
-    return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
-}
-
 static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
 {
     gen_helper_vfp_sqrtd(vd, vm, cpu_env);
 }
 
-static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
-{
-    return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
-}
+DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp)
+DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp)
 
 static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
 {
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

These are the only users of neon_reg_offset, so remove that.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline long vfp_reg_offset(bool dp, unsigned reg)
     }
 }
 
-/* Return the offset of a 32-bit piece of a NEON register.
-   zero is the least significant end of the register.  */
-static inline long
-neon_reg_offset (int reg, int n)
-{
-    int sreg;
-    sreg = reg * 2 + n;
-    return vfp_reg_offset(0, sreg);
-}
-
 static TCGv_i32 neon_load_reg(int reg, int pass)
 {
     TCGv_i32 tmp = tcg_temp_new_i32();
-    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
+    tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32));
     return tmp;
 }
 
 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
 {
-    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
+    tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32));
     tcg_temp_free_i32(var);
 }
 
--
2.20.1
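For clarity, each DO_VFP_2OP() use expands to exactly the shape of the
trans function it replaces; for example DO_VFP_2OP(VABS, sp,
gen_helper_vfp_abss) becomes (approximately):

    static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
    {
        return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
    }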
Convert the Neon floating-point VMUL, VMLA and VMLS to use gvec,
and use this to implement fp16 support.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-45-peter.maydell@linaro.org
---
 target/arm/translate-neon.c.inc | 114 ++++++++++++++++----------------
 1 file changed, 57 insertions(+), 57 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLS_2sc(DisasContext *s, arg_2scalar *a)
     return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
 }
 
-/*
- * Rather than have a float-specific version of do_2scalar just for
- * three insns, we wrap a NeonGenTwoSingleOpFn to turn it into
- * a NeonGenTwoOpFn.
- */
-#define WRAP_FP_FN(WRAPNAME, FUNC)                              \
-    static void WRAPNAME(TCGv_i32 rd, TCGv_i32 rn, TCGv_i32 rm) \
-    {                                                           \
-        TCGv_ptr fpstatus = fpstatus_ptr(FPST_STD);             \
-        FUNC(rd, rn, rm, fpstatus);                             \
-        tcg_temp_free_ptr(fpstatus);                            \
-    }
+static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
+                              gen_helper_gvec_3_ptr *fn)
+{
+    /* Two registers and a scalar, using gvec */
+    int vec_size = a->q ? 16 : 8;
+    int rd_ofs = neon_reg_offset(a->vd, 0);
+    int rn_ofs = neon_reg_offset(a->vn, 0);
+    int rm_ofs;
+    int idx;
+    TCGv_ptr fpstatus;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return false;
+    }
 
-WRAP_FP_FN(gen_VMUL_F_mul, gen_helper_vfp_muls)
-WRAP_FP_FN(gen_VMUL_F_add, gen_helper_vfp_adds)
-WRAP_FP_FN(gen_VMUL_F_sub, gen_helper_vfp_subs)
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
 
-static bool trans_VMUL_F_2sc(DisasContext *s, arg_2scalar *a)
-{
-    static NeonGenTwoOpFn * const opfn[] = {
-        NULL,
-        NULL, /* TODO: fp16 support */
-        gen_VMUL_F_mul,
-        NULL,
-    };
+    if (!fn) {
+        /* Bad size (including size == 3, which is a different insn group) */
+        return false;
+    }
 
-    return do_2scalar(s, a, opfn[a->size], NULL);
+    if (a->q && ((a->vd | a->vn) & 1)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    /* a->vm is M:Vm, which encodes both register and index */
+    idx = extract32(a->vm, a->size + 2, 2);
+    a->vm = extract32(a->vm, 0, a->size + 2);
+    rm_ofs = neon_reg_offset(a->vm, 0);
+
+    fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD);
+    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus,
+                       vec_size, vec_size, idx, fn);
+    tcg_temp_free_ptr(fpstatus);
+    return true;
 }
 
-static bool trans_VMLA_F_2sc(DisasContext *s, arg_2scalar *a)
-{
-    static NeonGenTwoOpFn * const opfn[] = {
-        NULL,
-        NULL, /* TODO: fp16 support */
-        gen_VMUL_F_mul,
-        NULL,
-    };
-    static NeonGenTwoOpFn * const accfn[] = {
-        NULL,
-        NULL, /* TODO: fp16 support */
-        gen_VMUL_F_add,
-        NULL,
-    };
+#define DO_VMUL_F_2sc(NAME, FUNC)                                       \
+    static bool trans_##NAME##_F_2sc(DisasContext *s, arg_2scalar *a)   \
+    {                                                                   \
+        static gen_helper_gvec_3_ptr * const opfn[] = {                 \
+            NULL,                                                       \
+            gen_helper_##FUNC##_h,                                      \
+            gen_helper_##FUNC##_s,                                      \
+            NULL,                                                       \
+        };                                                              \
+        if (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s)) { \
+            return false;                                               \
+        }                                                               \
+        return do_2scalar_fp_vec(s, a, opfn[a->size]);                  \
+    }
 
-    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
-}
-
-static bool trans_VMLS_F_2sc(DisasContext *s, arg_2scalar *a)
-{
-    static NeonGenTwoOpFn * const opfn[] = {
-        NULL,
-        NULL, /* TODO: fp16 support */
-        gen_VMUL_F_mul,
-        NULL,
-    };
-    static NeonGenTwoOpFn * const accfn[] = {
-        NULL,
-        NULL, /* TODO: fp16 support */
-        gen_VMUL_F_sub,
-        NULL,
-    };
-
-    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
-}
+DO_VMUL_F_2sc(VMUL, gvec_fmul_idx)
+DO_VMUL_F_2sc(VMLA, gvec_fmla_nf_idx)
+DO_VMUL_F_2sc(VMLS, gvec_fmls_nf_idx)
 
 WRAP_ENV_FN(gen_VQDMULH_16, gen_helper_neon_qdmulh_s16)
 WRAP_ENV_FN(gen_VQDMULH_32, gen_helper_neon_qdmulh_s32)
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

This seems a bit more readable than using offsetof CPU_DoubleU.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static long neon_element_offset(int reg, int element, MemOp size)
     return neon_full_reg_offset(reg) + ofs;
 }
 
-static inline long vfp_reg_offset(bool dp, unsigned reg)
+/* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
+static long vfp_reg_offset(bool dp, unsigned reg)
 {
     if (dp) {
-        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
+        return neon_element_offset(reg, 0, MO_64);
     } else {
-        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
-        if (reg & 1) {
-            ofs += offsetof(CPU_DoubleU, l.upper);
-        } else {
-            ofs += offsetof(CPU_DoubleU, l.lower);
-        }
-        return ofs;
+        return neon_element_offset(reg >> 1, reg & 1, MO_32);
     }
 }
 
--
2.20.1
diff view generated by jsdifflib
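A worked example of the scalar-index decode in do_2scalar_fp_vec() above,
using a hypothetical operand value (not taken from the patch): for a
half-precision op a->size is 1, so the index is bits [4:3] of a->vm and the
register is bits [2:0]:

    /* Hypothetical encoding: a->size = 1 (fp16), incoming a->vm = 0b10110 */
    int idx = extract32(0x16, 1 + 2, 2); /* bits [4:3] -> lane 2 */
    int vm  = extract32(0x16, 0, 1 + 2); /* bits [2:0] -> register D6 */

i.e. the scalar is element 2 of D6. For single-precision (a->size == 2) the
split moves up one bit, giving a 4-bit register field and a 1-bit index.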
From: Richard Henderson <richard.henderson@linaro.org>

Model these off the aa64 read/write_vec_element functions.
Use them within translate-neon.c.inc. The new functions do
not allocate or free temps, so this rearranges the calling
code a bit.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-6-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          |  26 ++++
 target/arm/translate-neon.c.inc | 256 ++++++++++++++++++++------------
 2 files changed, 183 insertions(+), 99 deletions(-)

Convert the Neon pairwise fp ops to use a single gvec-style
helper to do the full operation instead of one helper call
for each 32-bit part. This allows us to use the same
framework to implement the fp16 case.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-36-peter.maydell@linaro.org
---
 target/arm/helper.h             |  7 +++++
 target/arm/vec_helper.c         | 45 +++++++++++++++++++++++++++++++++
 target/arm/translate-neon.c.inc | 42 ++++++++++++------------------
 3 files changed, 68 insertions(+), 26 deletions(-)
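The "rearranges the calling code a bit" point is about temp ownership:
neon_load_reg() returned a freshly allocated temp, while the new
read_neon_element32() fills in a caller-provided one. A condensed sketch of
the before/after pattern (illustrative, distilled from the hunks below):

    /* old: helper allocates, neon_store_reg frees */
    tmp = neon_load_reg(a->vm, pass);
    fn(tmp, tmp);
    neon_store_reg(a->vd, pass, tmp);

    /* new: caller allocates once, reuses across passes, frees at the end */
    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vm, pass, MO_32);
    fn(tmp, tmp);
    write_neon_element32(tmp, a->vd, pass, MO_32);
    tcg_temp_free_i32(tmp);

which is why several loops below hoist the tcg_temp_new_i32() and
tcg_temp_free_i32() calls out of the per-pass body.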
14
16
15
diff --git a/target/arm/helper.h b/target/arm/helper.h
17
diff --git a/target/arm/translate.c b/target/arm/translate.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.h
19
--- a/target/arm/translate.c
18
+++ b/target/arm/helper.h
20
+++ b/target/arm/translate.c
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
21
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg32(TCGv_i32 var, int reg)
20
DEF_HELPER_FLAGS_5(gvec_fcmlad, TCG_CALL_NO_RWG,
22
tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
21
void, ptr, ptr, ptr, ptr, i32)
23
}
22
24
23
+DEF_HELPER_FLAGS_5(neon_paddh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
25
+static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
24
+DEF_HELPER_FLAGS_5(neon_pmaxh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
26
+{
25
+DEF_HELPER_FLAGS_5(neon_pminh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
27
+ long off = neon_element_offset(reg, ele, size);
26
+DEF_HELPER_FLAGS_5(neon_padds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
28
+
27
+DEF_HELPER_FLAGS_5(neon_pmaxs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
29
+ switch (size) {
28
+DEF_HELPER_FLAGS_5(neon_pmins, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
30
+ case MO_32:
29
+
31
+ tcg_gen_ld_i32(dest, cpu_env, off);
30
DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
32
+ break;
31
DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
33
+ default:
32
DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
34
+ g_assert_not_reached();
33
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/vec_helper.c
36
+++ b/target/arm/vec_helper.c
37
@@ -XXX,XX +XXX,XX @@ DO_ABA(gvec_uaba_s, uint32_t)
38
DO_ABA(gvec_uaba_d, uint64_t)
39
40
#undef DO_ABA
41
+
42
+#define DO_NEON_PAIRWISE(NAME, OP) \
43
+ void HELPER(NAME##s)(void *vd, void *vn, void *vm, \
44
+ void *stat, uint32_t oprsz) \
45
+ { \
46
+ float_status *fpst = stat; \
47
+ float32 *d = vd; \
48
+ float32 *n = vn; \
49
+ float32 *m = vm; \
50
+ float32 r0, r1; \
51
+ \
52
+ /* Read all inputs before writing outputs in case vm == vd */ \
53
+ r0 = float32_##OP(n[H4(0)], n[H4(1)], fpst); \
54
+ r1 = float32_##OP(m[H4(0)], m[H4(1)], fpst); \
55
+ \
56
+ d[H4(0)] = r0; \
57
+ d[H4(1)] = r1; \
58
+ } \
59
+ \
60
+ void HELPER(NAME##h)(void *vd, void *vn, void *vm, \
61
+ void *stat, uint32_t oprsz) \
62
+ { \
63
+ float_status *fpst = stat; \
64
+ float16 *d = vd; \
65
+ float16 *n = vn; \
66
+ float16 *m = vm; \
67
+ float16 r0, r1, r2, r3; \
68
+ \
69
+ /* Read all inputs before writing outputs in case vm == vd */ \
70
+ r0 = float16_##OP(n[H2(0)], n[H2(1)], fpst); \
71
+ r1 = float16_##OP(n[H2(2)], n[H2(3)], fpst); \
72
+ r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst); \
73
+ r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst); \
74
+ \
75
+ d[H2(0)] = r0; \
76
+ d[H2(1)] = r1; \
77
+ d[H2(2)] = r2; \
78
+ d[H2(3)] = r3; \
79
+ }
35
+ }
80
+
36
+}
81
+DO_NEON_PAIRWISE(neon_padd, add)
37
+
82
+DO_NEON_PAIRWISE(neon_pmax, max)
38
+static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp size)
83
+DO_NEON_PAIRWISE(neon_pmin, min)
39
+{
84
+
40
+ long off = neon_element_offset(reg, ele, size);
85
+#undef DO_NEON_PAIRWISE
41
+
42
+ switch (size) {
43
+ case MO_32:
44
+ tcg_gen_st_i32(src, cpu_env, off);
45
+ break;
46
+ default:
47
+ g_assert_not_reached();
48
+ }
49
+}
50
+
51
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
52
{
53
TCGv_ptr ret = tcg_temp_new_ptr();
86
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
54
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
87
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
88
--- a/target/arm/translate-neon.c.inc
56
--- a/target/arm/translate-neon.c.inc
89
+++ b/target/arm/translate-neon.c.inc
57
+++ b/target/arm/translate-neon.c.inc
90
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
58
@@ -XXX,XX +XXX,XX @@ static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn)
91
return do_3same(s, a, gen_VMINNM_fp32_3s);
59
* early. Since Q is 0 there are always just two passes, so instead
92
}
60
* of a complicated loop over each pass we just unroll.
93
61
*/
94
-static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
95
+static bool do_3same_fp_pair(DisasContext *s, arg_3same *a,
96
+ gen_helper_gvec_3_ptr *fn)
97
{
98
- /* FP operations handled pairwise 32 bits at a time */
99
- TCGv_i32 tmp, tmp2, tmp3;
100
+ /* FP pairwise operations */
101
TCGv_ptr fpstatus;
102
103
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
104
@@ -XXX,XX +XXX,XX @@ static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
105
106
assert(a->q == 0); /* enforced by decode patterns */
107
108
- /*
109
- * Note that we have to be careful not to clobber the source operands
110
- * in the "vm == vd" case by storing the result of the first pass too
111
- * early. Since Q is 0 there are always just two passes, so instead
112
- * of a complicated loop over each pass we just unroll.
113
- */
114
- fpstatus = fpstatus_ptr(FPST_STD);
115
- tmp = neon_load_reg(a->vn, 0);
62
- tmp = neon_load_reg(a->vn, 0);
116
- tmp2 = neon_load_reg(a->vn, 1);
63
- tmp2 = neon_load_reg(a->vn, 1);
117
- fn(tmp, tmp, tmp2, fpstatus);
64
+ tmp = tcg_temp_new_i32();
65
+ tmp2 = tcg_temp_new_i32();
66
+ tmp3 = tcg_temp_new_i32();
67
+
68
+ read_neon_element32(tmp, a->vn, 0, MO_32);
69
+ read_neon_element32(tmp2, a->vn, 1, MO_32);
70
fn(tmp, tmp, tmp2);
118
- tcg_temp_free_i32(tmp2);
71
- tcg_temp_free_i32(tmp2);
119
72
120
- tmp3 = neon_load_reg(a->vm, 0);
73
- tmp3 = neon_load_reg(a->vm, 0);
121
- tmp2 = neon_load_reg(a->vm, 1);
74
- tmp2 = neon_load_reg(a->vm, 1);
122
- fn(tmp3, tmp3, tmp2, fpstatus);
75
+ read_neon_element32(tmp3, a->vm, 0, MO_32);
76
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
77
fn(tmp3, tmp3, tmp2);
123
- tcg_temp_free_i32(tmp2);
78
- tcg_temp_free_i32(tmp2);
124
+ fpstatus = fpstatus_ptr(a->size != 0 ? FPST_STD_F16 : FPST_STD);
125
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
126
+ vfp_reg_offset(1, a->vn),
127
+ vfp_reg_offset(1, a->vm),
128
+ fpstatus, 8, 8, 0, fn);
129
tcg_temp_free_ptr(fpstatus);
130
79
131
- neon_store_reg(a->vd, 0, tmp);
80
- neon_store_reg(a->vd, 0, tmp);
132
- neon_store_reg(a->vd, 1, tmp3);
81
- neon_store_reg(a->vd, 1, tmp3);
133
return true;
82
+ write_neon_element32(tmp, a->vd, 0, MO_32);
134
}
83
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
135
84
+
136
@@ -XXX,XX +XXX,XX @@ static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
85
+ tcg_temp_free_i32(tmp);
137
static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
86
+ tcg_temp_free_i32(tmp2);
138
{ \
87
+ tcg_temp_free_i32(tmp3);
139
if (a->size != 0) { \
88
return true;
140
- /* TODO fp16 support */ \
89
}
141
- return false; \
90
142
+ if (!dc_isar_feature(aa32_fp16_arith, s)) { \
91
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
143
+ return false; \
92
* 2-reg-and-shift operations, size < 3 case, where the
144
+ } \
93
* helper needs to be passed cpu_env.
145
+ return do_3same_fp_pair(s, a, FUNC##h); \
94
*/
146
} \
95
- TCGv_i32 constimm;
147
- return do_3same_fp_pair(s, a, FUNC); \
96
+ TCGv_i32 constimm, tmp;
148
+ return do_3same_fp_pair(s, a, FUNC##s); \
97
int pass;
149
}
98
150
99
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
151
-DO_3S_FP_PAIR(VPADD, gen_helper_vfp_adds)
100
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
152
-DO_3S_FP_PAIR(VPMAX, gen_helper_vfp_maxs)
101
* by immediate using the variable shift operations.
153
-DO_3S_FP_PAIR(VPMIN, gen_helper_vfp_mins)
102
*/
154
+DO_3S_FP_PAIR(VPADD, gen_helper_neon_padd)
103
constimm = tcg_const_i32(dup_const(a->size, a->shift));
155
+DO_3S_FP_PAIR(VPMAX, gen_helper_neon_pmax)
104
+ tmp = tcg_temp_new_i32();
156
+DO_3S_FP_PAIR(VPMIN, gen_helper_neon_pmin)
105
157
106
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
158
static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
107
- TCGv_i32 tmp = neon_load_reg(a->vm, pass);
108
+ read_neon_element32(tmp, a->vm, pass, MO_32);
109
fn(tmp, cpu_env, tmp, constimm);
110
- neon_store_reg(a->vd, pass, tmp);
111
+ write_neon_element32(tmp, a->vd, pass, MO_32);
112
}
113
+ tcg_temp_free_i32(tmp);
114
tcg_temp_free_i32(constimm);
115
return true;
116
}
117
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
118
constimm = tcg_const_i64(-a->shift);
119
rm1 = tcg_temp_new_i64();
120
rm2 = tcg_temp_new_i64();
121
+ rd = tcg_temp_new_i32();
122
123
/* Load both inputs first to avoid potential overwrite if rm == rd */
124
neon_load_reg64(rm1, a->vm);
125
neon_load_reg64(rm2, a->vm + 1);
126
127
shiftfn(rm1, rm1, constimm);
128
- rd = tcg_temp_new_i32();
129
narrowfn(rd, cpu_env, rm1);
130
- neon_store_reg(a->vd, 0, rd);
131
+ write_neon_element32(rd, a->vd, 0, MO_32);
132
133
shiftfn(rm2, rm2, constimm);
134
- rd = tcg_temp_new_i32();
135
narrowfn(rd, cpu_env, rm2);
136
- neon_store_reg(a->vd, 1, rd);
137
+ write_neon_element32(rd, a->vd, 1, MO_32);
138
139
+ tcg_temp_free_i32(rd);
140
tcg_temp_free_i64(rm1);
141
tcg_temp_free_i64(rm2);
142
tcg_temp_free_i64(constimm);
143
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
144
constimm = tcg_const_i32(imm);
145
146
/* Load all inputs first to avoid potential overwrite */
147
- rm1 = neon_load_reg(a->vm, 0);
148
- rm2 = neon_load_reg(a->vm, 1);
149
- rm3 = neon_load_reg(a->vm + 1, 0);
150
- rm4 = neon_load_reg(a->vm + 1, 1);
151
+ rm1 = tcg_temp_new_i32();
152
+ rm2 = tcg_temp_new_i32();
153
+ rm3 = tcg_temp_new_i32();
154
+ rm4 = tcg_temp_new_i32();
155
+ read_neon_element32(rm1, a->vm, 0, MO_32);
156
+ read_neon_element32(rm2, a->vm, 1, MO_32);
157
+ read_neon_element32(rm3, a->vm, 2, MO_32);
158
+ read_neon_element32(rm4, a->vm, 3, MO_32);
159
rtmp = tcg_temp_new_i64();
160
161
shiftfn(rm1, rm1, constimm);
162
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
163
tcg_temp_free_i32(rm2);
164
165
narrowfn(rm1, cpu_env, rtmp);
166
- neon_store_reg(a->vd, 0, rm1);
167
+ write_neon_element32(rm1, a->vd, 0, MO_32);
168
+ tcg_temp_free_i32(rm1);
169
170
shiftfn(rm3, rm3, constimm);
171
shiftfn(rm4, rm4, constimm);
172
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
173
174
narrowfn(rm3, cpu_env, rtmp);
175
tcg_temp_free_i64(rtmp);
176
- neon_store_reg(a->vd, 1, rm3);
177
+ write_neon_element32(rm3, a->vd, 1, MO_32);
178
+ tcg_temp_free_i32(rm3);
179
return true;
180
}
181
182
@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
183
widen_mask = dup_const(a->size + 1, widen_mask);
184
}
185
186
- rm0 = neon_load_reg(a->vm, 0);
187
- rm1 = neon_load_reg(a->vm, 1);
188
+ rm0 = tcg_temp_new_i32();
189
+ rm1 = tcg_temp_new_i32();
190
+ read_neon_element32(rm0, a->vm, 0, MO_32);
191
+ read_neon_element32(rm1, a->vm, 1, MO_32);
192
tmp = tcg_temp_new_i64();
193
194
widenfn(tmp, rm0);
195
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
196
if (src1_wide) {
197
neon_load_reg64(rn0_64, a->vn);
198
} else {
199
- TCGv_i32 tmp = neon_load_reg(a->vn, 0);
200
+ TCGv_i32 tmp = tcg_temp_new_i32();
201
+ read_neon_element32(tmp, a->vn, 0, MO_32);
202
widenfn(rn0_64, tmp);
203
tcg_temp_free_i32(tmp);
204
}
205
- rm = neon_load_reg(a->vm, 0);
206
+ rm = tcg_temp_new_i32();
207
+ read_neon_element32(rm, a->vm, 0, MO_32);
208
209
widenfn(rm_64, rm);
210
tcg_temp_free_i32(rm);
211
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
212
if (src1_wide) {
213
neon_load_reg64(rn1_64, a->vn + 1);
214
} else {
215
- TCGv_i32 tmp = neon_load_reg(a->vn, 1);
216
+ TCGv_i32 tmp = tcg_temp_new_i32();
217
+ read_neon_element32(tmp, a->vn, 1, MO_32);
218
widenfn(rn1_64, tmp);
219
tcg_temp_free_i32(tmp);
220
}
221
- rm = neon_load_reg(a->vm, 1);
222
+ rm = tcg_temp_new_i32();
223
+ read_neon_element32(rm, a->vm, 1, MO_32);
224
225
neon_store_reg64(rn0_64, a->vd);
226
227
@@ -XXX,XX +XXX,XX @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
228
229
narrowfn(rd1, rn_64);
230
231
- neon_store_reg(a->vd, 0, rd0);
232
- neon_store_reg(a->vd, 1, rd1);
233
+ write_neon_element32(rd0, a->vd, 0, MO_32);
234
+ write_neon_element32(rd1, a->vd, 1, MO_32);
235
236
+ tcg_temp_free_i32(rd0);
237
+ tcg_temp_free_i32(rd1);
238
tcg_temp_free_i64(rn_64);
239
tcg_temp_free_i64(rm_64);
240
241
@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
242
rd0 = tcg_temp_new_i64();
243
rd1 = tcg_temp_new_i64();
244
245
- rn = neon_load_reg(a->vn, 0);
246
- rm = neon_load_reg(a->vm, 0);
247
+ rn = tcg_temp_new_i32();
248
+ rm = tcg_temp_new_i32();
249
+ read_neon_element32(rn, a->vn, 0, MO_32);
250
+ read_neon_element32(rm, a->vm, 0, MO_32);
251
opfn(rd0, rn, rm);
252
- tcg_temp_free_i32(rn);
253
- tcg_temp_free_i32(rm);
254
255
- rn = neon_load_reg(a->vn, 1);
256
- rm = neon_load_reg(a->vm, 1);
257
+ read_neon_element32(rn, a->vn, 1, MO_32);
258
+ read_neon_element32(rm, a->vm, 1, MO_32);
259
opfn(rd1, rn, rm);
260
tcg_temp_free_i32(rn);
261
tcg_temp_free_i32(rm);
262
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
263
264
static inline TCGv_i32 neon_get_scalar(int size, int reg)
159
{
265
{
266
- TCGv_i32 tmp;
267
- if (size == 1) {
268
- tmp = neon_load_reg(reg & 7, reg >> 4);
269
+ TCGv_i32 tmp = tcg_temp_new_i32();
270
+ if (size == MO_16) {
271
+ read_neon_element32(tmp, reg & 7, reg >> 4, MO_32);
272
if (reg & 8) {
273
gen_neon_dup_high16(tmp);
274
} else {
275
gen_neon_dup_low16(tmp);
276
}
277
} else {
278
- tmp = neon_load_reg(reg & 15, reg >> 4);
279
+ read_neon_element32(tmp, reg & 15, reg >> 4, MO_32);
280
}
281
return tmp;
282
}
283
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a,
284
* perform an accumulation operation of that result into the
285
* destination.
286
*/
287
- TCGv_i32 scalar;
288
+ TCGv_i32 scalar, tmp;
289
int pass;
290
291
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
292
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a,
293
}
294
295
scalar = neon_get_scalar(a->size, a->vm);
296
+ tmp = tcg_temp_new_i32();
297
298
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
299
- TCGv_i32 tmp = neon_load_reg(a->vn, pass);
300
+ read_neon_element32(tmp, a->vn, pass, MO_32);
301
opfn(tmp, tmp, scalar);
302
if (accfn) {
303
- TCGv_i32 rd = neon_load_reg(a->vd, pass);
304
+ TCGv_i32 rd = tcg_temp_new_i32();
305
+ read_neon_element32(rd, a->vd, pass, MO_32);
306
accfn(tmp, rd, tmp);
307
tcg_temp_free_i32(rd);
308
}
309
- neon_store_reg(a->vd, pass, tmp);
310
+ write_neon_element32(tmp, a->vd, pass, MO_32);
311
}
312
+ tcg_temp_free_i32(tmp);
313
tcg_temp_free_i32(scalar);
314
return true;
315
}
316
@@ -XXX,XX +XXX,XX @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
317
* performs a kind of fused op-then-accumulate using a helper
318
* function that takes all of rd, rn and the scalar at once.
319
*/
320
- TCGv_i32 scalar;
321
+ TCGv_i32 scalar, rn, rd;
322
int pass;
323
324
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
325
@@ -XXX,XX +XXX,XX @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
326
}
327
328
scalar = neon_get_scalar(a->size, a->vm);
329
+ rn = tcg_temp_new_i32();
330
+ rd = tcg_temp_new_i32();
331
332
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
333
- TCGv_i32 rn = neon_load_reg(a->vn, pass);
334
- TCGv_i32 rd = neon_load_reg(a->vd, pass);
335
+ read_neon_element32(rn, a->vn, pass, MO_32);
336
+ read_neon_element32(rd, a->vd, pass, MO_32);
337
opfn(rd, cpu_env, rn, scalar, rd);
338
- tcg_temp_free_i32(rn);
339
- neon_store_reg(a->vd, pass, rd);
340
+ write_neon_element32(rd, a->vd, pass, MO_32);
341
}
342
+ tcg_temp_free_i32(rn);
343
+ tcg_temp_free_i32(rd);
344
tcg_temp_free_i32(scalar);
345
346
return true;
347
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
348
scalar = neon_get_scalar(a->size, a->vm);
349
350
/* Load all inputs before writing any outputs, in case of overlap */
351
- rn = neon_load_reg(a->vn, 0);
352
+ rn = tcg_temp_new_i32();
353
+ read_neon_element32(rn, a->vn, 0, MO_32);
354
rn0_64 = tcg_temp_new_i64();
355
opfn(rn0_64, rn, scalar);
356
- tcg_temp_free_i32(rn);
357
358
- rn = neon_load_reg(a->vn, 1);
359
+ read_neon_element32(rn, a->vn, 1, MO_32);
360
rn1_64 = tcg_temp_new_i64();
361
opfn(rn1_64, rn, scalar);
362
tcg_temp_free_i32(rn);
363
@@ -XXX,XX +XXX,XX @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
364
return false;
365
}
366
n <<= 3;
367
+ tmp = tcg_temp_new_i32();
368
if (a->op) {
369
- tmp = neon_load_reg(a->vd, 0);
370
+ read_neon_element32(tmp, a->vd, 0, MO_32);
371
} else {
372
- tmp = tcg_temp_new_i32();
373
tcg_gen_movi_i32(tmp, 0);
374
}
375
- tmp2 = neon_load_reg(a->vm, 0);
376
+ tmp2 = tcg_temp_new_i32();
377
+ read_neon_element32(tmp2, a->vm, 0, MO_32);
378
ptr1 = vfp_reg_ptr(true, a->vn);
379
tmp4 = tcg_const_i32(n);
380
gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp4);
381
- tcg_temp_free_i32(tmp);
382
+
383
if (a->op) {
384
- tmp = neon_load_reg(a->vd, 1);
385
+ read_neon_element32(tmp, a->vd, 1, MO_32);
386
} else {
387
- tmp = tcg_temp_new_i32();
388
tcg_gen_movi_i32(tmp, 0);
389
}
390
- tmp3 = neon_load_reg(a->vm, 1);
391
+ tmp3 = tcg_temp_new_i32();
392
+ read_neon_element32(tmp3, a->vm, 1, MO_32);
393
gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp4);
394
+ tcg_temp_free_i32(tmp);
395
tcg_temp_free_i32(tmp4);
396
tcg_temp_free_ptr(ptr1);
397
- neon_store_reg(a->vd, 0, tmp2);
398
- neon_store_reg(a->vd, 1, tmp3);
399
- tcg_temp_free_i32(tmp);
400
+
401
+ write_neon_element32(tmp2, a->vd, 0, MO_32);
402
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
403
+ tcg_temp_free_i32(tmp2);
404
+ tcg_temp_free_i32(tmp3);
405
return true;
406
}
407
408
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
409
static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
410
{
411
int pass, half;
412
+ TCGv_i32 tmp[2];
413
414
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
415
return false;
416
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
417
return true;
418
}
419
420
- for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
421
- TCGv_i32 tmp[2];
422
+ tmp[0] = tcg_temp_new_i32();
423
+ tmp[1] = tcg_temp_new_i32();
424
425
+ for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
426
for (half = 0; half < 2; half++) {
427
- tmp[half] = neon_load_reg(a->vm, pass * 2 + half);
428
+ read_neon_element32(tmp[half], a->vm, pass * 2 + half, MO_32);
429
switch (a->size) {
430
case 0:
431
tcg_gen_bswap32_i32(tmp[half], tmp[half]);
432
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
433
g_assert_not_reached();
434
}
435
}
436
- neon_store_reg(a->vd, pass * 2, tmp[1]);
437
- neon_store_reg(a->vd, pass * 2 + 1, tmp[0]);
438
+ write_neon_element32(tmp[1], a->vd, pass * 2, MO_32);
439
+ write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32);
440
}
441
+
442
+ tcg_temp_free_i32(tmp[0]);
443
+ tcg_temp_free_i32(tmp[1]);
444
return true;
445
}
446
447
@@ -XXX,XX +XXX,XX @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
448
rm0_64 = tcg_temp_new_i64();
449
rm1_64 = tcg_temp_new_i64();
450
rd_64 = tcg_temp_new_i64();
451
- tmp = neon_load_reg(a->vm, pass * 2);
452
+
453
+ tmp = tcg_temp_new_i32();
454
+ read_neon_element32(tmp, a->vm, pass * 2, MO_32);
455
widenfn(rm0_64, tmp);
456
- tcg_temp_free_i32(tmp);
457
- tmp = neon_load_reg(a->vm, pass * 2 + 1);
458
+ read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32);
459
widenfn(rm1_64, tmp);
460
tcg_temp_free_i32(tmp);
461
+
462
opfn(rd_64, rm0_64, rm1_64);
463
tcg_temp_free_i64(rm0_64);
464
tcg_temp_free_i64(rm1_64);
465
@@ -XXX,XX +XXX,XX @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
466
narrowfn(rd0, cpu_env, rm);
467
neon_load_reg64(rm, a->vm + 1);
468
narrowfn(rd1, cpu_env, rm);
469
- neon_store_reg(a->vd, 0, rd0);
470
- neon_store_reg(a->vd, 1, rd1);
471
+ write_neon_element32(rd0, a->vd, 0, MO_32);
472
+ write_neon_element32(rd1, a->vd, 1, MO_32);
473
+ tcg_temp_free_i32(rd0);
474
+ tcg_temp_free_i32(rd1);
475
tcg_temp_free_i64(rm);
476
return true;
477
}
478
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
479
}
480
481
rd = tcg_temp_new_i64();
482
+ rm0 = tcg_temp_new_i32();
483
+ rm1 = tcg_temp_new_i32();
484
485
- rm0 = neon_load_reg(a->vm, 0);
486
- rm1 = neon_load_reg(a->vm, 1);
487
+ read_neon_element32(rm0, a->vm, 0, MO_32);
488
+ read_neon_element32(rm1, a->vm, 1, MO_32);
489
490
widenfn(rd, rm0);
491
tcg_gen_shli_i64(rd, rd, 8 << a->size);
492
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)
493
494
fpst = fpstatus_ptr(FPST_STD);
495
ahp = get_ahp_flag();
496
- tmp = neon_load_reg(a->vm, 0);
497
+ tmp = tcg_temp_new_i32();
498
+ read_neon_element32(tmp, a->vm, 0, MO_32);
499
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
500
- tmp2 = neon_load_reg(a->vm, 1);
501
+ tmp2 = tcg_temp_new_i32();
502
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
503
gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
504
tcg_gen_shli_i32(tmp2, tmp2, 16);
505
tcg_gen_or_i32(tmp2, tmp2, tmp);
506
- tcg_temp_free_i32(tmp);
507
- tmp = neon_load_reg(a->vm, 2);
508
+ read_neon_element32(tmp, a->vm, 2, MO_32);
509
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
510
- tmp3 = neon_load_reg(a->vm, 3);
511
- neon_store_reg(a->vd, 0, tmp2);
512
+ tmp3 = tcg_temp_new_i32();
513
+ read_neon_element32(tmp3, a->vm, 3, MO_32);
514
+ write_neon_element32(tmp2, a->vd, 0, MO_32);
515
+ tcg_temp_free_i32(tmp2);
516
gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
517
tcg_gen_shli_i32(tmp3, tmp3, 16);
518
tcg_gen_or_i32(tmp3, tmp3, tmp);
519
- neon_store_reg(a->vd, 1, tmp3);
520
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
521
+ tcg_temp_free_i32(tmp3);
522
tcg_temp_free_i32(tmp);
523
tcg_temp_free_i32(ahp);
524
tcg_temp_free_ptr(fpst);
525
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
526
fpst = fpstatus_ptr(FPST_STD);
527
ahp = get_ahp_flag();
528
tmp3 = tcg_temp_new_i32();
529
- tmp = neon_load_reg(a->vm, 0);
530
- tmp2 = neon_load_reg(a->vm, 1);
531
+ tmp2 = tcg_temp_new_i32();
532
+ tmp = tcg_temp_new_i32();
533
+ read_neon_element32(tmp, a->vm, 0, MO_32);
534
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
535
tcg_gen_ext16u_i32(tmp3, tmp);
536
gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
537
- neon_store_reg(a->vd, 0, tmp3);
538
+ write_neon_element32(tmp3, a->vd, 0, MO_32);
539
tcg_gen_shri_i32(tmp, tmp, 16);
540
gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
541
- neon_store_reg(a->vd, 1, tmp);
542
- tmp3 = tcg_temp_new_i32();
543
+ write_neon_element32(tmp, a->vd, 1, MO_32);
544
+ tcg_temp_free_i32(tmp);
545
tcg_gen_ext16u_i32(tmp3, tmp2);
546
gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
547
- neon_store_reg(a->vd, 2, tmp3);
548
+ write_neon_element32(tmp3, a->vd, 2, MO_32);
549
+ tcg_temp_free_i32(tmp3);
550
tcg_gen_shri_i32(tmp2, tmp2, 16);
551
gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
552
- neon_store_reg(a->vd, 3, tmp2);
553
+ write_neon_element32(tmp2, a->vd, 3, MO_32);
554
+ tcg_temp_free_i32(tmp2);
555
tcg_temp_free_i32(ahp);
556
tcg_temp_free_ptr(fpst);
557
558
@@ -XXX,XX +XXX,XX @@ DO_2M_CRYPTO(SHA256SU0, aa32_sha2, 2)
559
560
static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
561
{
562
+ TCGv_i32 tmp;
563
int pass;
564
565
/* Handle a 2-reg-misc operation by iterating 32 bits at a time */
566
@@ -XXX,XX +XXX,XX @@ static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
567
return true;
568
}
569
570
+ tmp = tcg_temp_new_i32();
571
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
572
- TCGv_i32 tmp = neon_load_reg(a->vm, pass);
573
+ read_neon_element32(tmp, a->vm, pass, MO_32);
574
fn(tmp, tmp);
575
- neon_store_reg(a->vd, pass, tmp);
576
+ write_neon_element32(tmp, a->vd, pass, MO_32);
577
}
578
+ tcg_temp_free_i32(tmp);
579
580
return true;
581
}
582
@@ -XXX,XX +XXX,XX @@ static bool trans_VTRN(DisasContext *s, arg_2misc *a)
583
return true;
584
}
585
586
- if (a->size == 2) {
587
+ tmp = tcg_temp_new_i32();
588
+ tmp2 = tcg_temp_new_i32();
589
+ if (a->size == MO_32) {
590
for (pass = 0; pass < (a->q ? 4 : 2); pass += 2) {
591
- tmp = neon_load_reg(a->vm, pass);
592
- tmp2 = neon_load_reg(a->vd, pass + 1);
593
- neon_store_reg(a->vm, pass, tmp2);
594
- neon_store_reg(a->vd, pass + 1, tmp);
595
+ read_neon_element32(tmp, a->vm, pass, MO_32);
596
+ read_neon_element32(tmp2, a->vd, pass + 1, MO_32);
597
+ write_neon_element32(tmp2, a->vm, pass, MO_32);
598
+ write_neon_element32(tmp, a->vd, pass + 1, MO_32);
599
}
600
} else {
601
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
602
- tmp = neon_load_reg(a->vm, pass);
603
- tmp2 = neon_load_reg(a->vd, pass);
604
- if (a->size == 0) {
605
+ read_neon_element32(tmp, a->vm, pass, MO_32);
606
+ read_neon_element32(tmp2, a->vd, pass, MO_32);
607
+ if (a->size == MO_8) {
608
gen_neon_trn_u8(tmp, tmp2);
609
} else {
610
gen_neon_trn_u16(tmp, tmp2);
611
}
612
- neon_store_reg(a->vm, pass, tmp2);
613
- neon_store_reg(a->vd, pass, tmp);
614
+ write_neon_element32(tmp2, a->vm, pass, MO_32);
615
+ write_neon_element32(tmp, a->vd, pass, MO_32);
616
}
617
}
618
+ tcg_temp_free_i32(tmp);
619
+ tcg_temp_free_i32(tmp2);
620
return true;
621
}
160
--
622
--
161
2.20.1
623
2.20.1
162
624
163
625
From: Richard Henderson <richard.henderson@linaro.org>

We can then use this to improve VMOV (scalar to gp) and
VMOV (gp to scalar) so that we simply perform the memory
operation that we wanted, rather than inserting or
extracting from a 32-bit quantity.

These were the last uses of neon_load/store_reg, so remove them.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-7-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c         | 50 +++++++++++++-----------
 target/arm/translate-vfp.c.inc | 71 +++++-----------------------------
 2 files changed, 37 insertions(+), 84 deletions(-)

Implement the fp16 versions of the VFP VCVT instruction forms
which convert between floating point and integer with a specified
rounding mode.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-17-peter.maydell@linaro.org
---
 target/arm/vfp-uncond.decode   |  6 ++++--
 target/arm/translate-vfp.c.inc | 32 ++++++++++++++++++++++++--------
 2 files changed, 28 insertions(+), 10 deletions(-)
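To illustrate "simply perform the memory operation that we wanted":
neon_element_offset() scales the element index by the element size, so a
byte-sized VMOV becomes a single sized load instead of a 32-bit load plus
shift/mask. A sketch with concrete, illustrative numbers, assuming a
little-endian host:

    /* e.g. VMOV.U8 rt, d1[3]: element 3 of D1 as an unsigned byte */
    long off = neon_element_offset(1, 3, MO_UB); /* offset of D1 plus 3 bytes */
    tcg_gen_ld8u_i32(tmp, cpu_env, off);         /* no gen_uxtb/shift needed */

On a big-endian host the offset is adjusted inside neon_element_offset()
(see the HOST_WORDS_BIGENDIAN branch in the hunk below).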
12
18
13
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
19
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/vfp-uncond.decode
21
--- a/target/arm/translate.c
16
+++ b/target/arm/vfp-uncond.decode
22
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ VRINT 1111 1110 1.11 10 rm:2 .... 1011 01.0 .... \
23
@@ -XXX,XX +XXX,XX @@ static long neon_full_reg_offset(unsigned reg)
18
vm=%vm_dp vd=%vd_dp dp=1
24
* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
19
25
* where 0 is the least significant end of the register.
20
# VCVT float to int with specified rounding mode; Vd is always single-precision
26
*/
21
+VCVT 1111 1110 1.11 11 rm:2 .... 1001 op:1 1.0 .... \
27
-static long neon_element_offset(int reg, int element, MemOp size)
22
+ vm=%vm_sp vd=%vd_sp sz=1
28
+static long neon_element_offset(int reg, int element, MemOp memop)
23
VCVT 1111 1110 1.11 11 rm:2 .... 1010 op:1 1.0 .... \
29
{
24
- vm=%vm_sp vd=%vd_sp dp=0
30
- int element_size = 1 << size;
25
+ vm=%vm_sp vd=%vd_sp sz=2
31
+ int element_size = 1 << (memop & MO_SIZE);
26
VCVT 1111 1110 1.11 11 rm:2 .... 1011 op:1 1.0 .... \
32
int ofs = element * element_size;
27
- vm=%vm_dp vd=%vd_sp dp=1
33
#ifdef HOST_WORDS_BIGENDIAN
28
+ vm=%vm_dp vd=%vd_sp sz=3
34
/*
35
@@ -XXX,XX +XXX,XX @@ static long vfp_reg_offset(bool dp, unsigned reg)
36
}
37
}
38
39
-static TCGv_i32 neon_load_reg(int reg, int pass)
40
-{
41
- TCGv_i32 tmp = tcg_temp_new_i32();
42
- tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32));
43
- return tmp;
44
-}
45
-
46
-static void neon_store_reg(int reg, int pass, TCGv_i32 var)
47
-{
48
- tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32));
49
- tcg_temp_free_i32(var);
50
-}
51
-
52
static inline void neon_load_reg64(TCGv_i64 var, int reg)
53
{
54
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
55
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg32(TCGv_i32 var, int reg)
56
tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
57
}
58
59
-static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
60
+static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
61
{
62
- long off = neon_element_offset(reg, ele, size);
63
+ long off = neon_element_offset(reg, ele, memop);
64
65
- switch (size) {
66
- case MO_32:
67
+ switch (memop) {
68
+ case MO_SB:
69
+ tcg_gen_ld8s_i32(dest, cpu_env, off);
70
+ break;
71
+ case MO_UB:
72
+ tcg_gen_ld8u_i32(dest, cpu_env, off);
73
+ break;
74
+ case MO_SW:
75
+ tcg_gen_ld16s_i32(dest, cpu_env, off);
76
+ break;
77
+ case MO_UW:
78
+ tcg_gen_ld16u_i32(dest, cpu_env, off);
79
+ break;
80
+ case MO_UL:
81
+ case MO_SL:
82
tcg_gen_ld_i32(dest, cpu_env, off);
83
break;
84
default:
85
@@ -XXX,XX +XXX,XX @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
86
}
87
}
88
89
-static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp size)
90
+static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
91
{
92
- long off = neon_element_offset(reg, ele, size);
93
+ long off = neon_element_offset(reg, ele, memop);
94
95
- switch (size) {
96
+ switch (memop) {
97
+ case MO_8:
98
+ tcg_gen_st8_i32(src, cpu_env, off);
99
+ break;
100
+ case MO_16:
101
+ tcg_gen_st16_i32(src, cpu_env, off);
102
+ break;
103
case MO_32:
104
tcg_gen_st_i32(src, cpu_env, off);
105
break;
29
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
106
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
30
index XXXXXXX..XXXXXXX 100644
107
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate-vfp.c.inc
108
--- a/target/arm/translate-vfp.c.inc
32
+++ b/target/arm/translate-vfp.c.inc
109
+++ b/target/arm/translate-vfp.c.inc
33
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
34
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
111
{
35
{
112
/* VMOV scalar to general purpose register */
36
uint32_t rd, rm;
113
TCGv_i32 tmp;
37
- bool dp = a->dp;
114
- int pass;
38
+ int sz = a->sz;
115
- uint32_t offset;
39
TCGv_ptr fpst;
116
40
TCGv_i32 tcg_rmode, tcg_shift;
117
- /* SIZE == 2 is a VFP instruction; otherwise NEON. */
41
int rounding = fp_decode_rm[a->rm];
118
- if (a->size == 2
42
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
119
+ /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
43
return false;
120
+ if (a->size == MO_32
44
}
121
? !dc_isar_feature(aa32_fpsp_v2, s)
45
122
: !arm_dc_feature(s, ARM_FEATURE_NEON)) {
46
- if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
123
return false;
47
+ if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
124
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
48
+ return false;
125
return false;
49
+ }
126
}
50
+
127
51
+ if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
128
- offset = a->index << a->size;
52
return false;
129
- pass = extract32(offset, 2, 1);
53
}
130
- offset = extract32(offset, 0, 2) * 8;
54
131
-
55
/* UNDEF accesses to D16-D31 if they don't exist */
132
if (!vfp_access_check(s)) {
56
- if (dp && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
57
+ if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
58
return false;
59
}
60
61
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
62
return true;
133
return true;
63
}
134
}
64
135
65
- fpst = fpstatus_ptr(FPST_FPCR);
136
- tmp = neon_load_reg(a->vn, pass);
66
+ if (sz == 1) {
137
- switch (a->size) {
67
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
138
- case 0:
68
+ } else {
139
- if (offset) {
69
+ fpst = fpstatus_ptr(FPST_FPCR);
140
- tcg_gen_shri_i32(tmp, tmp, offset);
70
+ }
141
- }
71
142
- if (a->u) {
72
tcg_shift = tcg_const_i32(0);
143
- gen_uxtb(tmp);
73
144
- } else {
74
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
145
- gen_sxtb(tmp);
75
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
146
- }
76
147
- break;
77
- if (dp) {
148
- case 1:
78
+ if (sz == 3) {
149
- if (a->u) {
79
TCGv_i64 tcg_double, tcg_res;
150
- if (offset) {
80
TCGv_i32 tcg_tmp;
151
- tcg_gen_shri_i32(tmp, tmp, 16);
81
tcg_double = tcg_temp_new_i64();
152
- } else {
82
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
153
- gen_uxth(tmp);
83
tcg_single = tcg_temp_new_i32();
154
- }
84
tcg_res = tcg_temp_new_i32();
155
- } else {
85
neon_load_reg32(tcg_single, rm);
156
- if (offset) {
86
- if (is_signed) {
157
- tcg_gen_sari_i32(tmp, tmp, 16);
87
- gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
158
- } else {
88
+ if (sz == 1) {
159
- gen_sxth(tmp);
89
+ if (is_signed) {
160
- }
90
+ gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
161
- }
91
+ } else {
162
- break;
92
+ gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
163
- case 2:
93
+ }
164
- break;
94
} else {
165
- }
95
- gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
166
+ tmp = tcg_temp_new_i32();
96
+ if (is_signed) {
167
+ read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
97
+ gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
168
store_reg(s, a->rt, tmp);
98
+ } else {
169
99
+ gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
170
return true;
100
+ }
171
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
101
}
172
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
102
neon_store_reg32(tcg_res, rd);
173
{
103
tcg_temp_free_i32(tcg_res);
174
/* VMOV general purpose register to scalar */
175
- TCGv_i32 tmp, tmp2;
176
- int pass;
177
- uint32_t offset;
178
+ TCGv_i32 tmp;
179
180
- /* SIZE == 2 is a VFP instruction; otherwise NEON. */
181
- if (a->size == 2
182
+ /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
183
+ if (a->size == MO_32
184
? !dc_isar_feature(aa32_fpsp_v2, s)
185
: !arm_dc_feature(s, ARM_FEATURE_NEON)) {
186
return false;
187
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
188
return false;
189
}
190
191
- offset = a->index << a->size;
192
- pass = extract32(offset, 2, 1);
193
- offset = extract32(offset, 0, 2) * 8;
194
-
195
if (!vfp_access_check(s)) {
196
return true;
197
}
198
199
tmp = load_reg(s, a->rt);
200
- switch (a->size) {
201
- case 0:
202
- tmp2 = neon_load_reg(a->vn, pass);
203
- tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
204
- tcg_temp_free_i32(tmp2);
205
- break;
206
- case 1:
207
- tmp2 = neon_load_reg(a->vn, pass);
208
- tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
209
- tcg_temp_free_i32(tmp2);
210
- break;
211
- case 2:
212
- break;
213
- }
214
- neon_store_reg(a->vn, pass, tmp);
215
+ write_neon_element32(tmp, a->vn, a->index, a->size);
216
+ tcg_temp_free_i32(tmp);
217
218
return true;
219
}
104
--
220
--
105
2.20.1
221
2.20.1
106
222
107
223
From: Richard Henderson <richard.henderson@linaro.org>

The only uses of this function are for loading VFP
single-precision values, and have nothing to do with Neon.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-8-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c         |   4 +-
 target/arm/translate-vfp.c.inc | 184 ++++++++++++++++-----------------
 2 files changed, 94 insertions(+), 94 deletions(-)

Implement the fp16 version of the VFP VRINT* insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-19-peter.maydell@linaro.org
---
 target/arm/helper.h            |  2 +
 target/arm/vfp-uncond.decode   |  6 ++-
 target/arm/vfp.decode          |  3 ++
 target/arm/vfp_helper.c        | 21 ++++++++
 target/arm/translate-vfp.c.inc | 98 +++++++++++++++++++++++++++++---
 5 files changed, 122 insertions(+), 8 deletions(-)
14
14
diff --git a/target/arm/helper.h b/target/arm/helper.h
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.h
17
--- a/target/arm/translate.c
17
+++ b/target/arm/helper.h
18
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(shr_cc, i32, env, i32, i32)
19
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg64(TCGv_i64 var, int reg)
19
DEF_HELPER_3(sar_cc, i32, env, i32, i32)
20
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
20
DEF_HELPER_3(ror_cc, i32, env, i32, i32)
21
22
+DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, ptr)
23
DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, ptr)
24
DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr)
25
+DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, ptr)
26
DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr)
27
DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)
28
29
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/vfp-uncond.decode
32
+++ b/target/arm/vfp-uncond.decode
33
@@ -XXX,XX +XXX,XX @@ VMINNM_sp 1111 1110 1.00 .... .... 1010 .1.0 .... @vfp_dnm_s
34
VMAXNM_dp 1111 1110 1.00 .... .... 1011 .0.0 .... @vfp_dnm_d
35
VMINNM_dp 1111 1110 1.00 .... .... 1011 .1.0 .... @vfp_dnm_d
36
37
+VRINT 1111 1110 1.11 10 rm:2 .... 1001 01.0 .... \
38
+ vm=%vm_sp vd=%vd_sp sz=1
39
VRINT 1111 1110 1.11 10 rm:2 .... 1010 01.0 .... \
40
- vm=%vm_sp vd=%vd_sp dp=0
41
+ vm=%vm_sp vd=%vd_sp sz=2
42
VRINT 1111 1110 1.11 10 rm:2 .... 1011 01.0 .... \
43
- vm=%vm_dp vd=%vd_dp dp=1
44
+ vm=%vm_dp vd=%vd_dp sz=3
45
46
# VCVT float to int with specified rounding mode; Vd is always single-precision
47
VCVT 1111 1110 1.11 11 rm:2 .... 1001 op:1 1.0 .... \
48
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
49
index XXXXXXX..XXXXXXX 100644
50
--- a/target/arm/vfp.decode
51
+++ b/target/arm/vfp.decode
52
@@ -XXX,XX +XXX,XX @@ VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
53
VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
54
vd=%vd_sp vm=%vm_dp
55
56
+VRINTR_hp ---- 1110 1.11 0110 .... 1001 01.0 .... @vfp_dm_ss
57
VRINTR_sp ---- 1110 1.11 0110 .... 1010 01.0 .... @vfp_dm_ss
58
VRINTR_dp ---- 1110 1.11 0110 .... 1011 01.0 .... @vfp_dm_dd
59
60
+VRINTZ_hp ---- 1110 1.11 0110 .... 1001 11.0 .... @vfp_dm_ss
61
VRINTZ_sp ---- 1110 1.11 0110 .... 1010 11.0 .... @vfp_dm_ss
62
VRINTZ_dp ---- 1110 1.11 0110 .... 1011 11.0 .... @vfp_dm_dd
63
64
+VRINTX_hp ---- 1110 1.11 0111 .... 1001 01.0 .... @vfp_dm_ss
65
VRINTX_sp ---- 1110 1.11 0111 .... 1010 01.0 .... @vfp_dm_ss
66
VRINTX_dp ---- 1110 1.11 0111 .... 1011 01.0 .... @vfp_dm_dd
67
68
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/target/arm/vfp_helper.c
71
+++ b/target/arm/vfp_helper.c
72
@@ -XXX,XX +XXX,XX @@ float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
73
}
21
}
74
22
75
/* ARMv8 round to integral */
23
-static inline void neon_load_reg32(TCGv_i32 var, int reg)
76
+dh_ctype_f16 HELPER(rinth_exact)(dh_ctype_f16 x, void *fp_status)
24
+static inline void vfp_load_reg32(TCGv_i32 var, int reg)
77
+{
78
+ return float16_round_to_int(x, fp_status);
79
+}
80
+
81
float32 HELPER(rints_exact)(float32 x, void *fp_status)
82
{
25
{
83
return float32_round_to_int(x, fp_status);
26
tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
84
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rintd_exact)(float64 x, void *fp_status)
85
return float64_round_to_int(x, fp_status);
86
}
27
}
87
28
88
+dh_ctype_f16 HELPER(rinth)(dh_ctype_f16 x, void *fp_status)
29
-static inline void neon_store_reg32(TCGv_i32 var, int reg)
89
+{
30
+static inline void vfp_store_reg32(TCGv_i32 var, int reg)
90
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
91
+ float16 ret;
92
+
93
+ ret = float16_round_to_int(x, fp_status);
94
+
95
+ /* Suppress any inexact exceptions the conversion produced */
96
+ if (!(old_flags & float_flag_inexact)) {
97
+ new_flags = get_float_exception_flags(fp_status);
98
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
99
+ }
100
+
101
+ return ret;
102
+}
103
+
104
float32 HELPER(rints)(float32 x, void *fp_status)
105
{
31
{
106
int old_flags = get_float_exception_flags(fp_status), new_flags;
32
tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
33
}
107
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
34
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
108
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
109
--- a/target/arm/translate-vfp.c.inc
36
--- a/target/arm/translate-vfp.c.inc
110
+++ b/target/arm/translate-vfp.c.inc
37
+++ b/target/arm/translate-vfp.c.inc
111
@@ -XXX,XX +XXX,XX @@ static const uint8_t fp_decode_rm[] = {
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
112
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
39
frn = tcg_temp_new_i32();
113
{
40
frm = tcg_temp_new_i32();
114
uint32_t rd, rm;
41
dest = tcg_temp_new_i32();
115
- bool dp = a->dp;
42
- neon_load_reg32(frn, rn);
116
+ int sz = a->sz;
43
- neon_load_reg32(frm, rm);
117
TCGv_ptr fpst;
44
+ vfp_load_reg32(frn, rn);
118
TCGv_i32 tcg_rmode;
45
+ vfp_load_reg32(frm, rm);
119
int rounding = fp_decode_rm[a->rm];
46
switch (a->cc) {
47
case 0: /* eq: Z */
48
tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
49
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
50
if (sz == 1) {
51
tcg_gen_andi_i32(dest, dest, 0xffff);
52
}
53
- neon_store_reg32(dest, rd);
54
+ vfp_store_reg32(dest, rd);
55
tcg_temp_free_i32(frn);
56
tcg_temp_free_i32(frm);
57
tcg_temp_free_i32(dest);
120
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
58
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
121
return false;
59
TCGv_i32 tcg_res;
122
}
123
124
- if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
125
+ if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
126
+ return false;
127
+ }
128
+
129
+ if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
130
return false;
131
}
132
133
/* UNDEF accesses to D16-D31 if they don't exist */
134
- if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
135
+ if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
136
((a->vm | a->vd) & 0x10)) {
137
return false;
138
}
139
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
140
return true;
141
}
142
143
- fpst = fpstatus_ptr(FPST_FPCR);
144
+ if (sz == 1) {
145
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
146
+ } else {
147
+ fpst = fpstatus_ptr(FPST_FPCR);
148
+ }
149
150
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
151
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
152
153
- if (dp) {
154
+ if (sz == 3) {
155
TCGv_i64 tcg_op;
156
TCGv_i64 tcg_res;
157
tcg_op = tcg_temp_new_i64();
158
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
159
tcg_op = tcg_temp_new_i32();
60
tcg_op = tcg_temp_new_i32();
160
tcg_res = tcg_temp_new_i32();
61
tcg_res = tcg_temp_new_i32();
161
neon_load_reg32(tcg_op, rm);
62
- neon_load_reg32(tcg_op, rm);
162
- gen_helper_rints(tcg_res, tcg_op, fpst);
63
+ vfp_load_reg32(tcg_op, rm);
163
+ if (sz == 1) {
64
if (sz == 1) {
164
+ gen_helper_rinth(tcg_res, tcg_op, fpst);
65
gen_helper_rinth(tcg_res, tcg_op, fpst);
165
+ } else {
66
} else {
166
+ gen_helper_rints(tcg_res, tcg_op, fpst);
67
gen_helper_rints(tcg_res, tcg_op, fpst);
167
+ }
68
}
168
neon_store_reg32(tcg_res, rd);
69
- neon_store_reg32(tcg_res, rd);
70
+ vfp_store_reg32(tcg_res, rd);
169
tcg_temp_free_i32(tcg_op);
71
tcg_temp_free_i32(tcg_op);
170
tcg_temp_free_i32(tcg_res);
72
tcg_temp_free_i32(tcg_res);
171
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
73
}
74
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
75
gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
76
}
77
tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
78
- neon_store_reg32(tcg_tmp, rd);
79
+ vfp_store_reg32(tcg_tmp, rd);
80
tcg_temp_free_i32(tcg_tmp);
81
tcg_temp_free_i64(tcg_res);
82
tcg_temp_free_i64(tcg_double);
83
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
84
TCGv_i32 tcg_single, tcg_res;
85
tcg_single = tcg_temp_new_i32();
86
tcg_res = tcg_temp_new_i32();
87
- neon_load_reg32(tcg_single, rm);
88
+ vfp_load_reg32(tcg_single, rm);
89
if (sz == 1) {
90
if (is_signed) {
91
gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
92
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
93
gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
94
}
95
}
96
- neon_store_reg32(tcg_res, rd);
97
+ vfp_store_reg32(tcg_res, rd);
98
tcg_temp_free_i32(tcg_res);
99
tcg_temp_free_i32(tcg_single);
100
}
101
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
102
if (a->l) {
103
/* VFP to general purpose register */
104
tmp = tcg_temp_new_i32();
105
- neon_load_reg32(tmp, a->vn);
106
+ vfp_load_reg32(tmp, a->vn);
107
tcg_gen_andi_i32(tmp, tmp, 0xffff);
108
store_reg(s, a->rt, tmp);
109
} else {
110
/* general purpose register to VFP */
111
tmp = load_reg(s, a->rt);
112
tcg_gen_andi_i32(tmp, tmp, 0xffff);
113
- neon_store_reg32(tmp, a->vn);
114
+ vfp_store_reg32(tmp, a->vn);
115
tcg_temp_free_i32(tmp);
116
}
117
118
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
119
if (a->l) {
120
/* VFP to general purpose register */
121
tmp = tcg_temp_new_i32();
122
- neon_load_reg32(tmp, a->vn);
123
+ vfp_load_reg32(tmp, a->vn);
124
if (a->rt == 15) {
125
/* Set the 4 flag bits in the CPSR. */
126
gen_set_nzcv(tmp);
127
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
128
} else {
129
/* general purpose register to VFP */
130
tmp = load_reg(s, a->rt);
131
- neon_store_reg32(tmp, a->vn);
132
+ vfp_store_reg32(tmp, a->vn);
133
tcg_temp_free_i32(tmp);
134
}
135
136
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
137
if (a->op) {
138
/* fpreg to gpreg */
139
tmp = tcg_temp_new_i32();
140
- neon_load_reg32(tmp, a->vm);
141
+ vfp_load_reg32(tmp, a->vm);
142
store_reg(s, a->rt, tmp);
143
tmp = tcg_temp_new_i32();
144
- neon_load_reg32(tmp, a->vm + 1);
145
+ vfp_load_reg32(tmp, a->vm + 1);
146
store_reg(s, a->rt2, tmp);
147
} else {
148
/* gpreg to fpreg */
149
tmp = load_reg(s, a->rt);
150
- neon_store_reg32(tmp, a->vm);
151
+ vfp_store_reg32(tmp, a->vm);
152
tcg_temp_free_i32(tmp);
153
tmp = load_reg(s, a->rt2);
154
- neon_store_reg32(tmp, a->vm + 1);
155
+ vfp_store_reg32(tmp, a->vm + 1);
156
tcg_temp_free_i32(tmp);
157
}
158
159
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
160
if (a->op) {
161
/* fpreg to gpreg */
162
tmp = tcg_temp_new_i32();
163
- neon_load_reg32(tmp, a->vm * 2);
164
+ vfp_load_reg32(tmp, a->vm * 2);
165
store_reg(s, a->rt, tmp);
166
tmp = tcg_temp_new_i32();
167
- neon_load_reg32(tmp, a->vm * 2 + 1);
168
+ vfp_load_reg32(tmp, a->vm * 2 + 1);
169
store_reg(s, a->rt2, tmp);
} else {
/* gpreg to fpreg */
tmp = load_reg(s, a->rt);
- neon_store_reg32(tmp, a->vm * 2);
+ vfp_store_reg32(tmp, a->vm * 2);
tcg_temp_free_i32(tmp);
tmp = load_reg(s, a->rt2);
- neon_store_reg32(tmp, a->vm * 2 + 1);
+ vfp_store_reg32(tmp, a->vm * 2 + 1);
tcg_temp_free_i32(tmp);
}

@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
tmp = tcg_temp_new_i32();
if (a->l) {
gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
} else {
- neon_load_reg32(tmp, a->vd);
+ vfp_load_reg32(tmp, a->vd);
gen_aa32_st16(s, tmp, addr, get_mem_index(s));
}
tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
tmp = tcg_temp_new_i32();
if (a->l) {
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
} else {
- neon_load_reg32(tmp, a->vd);
+ vfp_load_reg32(tmp, a->vd);
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
}
tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
if (a->l) {
/* load */
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
- neon_store_reg32(tmp, a->vd + i);
+ vfp_store_reg32(tmp, a->vd + i);
} else {
/* store */
- neon_load_reg32(tmp, a->vd + i);
+ vfp_load_reg32(tmp, a->vd + i);
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
}
tcg_gen_addi_i32(addr, addr, offset);
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
fd = tcg_temp_new_i32();
fpst = fpstatus_ptr(FPST_FPCR);

- neon_load_reg32(f0, vn);
- neon_load_reg32(f1, vm);
+ vfp_load_reg32(f0, vn);
+ vfp_load_reg32(f1, vm);

for (;;) {
if (reads_vd) {
- neon_load_reg32(fd, vd);
+ vfp_load_reg32(fd, vd);
}
fn(fd, f0, f1, fpst);
- neon_store_reg32(fd, vd);
+ vfp_store_reg32(fd, vd);

if (veclen == 0) {
break;
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
veclen--;
vd = vfp_advance_sreg(vd, delta_d);
vn = vfp_advance_sreg(vn, delta_d);
- neon_load_reg32(f0, vn);
+ vfp_load_reg32(f0, vn);
if (delta_m) {
vm = vfp_advance_sreg(vm, delta_m);
- neon_load_reg32(f1, vm);
+ vfp_load_reg32(f1, vm);
}
}

@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
fd = tcg_temp_new_i32();
fpst = fpstatus_ptr(FPST_FPCR_F16);

- neon_load_reg32(f0, vn);
- neon_load_reg32(f1, vm);
+ vfp_load_reg32(f0, vn);
+ vfp_load_reg32(f1, vm);

if (reads_vd) {
- neon_load_reg32(fd, vd);
+ vfp_load_reg32(fd, vd);
}
fn(fd, f0, f1, fpst);
- neon_store_reg32(fd, vd);
+ vfp_store_reg32(fd, vd);

tcg_temp_free_i32(f0);
tcg_temp_free_i32(f1);
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
f0 = tcg_temp_new_i32();
fd = tcg_temp_new_i32();

- neon_load_reg32(f0, vm);
+ vfp_load_reg32(f0, vm);

for (;;) {
fn(fd, f0);
- neon_store_reg32(fd, vd);
+ vfp_store_reg32(fd, vd);

if (veclen == 0) {
break;
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
/* single source one-many */
while (veclen--) {
vd = vfp_advance_sreg(vd, delta_d);
- neon_store_reg32(fd, vd);
+ vfp_store_reg32(fd, vd);
}
break;
}
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
veclen--;
vd = vfp_advance_sreg(vd, delta_d);
vm = vfp_advance_sreg(vm, delta_m);
- neon_load_reg32(f0, vm);
+ vfp_load_reg32(f0, vm);
}

tcg_temp_free_i32(f0);
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
}

f0 = tcg_temp_new_i32();
- neon_load_reg32(f0, vm);
+ vfp_load_reg32(f0, vm);
fn(f0, f0);
- neon_store_reg32(f0, vd);
+ vfp_store_reg32(f0, vd);
tcg_temp_free_i32(f0);

return true;
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
vm = tcg_temp_new_i32();
vd = tcg_temp_new_i32();

- neon_load_reg32(vn, a->vn);
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vn, a->vn);
+ vfp_load_reg32(vm, a->vm);
if (neg_n) {
/* VFNMS, VFMS */
gen_helper_vfp_negh(vn, vn);
}
- neon_load_reg32(vd, a->vd);
+ vfp_load_reg32(vd, a->vd);
if (neg_d) {
/* VFNMA, VFNMS */
gen_helper_vfp_negh(vd, vd);
}
fpst = fpstatus_ptr(FPST_FPCR_F16);
gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
- neon_store_reg32(vd, a->vd);
+ vfp_store_reg32(vd, a->vd);

tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(vn);
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
vm = tcg_temp_new_i32();
vd = tcg_temp_new_i32();

- neon_load_reg32(vn, a->vn);
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vn, a->vn);
+ vfp_load_reg32(vm, a->vm);
if (neg_n) {
/* VFNMS, VFMS */
gen_helper_vfp_negs(vn, vn);
}
- neon_load_reg32(vd, a->vd);
+ vfp_load_reg32(vd, a->vd);
if (neg_d) {
/* VFNMA, VFNMS */
gen_helper_vfp_negs(vd, vd);
}
fpst = fpstatus_ptr(FPST_FPCR);
gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
- neon_store_reg32(vd, a->vd);
+ vfp_store_reg32(vd, a->vd);

tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(vn);
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
}

fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
- neon_store_reg32(fd, a->vd);
+ vfp_store_reg32(fd, a->vd);
tcg_temp_free_i32(fd);
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

for (;;) {
- neon_store_reg32(fd, vd);
+ vfp_store_reg32(fd, vd);

if (veclen == 0) {
break;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
vd = tcg_temp_new_i32();
vm = tcg_temp_new_i32();

- neon_load_reg32(vd, a->vd);
+ vfp_load_reg32(vd, a->vd);
if (a->z) {
tcg_gen_movi_i32(vm, 0);
} else {
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vm, a->vm);
}

if (a->e) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
vd = tcg_temp_new_i32();
vm = tcg_temp_new_i32();

- neon_load_reg32(vd, a->vd);
+ vfp_load_reg32(vd, a->vd);
if (a->z) {
tcg_gen_movi_i32(vm, 0);
} else {
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vm, a->vm);
}

if (a->e) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
/* The T bit tells us if we want the low or high 16 bits of Vm */
tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
tcg_temp_free_i32(ahp_mode);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
ahp_mode = get_ahp_flag();
tmp = tcg_temp_new_i32();

- neon_load_reg32(tmp, a->vm);
+ vfp_load_reg32(tmp, a->vm);
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
tcg_temp_free_i32(ahp_mode);
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
}

tmp = tcg_temp_new_i32();
- neon_load_reg32(tmp, a->vm);
+ vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR_F16);
gen_helper_rinth(tmp, tmp, fpst);
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(tmp);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
}

tmp = tcg_temp_new_i32();
- neon_load_reg32(tmp, a->vm);
+ vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
gen_helper_rints(tmp, tmp, fpst);
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(tmp);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
}

tmp = tcg_temp_new_i32();
- neon_load_reg32(tmp, a->vm);
+ vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR_F16);
tcg_rmode = tcg_const_i32(float_round_to_zero);
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
gen_helper_rinth(tmp, tmp, fpst);
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(tcg_rmode);
tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
}

tmp = tcg_temp_new_i32();
- neon_load_reg32(tmp, a->vm);
+ vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
tcg_rmode = tcg_const_i32(float_round_to_zero);
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
gen_helper_rints(tmp, tmp, fpst);
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(tcg_rmode);
tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
}

tmp = tcg_temp_new_i32();
- neon_load_reg32(tmp, a->vm);
+ vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR_F16);
gen_helper_rinth_exact(tmp, tmp, fpst);
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(tmp);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
}

tmp = tcg_temp_new_i32();
- neon_load_reg32(tmp, a->vm);
+ vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
gen_helper_rints_exact(tmp, tmp, fpst);
- neon_store_reg32(tmp, a->vd);
+ vfp_store_reg32(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(tmp);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)

vm = tcg_temp_new_i32();
vd = tcg_temp_new_i64();
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vm, a->vm);
gen_helper_vfp_fcvtds(vd, vm, cpu_env);
neon_store_reg64(vd, a->vd);
tcg_temp_free_i32(vm);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
vm = tcg_temp_new_i64();
neon_load_reg64(vm, a->vm);
gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
- neon_store_reg32(vd, a->vd);
+ vfp_store_reg32(vd, a->vd);
tcg_temp_free_i32(vd);
tcg_temp_free_i64(vm);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
}

vm = tcg_temp_new_i32();
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vm, a->vm);
fpst = fpstatus_ptr(FPST_FPCR_F16);
if (a->s) {
/* i32 -> f16 */
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
/* u32 -> f16 */
gen_helper_vfp_uitoh(vm, vm, fpst);
}
- neon_store_reg32(vm, a->vd);
+ vfp_store_reg32(vm, a->vd);
tcg_temp_free_i32(vm);
tcg_temp_free_ptr(fpst);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
}

vm = tcg_temp_new_i32();
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vm, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
if (a->s) {
/* i32 -> f32 */
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
/* u32 -> f32 */
gen_helper_vfp_uitos(vm, vm, fpst);
}
- neon_store_reg32(vm, a->vd);
+ vfp_store_reg32(vm, a->vd);
tcg_temp_free_i32(vm);
tcg_temp_free_ptr(fpst);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)

vm = tcg_temp_new_i32();
vd = tcg_temp_new_i64();
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vm, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
if (a->s) {
/* i32 -> f64 */
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
vd = tcg_temp_new_i32();
neon_load_reg64(vm, a->vm);
gen_helper_vjcvt(vd, vm, cpu_env);
- neon_store_reg32(vd, a->vd);
+ vfp_store_reg32(vd, a->vd);
tcg_temp_free_i64(vm);
tcg_temp_free_i32(vd);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

vd = tcg_temp_new_i32();
- neon_load_reg32(vd, a->vd);
+ vfp_load_reg32(vd, a->vd);

fpst = fpstatus_ptr(FPST_FPCR_F16);
shift = tcg_const_i32(frac_bits);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
g_assert_not_reached();
}

- neon_store_reg32(vd, a->vd);
+ vfp_store_reg32(vd, a->vd);
tcg_temp_free_i32(vd);
tcg_temp_free_i32(shift);
tcg_temp_free_ptr(fpst);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

vd = tcg_temp_new_i32();
- neon_load_reg32(vd, a->vd);
+ vfp_load_reg32(vd, a->vd);

fpst = fpstatus_ptr(FPST_FPCR);
shift = tcg_const_i32(frac_bits);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
g_assert_not_reached();
}

- neon_store_reg32(vd, a->vd);
+ vfp_store_reg32(vd, a->vd);
tcg_temp_free_i32(vd);
tcg_temp_free_i32(shift);
tcg_temp_free_ptr(fpst);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)

fpst = fpstatus_ptr(FPST_FPCR_F16);
vm = tcg_temp_new_i32();
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vm, a->vm);

if (a->s) {
if (a->rz) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
gen_helper_vfp_touih(vm, vm, fpst);
}
}
- neon_store_reg32(vm, a->vd);
+ vfp_store_reg32(vm, a->vd);
tcg_temp_free_i32(vm);
tcg_temp_free_ptr(fpst);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)

fpst = fpstatus_ptr(FPST_FPCR);
vm = tcg_temp_new_i32();
- neon_load_reg32(vm, a->vm);
+ vfp_load_reg32(vm, a->vm);

if (a->s) {
if (a->rz) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
gen_helper_vfp_touis(vm, vm, fpst);
}
}
- neon_store_reg32(vm, a->vd);
+ vfp_store_reg32(vm, a->vd);
tcg_temp_free_i32(vm);
tcg_temp_free_ptr(fpst);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
gen_helper_vfp_touid(vd, vm, fpst);
}
}
- neon_store_reg32(vd, a->vd);
+ vfp_store_reg32(vd, a->vd);
tcg_temp_free_i32(vd);
tcg_temp_free_i64(vm);
tcg_temp_free_ptr(fpst);
@@ -XXX,XX +XXX,XX @@ static bool trans_VINS(DisasContext *s, arg_VINS *a)
/* Insert low half of Vm into high half of Vd */
rm = tcg_temp_new_i32();
rd = tcg_temp_new_i32();
- neon_load_reg32(rm, a->vm);
- neon_load_reg32(rd, a->vd);
+ vfp_load_reg32(rm, a->vm);
+ vfp_load_reg32(rd, a->vd);
tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
- neon_store_reg32(rd, a->vd);
+ vfp_store_reg32(rd, a->vd);
tcg_temp_free_i32(rm);
tcg_temp_free_i32(rd);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOVX(DisasContext *s, arg_VINS *a)

/* Set Vd to high half of Vm */
rm = tcg_temp_new_i32();
- neon_load_reg32(rm, a->vm);
+ vfp_load_reg32(rm, a->vm);
tcg_gen_shri_i32(rm, rm, 16);
- neon_store_reg32(rm, a->vd);
+ vfp_store_reg32(rm, a->vd);
tcg_temp_free_i32(rm);
return true;
}
--
2.20.1


return true;
}

+static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ neon_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_rinth(tmp, tmp, fpst);
+ neon_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
TCGv_ptr fpst;
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
return true;
}

+static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+ TCGv_i32 tcg_rmode;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ neon_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ tcg_rmode = tcg_const_i32(float_round_to_zero);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_helper_rinth(tmp, tmp, fpst);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ neon_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
TCGv_ptr fpst;
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
return true;
}

+static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ neon_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_rinth_exact(tmp, tmp, fpst);
+ neon_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
TCGv_ptr fpst;
--
2.20.1
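[Editor's note: as background for the VRINTZ_hp handling above: under
float_round_to_zero the rounding is a pure truncation of the fractional
mantissa bits. A minimal standalone C sketch of that bit-level operation
(an illustration only, not the QEMU implementation; NaN quieting and
exception flags are deliberately omitted):]

    #include <stdint.h>
    #include <stdio.h>

    /* Round an IEEE binary16 value toward zero by clearing the
     * fractional mantissa bits (what gen_helper_rinth computes when
     * the rounding mode is float_round_to_zero). */
    static uint16_t f16_round_to_zero(uint16_t h)
    {
        int exp = ((h >> 10) & 0x1f) - 15;        /* unbiased exponent */
        if (exp < 0) {
            return h & 0x8000;                    /* |x| < 1 -> signed zero */
        }
        if (exp >= 10) {
            return h;                             /* already integral (or Inf/NaN) */
        }
        return h & ~((1u << (10 - exp)) - 1);     /* clear fraction bits */
    }

    int main(void)
    {
        printf("0x%04x\n", f16_round_to_zero(0x3e66)); /* 1.6 -> 0x3c00 (1.0) */
        return 0;
    }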
Convert the Neon float-integer VCVT insns to gvec, and use this
to implement fp16 support for them.

Note that unlike the VFP int<->fp16 VCVT insns we converted
earlier and which convert to/from a 32-bit integer, these
Neon insns convert to/from 16-bit integers. So we can use
the existing vfp conversion helpers for the f32<->u32/i32
case but need to provide our own for f16<->u16/i16.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-37-peter.maydell@linaro.org
---
target/arm/helper.h | 9 +++++++++
target/arm/vec_helper.c | 29 +++++++++++++++++++++++++++++
target/arm/translate-neon.c.inc | 15 ++++-----------
3 files changed, 42 insertions(+), 11 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(neon_padds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(neon_pmaxs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(neon_pmins, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ static uint32_t float32_acgt(float32 op1, float32 op2, float_status *stat)
return -float32_lt(float32_abs(op2), float32_abs(op1), stat);
}

+static int16_t vfp_tosszh(float16 x, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ if (float16_is_any_nan(x)) {
+ float_raise(float_flag_invalid, fpst);
+ return 0;
+ }
+ return float16_to_int16_round_to_zero(x, fpst);
+}
+
+static uint16_t vfp_touszh(float16 x, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ if (float16_is_any_nan(x)) {
+ float_raise(float_flag_invalid, fpst);
+ return 0;
+ }
+ return float16_to_uint16_round_to_zero(x, fpst);
+}
+
#define DO_2OP(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
{ \
@@ -XXX,XX +XXX,XX @@ DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)

+DO_2OP(gvec_sitos, helper_vfp_sitos, int32_t)
+DO_2OP(gvec_uitos, helper_vfp_uitos, uint32_t)
+DO_2OP(gvec_tosizs, helper_vfp_tosizs, float32)
+DO_2OP(gvec_touizs, helper_vfp_touizs, float32)
+DO_2OP(gvec_sstoh, int16_to_float16, int16_t)
+DO_2OP(gvec_ustoh, uint16_to_float16, uint16_t)
+DO_2OP(gvec_tosszh, vfp_tosszh, float16)
+DO_2OP(gvec_touszh, vfp_touszh, float16)
+
#define WRAP_CMP0_FWD(FN, CMPOP, TYPE) \
static TYPE TYPE##_##FN##0(TYPE op, float_status *stat) \
{ \
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_2misc_fp(DisasContext *s, arg_2misc *a,
return true;
}

-#define DO_2MISC_FP(INSN, FUNC) \
- static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
- { \
- return do_2misc_fp(s, a, FUNC); \
- }
-
-DO_2MISC_FP(VCVT_FS, gen_helper_vfp_sitos)
-DO_2MISC_FP(VCVT_FU, gen_helper_vfp_uitos)
-DO_2MISC_FP(VCVT_SF, gen_helper_vfp_tosizs)
-DO_2MISC_FP(VCVT_UF, gen_helper_vfp_touizs)
-
#define DO_2MISC_FP_VEC(INSN, HFUNC, SFUNC) \
static void gen_##INSN(unsigned vece, uint32_t rd_ofs, \
uint32_t rm_ofs, \
@@ -XXX,XX +XXX,XX @@ DO_2MISC_FP_VEC(VCGE0_F, gen_helper_gvec_fcge0_h, gen_helper_gvec_fcge0_s)
DO_2MISC_FP_VEC(VCEQ0_F, gen_helper_gvec_fceq0_h, gen_helper_gvec_fceq0_s)
DO_2MISC_FP_VEC(VCLT0_F, gen_helper_gvec_fclt0_h, gen_helper_gvec_fclt0_s)
DO_2MISC_FP_VEC(VCLE0_F, gen_helper_gvec_fcle0_h, gen_helper_gvec_fcle0_s)
+DO_2MISC_FP_VEC(VCVT_FS, gen_helper_gvec_sstoh, gen_helper_gvec_sitos)
+DO_2MISC_FP_VEC(VCVT_FU, gen_helper_gvec_ustoh, gen_helper_gvec_uitos)
+DO_2MISC_FP_VEC(VCVT_SF, gen_helper_gvec_tosszh, gen_helper_gvec_tosizs)
+DO_2MISC_FP_VEC(VCVT_UF, gen_helper_gvec_touszh, gen_helper_gvec_touizs)

static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
{
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

Replace all uses of neon_load/store_reg64 within translate-neon.c.inc.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-9-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 26 +++++++++
target/arm/translate-neon.c.inc | 94 ++++++++++++++++-----------------
2 files changed, 73 insertions(+), 47 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
}
}

+static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
+{
+ long off = neon_element_offset(reg, ele, memop);
+
+ switch (memop) {
+ case MO_Q:
+ tcg_gen_ld_i64(dest, cpu_env, off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
{
long off = neon_element_offset(reg, ele, memop);
@@ -XXX,XX +XXX,XX @@ static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
}
}

+static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
+{
+ long off = neon_element_offset(reg, ele, memop);
+
+ switch (memop) {
+ case MO_64:
+ tcg_gen_st_i64(src, cpu_env, off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
TCGv_ptr ret = tcg_temp_new_ptr();
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
for (pass = 0; pass < a->q + 1; pass++) {
TCGv_i64 tmp = tcg_temp_new_i64();

- neon_load_reg64(tmp, a->vm + pass);
+ read_neon_element64(tmp, a->vm, pass, MO_64);
fn(tmp, cpu_env, tmp, constimm);
- neon_store_reg64(tmp, a->vd + pass);
+ write_neon_element64(tmp, a->vd, pass, MO_64);
tcg_temp_free_i64(tmp);
}
tcg_temp_free_i64(constimm);
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
rd = tcg_temp_new_i32();

/* Load both inputs first to avoid potential overwrite if rm == rd */
- neon_load_reg64(rm1, a->vm);
- neon_load_reg64(rm2, a->vm + 1);
+ read_neon_element64(rm1, a->vm, 0, MO_64);
+ read_neon_element64(rm2, a->vm, 1, MO_64);

shiftfn(rm1, rm1, constimm);
narrowfn(rd, cpu_env, rm1);
@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
tcg_gen_shli_i64(tmp, tmp, a->shift);
tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
}
- neon_store_reg64(tmp, a->vd);
+ write_neon_element64(tmp, a->vd, 0, MO_64);

widenfn(tmp, rm1);
tcg_temp_free_i32(rm1);
@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
tcg_gen_shli_i64(tmp, tmp, a->shift);
tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
}
- neon_store_reg64(tmp, a->vd + 1);
+ write_neon_element64(tmp, a->vd, 1, MO_64);
tcg_temp_free_i64(tmp);
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
rm_64 = tcg_temp_new_i64();

if (src1_wide) {
- neon_load_reg64(rn0_64, a->vn);
+ read_neon_element64(rn0_64, a->vn, 0, MO_64);
} else {
TCGv_i32 tmp = tcg_temp_new_i32();
read_neon_element32(tmp, a->vn, 0, MO_32);
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
* avoid incorrect results if a narrow input overlaps with the result.
*/
if (src1_wide) {
- neon_load_reg64(rn1_64, a->vn + 1);
+ read_neon_element64(rn1_64, a->vn, 1, MO_64);
} else {
TCGv_i32 tmp = tcg_temp_new_i32();
read_neon_element32(tmp, a->vn, 1, MO_32);
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
rm = tcg_temp_new_i32();
read_neon_element32(rm, a->vm, 1, MO_32);

- neon_store_reg64(rn0_64, a->vd);
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);

widenfn(rm_64, rm);
tcg_temp_free_i32(rm);
opfn(rn1_64, rn1_64, rm_64);
- neon_store_reg64(rn1_64, a->vd + 1);
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);

tcg_temp_free_i64(rn0_64);
tcg_temp_free_i64(rn1_64);
@@ -XXX,XX +XXX,XX @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
rd0 = tcg_temp_new_i32();
rd1 = tcg_temp_new_i32();

- neon_load_reg64(rn_64, a->vn);
- neon_load_reg64(rm_64, a->vm);
+ read_neon_element64(rn_64, a->vn, 0, MO_64);
+ read_neon_element64(rm_64, a->vm, 0, MO_64);

opfn(rn_64, rn_64, rm_64);

narrowfn(rd0, rn_64);

- neon_load_reg64(rn_64, a->vn + 1);
- neon_load_reg64(rm_64, a->vm + 1);
+ read_neon_element64(rn_64, a->vn, 1, MO_64);
+ read_neon_element64(rm_64, a->vm, 1, MO_64);

opfn(rn_64, rn_64, rm_64);

@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
/* Don't store results until after all loads: they might overlap */
if (accfn) {
tmp = tcg_temp_new_i64();
- neon_load_reg64(tmp, a->vd);
+ read_neon_element64(tmp, a->vd, 0, MO_64);
accfn(tmp, tmp, rd0);
- neon_store_reg64(tmp, a->vd);
- neon_load_reg64(tmp, a->vd + 1);
+ write_neon_element64(tmp, a->vd, 0, MO_64);
+ read_neon_element64(tmp, a->vd, 1, MO_64);
accfn(tmp, tmp, rd1);
- neon_store_reg64(tmp, a->vd + 1);
+ write_neon_element64(tmp, a->vd, 1, MO_64);
tcg_temp_free_i64(tmp);
} else {
- neon_store_reg64(rd0, a->vd);
- neon_store_reg64(rd1, a->vd + 1);
+ write_neon_element64(rd0, a->vd, 0, MO_64);
+ write_neon_element64(rd1, a->vd, 1, MO_64);
}

tcg_temp_free_i64(rd0);
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,

if (accfn) {
TCGv_i64 t64 = tcg_temp_new_i64();
- neon_load_reg64(t64, a->vd);
+ read_neon_element64(t64, a->vd, 0, MO_64);
accfn(t64, t64, rn0_64);
- neon_store_reg64(t64, a->vd);
- neon_load_reg64(t64, a->vd + 1);
+ write_neon_element64(t64, a->vd, 0, MO_64);
+ read_neon_element64(t64, a->vd, 1, MO_64);
accfn(t64, t64, rn1_64);
- neon_store_reg64(t64, a->vd + 1);
+ write_neon_element64(t64, a->vd, 1, MO_64);
tcg_temp_free_i64(t64);
} else {
- neon_store_reg64(rn0_64, a->vd);
- neon_store_reg64(rn1_64, a->vd + 1);
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
}
tcg_temp_free_i64(rn0_64);
tcg_temp_free_i64(rn1_64);
@@ -XXX,XX +XXX,XX @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
right = tcg_temp_new_i64();
dest = tcg_temp_new_i64();

- neon_load_reg64(right, a->vn);
- neon_load_reg64(left, a->vm);
+ read_neon_element64(right, a->vn, 0, MO_64);
+ read_neon_element64(left, a->vm, 0, MO_64);
tcg_gen_extract2_i64(dest, right, left, a->imm * 8);
- neon_store_reg64(dest, a->vd);
+ write_neon_element64(dest, a->vd, 0, MO_64);

tcg_temp_free_i64(left);
tcg_temp_free_i64(right);
@@ -XXX,XX +XXX,XX @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
destright = tcg_temp_new_i64();

if (a->imm < 8) {
- neon_load_reg64(right, a->vn);
- neon_load_reg64(middle, a->vn + 1);
+ read_neon_element64(right, a->vn, 0, MO_64);
+ read_neon_element64(middle, a->vn, 1, MO_64);
tcg_gen_extract2_i64(destright, right, middle, a->imm * 8);
- neon_load_reg64(left, a->vm);
+ read_neon_element64(left, a->vm, 0, MO_64);
tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8);
} else {
- neon_load_reg64(right, a->vn + 1);
- neon_load_reg64(middle, a->vm);
+ read_neon_element64(right, a->vn, 1, MO_64);
+ read_neon_element64(middle, a->vm, 0, MO_64);
tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8);
- neon_load_reg64(left, a->vm + 1);
+ read_neon_element64(left, a->vm, 1, MO_64);
tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8);
}

- neon_store_reg64(destright, a->vd);
- neon_store_reg64(destleft, a->vd + 1);
+ write_neon_element64(destright, a->vd, 0, MO_64);
+ write_neon_element64(destleft, a->vd, 1, MO_64);

tcg_temp_free_i64(destright);
tcg_temp_free_i64(destleft);
@@ -XXX,XX +XXX,XX @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,

if (accfn) {
TCGv_i64 tmp64 = tcg_temp_new_i64();
- neon_load_reg64(tmp64, a->vd + pass);
+ read_neon_element64(tmp64, a->vd, pass, MO_64);
accfn(rd_64, tmp64, rd_64);
tcg_temp_free_i64(tmp64);
}
- neon_store_reg64(rd_64, a->vd + pass);
+ write_neon_element64(rd_64, a->vd, pass, MO_64);
tcg_temp_free_i64(rd_64);
}
return true;
@@ -XXX,XX +XXX,XX @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
rd0 = tcg_temp_new_i32();
rd1 = tcg_temp_new_i32();

- neon_load_reg64(rm, a->vm);
+ read_neon_element64(rm, a->vm, 0, MO_64);
narrowfn(rd0, cpu_env, rm);
- neon_load_reg64(rm, a->vm + 1);
+ read_neon_element64(rm, a->vm, 1, MO_64);
narrowfn(rd1, cpu_env, rm);
write_neon_element32(rd0, a->vd, 0, MO_32);
write_neon_element32(rd1, a->vd, 1, MO_32);
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)

widenfn(rd, rm0);
tcg_gen_shli_i64(rd, rd, 8 << a->size);
- neon_store_reg64(rd, a->vd);
+ write_neon_element64(rd, a->vd, 0, MO_64);
widenfn(rd, rm1);
tcg_gen_shli_i64(rd, rd, 8 << a->size);
- neon_store_reg64(rd, a->vd + 1);
+ write_neon_element64(rd, a->vd, 1, MO_64);

tcg_temp_free_i64(rd);
tcg_temp_free_i32(rm0);
@@ -XXX,XX +XXX,XX @@ static bool trans_VSWP(DisasContext *s, arg_2misc *a)
rm = tcg_temp_new_i64();
rd = tcg_temp_new_i64();
for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
- neon_load_reg64(rm, a->vm + pass);
- neon_load_reg64(rd, a->vd + pass);
- neon_store_reg64(rm, a->vd + pass);
- neon_store_reg64(rd, a->vm + pass);
+ read_neon_element64(rm, a->vm, pass, MO_64);
+ read_neon_element64(rd, a->vd, pass, MO_64);
+ write_neon_element64(rm, a->vd, pass, MO_64);
+ write_neon_element64(rd, a->vm, pass, MO_64);
}
tcg_temp_free_i64(rm);
tcg_temp_free_i64(rd);
--
2.20.1
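[Editor's note: on why the patch above must provide its own f16<->u16/i16
helpers rather than reuse C casts: plain C float-to-unsigned conversion of
a NaN is undefined behaviour, while the Arm rule is "raise Invalid, return
zero", and out-of-range values must saturate. A standalone C sketch of
that semantic, using float as a stand-in for float16 (illustration only,
not the QEMU code, which goes through softfloat and a float_status):]

    #include <stdint.h>
    #include <stdio.h>

    /* Convert float to u16 with Arm FCVTZU-style semantics:
     * NaN -> 0 (the real helper also raises float_flag_invalid),
     * out-of-range values saturate, in-range values truncate
     * (C truncation == round-to-zero). */
    static uint16_t sat_f32_to_u16(float x)
    {
        if (x != x) {            /* NaN */
            return 0;
        }
        if (x <= 0.0f) {
            return 0;
        }
        if (x >= 65535.0f) {
            return 65535;
        }
        return (uint16_t)x;
    }

    int main(void)
    {
        printf("%u\n", sat_f32_to_u16(0.0f / 0.0f)); /* prints 0 */
        return 0;
    }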
Implement VFP fp16 for VABS, VNEG and VSQRT. This is all
the fp16 insns that use the DO_VFP_2OP macro, because there
is no fp16 version of VMOV_reg.

Notes:
* the gen_helper_vfp_negh already exists as we needed to create
it for the fp16 multiply-add insns
* as usual we need to use the f16 version of the fp_status;
this is only relevant for VSQRT

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-9-peter.maydell@linaro.org
---
target/arm/helper.h | 2 ++
target/arm/vfp.decode | 3 +++
target/arm/vfp_helper.c | 10 +++++++++
target/arm/translate-vfp.c.inc | 40 ++++++++++++++++++++++++++++++++++
4 files changed, 55 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr)
DEF_HELPER_1(vfp_negh, f16, f16)
DEF_HELPER_1(vfp_negs, f32, f32)
DEF_HELPER_1(vfp_negd, f64, f64)
+DEF_HELPER_1(vfp_absh, f16, f16)
DEF_HELPER_1(vfp_abss, f32, f32)
DEF_HELPER_1(vfp_absd, f64, f64)
+DEF_HELPER_2(vfp_sqrth, f16, f16, env)
DEF_HELPER_2(vfp_sqrts, f32, f32, env)
DEF_HELPER_2(vfp_sqrtd, f64, f64, env)
DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMOV_imm_dp ---- 1110 1.11 .... .... 1011 0000 .... \
VMOV_reg_sp ---- 1110 1.11 0000 .... 1010 01.0 .... @vfp_dm_ss
VMOV_reg_dp ---- 1110 1.11 0000 .... 1011 01.0 .... @vfp_dm_dd

+VABS_hp ---- 1110 1.11 0000 .... 1001 11.0 .... @vfp_dm_ss
VABS_sp ---- 1110 1.11 0000 .... 1010 11.0 .... @vfp_dm_ss
VABS_dp ---- 1110 1.11 0000 .... 1011 11.0 .... @vfp_dm_dd

+VNEG_hp ---- 1110 1.11 0001 .... 1001 01.0 .... @vfp_dm_ss
VNEG_sp ---- 1110 1.11 0001 .... 1010 01.0 .... @vfp_dm_ss
VNEG_dp ---- 1110 1.11 0001 .... 1011 01.0 .... @vfp_dm_dd

+VSQRT_hp ---- 1110 1.11 0001 .... 1001 11.0 .... @vfp_dm_ss
VSQRT_sp ---- 1110 1.11 0001 .... 1010 11.0 .... @vfp_dm_ss
VSQRT_dp ---- 1110 1.11 0001 .... 1011 11.0 .... @vfp_dm_dd

diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ float64 VFP_HELPER(neg, d)(float64 a)
return float64_chs(a);
}

+dh_ctype_f16 VFP_HELPER(abs, h)(dh_ctype_f16 a)
+{
+ return float16_abs(a);
+}
+
float32 VFP_HELPER(abs, s)(float32 a)
{
return float32_abs(a);
@@ -XXX,XX +XXX,XX @@ float64 VFP_HELPER(abs, d)(float64 a)
return float64_abs(a);
}

+dh_ctype_f16 VFP_HELPER(sqrt, h)(dh_ctype_f16 a, CPUARMState *env)
+{
+ return float16_sqrt(a, &env->vfp.fp_status_f16);
+}
+
float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
return float32_sqrt(a, &env->vfp.fp_status);
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
return true;
}

+static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
+{
+ /*
+ * Do a half-precision operation. Functionally this is
+ * the same as do_vfp_2op_sp(), except:
+ * - it doesn't need the VFP vector handling (fp16 is a
+ * v8 feature, and in v8 VFP vectors don't exist)
+ * - it does the aa32_fp16_arith feature test
+ */
+ TCGv_i32 f0;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ f0 = tcg_temp_new_i32();
+ neon_load_reg32(f0, vm);
+ fn(f0, f0);
+ neon_store_reg32(f0, vd);
+ tcg_temp_free_i32(f0);
+
+ return true;
+}
+
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
uint32_t delta_m = 0;
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
DO_VFP_2OP(VMOV_reg, sp, tcg_gen_mov_i32)
DO_VFP_2OP(VMOV_reg, dp, tcg_gen_mov_i64)

+DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh)
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss)
DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd)

+DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh)
DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs)
DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd)

+static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
+{
+ gen_helper_vfp_sqrth(vd, vm, cpu_env);
+}
+
static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
gen_helper_vfp_sqrts(vd, vm, cpu_env);
@@ -XXX,XX +XXX,XX @@ static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

+DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp)
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp)
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp)

--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

The only uses of this function are for loading VFP
double-precision values, and nothing to do with NEON.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-10-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 8 ++--
target/arm/translate-vfp.c.inc | 84 +++++++++++++++++-----------------
2 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static long vfp_reg_offset(bool dp, unsigned reg)
}
}

-static inline void neon_load_reg64(TCGv_i64 var, int reg)
+static inline void vfp_load_reg64(TCGv_i64 var, int reg)
{
- tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
+ tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

-static inline void neon_store_reg64(TCGv_i64 var, int reg)
+static inline void vfp_store_reg64(TCGv_i64 var, int reg)
{
- tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
+ tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_load_reg32(TCGv_i32 var, int reg)
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
tcg_gen_ext_i32_i64(nf, cpu_NF);
tcg_gen_ext_i32_i64(vf, cpu_VF);

- neon_load_reg64(frn, rn);
- neon_load_reg64(frm, rm);
+ vfp_load_reg64(frn, rn);
+ vfp_load_reg64(frm, rm);
switch (a->cc) {
case 0: /* eq: Z */
tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
tcg_temp_free_i64(tmp);
break;
}
- neon_store_reg64(dest, rd);
+ vfp_store_reg64(dest, rd);
tcg_temp_free_i64(frn);
tcg_temp_free_i64(frm);
tcg_temp_free_i64(dest);
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
TCGv_i64 tcg_res;
tcg_op = tcg_temp_new_i64();
tcg_res = tcg_temp_new_i64();
- neon_load_reg64(tcg_op, rm);
+ vfp_load_reg64(tcg_op, rm);
gen_helper_rintd(tcg_res, tcg_op, fpst);
- neon_store_reg64(tcg_res, rd);
+ vfp_store_reg64(tcg_res, rd);
tcg_temp_free_i64(tcg_op);
tcg_temp_free_i64(tcg_res);
} else {
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
tcg_double = tcg_temp_new_i64();
tcg_res = tcg_temp_new_i64();
tcg_tmp = tcg_temp_new_i32();
- neon_load_reg64(tcg_double, rm);
+ vfp_load_reg64(tcg_double, rm);
if (is_signed) {
gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
} else {
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
tmp = tcg_temp_new_i64();
if (a->l) {
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
- neon_store_reg64(tmp, a->vd);
+ vfp_store_reg64(tmp, a->vd);
} else {
- neon_load_reg64(tmp, a->vd);
+ vfp_load_reg64(tmp, a->vd);
gen_aa32_st64(s, tmp, addr, get_mem_index(s));
}
tcg_temp_free_i64(tmp);
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
if (a->l) {
/* load */
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
- neon_store_reg64(tmp, a->vd + i);
+ vfp_store_reg64(tmp, a->vd + i);
} else {
/* store */
- neon_load_reg64(tmp, a->vd + i);
+ vfp_load_reg64(tmp, a->vd + i);
gen_aa32_st64(s, tmp, addr, get_mem_index(s));
}
tcg_gen_addi_i32(addr, addr, offset);
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
fd = tcg_temp_new_i64();
fpst = fpstatus_ptr(FPST_FPCR);

- neon_load_reg64(f0, vn);
- neon_load_reg64(f1, vm);
+ vfp_load_reg64(f0, vn);
+ vfp_load_reg64(f1, vm);

for (;;) {
if (reads_vd) {
- neon_load_reg64(fd, vd);
+ vfp_load_reg64(fd, vd);
}
fn(fd, f0, f1, fpst);
- neon_store_reg64(fd, vd);
+ vfp_store_reg64(fd, vd);

if (veclen == 0) {
break;
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
veclen--;
vd = vfp_advance_dreg(vd, delta_d);
vn = vfp_advance_dreg(vn, delta_d);
- neon_load_reg64(f0, vn);
+ vfp_load_reg64(f0, vn);
if (delta_m) {
vm = vfp_advance_dreg(vm, delta_m);
- neon_load_reg64(f1, vm);
+ vfp_load_reg64(f1, vm);
}
}

@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
f0 = tcg_temp_new_i64();
fd = tcg_temp_new_i64();

- neon_load_reg64(f0, vm);
+ vfp_load_reg64(f0, vm);

for (;;) {
fn(fd, f0);
- neon_store_reg64(fd, vd);
+ vfp_store_reg64(fd, vd);

if (veclen == 0) {
break;
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
/* single source one-many */
while (veclen--) {
vd = vfp_advance_dreg(vd, delta_d);
- neon_store_reg64(fd, vd);
+ vfp_store_reg64(fd, vd);
}
break;
}
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
veclen--;
vd = vfp_advance_dreg(vd, delta_d);
vd = vfp_advance_dreg(vm, delta_m);
- neon_load_reg64(f0, vm);
+ vfp_load_reg64(f0, vm);
}

tcg_temp_free_i64(f0);
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
vm = tcg_temp_new_i64();
vd = tcg_temp_new_i64();

- neon_load_reg64(vn, a->vn);
- neon_load_reg64(vm, a->vm);
+ vfp_load_reg64(vn, a->vn);
+ vfp_load_reg64(vm, a->vm);
if (neg_n) {
/* VFNMS, VFMS */
gen_helper_vfp_negd(vn, vn);
}
- neon_load_reg64(vd, a->vd);
+ vfp_load_reg64(vd, a->vd);
if (neg_d) {
/* VFNMA, VFNMS */
gen_helper_vfp_negd(vd, vd);
}
fpst = fpstatus_ptr(FPST_FPCR);
gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
- neon_store_reg64(vd, a->vd);
+ vfp_store_reg64(vd, a->vd);

tcg_temp_free_ptr(fpst);
tcg_temp_free_i64(vn);
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

for (;;) {
- neon_store_reg64(fd, vd);
+ vfp_store_reg64(fd, vd);

if (veclen == 0) {
break;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
vd = tcg_temp_new_i64();
vm = tcg_temp_new_i64();

- neon_load_reg64(vd, a->vd);
+ vfp_load_reg64(vd, a->vd);
if (a->z) {
tcg_gen_movi_i64(vm, 0);
} else {
- neon_load_reg64(vm, a->vm);
+ vfp_load_reg64(vm, a->vm);
}

if (a->e) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
vd = tcg_temp_new_i64();
gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
- neon_store_reg64(vd, a->vd);
+ vfp_store_reg64(vd, a->vd);
tcg_temp_free_i32(ahp_mode);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
tmp = tcg_temp_new_i32();
vm = tcg_temp_new_i64();

- neon_load_reg64(vm, a->vm);
+ vfp_load_reg64(vm, a->vm);
gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
tcg_temp_free_i64(vm);
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
}

tmp = tcg_temp_new_i64();
- neon_load_reg64(tmp, a->vm);
+ vfp_load_reg64(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
gen_helper_rintd(tmp, tmp, fpst);
- neon_store_reg64(tmp, a->vd);
+ vfp_store_reg64(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i64(tmp);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
}

tmp = tcg_temp_new_i64();
- neon_load_reg64(tmp, a->vm);
+ vfp_load_reg64(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
tcg_rmode = tcg_const_i32(float_round_to_zero);
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
gen_helper_rintd(tmp, tmp, fpst);
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
- neon_store_reg64(tmp, a->vd);
+ vfp_store_reg64(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i64(tmp);
tcg_temp_free_i32(tcg_rmode);
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
}

tmp = tcg_temp_new_i64();
- neon_load_reg64(tmp, a->vm);
+ vfp_load_reg64(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
gen_helper_rintd_exact(tmp, tmp, fpst);
- neon_store_reg64(tmp, a->vd);
+ vfp_store_reg64(tmp, a->vd);
tcg_temp_free_ptr(fpst);
tcg_temp_free_i64(tmp);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
vd = tcg_temp_new_i64();
vfp_load_reg32(vm, a->vm);
gen_helper_vfp_fcvtds(vd, vm, cpu_env);
- neon_store_reg64(vd, a->vd);
+ vfp_store_reg64(vd, a->vd);
tcg_temp_free_i32(vm);
tcg_temp_free_i64(vd);
return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)

vd = tcg_temp_new_i32();
vm = tcg_temp_new_i64();
- neon_load_reg64(vm, a->vm);
+ vfp_load_reg64(vm, a->vm);
gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
vfp_store_reg32(vd, a->vd);
tcg_temp_free_i32(vd);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
/* u32 -> f64 */
gen_helper_vfp_uitod(vd, vm, fpst);
}
- neon_store_reg64(vd, a->vd);
+ vfp_store_reg64(vd, a->vd);
tcg_temp_free_i32(vm);
tcg_temp_free_i64(vd);
tcg_temp_free_ptr(fpst);
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)

vm = tcg_temp_new_i64();
vd = tcg_temp_new_i32();
- neon_load_reg64(vm, a->vm);
+ vfp_load_reg64(vm, a->vm);
gen_helper_vjcvt(vd, vm, cpu_env);
vfp_store_reg32(vd, a->vd);
tcg_temp_free_i64(vm);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

vd = tcg_temp_new_i64();
- neon_load_reg64(vd, a->vd);
+ vfp_load_reg64(vd, a->vd);

fpst = fpstatus_ptr(FPST_FPCR);
shift = tcg_const_i32(frac_bits);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
g_assert_not_reached();
}

- neon_store_reg64(vd, a->vd);
+ vfp_store_reg64(vd, a->vd);
tcg_temp_free_i64(vd);
tcg_temp_free_i32(shift);
tcg_temp_free_ptr(fpst);
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
fpst = fpstatus_ptr(FPST_FPCR);
vm = tcg_temp_new_i64();
vd = tcg_temp_new_i32();
- neon_load_reg64(vm, a->vm);
+ vfp_load_reg64(vm, a->vm);

if (a->s) {
if (a->rz) {
--
2.20.1
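[Editor's note: the VABS/VNEG fp16 helpers above need no float_status
argument because, unlike VSQRT, they are pure sign-bit manipulations on
the binary16 encoding. A standalone C sketch of that bit-level view
(illustration only; QEMU's float16_abs/float16_chs are the softfloat
equivalents):]

    #include <stdint.h>
    #include <stdio.h>

    /* binary16: bit 15 is the sign; abs clears it, negate flips it.
     * No rounding or exception state is involved. */
    static uint16_t f16_abs(uint16_t h) { return h & 0x7fff; }
    static uint16_t f16_chs(uint16_t h) { return h ^ 0x8000; }

    int main(void)
    {
        /* |-1.0| = 0x3c00, -(1.0) = 0xbc00 */
        printf("0x%04x 0x%04x\n", f16_abs(0xbc00), f16_chs(0x3c00));
        return 0;
    }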
We already have gvec helpers for floating point VRECPE and
VRSQRTE, so convert the Neon decoder to use them and
add the fp16 support.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-25-peter.maydell@linaro.org
---
target/arm/translate-neon.c.inc | 31 +++++++++++++++++++++++++++++--
1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_2misc_fp(DisasContext *s, arg_2misc *a,
return do_2misc_fp(s, a, FUNC); \
}

-DO_2MISC_FP(VRECPE_F, gen_helper_recpe_f32)
-DO_2MISC_FP(VRSQRTE_F, gen_helper_rsqrte_f32)
DO_2MISC_FP(VCVT_FS, gen_helper_vfp_sitos)
DO_2MISC_FP(VCVT_FU, gen_helper_vfp_uitos)
DO_2MISC_FP(VCVT_SF, gen_helper_vfp_tosizs)
DO_2MISC_FP(VCVT_UF, gen_helper_vfp_touizs)

+#define DO_2MISC_FP_VEC(INSN, HFUNC, SFUNC) \
+ static void gen_##INSN(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ static gen_helper_gvec_2_ptr * const fns[4] = { \
+ NULL, HFUNC, SFUNC, NULL, \
+ }; \
+ TCGv_ptr fpst; \
+ fpst = fpstatus_ptr(vece == MO_16 ? FPST_STD_F16 : FPST_STD); \
+ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, 0, \
+ fns[vece]); \
+ tcg_temp_free_ptr(fpst); \
+ } \
+ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
+ { \
+ if (a->size == MO_16) { \
+ if (!dc_isar_feature(aa32_fp16_arith, s)) { \
+ return false; \
+ } \
+ } else if (a->size != MO_32) { \
+ return false; \
+ } \
+ return do_2misc_vec(s, a, gen_##INSN); \
+ }
+
+DO_2MISC_FP_VEC(VRECPE_F, gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s)
+DO_2MISC_FP_VEC(VRSQRTE_F, gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s)
+
static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
{
if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
--
2.20.1


From: Richard Henderson <richard.henderson@linaro.org>

In both cases, we can sink the write-back and perform
the accumulate into the normal destination temps.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-11-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-neon.c.inc | 23 +++++++++--------------
1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
if (accfn) {
tmp = tcg_temp_new_i64();
read_neon_element64(tmp, a->vd, 0, MO_64);
- accfn(tmp, tmp, rd0);
- write_neon_element64(tmp, a->vd, 0, MO_64);
+ accfn(rd0, tmp, rd0);
read_neon_element64(tmp, a->vd, 1, MO_64);
- accfn(tmp, tmp, rd1);
- write_neon_element64(tmp, a->vd, 1, MO_64);
+ accfn(rd1, tmp, rd1);
tcg_temp_free_i64(tmp);
- } else {
- write_neon_element64(rd0, a->vd, 0, MO_64);
- write_neon_element64(rd1, a->vd, 1, MO_64);
}

+ write_neon_element64(rd0, a->vd, 0, MO_64);
+ write_neon_element64(rd1, a->vd, 1, MO_64);
tcg_temp_free_i64(rd0);
tcg_temp_free_i64(rd1);

@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
if (accfn) {
TCGv_i64 t64 = tcg_temp_new_i64();
read_neon_element64(t64, a->vd, 0, MO_64);
- accfn(t64, t64, rn0_64);
- write_neon_element64(t64, a->vd, 0, MO_64);
+ accfn(rn0_64, t64, rn0_64);
read_neon_element64(t64, a->vd, 1, MO_64);
- accfn(t64, t64, rn1_64);
- write_neon_element64(t64, a->vd, 1, MO_64);
+ accfn(rn1_64, t64, rn1_64);
tcg_temp_free_i64(t64);
- } else {
- write_neon_element64(rn0_64, a->vd, 0, MO_64);
- write_neon_element64(rn1_64, a->vd, 1, MO_64);
}
+
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
tcg_temp_free_i64(rn0_64);
tcg_temp_free_i64(rn1_64);
return true;
--
2.20.1
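[Editor's note: the accumulate-sinking rewrite above has a simple shape
when stripped of the TCG machinery: instead of writing the accumulated
value back through the scratch temp inside the if(), the accumulation is
folded into the values that are about to be stored anyway, leaving one
unconditional store path. A standalone C sketch of the resulting control
flow (illustration only, not the QEMU code):]

    #include <stdint.h>
    #include <stdio.h>

    /* vd[] plays the role of the destination Neon register;
     * rd0/rd1 are the freshly computed results. */
    static void demo(int64_t *vd, int64_t rd0, int64_t rd1, int use_acc)
    {
        if (use_acc) {
            rd0 += vd[0];   /* accfn(rd0, tmp, rd0) */
            rd1 += vd[1];   /* accfn(rd1, tmp, rd1) */
        }
        vd[0] = rd0;        /* write_neon_element64(rd0, a->vd, 0, MO_64) */
        vd[1] = rd1;        /* write_neon_element64(rd1, a->vd, 1, MO_64) */
    }

    int main(void)
    {
        int64_t vd[2] = { 10, 20 };
        demo(vd, 1, 2, 1);
        printf("%lld %lld\n", (long long)vd[0], (long long)vd[1]); /* 11 22 */
        return 0;
    }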
Convert the Neon VCVT float<->fixed-point insns to a
1
From: Richard Henderson <richard.henderson@linaro.org>
2
gvec style, in preparation for adding fp16 support.
3
2
3
We can use proper widening loads to extend 32-bit inputs,
4
and skip the "widenfn" step.
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20201030022618.785675-12-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200828183354.27913-38-peter.maydell@linaro.org
7
---
10
---
8
target/arm/helper.h | 5 +++++
11
target/arm/translate.c | 6 +++
9
target/arm/vec_helper.c | 20 +++++++++++++++++++
12
target/arm/translate-neon.c.inc | 66 ++++++++++++++++++---------------
10
target/arm/translate-neon.c.inc | 35 +++++++++++++++++----------------
13
2 files changed, 43 insertions(+), 29 deletions(-)
11
3 files changed, 43 insertions(+), 17 deletions(-)
12
14
13
diff --git a/target/arm/helper.h b/target/arm/helper.h
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper.h
17
--- a/target/arm/translate.c
16
+++ b/target/arm/helper.h
18
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
19
@@ -XXX,XX +XXX,XX @@ static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
18
DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
20
long off = neon_element_offset(reg, ele, memop);
19
DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
21
20
22
switch (memop) {
21
+DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
23
+ case MO_SL:
22
+DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
24
+ tcg_gen_ld32s_i64(dest, cpu_env, off);
23
+DEF_HELPER_FLAGS_4(gvec_vcvt_fs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
25
+ break;
24
+DEF_HELPER_FLAGS_4(gvec_vcvt_fu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
26
+ case MO_UL:
25
+
27
+ tcg_gen_ld32u_i64(dest, cpu_env, off);
26
DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
28
+ break;
27
DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
29
case MO_Q:
28
DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
30
tcg_gen_ld_i64(dest, cpu_env, off);
29
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
31
break;
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/vec_helper.c
32
+++ b/target/arm/vec_helper.c
33
@@ -XXX,XX +XXX,XX @@ DO_NEON_PAIRWISE(neon_pmax, max)
34
DO_NEON_PAIRWISE(neon_pmin, min)
35
36
#undef DO_NEON_PAIRWISE
37
+
38
+#define DO_VCVT_FIXED(NAME, FUNC, TYPE) \
39
+ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
40
+ { \
41
+ intptr_t i, oprsz = simd_oprsz(desc); \
42
+ int shift = simd_data(desc); \
43
+ TYPE *d = vd, *n = vn; \
44
+ float_status *fpst = stat; \
45
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
46
+ d[i] = FUNC(n[i], shift, fpst); \
47
+ } \
48
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
49
+ }
50
+
51
+DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t)
52
+DO_VCVT_FIXED(gvec_vcvt_uf, helper_vfp_ultos, uint32_t)
53
+DO_VCVT_FIXED(gvec_vcvt_fs, helper_vfp_tosls_round_to_zero, uint32_t)
54
+DO_VCVT_FIXED(gvec_vcvt_fu, helper_vfp_touls_round_to_zero, uint32_t)
55
+
56
+#undef DO_VCVT_FIXED
57
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
32
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
58
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/translate-neon.c.inc
34
--- a/target/arm/translate-neon.c.inc
60
+++ b/target/arm/translate-neon.c.inc
35
+++ b/target/arm/translate-neon.c.inc
61
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLL_U_2sh(DisasContext *s, arg_2reg_shift *a)
36
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
62
}
37
static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
63
38
NeonGenWidenFn *widenfn,
64
static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
39
NeonGenTwo64OpFn *opfn,
65
- NeonGenTwoSingleOpFn *fn)
40
- bool src1_wide)
66
+ gen_helper_gvec_2_ptr *fn)
41
+ int src1_mop, int src2_mop)
67
{
42
{
68
/* FP operations in 2-reg-and-shift group */
43
/* 3-regs different lengths, prewidening case (VADDL/VSUBL/VADDW/VSUBW) */
69
- TCGv_i32 tmp, shiftv;
44
TCGv_i64 rn0_64, rn1_64, rm_64;
70
- TCGv_ptr fpstatus;
45
- TCGv_i32 rm;
71
- int pass;
72
+ int vec_size = a->q ? 16 : 8;
73
+ int rd_ofs = neon_reg_offset(a->vd, 0);
74
+ int rm_ofs = neon_reg_offset(a->vm, 0);
75
+ TCGv_ptr fpst;
76
46
77
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
47
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
78
return false;
48
return false;
49
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
50
return false;
79
}
51
}
80
52
81
+ if (a->size != 0) {
53
- if (!widenfn || !opfn) {
82
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
54
+ if (!opfn) {
83
+ return false;
55
/* size == 3 case, which is an entirely different insn group */
84
+ }
56
return false;
57
}
58
59
- if ((a->vd & 1) || (src1_wide && (a->vn & 1))) {
60
+ if ((a->vd & 1) || (src1_mop == MO_Q && (a->vn & 1))) {
61
return false;
62
}
63
64
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
65
rn1_64 = tcg_temp_new_i64();
66
rm_64 = tcg_temp_new_i64();
67
68
- if (src1_wide) {
69
- read_neon_element64(rn0_64, a->vn, 0, MO_64);
70
+ if (src1_mop >= 0) {
71
+ read_neon_element64(rn0_64, a->vn, 0, src1_mop);
72
} else {
73
TCGv_i32 tmp = tcg_temp_new_i32();
74
read_neon_element32(tmp, a->vn, 0, MO_32);
75
widenfn(rn0_64, tmp);
76
tcg_temp_free_i32(tmp);
77
}
78
- rm = tcg_temp_new_i32();
79
- read_neon_element32(rm, a->vm, 0, MO_32);
80
+ if (src2_mop >= 0) {
81
+ read_neon_element64(rm_64, a->vm, 0, src2_mop);
82
+ } else {
83
+ TCGv_i32 tmp = tcg_temp_new_i32();
84
+ read_neon_element32(tmp, a->vm, 0, MO_32);
85
+ widenfn(rm_64, tmp);
86
+ tcg_temp_free_i32(tmp);
85
+ }
87
+ }
86
+
88
87
/* UNDEF accesses to D16-D31 if they don't exist. */
89
- widenfn(rm_64, rm);
88
if (!dc_isar_feature(aa32_simd_r32, s) &&
90
- tcg_temp_free_i32(rm);
89
((a->vd | a->vm) & 0x10)) {
91
opfn(rn0_64, rn0_64, rm_64);
90
@@ -XXX,XX +XXX,XX @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
92
91
return true;
93
/*
94
* Load second pass inputs before storing the first pass result, to
95
* avoid incorrect results if a narrow input overlaps with the result.
96
*/
97
- if (src1_wide) {
98
- read_neon_element64(rn1_64, a->vn, 1, MO_64);
99
+ if (src1_mop >= 0) {
100
+ read_neon_element64(rn1_64, a->vn, 1, src1_mop);
101
} else {
102
TCGv_i32 tmp = tcg_temp_new_i32();
103
read_neon_element32(tmp, a->vn, 1, MO_32);
104
widenfn(rn1_64, tmp);
105
tcg_temp_free_i32(tmp);
92
}
106
}
93
107
- rm = tcg_temp_new_i32();
94
- fpstatus = fpstatus_ptr(FPST_STD);
108
- read_neon_element32(rm, a->vm, 1, MO_32);
95
- shiftv = tcg_const_i32(a->shift);
109
+ if (src2_mop >= 0) {
96
- for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
110
+ read_neon_element64(rm_64, a->vm, 1, src2_mop);
97
- tmp = neon_load_reg(a->vm, pass);
111
+ } else {
98
- fn(tmp, tmp, shiftv, fpstatus);
112
+ TCGv_i32 tmp = tcg_temp_new_i32();
99
- neon_store_reg(a->vd, pass, tmp);
113
+ read_neon_element32(tmp, a->vm, 1, MO_32);
100
- }
114
+ widenfn(rm_64, tmp);
101
- tcg_temp_free_ptr(fpstatus);
115
+ tcg_temp_free_i32(tmp);
102
- tcg_temp_free_i32(shiftv);
116
+ }
103
+ fpst = fpstatus_ptr(a->size ? FPST_STD_F16 : FPST_STD);
117
104
+ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, vec_size, vec_size, a->shift, fn);
118
write_neon_element64(rn0_64, a->vd, 0, MO_64);
105
+ tcg_temp_free_ptr(fpst);
119
120
- widenfn(rm_64, rm);
121
- tcg_temp_free_i32(rm);
122
opfn(rn1_64, rn1_64, rm_64);
123
write_neon_element64(rn1_64, a->vd, 1, MO_64);
124
125
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
106
return true;
126
return true;
107
}
127
}
108
128
109
@@ -XXX,XX +XXX,XX @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
129
-#define DO_PREWIDEN(INSN, S, EXT, OP, SRC1WIDE) \
110
return do_fp_2sh(s, a, FUNC); \
130
+#define DO_PREWIDEN(INSN, S, OP, SRC1WIDE, SIGN) \
131
static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
132
{ \
133
static NeonGenWidenFn * const widenfn[] = { \
134
gen_helper_neon_widen_##S##8, \
135
gen_helper_neon_widen_##S##16, \
136
- tcg_gen_##EXT##_i32_i64, \
137
- NULL, \
138
+ NULL, NULL, \
139
}; \
140
static NeonGenTwo64OpFn * const addfn[] = { \
141
gen_helper_neon_##OP##l_u16, \
142
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
143
tcg_gen_##OP##_i64, \
144
NULL, \
145
}; \
146
- return do_prewiden_3d(s, a, widenfn[a->size], \
147
- addfn[a->size], SRC1WIDE); \
148
+ int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \
149
+ return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \
150
+ SRC1WIDE ? MO_Q : narrow_mop, \
151
+ narrow_mop); \
111
}
152
}
112
153
113
-DO_FP_2SH(VCVT_SF, gen_helper_vfp_sltos)
154
-DO_PREWIDEN(VADDL_S, s, ext, add, false)
114
-DO_FP_2SH(VCVT_UF, gen_helper_vfp_ultos)
155
-DO_PREWIDEN(VADDL_U, u, extu, add, false)
115
-DO_FP_2SH(VCVT_FS, gen_helper_vfp_tosls_round_to_zero)
156
-DO_PREWIDEN(VSUBL_S, s, ext, sub, false)
116
-DO_FP_2SH(VCVT_FU, gen_helper_vfp_touls_round_to_zero)
157
-DO_PREWIDEN(VSUBL_U, u, extu, sub, false)
117
+DO_FP_2SH(VCVT_SF, gen_helper_gvec_vcvt_sf)
158
-DO_PREWIDEN(VADDW_S, s, ext, add, true)
118
+DO_FP_2SH(VCVT_UF, gen_helper_gvec_vcvt_uf)
159
-DO_PREWIDEN(VADDW_U, u, extu, add, true)
119
+DO_FP_2SH(VCVT_FS, gen_helper_gvec_vcvt_fs)
160
-DO_PREWIDEN(VSUBW_S, s, ext, sub, true)
120
+DO_FP_2SH(VCVT_FU, gen_helper_gvec_vcvt_fu)
161
-DO_PREWIDEN(VSUBW_U, u, extu, sub, true)
121
162
+DO_PREWIDEN(VADDL_S, s, add, false, MO_SIGN)
122
static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
163
+DO_PREWIDEN(VADDL_U, u, add, false, 0)
123
{
164
+DO_PREWIDEN(VSUBL_S, s, sub, false, MO_SIGN)
165
+DO_PREWIDEN(VSUBL_U, u, sub, false, 0)
166
+DO_PREWIDEN(VADDW_S, s, add, true, MO_SIGN)
167
+DO_PREWIDEN(VADDW_U, u, add, true, 0)
168
+DO_PREWIDEN(VSUBW_S, s, sub, true, MO_SIGN)
169
+DO_PREWIDEN(VSUBW_U, u, sub, true, 0)
170
171
static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
172
NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
124
--
173
--
125
2.20.1
174
2.20.1
126
175
127
176
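A note on the pattern used by the VCVT conversion above: the DO_VCVT_FIXED
macro added to vec_helper.c expands to a plain per-element loop. Roughly, for
the signed fixed-to-float case (a sketch of the expansion, using only names
that appear in the patch):

    /* Approximate expansion of
     * DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t):
     * convert each 32-bit element using the shift encoded in the gvec
     * descriptor, then zero any bytes beyond the operation size.
     */
    void HELPER(gvec_vcvt_sf)(void *vd, void *vn, void *stat, uint32_t desc)
    {
        intptr_t i, oprsz = simd_oprsz(desc);
        int shift = simd_data(desc);        /* fixed-point bit position */
        uint32_t *d = vd, *n = vn;
        float_status *fpst = stat;

        for (i = 0; i < oprsz / sizeof(uint32_t); i++) {
            d[i] = helper_vfp_sltos(n[i], shift, fpst);
        }
        clear_tail(d, oprsz, simd_maxsz(desc));
    }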
diff view generated by jsdifflib
1
Convert the Neon VCVT with-specified-rounding-mode instructions
1
In the neon_padd/pmax/pmin helpers for float16, a cut-and-paste error
2
to gvec, and use this to implement fp16 support for them.
2
meant we were using the H4() address swizzler macro rather than the
3
H2() which is required for 2-byte data. This had no effect on
4
little-endian hosts but meant we put the result data into the
5
destination Dreg in the wrong order on big-endian hosts.
3
6
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200828183354.27913-40-peter.maydell@linaro.org
9
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
10
Message-id: 20201028191712.4910-2-peter.maydell@linaro.org
7
---
11
---
8
target/arm/helper.h | 5 ++
12
target/arm/vec_helper.c | 8 ++++----
9
target/arm/vec_helper.c | 23 +++++++
13
1 file changed, 4 insertions(+), 4 deletions(-)
10
target/arm/translate-neon.c.inc | 105 ++++++++++++--------------------
11
3 files changed, 66 insertions(+), 67 deletions(-)
12
14
13
diff --git a/target/arm/helper.h b/target/arm/helper.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper.h
16
+++ b/target/arm/helper.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
18
DEF_HELPER_FLAGS_4(gvec_vcvt_hs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
19
DEF_HELPER_FLAGS_4(gvec_vcvt_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
20
21
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
22
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
23
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
24
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
25
+
26
DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
27
DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
28
DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
29
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
15
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
30
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/vec_helper.c
17
--- a/target/arm/vec_helper.c
32
+++ b/target/arm/vec_helper.c
18
+++ b/target/arm/vec_helper.c
33
@@ -XXX,XX +XXX,XX @@ DO_VCVT_FIXED(gvec_vcvt_hs, helper_vfp_toshh_round_to_zero, uint16_t)
19
@@ -XXX,XX +XXX,XX @@ DO_ABA(gvec_uaba_d, uint64_t)
34
DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t)
20
r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst); \
35
21
r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst); \
36
#undef DO_VCVT_FIXED
22
\
37
+
23
- d[H4(0)] = r0; \
38
+#define DO_VCVT_RMODE(NAME, FUNC, TYPE) \
24
- d[H4(1)] = r1; \
39
+ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
25
- d[H4(2)] = r2; \
40
+ { \
26
- d[H4(3)] = r3; \
41
+ float_status *fpst = stat; \
27
+ d[H2(0)] = r0; \
42
+ intptr_t i, oprsz = simd_oprsz(desc); \
28
+ d[H2(1)] = r1; \
43
+ uint32_t rmode = simd_data(desc); \
29
+ d[H2(2)] = r2; \
44
+ uint32_t prev_rmode = get_float_rounding_mode(fpst); \
30
+ d[H2(3)] = r3; \
45
+ TYPE *d = vd, *n = vn; \
46
+ set_float_rounding_mode(rmode, fpst); \
47
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
48
+ d[i] = FUNC(n[i], 0, fpst); \
49
+ } \
50
+ set_float_rounding_mode(prev_rmode, fpst); \
51
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
52
+ }
53
+
54
+DO_VCVT_RMODE(gvec_vcvt_rm_ss, helper_vfp_tosls, uint32_t)
55
+DO_VCVT_RMODE(gvec_vcvt_rm_us, helper_vfp_touls, uint32_t)
56
+DO_VCVT_RMODE(gvec_vcvt_rm_sh, helper_vfp_toshh, uint16_t)
57
+DO_VCVT_RMODE(gvec_vcvt_rm_uh, helper_vfp_touhh, uint16_t)
58
+
59
+#undef DO_VCVT_RMODE
60
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/translate-neon.c.inc
63
+++ b/target/arm/translate-neon.c.inc
64
@@ -XXX,XX +XXX,XX @@ DO_VRINT(VRINTZ, FPROUNDING_ZERO)
65
DO_VRINT(VRINTM, FPROUNDING_NEGINF)
66
DO_VRINT(VRINTP, FPROUNDING_POSINF)
67
68
-static bool do_vcvt(DisasContext *s, arg_2misc *a, int rmode, bool is_signed)
69
-{
70
- /*
71
- * Handle a VCVT* operation by iterating 32 bits at a time,
72
- * with a specified rounding mode in operation.
73
- */
74
- int pass;
75
- TCGv_ptr fpst;
76
- TCGv_i32 tcg_rmode, tcg_shift;
77
-
78
- if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
79
- !arm_dc_feature(s, ARM_FEATURE_V8)) {
80
- return false;
81
+#define DO_VEC_RMODE(INSN, RMODE, OP) \
82
+ static void gen_##INSN(unsigned vece, uint32_t rd_ofs, \
83
+ uint32_t rm_ofs, \
84
+ uint32_t oprsz, uint32_t maxsz) \
85
+ { \
86
+ static gen_helper_gvec_2_ptr * const fns[4] = { \
87
+ NULL, \
88
+ gen_helper_gvec_##OP##h, \
89
+ gen_helper_gvec_##OP##s, \
90
+ NULL, \
91
+ }; \
92
+ TCGv_ptr fpst; \
93
+ fpst = fpstatus_ptr(vece == 1 ? FPST_STD_F16 : FPST_STD); \
94
+ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, \
95
+ arm_rmode_to_sf(RMODE), fns[vece]); \
96
+ tcg_temp_free_ptr(fpst); \
97
+ } \
98
+ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
99
+ { \
100
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) { \
101
+ return false; \
102
+ } \
103
+ if (a->size == MO_16) { \
104
+ if (!dc_isar_feature(aa32_fp16_arith, s)) { \
105
+ return false; \
106
+ } \
107
+ } else if (a->size != MO_32) { \
108
+ return false; \
109
+ } \
110
+ return do_2misc_vec(s, a, gen_##INSN); \
111
}
31
}
112
32
113
- /* UNDEF accesses to D16-D31 if they don't exist. */
33
DO_NEON_PAIRWISE(neon_padd, add)
114
- if (!dc_isar_feature(aa32_simd_r32, s) &&
115
- ((a->vd | a->vm) & 0x10)) {
116
- return false;
117
- }
118
-
119
- if (a->size != 2) {
120
- /* TODO: FP16 will be the size == 1 case */
121
- return false;
122
- }
123
-
124
- if ((a->vd | a->vm) & a->q) {
125
- return false;
126
- }
127
-
128
- if (!vfp_access_check(s)) {
129
- return true;
130
- }
131
-
132
- fpst = fpstatus_ptr(FPST_STD);
133
- tcg_shift = tcg_const_i32(0);
134
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
135
- gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode, cpu_env);
136
- for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
137
- TCGv_i32 tmp = neon_load_reg(a->vm, pass);
138
- if (is_signed) {
139
- gen_helper_vfp_tosls(tmp, tmp, tcg_shift, fpst);
140
- } else {
141
- gen_helper_vfp_touls(tmp, tmp, tcg_shift, fpst);
142
- }
143
- neon_store_reg(a->vd, pass, tmp);
144
- }
145
- gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode, cpu_env);
146
- tcg_temp_free_i32(tcg_rmode);
147
- tcg_temp_free_i32(tcg_shift);
148
- tcg_temp_free_ptr(fpst);
149
-
150
- return true;
151
-}
152
-
153
-#define DO_VCVT(INSN, RMODE, SIGNED) \
154
- static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
155
- { \
156
- return do_vcvt(s, a, RMODE, SIGNED); \
157
- }
158
-
159
-DO_VCVT(VCVTAU, FPROUNDING_TIEAWAY, false)
160
-DO_VCVT(VCVTAS, FPROUNDING_TIEAWAY, true)
161
-DO_VCVT(VCVTNU, FPROUNDING_TIEEVEN, false)
162
-DO_VCVT(VCVTNS, FPROUNDING_TIEEVEN, true)
163
-DO_VCVT(VCVTPU, FPROUNDING_POSINF, false)
164
-DO_VCVT(VCVTPS, FPROUNDING_POSINF, true)
165
-DO_VCVT(VCVTMU, FPROUNDING_NEGINF, false)
166
-DO_VCVT(VCVTMS, FPROUNDING_NEGINF, true)
167
+DO_VEC_RMODE(VCVTAU, FPROUNDING_TIEAWAY, vcvt_rm_u)
168
+DO_VEC_RMODE(VCVTAS, FPROUNDING_TIEAWAY, vcvt_rm_s)
169
+DO_VEC_RMODE(VCVTNU, FPROUNDING_TIEEVEN, vcvt_rm_u)
170
+DO_VEC_RMODE(VCVTNS, FPROUNDING_TIEEVEN, vcvt_rm_s)
171
+DO_VEC_RMODE(VCVTPU, FPROUNDING_POSINF, vcvt_rm_u)
172
+DO_VEC_RMODE(VCVTPS, FPROUNDING_POSINF, vcvt_rm_s)
173
+DO_VEC_RMODE(VCVTMU, FPROUNDING_NEGINF, vcvt_rm_u)
174
+DO_VEC_RMODE(VCVTMS, FPROUNDING_NEGINF, vcvt_rm_s)
175
176
static bool trans_VSWP(DisasContext *s, arg_2misc *a)
177
{
178
--
34
--
179
2.20.1
35
2.20.1
180
36
181
37
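For readers unfamiliar with the H*() macros in the pairwise-ops fix above:
they swizzle element indexes so that lane N of a vector lands at the correct
byte offset regardless of host endianness, and the swizzle constant depends on
the element size. A sketch of the idea (the exact XOR values are an assumption
from memory of target/arm; they are identity on little-endian hosts):

    #ifdef HOST_WORDS_BIGENDIAN
    #define H2(x)  ((x) ^ 3)  /* 2-byte element index within an 8-byte group */
    #define H4(x)  ((x) ^ 1)  /* 4-byte element index within an 8-byte group */
    #else
    #define H2(x)  (x)
    #define H4(x)  (x)
    #endif

Writing four float16 results through H4() instead of H2() therefore scatters
them to the wrong byte offsets on big-endian hosts, which is exactly what the
one-line change corrects.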
diff view generated by jsdifflib
1
In the gvec helper functions for indexed operations, for AArch32
1
The helper functions for performing the udot/sdot operations against
2
Neon the oprsz (total size of the vector) can be less than 16 bytes
2
a scalar were not using an address-swizzling macro when converting
3
if the operation is on a D reg. Since the inner loop in these
3
the index of the scalar element into a pointer into the vm array.
4
helpers always goes from 0 to segment, we must clamp it based
4
This had no effect on little-endian hosts but meant we generated
5
on oprsz to avoid processing a full 16 byte segment when asked to
5
incorrect results on big-endian hosts.
6
handle an 8 byte wide vector.
6
7
For these insns, the index is indexing over a group of four 8-bit values,
8
so 32 bits per indexed entity, and H4() is therefore what we want.
9
(For Neon the only possible input indexes are 0 and 1.)
7
10
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20200828183354.27913-43-peter.maydell@linaro.org
13
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
14
Message-id: 20201028191712.4910-3-peter.maydell@linaro.org
11
---
15
---
12
target/arm/vec_helper.c | 12 ++++++++----
16
target/arm/vec_helper.c | 4 ++--
13
1 file changed, 8 insertions(+), 4 deletions(-)
17
1 file changed, 2 insertions(+), 2 deletions(-)
14
18
15
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
19
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/vec_helper.c
21
--- a/target/arm/vec_helper.c
18
+++ b/target/arm/vec_helper.c
22
+++ b/target/arm/vec_helper.c
19
@@ -XXX,XX +XXX,XX @@ DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)
23
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
20
#define DO_MUL_IDX(NAME, TYPE, H) \
24
intptr_t index = simd_data(desc);
21
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
25
uint32_t *d = vd;
22
{ \
26
int8_t *n = vn;
23
- intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
27
- int8_t *m_indexed = (int8_t *)vm + index * 4;
24
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
28
+ int8_t *m_indexed = (int8_t *)vm + H4(index) * 4;
25
+ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
29
26
intptr_t idx = simd_data(desc); \
30
/* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
27
TYPE *d = vd, *n = vn, *m = vm; \
31
* Otherwise opr_sz is a multiple of 16.
28
for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
32
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
29
@@ -XXX,XX +XXX,XX @@ DO_MUL_IDX(gvec_mul_idx_d, uint64_t, )
33
intptr_t index = simd_data(desc);
30
#define DO_MLA_IDX(NAME, TYPE, OP, H) \
34
uint32_t *d = vd;
31
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
35
uint8_t *n = vn;
32
{ \
36
- uint8_t *m_indexed = (uint8_t *)vm + index * 4;
33
- intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
37
+ uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4;
34
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
38
35
+ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
39
/* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
36
intptr_t idx = simd_data(desc); \
40
* Otherwise opr_sz is a multiple of 16.
37
TYPE *d = vd, *n = vn, *m = vm, *a = va; \
38
for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
39
@@ -XXX,XX +XXX,XX @@ DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, )
40
#define DO_FMUL_IDX(NAME, TYPE, H) \
41
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
42
{ \
43
- intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
44
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
45
+ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
46
intptr_t idx = simd_data(desc); \
47
TYPE *d = vd, *n = vn, *m = vm; \
48
for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
49
@@ -XXX,XX +XXX,XX @@ DO_FMUL_IDX(gvec_fmul_idx_d, float64, )
50
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \
51
void *stat, uint32_t desc) \
52
{ \
53
- intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
54
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
55
+ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
56
TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \
57
intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \
58
TYPE *d = vd, *n = vn, *m = vm, *a = va; \
59
--
41
--
60
2.20.1
42
2.20.1
61
43
62
44
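To make the clamp in the indexed-ops patch concrete: for an AArch32 Neon
operation on a D register the gvec oprsz is 8 bytes, but the old 'segment'
value always described a full 16-byte segment, so the inner loop walked past
the end of the operand. A sketch with uint32_t elements:

    #include <stdint.h>
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static intptr_t segment_elems(intptr_t oprsz)
    {
        /* Old code: 16 / sizeof(uint32_t) == 4 elements even when the
         * operation covers only an 8-byte D reg (oprsz == 8).  The clamp
         * keeps the inner loop inside the operand: 2 elements here. */
        return MIN(16, oprsz) / sizeof(uint32_t);
    }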
diff view generated by jsdifflib
1
From: Graeme Gregory <graeme@nuviainc.com>
1
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
2
2
3
Add the previously created sbsa-ec device to the sbsa-ref machine in
3
HCR_EL2 should be applied when SCR_EL3.NS is set, not when it is cleared.
4
secure memory so the PSCI implementation in ARM-TF can access it, but
5
not expose it to non-secure firmware or the OS except via ARM-TF.
6
4
7
Signed-off-by: Graeme Gregory <graeme@nuviainc.com>
5
Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
8
Reviewed-by: Leif Lindholm <leif@nuviainc.com>
9
Tested-by: Leif Lindholm <leif@nuviainc.com>
10
Message-id: 20200826141952.136164-3-graeme@nuviainc.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
8
---
14
hw/arm/sbsa-ref.c | 14 ++++++++++++++
9
target/arm/helper.c | 5 ++---
15
1 file changed, 14 insertions(+)
10
1 file changed, 2 insertions(+), 3 deletions(-)
16
11
17
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
12
diff --git a/target/arm/helper.c b/target/arm/helper.c
18
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/arm/sbsa-ref.c
14
--- a/target/arm/helper.c
20
+++ b/hw/arm/sbsa-ref.c
15
+++ b/target/arm/helper.c
21
@@ -XXX,XX +XXX,XX @@ enum {
16
@@ -XXX,XX +XXX,XX @@ static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
22
SBSA_CPUPERIPHS,
17
23
SBSA_GIC_DIST,
18
/*
24
SBSA_GIC_REDIST,
19
* Non-IS variants of TLB operations are upgraded to
25
+ SBSA_SECURE_EC,
20
- * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
26
SBSA_SMMU,
21
+ * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
27
SBSA_UART,
22
* force broadcast of these operations.
28
SBSA_RTC,
23
*/
29
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry sbsa_ref_memmap[] = {
24
static bool tlb_force_broadcast(CPUARMState *env)
30
[SBSA_CPUPERIPHS] = { 0x40000000, 0x00040000 },
25
{
31
[SBSA_GIC_DIST] = { 0x40060000, 0x00010000 },
26
- return (env->cp15.hcr_el2 & HCR_FB) &&
32
[SBSA_GIC_REDIST] = { 0x40080000, 0x04000000 },
27
- arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
33
+ [SBSA_SECURE_EC] = { 0x50000000, 0x00001000 },
28
+ return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
34
[SBSA_UART] = { 0x60000000, 0x00001000 },
35
[SBSA_RTC] = { 0x60010000, 0x00001000 },
36
[SBSA_GPIO] = { 0x60020000, 0x00001000 },
37
@@ -XXX,XX +XXX,XX @@ static void *sbsa_ref_dtb(const struct arm_boot_info *binfo, int *fdt_size)
38
return board->fdt;
39
}
29
}
40
30
41
+static void create_secure_ec(MemoryRegion *mem)
31
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
42
+{
43
+ hwaddr base = sbsa_ref_memmap[SBSA_SECURE_EC].base;
44
+ DeviceState *dev = qdev_new("sbsa-ec");
45
+ SysBusDevice *s = SYS_BUS_DEVICE(dev);
46
+
47
+ memory_region_add_subregion(mem, base,
48
+ sysbus_mmio_get_region(s, 0));
49
+}
50
+
51
static void sbsa_ref_init(MachineState *machine)
52
{
53
unsigned int smp_cpus = machine->smp.cpus;
54
@@ -XXX,XX +XXX,XX @@ static void sbsa_ref_init(MachineState *machine)
55
56
create_pcie(sms);
57
58
+ create_secure_ec(secure_sysmem);
59
+
60
sms->bootinfo.ram_size = machine->ram_size;
61
sms->bootinfo.nb_cpus = smp_cpus;
62
sms->bootinfo.board_id = -1;
63
--
32
--
64
2.20.1
33
2.20.1
65
34
66
35
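One line of context for the HCR.FB change above: as I understand the helper's
contract (an assumption, not stated in the patch), arm_hcr_el2_eff() returns
the HCR_EL2 bits actually in force, i.e. 0 whenever EL2 is not enabled for the
current security state. That makes the old explicit secure-state test
redundant as well as inverted; the fixed check mirrors the hunk above:

    static bool tlb_force_broadcast(CPUARMState *env)
    {
        /* Broadcast NS EL1 TLB ops when FB is in effect. */
        return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
    }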
diff view generated by jsdifflib
1
Convert the Neon VRINTX insn to use gvec, and use this to implement
1
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
2
fp16 support for it.
3
2
3
Secure mode is not exempted from checking SCR_EL3.TLOR, and in the
4
future HCR_EL2.TLOR when S-EL2 is enabled.
5
6
Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200828183354.27913-42-peter.maydell@linaro.org
7
---
9
---
8
target/arm/helper.h | 3 +++
10
target/arm/helper.c | 19 +++++--------------
9
target/arm/vec_helper.c | 3 +++
11
1 file changed, 5 insertions(+), 14 deletions(-)
10
target/arm/translate-neon.c.inc | 45 +++------------------------------
11
3 files changed, 9 insertions(+), 42 deletions(-)
12
12
13
diff --git a/target/arm/helper.h b/target/arm/helper.h
13
diff --git a/target/arm/helper.c b/target/arm/helper.c
14
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper.h
15
--- a/target/arm/helper.c
16
+++ b/target/arm/helper.h
16
+++ b/target/arm/helper.c
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
17
@@ -XXX,XX +XXX,XX @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
18
DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
18
#endif
19
DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
19
20
20
/* Shared logic between LORID and the rest of the LOR* registers.
21
+DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
21
- * Secure state has already been delt with.
22
+DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
22
+ * Secure state exclusion has already been dealt with.
23
+
23
*/
24
DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
24
-static CPAccessResult access_lor_ns(CPUARMState *env)
25
DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
25
+static CPAccessResult access_lor_ns(CPUARMState *env,
26
DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
26
+ const ARMCPRegInfo *ri, bool isread)
27
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
27
{
28
index XXXXXXX..XXXXXXX 100644
28
int el = arm_current_el(env);
29
--- a/target/arm/vec_helper.c
29
30
+++ b/target/arm/vec_helper.c
30
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_lor_ns(CPUARMState *env)
31
@@ -XXX,XX +XXX,XX @@ DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
31
return CP_ACCESS_OK;
32
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
33
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)
34
35
+DO_2OP(gvec_vrintx_h, float16_round_to_int, float16)
36
+DO_2OP(gvec_vrintx_s, float32_round_to_int, float32)
37
+
38
DO_2OP(gvec_sitos, helper_vfp_sitos, int32_t)
39
DO_2OP(gvec_uitos, helper_vfp_uitos, uint32_t)
40
DO_2OP(gvec_tosizs, helper_vfp_tosizs, float32)
41
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/translate-neon.c.inc
44
+++ b/target/arm/translate-neon.c.inc
45
@@ -XXX,XX +XXX,XX @@ static bool trans_VQNEG(DisasContext *s, arg_2misc *a)
46
return do_2misc(s, a, fn[a->size]);
47
}
32
}
48
33
49
-static bool do_2misc_fp(DisasContext *s, arg_2misc *a,
34
-static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
50
- NeonGenOneSingleOpFn *fn)
35
- bool isread)
51
-{
36
-{
52
- int pass;
37
- if (arm_is_secure_below_el3(env)) {
53
- TCGv_ptr fpst;
38
- /* Access ok in secure mode. */
54
-
39
- return CP_ACCESS_OK;
55
- /* Handle a 2-reg-misc operation by iterating 32 bits at a time */
56
- if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
57
- return false;
58
- }
40
- }
59
-
41
- return access_lor_ns(env);
60
- /* UNDEF accesses to D16-D31 if they don't exist. */
61
- if (!dc_isar_feature(aa32_simd_r32, s) &&
62
- ((a->vd | a->vm) & 0x10)) {
63
- return false;
64
- }
65
-
66
- if (a->size != 2) {
67
- /* TODO: FP16 will be the size == 1 case */
68
- return false;
69
- }
70
-
71
- if ((a->vd | a->vm) & a->q) {
72
- return false;
73
- }
74
-
75
- if (!vfp_access_check(s)) {
76
- return true;
77
- }
78
-
79
- fpst = fpstatus_ptr(FPST_STD);
80
- for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
81
- TCGv_i32 tmp = neon_load_reg(a->vm, pass);
82
- fn(tmp, tmp, fpst);
83
- neon_store_reg(a->vd, pass, tmp);
84
- }
85
- tcg_temp_free_ptr(fpst);
86
-
87
- return true;
88
-}
42
-}
89
-
43
-
90
#define DO_2MISC_FP_VEC(INSN, HFUNC, SFUNC) \
44
static CPAccessResult access_lor_other(CPUARMState *env,
91
static void gen_##INSN(unsigned vece, uint32_t rd_ofs, \
45
const ARMCPRegInfo *ri, bool isread)
92
uint32_t rm_ofs, \
93
@@ -XXX,XX +XXX,XX @@ DO_2MISC_FP_VEC(VCVT_FU, gen_helper_gvec_ustoh, gen_helper_gvec_uitos)
94
DO_2MISC_FP_VEC(VCVT_SF, gen_helper_gvec_tosszh, gen_helper_gvec_tosizs)
95
DO_2MISC_FP_VEC(VCVT_UF, gen_helper_gvec_touszh, gen_helper_gvec_touizs)
96
97
+DO_2MISC_FP_VEC(VRINTX_impl, gen_helper_gvec_vrintx_h, gen_helper_gvec_vrintx_s)
98
+
99
static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
100
{
46
{
101
if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
47
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_lor_other(CPUARMState *env,
102
return false;
48
/* Access denied in secure mode. */
49
return CP_ACCESS_TRAP;
103
}
50
}
104
- return do_2misc_fp(s, a, gen_helper_rints_exact);
51
- return access_lor_ns(env);
105
+ return trans_VRINTX_impl(s, a);
52
+ return access_lor_ns(env, ri, isread);
106
}
53
}
107
54
108
#define DO_VEC_RMODE(INSN, RMODE, OP) \
55
/*
56
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo lor_reginfo[] = {
57
.type = ARM_CP_CONST, .resetvalue = 0 },
58
{ .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
59
.opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
60
- .access = PL1_R, .accessfn = access_lorid,
61
+ .access = PL1_R, .accessfn = access_lor_ns,
62
.type = ARM_CP_CONST, .resetvalue = 0 },
63
REGINFO_SENTINEL
64
};
109
--
65
--
110
2.20.1
66
2.20.1
111
67
112
68
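The signature change to access_lor_ns() in the LORID patch is what allows it
to be wired up directly as LORID_EL1's .accessfn. For reference, the prototype
it now has to match is (assumed from target/arm/cpu.h, quoted from memory):

    /* An ARMCPRegInfo access hook: receives the register description and
     * whether the access is a read; returns OK or a trap disposition. */
    typedef CPAccessResult CPAccessFn(CPUARMState *env,
                                      const ARMCPRegInfo *opaque,
                                      bool isread);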
diff view generated by jsdifflib
1
Set the MVFR1 ID register FPHP and SIMDHP fields to indicate
1
If we're using the capstone disassembler, disassembly of a run of
2
that our "-cpu max" has v8.2-FP16.
2
instructions more than 32 bytes long disassembles the wrong data for
3
instructions beyond the 32 byte mark:
3
4
5
(qemu) xp /16x 0x100
6
0000000000000100: 0x00000005 0x54410001 0x00000001 0x00001000
7
0000000000000110: 0x00000000 0x00000004 0x54410002 0x3c000000
8
0000000000000120: 0x00000000 0x00000004 0x54410009 0x74736574
9
0000000000000130: 0x00000000 0x00000000 0x00000000 0x00000000
10
(qemu) xp /16i 0x100
11
0x00000100: 00000005 andeq r0, r0, r5
12
0x00000104: 54410001 strbpl r0, [r1], #-1
13
0x00000108: 00000001 andeq r0, r0, r1
14
0x0000010c: 00001000 andeq r1, r0, r0
15
0x00000110: 00000000 andeq r0, r0, r0
16
0x00000114: 00000004 andeq r0, r0, r4
17
0x00000118: 54410002 strbpl r0, [r1], #-2
18
0x0000011c: 3c000000 .byte 0x00, 0x00, 0x00, 0x3c
19
0x00000120: 54410001 strbpl r0, [r1], #-1
20
0x00000124: 00000001 andeq r0, r0, r1
21
0x00000128: 00001000 andeq r1, r0, r0
22
0x0000012c: 00000000 andeq r0, r0, r0
23
0x00000130: 00000004 andeq r0, r0, r4
24
0x00000134: 54410002 strbpl r0, [r1], #-2
25
0x00000138: 3c000000 .byte 0x00, 0x00, 0x00, 0x3c
26
0x0000013c: 00000000 andeq r0, r0, r0
27
28
Here the disassembly of 0x120..0x13f is using the data that is in
29
0x104..0x123.
30
31
This is caused by passing the wrong value to the read_memory_func().
32
The intention is that at this point in the loop the 'cap_buf' buffer
33
already contains 'csize' bytes of data for the instruction at guest
34
addr 'pc', and we want to read in an extra 'tsize' bytes. Those
35
extra bytes are therefore at 'pc + csize', not 'pc'. On the first
36
time through the loop 'csize' happens to be zero, so the initial read
37
of 32 bytes into cap_buf is correct and as long as the disassembly
38
never needs to read more data we return the correct information.
39
40
Use the correct guest address in the call to read_memory_func().
41
42
Cc: qemu-stable@nongnu.org
43
Fixes: https://bugs.launchpad.net/qemu/+bug/1900779
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
44
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
45
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Message-id: 20200828183354.27913-46-peter.maydell@linaro.org
46
Message-id: 20201022132445.25039-1-peter.maydell@linaro.org
7
---
47
---
8
target/arm/cpu.c | 3 ++-
48
disas/capstone.c | 2 +-
9
target/arm/cpu64.c | 10 ++++------
49
1 file changed, 1 insertion(+), 1 deletion(-)
10
2 files changed, 6 insertions(+), 7 deletions(-)
11
50
12
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
51
diff --git a/disas/capstone.c b/disas/capstone.c
13
index XXXXXXX..XXXXXXX 100644
52
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/cpu.c
53
--- a/disas/capstone.c
15
+++ b/target/arm/cpu.c
54
+++ b/disas/capstone.c
16
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
55
@@ -XXX,XX +XXX,XX @@ bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count)
17
cpu->isar.id_isar6 = t;
56
18
57
/* Make certain that we can make progress. */
19
t = cpu->isar.mvfr1;
58
assert(tsize != 0);
20
- t = FIELD_DP32(t, MVFR1, FPHP, 2); /* v8.0 FP support */
59
- info->read_memory_func(pc, cap_buf + csize, tsize, info);
21
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
60
+ info->read_memory_func(pc + csize, cap_buf + csize, tsize, info);
22
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
61
csize += tsize;
23
cpu->isar.mvfr1 = t;
62
24
63
if (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) {
25
t = cpu->isar.mvfr2;
26
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/cpu64.c
29
+++ b/target/arm/cpu64.c
30
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
31
u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
32
cpu->isar.id_dfr0 = u;
33
34
- /*
35
- * FIXME: We do not yet support ARMv8.2-fp16 for AArch32 yet,
36
- * so do not set MVFR1.FPHP. Strictly speaking this is not legal,
37
- * but it is also not legal to enable SVE without support for FP16,
38
- * and enabling SVE in system mode is more useful in the short term.
39
- */
40
+ u = cpu->isar.mvfr1;
41
+ u = FIELD_DP32(u, MVFR1, FPHP, 3); /* v8.2-FP16 */
42
+ u = FIELD_DP32(u, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
43
+ cpu->isar.mvfr1 = u;
44
45
#ifdef CONFIG_USER_ONLY
46
/* For usermode -cpu max we can use a larger and more efficient DCZ
47
--
64
--
48
2.20.1
65
2.20.1
49
66
50
67
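The shape of the capstone bug is worth spelling out: the monitor disassembler
keeps a sliding window (cap_buf) over guest memory, and on each refill it
already holds csize bytes for address pc, so the next read must start at
pc + csize. A minimal sketch of the pattern (hypothetical helper names, not
the real capstone glue):

    #include <stddef.h>
    #include <stdint.h>

    typedef void read_memory_fn(uint64_t addr, uint8_t *buf, size_t len);

    /* Top up a window that already holds 'have' bytes for address 'pc'.
     * The bug was reading from 'pc' again instead of 'pc + have', so
     * every refill duplicated the first bytes of the buffer. */
    static size_t window_refill(read_memory_fn *read_memory, uint64_t pc,
                                uint8_t *buf, size_t have, size_t want)
    {
        read_memory(pc + have, buf + have, want - have);
        return want;
    }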
diff view generated by jsdifflib
1
From: Graeme Gregory <graeme@nuviainc.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
A difference between the sbsa-ref platform and the virt platform is that PSCI is
3
Use the BIT_ULL() macro to ensure we use 64-bit arithmetic.
4
handled by ARM-TF in the sbsa platform. This means that the PSCI code
4
This fixes the following Coverity issue (OVERFLOW_BEFORE_WIDEN):
5
there needs to communicate some of the platform power changes down
6
to the qemu code for things like shutdown/reset control.
7
5
8
Space has been left to extend the EC if we find other use cases in
6
CID 1432363 (#1 of 1): Unintentional integer overflow:
9
future where ARM-TF and qemu need to communicate.
10
7
11
Signed-off-by: Graeme Gregory <graeme@nuviainc.com>
8
overflow_before_widen:
12
Reviewed-by: Leif Lindholm <leif@nuviainc.com>
9
Potentially overflowing expression 1 << scale with type int
13
Tested-by: Leif Lindholm <leif@nuviainc.com>
10
(32 bits, signed) is evaluated using 32-bit arithmetic, and
14
Message-id: 20200826141952.136164-2-graeme@nuviainc.com
11
then used in a context that expects an expression of type
12
hwaddr (64 bits, unsigned).
13
14
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
15
Acked-by: Eric Auger <eric.auger@redhat.com>
16
Message-id: 20201030144617.1535064-1-philmd@redhat.com
15
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
19
---
18
hw/misc/sbsa_ec.c | 98 +++++++++++++++++++++++++++++++++++++++++++++
20
hw/arm/smmuv3.c | 3 ++-
19
hw/misc/meson.build | 2 +
21
1 file changed, 2 insertions(+), 1 deletion(-)
20
2 files changed, 100 insertions(+)
21
create mode 100644 hw/misc/sbsa_ec.c
22
22
23
diff --git a/hw/misc/sbsa_ec.c b/hw/misc/sbsa_ec.c
23
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
24
new file mode 100644
24
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX
25
--- a/hw/arm/smmuv3.c
26
--- /dev/null
26
+++ b/hw/arm/smmuv3.c
27
+++ b/hw/misc/sbsa_ec.c
28
@@ -XXX,XX +XXX,XX @@
27
@@ -XXX,XX +XXX,XX @@
29
+/*
28
*/
30
+ * ARM SBSA Reference Platform Embedded Controller
29
31
+ *
30
#include "qemu/osdep.h"
32
+ * A device to allow PSCI running in the secure side of sbsa-ref machine
31
+#include "qemu/bitops.h"
33
+ * to communicate platform power states to qemu.
32
#include "hw/irq.h"
34
+ *
33
#include "hw/sysbus.h"
35
+ * Copyright (c) 2020 Nuvia Inc
34
#include "migration/vmstate.h"
36
+ * Written by Graeme Gregory <graeme@nuviainc.com>
35
@@ -XXX,XX +XXX,XX @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
37
+ *
36
scale = CMD_SCALE(cmd);
38
+ * SPDX-License-Identifer: GPL-2.0-or-later
37
num = CMD_NUM(cmd);
39
+ */
38
ttl = CMD_TTL(cmd);
40
+
39
- num_pages = (num + 1) * (1 << (scale));
41
+#include "qemu/osdep.h"
40
+ num_pages = (num + 1) * BIT_ULL(scale);
42
+#include "qemu-common.h"
41
}
43
+#include "qemu/log.h"
42
44
+#include "hw/sysbus.h"
43
if (type == SMMU_CMD_TLBI_NH_VA) {
45
+#include "sysemu/runstate.h"
46
+
47
+typedef struct {
48
+ SysBusDevice parent_obj;
49
+ MemoryRegion iomem;
50
+} SECUREECState;
51
+
52
+#define TYPE_SBSA_EC "sbsa-ec"
53
+#define SECURE_EC(obj) OBJECT_CHECK(SECUREECState, (obj), TYPE_SBSA_EC)
54
+
55
+enum sbsa_ec_powerstates {
56
+ SBSA_EC_CMD_POWEROFF = 0x01,
57
+ SBSA_EC_CMD_REBOOT = 0x02,
58
+};
59
+
60
+static uint64_t sbsa_ec_read(void *opaque, hwaddr offset, unsigned size)
61
+{
62
+ /* No use for this currently */
63
+ qemu_log_mask(LOG_GUEST_ERROR, "sbsa-ec: no readable registers");
64
+ return 0;
65
+}
66
+
67
+static void sbsa_ec_write(void *opaque, hwaddr offset,
68
+ uint64_t value, unsigned size)
69
+{
70
+ if (offset == 0) { /* PSCI machine power command register */
71
+ switch (value) {
72
+ case SBSA_EC_CMD_POWEROFF:
73
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
74
+ break;
75
+ case SBSA_EC_CMD_REBOOT:
76
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
77
+ break;
78
+ default:
79
+ qemu_log_mask(LOG_GUEST_ERROR,
80
+ "sbsa-ec: unknown power command");
81
+ }
82
+ } else {
83
+ qemu_log_mask(LOG_GUEST_ERROR, "sbsa-ec: unknown EC register");
84
+ }
85
+}
86
+
87
+static const MemoryRegionOps sbsa_ec_ops = {
88
+ .read = sbsa_ec_read,
89
+ .write = sbsa_ec_write,
90
+ .endianness = DEVICE_NATIVE_ENDIAN,
91
+ .valid.min_access_size = 4,
92
+ .valid.max_access_size = 4,
93
+};
94
+
95
+static void sbsa_ec_init(Object *obj)
96
+{
97
+ SECUREECState *s = SECURE_EC(obj);
98
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
99
+
100
+ memory_region_init_io(&s->iomem, obj, &sbsa_ec_ops, s, "sbsa-ec",
101
+ 0x1000);
102
+ sysbus_init_mmio(dev, &s->iomem);
103
+}
104
+
105
+static void sbsa_ec_class_init(ObjectClass *klass, void *data)
106
+{
107
+ DeviceClass *dc = DEVICE_CLASS(klass);
108
+
109
+ /* No vmstate or reset required: device has no internal state */
110
+ dc->user_creatable = false;
111
+}
112
+
113
+static const TypeInfo sbsa_ec_info = {
114
+ .name = TYPE_SBSA_EC,
115
+ .parent = TYPE_SYS_BUS_DEVICE,
116
+ .instance_size = sizeof(SECUREECState),
117
+ .instance_init = sbsa_ec_init,
118
+ .class_init = sbsa_ec_class_init,
119
+};
120
+
121
+static void sbsa_ec_register_type(void)
122
+{
123
+ type_register_static(&sbsa_ec_info);
124
+}
125
+
126
+type_init(sbsa_ec_register_type);
127
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
128
index XXXXXXX..XXXXXXX 100644
129
--- a/hw/misc/meson.build
130
+++ b/hw/misc/meson.build
131
@@ -XXX,XX +XXX,XX @@ specific_ss.add(when: 'CONFIG_MAC_VIA', if_true: files('mac_via.c'))
132
133
specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_cmgcr.c', 'mips_cpc.c'))
134
specific_ss.add(when: 'CONFIG_MIPS_ITU', if_true: files('mips_itu.c'))
135
+
136
+specific_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa_ec.c'))
137
--
44
--
138
2.20.1
45
2.20.1
139
46
140
47
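To make the Coverity finding concrete: in the old expression the shift is
evaluated in 32-bit int and only the finished product is widened to hwaddr, so
it misbehaves if the command encoding allows scale values of 31 or more.
BIT_ULL(scale) is effectively 1ULL << scale, which keeps the whole computation
in 64 bits:

    #include <stdint.h>

    /* Shift evaluated as int, widened only afterwards: UB for scale >= 31. */
    static uint64_t num_pages_bad(uint64_t num, unsigned scale)
    {
        return (num + 1) * (1 << scale);
    }

    /* 64-bit arithmetic throughout, matching BIT_ULL(scale). */
    static uint64_t num_pages_good(uint64_t num, unsigned scale)
    {
        return (num + 1) * (1ULL << scale);
    }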
diff view generated by jsdifflib
1
From: Leif Lindholm <leif@nuviainc.com>
1
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
2
2
3
The sbsa-ref platform uses a minimal device tree to pass the amount of memory
3
When booting a CPU with EL3 using the -kernel flag, set up CPTR_EL3 so
4
as well as number of cpus to the firmware. However, when dumping that
4
that SVE will not trap to EL3.
5
minimal dtb (with -M sbsa-ref,dumpdtb=<file>), the resulting blob
6
generates a warning when decompiled by dtc due to lack of reg property.
7
5
8
Add a simple reg property per cpu, representing a 64-bit MPIDR_EL1.
6
Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
9
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
This also ends up being cleaner than having the firmware calculate its
8
Message-id: 20201030151541.11976-1-remi@remlab.net
11
own IDs for generating ACPI.
12
13
Signed-off-by: Leif Lindholm <leif@nuviainc.com>
14
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
15
Message-id: 20200827124335.30586-1-leif@nuviainc.com
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
10
---
18
hw/arm/sbsa-ref.c | 29 +++++++++++++++++++++++------
11
hw/arm/boot.c | 3 +++
19
1 file changed, 23 insertions(+), 6 deletions(-)
12
1 file changed, 3 insertions(+)
20
13
21
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
14
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
22
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/arm/sbsa-ref.c
16
--- a/hw/arm/boot.c
24
+++ b/hw/arm/sbsa-ref.c
17
+++ b/hw/arm/boot.c
25
@@ -XXX,XX +XXX,XX @@ static const int sbsa_ref_irqmap[] = {
18
@@ -XXX,XX +XXX,XX @@ static void do_cpu_reset(void *opaque)
26
[SBSA_EHCI] = 11,
19
if (cpu_isar_feature(aa64_mte, cpu)) {
27
};
20
env->cp15.scr_el3 |= SCR_ATA;
28
21
}
29
+static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
22
+ if (cpu_isar_feature(aa64_sve, cpu)) {
30
+{
23
+ env->cp15.cptr_el[3] |= CPTR_EZ;
31
+ uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
24
+ }
32
+ return arm_cpu_mp_affinity(idx, clustersz);
25
/* AArch64 kernels never boot in secure mode */
33
+}
26
assert(!info->secure_boot);
34
+
27
/* This hook is only supported for AArch32 currently:
35
/*
36
* Firmware on this machine only uses ACPI table to load OS, these limited
37
* device tree nodes are just to let firmware know the info which varies from
38
@@ -XXX,XX +XXX,XX @@ static void create_fdt(SBSAMachineState *sms)
39
g_free(matrix);
40
}
41
42
+ /*
43
+ * From Documentation/devicetree/bindings/arm/cpus.yaml
44
+ * On ARM v8 64-bit systems this property is required
45
+ * and matches the MPIDR_EL1 register affinity bits.
46
+ *
47
+ * * If cpus node's #address-cells property is set to 2
48
+ *
49
+ * The first reg cell bits [7:0] must be set to
50
+ * bits [39:32] of MPIDR_EL1.
51
+ *
52
+ * The second reg cell bits [23:0] must be set to
53
+ * bits [23:0] of MPIDR_EL1.
54
+ */
55
qemu_fdt_add_subnode(sms->fdt, "/cpus");
56
+ qemu_fdt_setprop_cell(sms->fdt, "/cpus", "#address-cells", 2);
57
+ qemu_fdt_setprop_cell(sms->fdt, "/cpus", "#size-cells", 0x0);
58
59
for (cpu = sms->smp_cpus - 1; cpu >= 0; cpu--) {
60
char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
61
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
62
CPUState *cs = CPU(armcpu);
63
+ uint64_t mpidr = sbsa_ref_cpu_mp_affinity(sms, cpu);
64
65
qemu_fdt_add_subnode(sms->fdt, nodename);
66
+ qemu_fdt_setprop_u64(sms->fdt, nodename, "reg", mpidr);
67
68
if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
69
qemu_fdt_setprop_cell(sms->fdt, nodename, "numa-node-id",
70
@@ -XXX,XX +XXX,XX @@ static void sbsa_ref_init(MachineState *machine)
71
arm_load_kernel(ARM_CPU(first_cpu), machine, &sms->bootinfo);
72
}
73
74
-static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
75
-{
76
- uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
77
- return arm_cpu_mp_affinity(idx, clustersz);
78
-}
79
-
80
static const CPUArchIdList *sbsa_ref_possible_cpu_arch_ids(MachineState *ms)
81
{
82
unsigned int max_cpus = ms->smp.max_cpus;
83
--
28
--
84
2.20.1
29
2.20.1
85
30
86
31
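As a worked example of the binding quoted in the commit message: with
#address-cells = 2, the 64-bit value handed to qemu_fdt_setprop_u64() is
stored as two 32-bit cells, and the affinity fields land as follows (a sketch;
it assumes affinity bits outside the quoted ranges are zero, as they are for
values produced by arm_cpu_mp_affinity()):

    #include <stdint.h>

    static void mpidr_to_reg_cells(uint64_t mpidr, uint32_t cells[2])
    {
        cells[0] = (mpidr >> 32) & 0xff;   /* MPIDR_EL1 bits [39:32] */
        cells[1] = mpidr & 0xffffff;       /* MPIDR_EL1 bits [23:0]  */
    }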
diff view generated by jsdifflib
1
Implement VFP fp16 support for fused multiply-add insns
1
From: AlexChen <alex.chen@huawei.com>
2
VFNMA, VFNMS, VFMA, VFMS.
3
2
3
In omap_update_display(), the pointer omap_lcd is dereferenced before
4
being checked for validity, which may lead to a NULL pointer dereference.
5
So move the assignment to surface until after checking that omap_lcd is valid,
6
and move the surface_bits_per_pixel(surface) check to after the surface assignment.
7
8
Reported-by: Euler Robot <euler.robot@huawei.com>
9
Signed-off-by: AlexChen <alex.chen@huawei.com>
10
Message-id: 5F9CDB8A.9000001@huawei.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200828183354.27913-7-peter.maydell@linaro.org
7
---
13
---
8
target/arm/helper.h | 1 +
14
hw/display/omap_lcdc.c | 10 +++++++---
9
target/arm/vfp.decode | 5 +++
15
1 file changed, 7 insertions(+), 3 deletions(-)
10
target/arm/vfp_helper.c | 7 ++++
11
target/arm/translate-vfp.c.inc | 64 ++++++++++++++++++++++++++++++++++
12
4 files changed, 77 insertions(+)
13
16
14
diff --git a/target/arm/helper.h b/target/arm/helper.h
17
diff --git a/hw/display/omap_lcdc.c b/hw/display/omap_lcdc.c
15
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.h
19
--- a/hw/display/omap_lcdc.c
17
+++ b/target/arm/helper.h
20
+++ b/hw/display/omap_lcdc.c
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, ptr, i32)
21
@@ -XXX,XX +XXX,XX @@ static void omap_lcd_interrupts(struct omap_lcd_panel_s *s)
19
22
static void omap_update_display(void *opaque)
20
DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
21
DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
22
+DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, ptr)
23
24
DEF_HELPER_3(recps_f32, f32, env, f32, f32)
25
DEF_HELPER_3(rsqrts_f32, f32, env, f32, f32)
26
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/vfp.decode
29
+++ b/target/arm/vfp.decode
30
@@ -XXX,XX +XXX,XX @@ VDIV_hp ---- 1110 1.00 .... .... 1001 .0.0 .... @vfp_dnm_s
31
VDIV_sp ---- 1110 1.00 .... .... 1010 .0.0 .... @vfp_dnm_s
32
VDIV_dp ---- 1110 1.00 .... .... 1011 .0.0 .... @vfp_dnm_d
33
34
+VFMA_hp ---- 1110 1.10 .... .... 1001 .0. 0 .... @vfp_dnm_s
35
+VFMS_hp ---- 1110 1.10 .... .... 1001 .1. 0 .... @vfp_dnm_s
36
+VFNMA_hp ---- 1110 1.01 .... .... 1001 .0. 0 .... @vfp_dnm_s
37
+VFNMS_hp ---- 1110 1.01 .... .... 1001 .1. 0 .... @vfp_dnm_s
38
+
39
VFMA_sp ---- 1110 1.10 .... .... 1010 .0. 0 .... @vfp_dnm_s
40
VFMS_sp ---- 1110 1.10 .... .... 1010 .1. 0 .... @vfp_dnm_s
41
VFNMA_sp ---- 1110 1.01 .... .... 1010 .0. 0 .... @vfp_dnm_s
42
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/vfp_helper.c
45
+++ b/target/arm/vfp_helper.c
46
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrte_u32)(uint32_t a)
47
}
48
49
/* VFPv4 fused multiply-accumulate */
50
+dh_ctype_f16 VFP_HELPER(muladd, h)(dh_ctype_f16 a, dh_ctype_f16 b,
51
+ dh_ctype_f16 c, void *fpstp)
52
+{
53
+ float_status *fpst = fpstp;
54
+ return float16_muladd(a, b, c, 0, fpst);
55
+}
56
+
57
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
58
{
23
{
59
float_status *fpst = fpstp;
24
struct omap_lcd_panel_s *omap_lcd = (struct omap_lcd_panel_s *) opaque;
60
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
25
- DisplaySurface *surface = qemu_console_surface(omap_lcd->con);
61
index XXXXXXX..XXXXXXX 100644
26
+ DisplaySurface *surface;
62
--- a/target/arm/translate-vfp.c.inc
27
draw_line_func draw_line;
63
+++ b/target/arm/translate-vfp.c.inc
28
int size, height, first, last;
64
@@ -XXX,XX +XXX,XX @@ static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
29
int width, linesize, step, bpp, frame_offset;
65
a->vd, a->vn, a->vm, false);
30
hwaddr frame_base;
66
}
31
67
32
- if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable ||
68
+static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
33
- !surface_bits_per_pixel(surface)) {
69
+{
34
+ if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable) {
70
+ /*
35
+ return;
71
+ * VFNMA : fd = muladd(-fd, fn, fm)
72
+ * VFNMS : fd = muladd(-fd, -fn, fm)
73
+ * VFMA : fd = muladd( fd, fn, fm)
74
+ * VFMS : fd = muladd( fd, -fn, fm)
75
+ *
76
+ * These are fused multiply-add, and must be done as one floating
77
+ * point operation with no rounding between the multiplication and
78
+ * addition steps. NB that doing the negations here as separate
79
+ * steps is correct : an input NaN should come out with its sign
80
+ * bit flipped if it is a negated-input.
81
+ */
82
+ TCGv_ptr fpst;
83
+ TCGv_i32 vn, vm, vd;
84
+
85
+ /*
86
+ * Present in VFPv4 only, and only with the FP16 extension.
87
+ * Note that we can't rely on the SIMDFMAC check alone, because
88
+ * in a Neon-no-VFP core that ID register field will be non-zero.
89
+ */
90
+ if (!dc_isar_feature(aa32_fp16_arith, s) ||
91
+ !dc_isar_feature(aa32_simdfmac, s) ||
92
+ !dc_isar_feature(aa32_fpsp_v2, s)) {
93
+ return false;
94
+ }
36
+ }
95
+
37
+
96
+ if (s->vec_len != 0 || s->vec_stride != 0) {
38
+ surface = qemu_console_surface(omap_lcd->con);
97
+ return false;
39
+ if (!surface_bits_per_pixel(surface)) {
98
+ }
40
return;
99
+
41
}
100
+ if (!vfp_access_check(s)) {
101
+ return true;
102
+ }
103
+
104
+ vn = tcg_temp_new_i32();
105
+ vm = tcg_temp_new_i32();
106
+ vd = tcg_temp_new_i32();
107
+
108
+ neon_load_reg32(vn, a->vn);
109
+ neon_load_reg32(vm, a->vm);
110
+ if (neg_n) {
111
+ /* VFNMS, VFMS */
112
+ gen_helper_vfp_negh(vn, vn);
113
+ }
114
+ neon_load_reg32(vd, a->vd);
115
+ if (neg_d) {
116
+ /* VFNMA, VFNMS */
117
+ gen_helper_vfp_negh(vd, vd);
118
+ }
119
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
120
+ gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
121
+ neon_store_reg32(vd, a->vd);
122
+
123
+ tcg_temp_free_ptr(fpst);
124
+ tcg_temp_free_i32(vn);
125
+ tcg_temp_free_i32(vm);
126
+ tcg_temp_free_i32(vd);
127
+
128
+ return true;
129
+}
130
+
131
static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
132
{
133
/*
134
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
135
MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
136
MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)
137
138
+MAKE_VFM_TRANS_FNS(hp)
139
MAKE_VFM_TRANS_FNS(sp)
140
MAKE_VFM_TRANS_FNS(dp)
141
42
142
--
43
--
143
2.20.1
44
2.20.1
144
45
145
46
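The four fused ops in the fp16 patch all funnel into a single call; only the
input signs differ, and there is exactly one rounding step. A condensed sketch
of the arithmetic in do_vfm_hp(), with float16_chs() (the softfloat sign-flip)
standing in for the vfp_negh helper:

    /* neg_n/neg_d selection: VFMA (false,false), VFMS (true,false),
     * VFNMA (false,true), VFNMS (true,true). */
    static float16 do_vfm_h(float16 vd, float16 vn, float16 vm,
                            bool neg_n, bool neg_d, float_status *fpst)
    {
        if (neg_n) {
            vn = float16_chs(vn);   /* VFMS, VFNMS */
        }
        if (neg_d) {
            vd = float16_chs(vd);   /* VFNMA, VFNMS */
        }
        /* one fused operation, no intermediate rounding: vn * vm + vd */
        return float16_muladd(vn, vm, vd, 0, fpst);
    }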
diff view generated by jsdifflib
1
Convert the neon floating-point vector absolute comparison ops
1
From: AlexChen <alex.chen@huawei.com>
2
VACGE and VACGT over to using a gvec helper and use this to
3
implement the fp16 case.
4
2
3
In exynos4210_fimd_update(), the pointer s is dereferenced before
4
being checked for validity, which may lead to a NULL pointer dereference.
5
So move the assignment to global_width until after checking that s is valid.
6
7
Reported-by: Euler Robot <euler.robot@huawei.com>
8
Signed-off-by: Alex Chen <alex.chen@huawei.com>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Message-id: 5F9F8D88.9030102@huawei.com
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20200828183354.27913-28-peter.maydell@linaro.org
8
---
12
---
9
target/arm/helper.h | 6 ++++++
13
hw/display/exynos4210_fimd.c | 4 +++-
10
target/arm/vec_helper.c | 26 ++++++++++++++++++++++++++
14
1 file changed, 3 insertions(+), 1 deletion(-)
11
target/arm/translate-neon.c.inc | 4 ++--
12
3 files changed, 34 insertions(+), 2 deletions(-)
13
15
14
diff --git a/target/arm/helper.h b/target/arm/helper.h
16
diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.h
18
--- a/hw/display/exynos4210_fimd.c
17
+++ b/target/arm/helper.h
19
+++ b/hw/display/exynos4210_fimd.c
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
20
@@ -XXX,XX +XXX,XX @@ static void exynos4210_fimd_update(void *opaque)
19
DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
21
bool blend = false;
20
DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
22
uint8_t *host_fb_addr;
21
23
bool is_dirty = false;
22
+DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
24
- const int global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1;
23
+DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
25
+ int global_width;
26
27
if (!s || !s->console || !s->enabled ||
28
surface_bits_per_pixel(qemu_console_surface(s->console)) == 0) {
29
return;
30
}
24
+
31
+
25
+DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
32
+ global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1;
26
+DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
33
exynos4210_update_resolution(s);
27
+
34
surface = qemu_console_surface(s->console);
28
DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
29
void, ptr, ptr, ptr, ptr, i32)
30
DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
31
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/arm/vec_helper.c
34
+++ b/target/arm/vec_helper.c
35
@@ -XXX,XX +XXX,XX @@ static uint32_t float32_cgt(float32 op1, float32 op2, float_status *stat)
36
return -float32_lt(op2, op1, stat);
37
}
38
39
+static uint16_t float16_acge(float16 op1, float16 op2, float_status *stat)
40
+{
41
+ return -float16_le(float16_abs(op2), float16_abs(op1), stat);
42
+}
43
+
44
+static uint32_t float32_acge(float32 op1, float32 op2, float_status *stat)
45
+{
46
+ return -float32_le(float32_abs(op2), float32_abs(op1), stat);
47
+}
48
+
49
+static uint16_t float16_acgt(float16 op1, float16 op2, float_status *stat)
50
+{
51
+ return -float16_lt(float16_abs(op2), float16_abs(op1), stat);
52
+}
53
+
54
+static uint32_t float32_acgt(float32 op1, float32 op2, float_status *stat)
55
+{
56
+ return -float32_lt(float32_abs(op2), float32_abs(op1), stat);
57
+}
58
+
59
#define DO_2OP(NAME, FUNC, TYPE) \
60
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
61
{ \
62
@@ -XXX,XX +XXX,XX @@ DO_3OP(gvec_fcge_s, float32_cge, float32)
63
DO_3OP(gvec_fcgt_h, float16_cgt, float16)
64
DO_3OP(gvec_fcgt_s, float32_cgt, float32)
65
66
+DO_3OP(gvec_facge_h, float16_acge, float16)
67
+DO_3OP(gvec_facge_s, float32_acge, float32)
68
+
69
+DO_3OP(gvec_facgt_h, float16_acgt, float16)
70
+DO_3OP(gvec_facgt_s, float32_acgt, float32)
71
+
72
#ifdef TARGET_AARCH64
73
74
DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
75
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
76
index XXXXXXX..XXXXXXX 100644
77
--- a/target/arm/translate-neon.c.inc
78
+++ b/target/arm/translate-neon.c.inc
79
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s, gen_helper_gvec_fmul_h)
80
DO_3S_FP_GVEC(VCEQ, gen_helper_gvec_fceq_s, gen_helper_gvec_fceq_h)
81
DO_3S_FP_GVEC(VCGE, gen_helper_gvec_fcge_s, gen_helper_gvec_fcge_h)
82
DO_3S_FP_GVEC(VCGT, gen_helper_gvec_fcgt_s, gen_helper_gvec_fcgt_h)
83
+DO_3S_FP_GVEC(VACGE, gen_helper_gvec_facge_s, gen_helper_gvec_facge_h)
84
+DO_3S_FP_GVEC(VACGT, gen_helper_gvec_facgt_s, gen_helper_gvec_facgt_h)
85
86
/*
87
* For all the functions using this macro, size == 1 means fp16,
88
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VCGT, gen_helper_gvec_fcgt_s, gen_helper_gvec_fcgt_h)
89
return do_3same_fp(s, a, FUNC, READS_VD); \
90
}
91
92
-DO_3S_FP(VACGE, gen_helper_neon_acge_f32, false)
93
-DO_3S_FP(VACGT, gen_helper_neon_acgt_f32, false)
94
DO_3S_FP(VMAX, gen_helper_vfp_maxs, false)
95
DO_3S_FP(VMIN, gen_helper_vfp_mins, false)
96
35
97
--
36
--
98
2.20.1
37
2.20.1
99
38
100
39
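A detail that makes the new VACGE/VACGT helpers read oddly at first: the
softfloat predicates return 0 or 1, and negating that result yields the
all-zeroes/all-ones per-lane mask that Neon comparisons must produce. For fp16
VACGE, as in the patch:

    /* |op2| <= |op1| ? 0xffff : 0x0000; the unary minus widens the
     * 0/1 predicate result into a full-width lane mask. */
    static uint16_t float16_acge(float16 op1, float16 op2, float_status *stat)
    {
        return -float16_le(float16_abs(op2), float16_abs(op1), stat);
    }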
diff view generated by jsdifflib
1
Convert the Neon VRINT-with-specified-rounding-mode insns to gvec,
1
In arm_v7m_mmu_idx_for_secstate() we get the 'priv' level to pass to
2
and use this to implement the fp16 versions.
2
arm_v7m_mmu_idx_for_secstate_and_priv() by calling arm_current_el().
3
This is incorrect when the security state being queried is not the
4
current one, because arm_current_el() uses the current security state
5
to determine which of the banked CONTROL.nPRIV bits to look at.
6
The effect was that if (for instance) Secure state was in privileged
7
mode but Non-Secure was not then we would return the wrong MMU index.
8
9
The only places where we are using this function in a way that could
10
trigger this bug are for the stack loads during a v8M function-return
11
and for the instruction fetch of a v8M SG insn.
12
13
Fix the bug by expanding out the M-profile version of the
14
arm_current_el() logic inline so it can use the passed in secstate
15
rather than env->v7m.secure.
3
16
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20200828183354.27913-41-peter.maydell@linaro.org
19
Message-id: 20201022164408.13214-1-peter.maydell@linaro.org
7
---
20
---
8
target/arm/helper.h | 4 +-
21
target/arm/m_helper.c | 3 ++-
9
target/arm/vec_helper.c | 21 +++++++++++
22
1 file changed, 2 insertions(+), 1 deletion(-)
10
target/arm/vfp_helper.c | 17 ---------
11
target/arm/translate-neon.c.inc | 67 +++------------------------------
12
4 files changed, 30 insertions(+), 79 deletions(-)
13
23
14
diff --git a/target/arm/helper.h b/target/arm/helper.h
24
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
15
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.h
26
--- a/target/arm/m_helper.c
17
+++ b/target/arm/helper.h
27
+++ b/target/arm/m_helper.c
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, ptr)
28
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
19
DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, ptr)
29
/* Return the MMU index for a v7M CPU in the specified security state */
20
30
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
21
DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, ptr)
31
{
22
-DEF_HELPER_FLAGS_2(set_neon_rmode, TCG_CALL_NO_RWG, i32, i32, env)
32
- bool priv = arm_current_el(env) != 0;
23
33
+ bool priv = arm_v7m_is_handler_mode(env) ||
24
DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, ptr, i32)
34
+ !(env->v7m.control[secstate] & 1);
25
DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, ptr, i32)
35
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
36
return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
27
DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
28
DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
29
30
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
32
+
33
DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
34
DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
35
DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
36
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/vec_helper.c
39
+++ b/target/arm/vec_helper.c
40
@@ -XXX,XX +XXX,XX @@ DO_VCVT_RMODE(gvec_vcvt_rm_sh, helper_vfp_toshh, uint16_t)
41
DO_VCVT_RMODE(gvec_vcvt_rm_uh, helper_vfp_touhh, uint16_t)
42
43
#undef DO_VCVT_RMODE
44
+
45
+#define DO_VRINT_RMODE(NAME, FUNC, TYPE) \
46
+ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
47
+ { \
48
+ float_status *fpst = stat; \
49
+ intptr_t i, oprsz = simd_oprsz(desc); \
50
+ uint32_t rmode = simd_data(desc); \
51
+ uint32_t prev_rmode = get_float_rounding_mode(fpst); \
52
+ TYPE *d = vd, *n = vn; \
53
+ set_float_rounding_mode(rmode, fpst); \
54
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
55
+ d[i] = FUNC(n[i], fpst); \
56
+ } \
57
+ set_float_rounding_mode(prev_rmode, fpst); \
58
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
59
+ }
60
+
61
+DO_VRINT_RMODE(gvec_vrint_rm_h, helper_rinth, uint16_t)
62
+DO_VRINT_RMODE(gvec_vrint_rm_s, helper_rints, uint32_t)
63
+
64
+#undef DO_VRINT_RMODE
65
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/target/arm/vfp_helper.c
68
+++ b/target/arm/vfp_helper.c
69
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
70
return prev_rmode;
71
}
37
}
72
73
-/* Set the current fp rounding mode in the standard fp status and return
74
- * the old one. This is for NEON instructions that need to change the
75
- * rounding mode but wish to use the standard FPSCR values for everything
76
- * else. Always set the rounding mode back to the correct value after
77
- * modifying it.
78
- * The argument is a softfloat float_round_ value.
79
- */
80
-uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
81
-{
82
- float_status *fp_status = &env->vfp.standard_fp_status;
83
-
84
- uint32_t prev_rmode = get_float_rounding_mode(fp_status);
85
- set_float_rounding_mode(rmode, fp_status);
86
-
87
- return prev_rmode;
88
-}
89
-
90
/* Half precision conversions. */
91
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
92
{
93
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate-neon.c.inc
96
+++ b/target/arm/translate-neon.c.inc
97
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
98
return do_2misc_fp(s, a, gen_helper_rints_exact);
99
}
100
101
-static bool do_vrint(DisasContext *s, arg_2misc *a, int rmode)
102
-{
103
- /*
104
- * Handle a VRINT* operation by iterating 32 bits at a time,
105
- * with a specified rounding mode in operation.
106
- */
107
- int pass;
108
- TCGv_ptr fpst;
109
- TCGv_i32 tcg_rmode;
110
-
111
- if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
112
- !arm_dc_feature(s, ARM_FEATURE_V8)) {
113
- return false;
114
- }
115
-
116
- /* UNDEF accesses to D16-D31 if they don't exist. */
117
- if (!dc_isar_feature(aa32_simd_r32, s) &&
118
- ((a->vd | a->vm) & 0x10)) {
119
- return false;
120
- }
121
-
122
- if (a->size != 2) {
123
- /* TODO: FP16 will be the size == 1 case */
124
- return false;
125
- }
126
-
127
- if ((a->vd | a->vm) & a->q) {
128
- return false;
129
- }
130
-
131
- if (!vfp_access_check(s)) {
132
- return true;
133
- }
134
-
135
- fpst = fpstatus_ptr(FPST_STD);
136
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
137
- gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode, cpu_env);
138
- for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
139
- TCGv_i32 tmp = neon_load_reg(a->vm, pass);
140
- gen_helper_rints(tmp, tmp, fpst);
141
- neon_store_reg(a->vd, pass, tmp);
142
- }
143
- gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode, cpu_env);
144
- tcg_temp_free_i32(tcg_rmode);
145
- tcg_temp_free_ptr(fpst);
146
-
147
- return true;
148
-}
149
-
150
-#define DO_VRINT(INSN, RMODE) \
151
- static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
152
- { \
153
- return do_vrint(s, a, RMODE); \
154
- }
155
-
156
-DO_VRINT(VRINTN, FPROUNDING_TIEEVEN)
157
-DO_VRINT(VRINTA, FPROUNDING_TIEAWAY)
158
-DO_VRINT(VRINTZ, FPROUNDING_ZERO)
159
-DO_VRINT(VRINTM, FPROUNDING_NEGINF)
160
-DO_VRINT(VRINTP, FPROUNDING_POSINF)
161
-
162
#define DO_VEC_RMODE(INSN, RMODE, OP) \
163
static void gen_##INSN(unsigned vece, uint32_t rd_ofs, \
164
uint32_t rm_ofs, \
165
@@ -XXX,XX +XXX,XX @@ DO_VEC_RMODE(VCVTPS, FPROUNDING_POSINF, vcvt_rm_s)
166
DO_VEC_RMODE(VCVTMU, FPROUNDING_NEGINF, vcvt_rm_u)
167
DO_VEC_RMODE(VCVTMS, FPROUNDING_NEGINF, vcvt_rm_s)
168
169
+DO_VEC_RMODE(VRINTN, FPROUNDING_TIEEVEN, vrint_rm_)
170
+DO_VEC_RMODE(VRINTA, FPROUNDING_TIEAWAY, vrint_rm_)
171
+DO_VEC_RMODE(VRINTZ, FPROUNDING_ZERO, vrint_rm_)
172
+DO_VEC_RMODE(VRINTM, FPROUNDING_NEGINF, vrint_rm_)
173
+DO_VEC_RMODE(VRINTP, FPROUNDING_POSINF, vrint_rm_)
174
+
175
static bool trans_VSWP(DisasContext *s, arg_2misc *a)
176
{
177
TCGv_i64 rm, rd;
178
--
38
--
179
2.20.1
39
2.20.1
180
40
181
41
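For readability, this is roughly what the fp16 instantiation
DO_VRINT_RMODE(gvec_vrint_rm_h, helper_rinth, uint16_t) in the patch above
expands to once the backslash continuations are resolved (all of the names
come from the patch itself):

    void HELPER(gvec_vrint_rm_h)(void *vd, void *vn, void *stat, uint32_t desc)
    {
        float_status *fpst = stat;
        intptr_t i, oprsz = simd_oprsz(desc);
        uint32_t rmode = simd_data(desc);          /* rounding mode from desc */
        uint32_t prev_rmode = get_float_rounding_mode(fpst);
        uint16_t *d = vd, *n = vn;
        set_float_rounding_mode(rmode, fpst);
        for (i = 0; i < oprsz / sizeof(uint16_t); i++) {
            d[i] = helper_rinth(n[i], fpst);       /* round-to-integral, fp16 */
        }
        set_float_rounding_mode(prev_rmode, fpst); /* restore previous mode */
        clear_tail(d, oprsz, simd_maxsz(desc));
    }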
Add gvec helpers for doing Neon-style indexed non-fused fp
multiply-and-accumulate operations.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20200828183354.27913-44-peter.maydell@linaro.org
---
target/arm/helper.h | 10 ++++++++++
target/arm/vec_helper.c | 27 ++++++++++++++++++++++-----
2 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, )

#undef DO_MLA_IDX

-#define DO_FMUL_IDX(NAME, TYPE, H) \
+#define DO_FMUL_IDX(NAME, ADD, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{ \
intptr_t i, j, oprsz = simd_oprsz(desc); \
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
TYPE mm = m[H(i + idx)]; \
for (j = 0; j < segment; j++) { \
- d[i + j] = TYPE##_mul(n[i + j], mm, stat); \
+ d[i + j] = TYPE##_##ADD(d[i + j], \
+ TYPE##_mul(n[i + j], mm, stat), stat); \
} \
} \
clear_tail(d, oprsz, simd_maxsz(desc)); \
}

-DO_FMUL_IDX(gvec_fmul_idx_h, float16, H2)
-DO_FMUL_IDX(gvec_fmul_idx_s, float32, H4)
-DO_FMUL_IDX(gvec_fmul_idx_d, float64, )
+#define float16_nop(N, M, S) (M)
+#define float32_nop(N, M, S) (M)
+#define float64_nop(N, M, S) (M)

+DO_FMUL_IDX(gvec_fmul_idx_h, nop, float16, H2)
+DO_FMUL_IDX(gvec_fmul_idx_s, nop, float32, H4)
+DO_FMUL_IDX(gvec_fmul_idx_d, nop, float64, )
+
+/*
+ * Non-fused multiply-accumulate operations, for Neon. NB that unlike
+ * the fused ops below they assume accumulate both from and into Vd.
+ */
+DO_FMUL_IDX(gvec_fmla_nf_idx_h, add, float16, H2)
+DO_FMUL_IDX(gvec_fmla_nf_idx_s, add, float32, H4)
+DO_FMUL_IDX(gvec_fmls_nf_idx_h, sub, float16, H2)
+DO_FMUL_IDX(gvec_fmls_nf_idx_s, sub, float32, H4)
+
+#undef float16_nop
+#undef float32_nop
+#undef float64_nop
#undef DO_FMUL_IDX

#define DO_FMLA_IDX(NAME, TYPE, H) \
--
2.20.1

On some hosts (eg Ubuntu Bionic) pkg-config returns a set of
libraries for gio-2.0 which don't actually work when compiling
statically. (Specifically, the returned library string includes
-lmount, but not -lblkid which -lmount depends upon, so linking
fails due to missing symbols.)

Check that the libraries work, and don't enable gio if they don't,
in the same way we do for gnutls.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200928160402.7961-1-peter.maydell@linaro.org
---
configure | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if test "$static" = yes && test "$mingw32" = yes; then
fi

if $pkg_config --atleast-version=$glib_req_ver gio-2.0; then
- gio=yes
gio_cflags=$($pkg_config --cflags gio-2.0)
gio_libs=$($pkg_config --libs gio-2.0)
gdbus_codegen=$($pkg_config --variable=gdbus_codegen gio-2.0)
if [ ! -x "$gdbus_codegen" ]; then
gdbus_codegen=
fi
+ # Check that the libraries actually work -- Ubuntu 18.04 ships
+ # with pkg-config --static --libs data for gio-2.0 that is missing
+ # -lblkid and will give a link error.
+ write_c_skeleton
+ if compile_prog "" "gio_libs" ; then
+ gio=yes
+ else
+ gio=no
+ fi
else
gio=no
fi
--
2.20.1
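To see how the reworked DO_FMUL_IDX macro above yields both the plain
multiply and the new multiply-accumulate forms, note that with ADD=add the
fp16 instantiation makes the inner statement expand (roughly) to:

    d[i + j] = float16_add(d[i + j], float16_mul(n[i + j], mm, stat), stat);

while the 'nop' variants simply discard the accumulator operand, since
float16_nop(N, M, S) is defined as (M), leaving the original pure multiply:

    d[i + j] = float16_mul(n[i + j], mm, stat);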
Convert the Neon VRSQRTS insn to using a gvec helper,
and use this to implement the fp16 case.

As with VRECPS, we adjust the phrasing of the new implementation
slightly so that the fp32 version parallels the fp16 one.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-35-peter.maydell@linaro.org
---
target/arm/helper.h | 4 +++-
target/arm/vec_helper.c | 30 ++++++++++++++++++++++++++++++
target/arm/vfp_helper.c | 15 ---------------
target/arm/translate-neon.c.inc | 21 +--------------------
4 files changed, 34 insertions(+), 36 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, ptr)

-DEF_HELPER_3(rsqrts_f32, f32, env, f32, f32)
DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, ptr)
DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i3
DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ static float32 float32_recps_nf(float32 op1, float32 op2, float_status *stat)
return float32_sub(float32_two, float32_mul(op1, op2, stat), stat);
}

+/* Reciprocal square-root step. AArch32 non-fused semantics. */
+static float16 float16_rsqrts_nf(float16 op1, float16 op2, float_status *stat)
+{
+ op1 = float16_squash_input_denormal(op1, stat);
+ op2 = float16_squash_input_denormal(op2, stat);
+
+ if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
+ (float16_is_infinity(op2) && float16_is_zero(op1))) {
+ return float16_one_point_five;
+ }
+ op1 = float16_sub(float16_three, float16_mul(op1, op2, stat), stat);
+ return float16_div(op1, float16_two, stat);
+}
+
+static float32 float32_rsqrts_nf(float32 op1, float32 op2, float_status *stat)
+{
+ op1 = float32_squash_input_denormal(op1, stat);
+ op2 = float32_squash_input_denormal(op2, stat);
+
+ if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
+ (float32_is_infinity(op2) && float32_is_zero(op1))) {
+ return float32_one_point_five;
+ }
+ op1 = float32_sub(float32_three, float32_mul(op1, op2, stat), stat);
+ return float32_div(op1, float32_two, stat);
+}
+
#define DO_3OP(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{ \
@@ -XXX,XX +XXX,XX @@ DO_3OP(gvec_fminnum_s, float32_minnum, float32)
DO_3OP(gvec_recps_nf_h, float16_recps_nf, float16)
DO_3OP(gvec_recps_nf_s, float32_recps_nf, float32)

+DO_3OP(gvec_rsqrts_nf_h, float16_rsqrts_nf, float16)
+DO_3OP(gvec_rsqrts_nf_s, float32_rsqrts_nf, float32)
+
#ifdef TARGET_AARCH64

DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
return r;
}

-float32 HELPER(rsqrts_f32)(CPUARMState *env, float32 a, float32 b)
-{
- float_status *s = &env->vfp.standard_fp_status;
- float32 product;
- if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
- (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
- if (!(float32_is_zero(a) || float32_is_zero(b))) {
- float_raise(float_flag_input_denormal, s);
- }
- return float32_one_point_five;
- }
- product = float32_mul(a, b, s);
- return float32_div(float32_sub(float32_three, product, s), float32_two, s);
-}
-
/* NEON helpers. */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_s, gen_helper_gvec_fmls_h)
DO_3S_FP_GVEC(VFMA, gen_helper_gvec_vfma_s, gen_helper_gvec_vfma_h)
DO_3S_FP_GVEC(VFMS, gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_h)
DO_3S_FP_GVEC(VRECPS, gen_helper_gvec_recps_nf_s, gen_helper_gvec_recps_nf_h)
+DO_3S_FP_GVEC(VRSQRTS, gen_helper_gvec_rsqrts_nf_s, gen_helper_gvec_rsqrts_nf_h)

WRAP_FP_GVEC(gen_VMAXNM_fp32_3s, FPST_STD, gen_helper_gvec_fmaxnum_s)
WRAP_FP_GVEC(gen_VMAXNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fmaxnum_h)
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
return do_3same(s, a, gen_VMINNM_fp32_3s);
}

-WRAP_ENV_FN(gen_VRSQRTS_tramp, gen_helper_rsqrts_f32)
-
-static void gen_VRSQRTS_fp_3s(unsigned vece, uint32_t rd_ofs,
- uint32_t rn_ofs, uint32_t rm_ofs,
- uint32_t oprsz, uint32_t maxsz)
-{
- static const GVecGen3 ops = { .fni4 = gen_VRSQRTS_tramp };
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
-}
-
-static bool trans_VRSQRTS_fp_3s(DisasContext *s, arg_3same *a)
-{
- if (a->size != 0) {
- /* TODO fp16 support */
- return false;
- }
-
- return do_3same(s, a, gen_VRSQRTS_fp_3s);
-}
-
static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
{
/* FP operations handled pairwise 32 bits at a time */
--
2.20.1

In gicv3_init_cpuif() we copy the ARMCPU gicv3_maintenance_interrupt
into the GICv3CPUState struct's maintenance_irq field. This will
only work if the board happens to have already wired up the CPU
maintenance IRQ before the GIC was realized. Unfortunately this is
not the case for the 'virt' board, and so the value that gets copied
is NULL (since a qemu_irq is really a pointer to an IRQState struct
under the hood). The effect is that the CPU interface code never
actually raises the maintenance interrupt line.

Instead, since the GICv3CPUState has a pointer to the CPUState, make
the dereference at the point where we want to raise the interrupt, to
avoid an implicit requirement on board code to wire things up in a
particular order.

Reported-by: Jose Martins <josemartins90@gmail.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20201009153904.28529-1-peter.maydell@linaro.org
Reviewed-by: Luc Michel <luc@lmichel.fr>
---
include/hw/intc/arm_gicv3_common.h | 1 -
hw/intc/arm_gicv3_cpuif.c | 5 ++---
2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -XXX,XX +XXX,XX @@ struct GICv3CPUState {
qemu_irq parent_fiq;
qemu_irq parent_virq;
qemu_irq parent_vfiq;
- qemu_irq maintenance_irq;

/* Redistributor */
uint32_t level; /* Current IRQ level */
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
int irqlevel = 0;
int fiqlevel = 0;
int maintlevel = 0;
+ ARMCPU *cpu = ARM_CPU(cs->cpu);

idx = hppvi_index(cs);
trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)

qemu_set_irq(cs->parent_vfiq, fiqlevel);
qemu_set_irq(cs->parent_virq, irqlevel);
- qemu_set_irq(cs->maintenance_irq, maintlevel);
+ qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
}

static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -XXX,XX +XXX,XX @@ void gicv3_init_cpuif(GICv3State *s)
&& cpu->gic_num_lrs) {
int j;

- cs->maintenance_irq = cpu->gicv3_maintenance_interrupt;
-
cs->num_list_regs = cpu->gic_num_lrs;
cs->vpribits = cpu->gic_vpribits;
cs->vprebits = cpu->gic_vprebits;
--
2.20.1
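The non-fused VRSQRTS step above is the AArch32 (3 - op1 * op2) / 2 formula,
with the architecturally special-cased result of 1.5 when one operand is an
infinity and the other a zero. As a quick worked example: with op1 = op2 =
1.0 the result is (3 - 1*1)/2 = 1.0, which is exactly the fixed point of the
Newton-Raphson iteration for 1/sqrt(1.0).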
Implement fp16 for the Neon VCVT insns which convert between
float and fixed-point.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-39-peter.maydell@linaro.org
---
target/arm/helper.h | 5 +++++
target/arm/neon-dp.decode | 8 +++++++-
target/arm/vec_helper.c | 4 ++++
target/arm/translate-neon.c.inc | 5 +++++
4 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_vcvt_fs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_vcvt_fu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_hs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/neon-dp.decode
@@ -XXX,XX +XXX,XX @@ VMINNM_fp_3s 1111 001 1 0 . 1 . .... .... 1111 ... 1 .... @3same_fp
# We use size=0 for fp32 and size=1 for fp16 to match the 3-same encodings.
@2reg_vcvt .... ... . . . 1 ..... .... .... . q:1 . . .... \
&2reg_shift vm=%vm_dp vd=%vd_dp size=0 shift=%neon_rshift_i5
+@2reg_vcvt_f16 .... ... . . . 11 .... .... .... . q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=1 shift=%neon_rshift_i4

VSHR_S_2sh 1111 001 0 1 . ...... .... 0000 . . . 1 .... @2reg_shr_d
VSHR_S_2sh 1111 001 0 1 . ...... .... 0000 . . . 1 .... @2reg_shr_s
@@ -XXX,XX +XXX,XX @@ VSHLL_U_2sh 1111 001 1 1 . ...... .... 1010 . 0 . 1 .... @2reg_shll_h
VSHLL_U_2sh 1111 001 1 1 . ...... .... 1010 . 0 . 1 .... @2reg_shll_b

# VCVT fixed<->float conversions
-# TODO: FP16 fixed<->float conversions are opc==0b1100 and 0b1101
+VCVT_SH_2sh 1111 001 0 1 . ...... .... 1100 0 . . 1 .... @2reg_vcvt_f16
+VCVT_UH_2sh 1111 001 1 1 . ...... .... 1100 0 . . 1 .... @2reg_vcvt_f16
+VCVT_HS_2sh 1111 001 0 1 . ...... .... 1101 0 . . 1 .... @2reg_vcvt_f16
+VCVT_HU_2sh 1111 001 1 1 . ...... .... 1101 0 . . 1 .... @2reg_vcvt_f16
+
VCVT_SF_2sh 1111 001 0 1 . ...... .... 1110 0 . . 1 .... @2reg_vcvt
VCVT_UF_2sh 1111 001 1 1 . ...... .... 1110 0 . . 1 .... @2reg_vcvt
VCVT_FS_2sh 1111 001 0 1 . ...... .... 1111 0 . . 1 .... @2reg_vcvt
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_uf, helper_vfp_ultos, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_fs, helper_vfp_tosls_round_to_zero, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_fu, helper_vfp_touls_round_to_zero, uint32_t)
+DO_VCVT_FIXED(gvec_vcvt_sh, helper_vfp_shtoh, uint16_t)
+DO_VCVT_FIXED(gvec_vcvt_uh, helper_vfp_uhtoh, uint16_t)
+DO_VCVT_FIXED(gvec_vcvt_hs, helper_vfp_toshh_round_to_zero, uint16_t)
+DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t)

#undef DO_VCVT_FIXED
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ DO_FP_2SH(VCVT_UF, gen_helper_gvec_vcvt_uf)
DO_FP_2SH(VCVT_FS, gen_helper_gvec_vcvt_fs)
DO_FP_2SH(VCVT_FU, gen_helper_gvec_vcvt_fu)

+DO_FP_2SH(VCVT_SH, gen_helper_gvec_vcvt_sh)
+DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
+DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs)
+DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu)
+
static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
{
/*
--
2.20.1

The kerneldoc script currently emits Sphinx markup for a macro with
arguments that uses the c:function directive. This is correct for
Sphinx versions earlier than Sphinx 3, where c:macro doesn't allow
documentation of macros with arguments and c:function is not picky
about the syntax of what it is passed. However, in Sphinx 3 the
c:macro directive was enhanced to support macros with arguments,
and c:function was made more picky about what syntax it accepted.

When kerneldoc is told that it needs to produce output for Sphinx
3 or later, make it emit c:function only for functions and c:macro
for macros with arguments. We assume that anything with a return
type is a function and anything without is a macro.

This fixes the Sphinx error:

/home/petmay01/linaro/qemu-from-laptop/qemu/docs/../include/qom/object.h:155:Error in declarator
If declarator-id with parameters (e.g., 'void f(int arg)'):
Invalid C declaration: Expected identifier in nested name. [error at 25]
DECLARE_INSTANCE_CHECKER ( InstanceType, OBJ_NAME, TYPENAME)
-------------------------^
If parenthesis in noptr-declarator (e.g., 'void (*f(int arg))(double)'):
Error in declarator or parameters
Invalid C declaration: Expecting "(" in parameters. [error at 39]
DECLARE_INSTANCE_CHECKER ( InstanceType, OBJ_NAME, TYPENAME)
---------------------------------------^

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20201030174700.7204-2-peter.maydell@linaro.org
---
scripts/kernel-doc | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index XXXXXXX..XXXXXXX 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -XXX,XX +XXX,XX @@ sub output_function_rst(%) {
    output_highlight_rst($args{'purpose'});
    $start = "\n\n**Syntax**\n\n ``";
} else {
-    print ".. c:function:: ";
+ if ((split(/\./, $sphinx_version))[0] >= 3) {
+ # Sphinx 3 and later distinguish macros and functions and
+ # complain if you use c:function with something that's not
+ # syntactically valid as a function declaration.
+ # We assume that anything with a return type is a function
+ # and anything without is a macro.
+ if ($args{'functiontype'} ne "") {
+ print ".. c:function:: ";
+ } else {
+ print ".. c:macro:: ";
+ }
+ } else {
+ # Older Sphinx don't support documenting macros that take
+ # arguments with c:macro, and don't complain about the use
+ # of c:function for this.
+ print ".. c:function:: ";
+ }
}
if ($args{'functiontype'} ne "") {
    $start .= $args{'functiontype'} . " " . $args{'function'} . " (";
--
2.20.1
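Concretely, for the macro from the error message above (which has no return
type), kerneldoc run for Sphinx 3 now emits a line like:

    .. c:macro:: DECLARE_INSTANCE_CHECKER(InstanceType, OBJ_NAME, TYPENAME)

while anything with a return type keeps the old directive, e.g.:

    .. c:function:: int foo(int arg)

(the foo() line is purely illustrative; the directive choice is what the
patch changes).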
Convert the neon floating-point vector compare-vs-0 insns VCEQ0,
VCGT0, VCLE0, VCGE0 and VCLT0 to use a gvec helper, and use this to
implement the fp16 case.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-33-peter.maydell@linaro.org
---
target/arm/helper.h | 15 +++++++++++++++
target/arm/vec_helper.c | 25 +++++++++++++++++++++++++
target/arm/translate-neon.c.inc | 33 +++++----------------------------
3 files changed, 45 insertions(+), 28 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)

+#define WRAP_CMP0_FWD(FN, CMPOP, TYPE) \
+ static TYPE TYPE##_##FN##0(TYPE op, float_status *stat) \
+ { \
+ return TYPE##_##CMPOP(op, TYPE##_zero, stat); \
+ }
+
+#define WRAP_CMP0_REV(FN, CMPOP, TYPE) \
+ static TYPE TYPE##_##FN##0(TYPE op, float_status *stat) \
+ { \
+ return TYPE##_##CMPOP(TYPE##_zero, op, stat); \
+ }
+
+#define DO_2OP_CMP0(FN, CMPOP, DIRN) \
+ WRAP_CMP0_##DIRN(FN, CMPOP, float16) \
+ WRAP_CMP0_##DIRN(FN, CMPOP, float32) \
+ DO_2OP(gvec_f##FN##0_h, float16_##FN##0, float16) \
+ DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32)
+
+DO_2OP_CMP0(cgt, cgt, FWD)
+DO_2OP_CMP0(cge, cge, FWD)
+DO_2OP_CMP0(ceq, ceq, FWD)
+DO_2OP_CMP0(clt, cgt, REV)
+DO_2OP_CMP0(cle, cge, REV)
+
#undef DO_2OP
+#undef DO_2OP_CMP0

/* Floating-point trigonometric starting value.
* See the ARM ARM pseudocode function FPTrigSMul.
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ DO_2MISC_FP(VCVT_UF, gen_helper_vfp_touizs)

DO_2MISC_FP_VEC(VRECPE_F, gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s)
DO_2MISC_FP_VEC(VRSQRTE_F, gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s)
+DO_2MISC_FP_VEC(VCGT0_F, gen_helper_gvec_fcgt0_h, gen_helper_gvec_fcgt0_s)
+DO_2MISC_FP_VEC(VCGE0_F, gen_helper_gvec_fcge0_h, gen_helper_gvec_fcge0_s)
+DO_2MISC_FP_VEC(VCEQ0_F, gen_helper_gvec_fceq0_h, gen_helper_gvec_fceq0_s)
+DO_2MISC_FP_VEC(VCLT0_F, gen_helper_gvec_fclt0_h, gen_helper_gvec_fclt0_s)
+DO_2MISC_FP_VEC(VCLE0_F, gen_helper_gvec_fcle0_h, gen_helper_gvec_fcle0_s)

static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
{
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
return do_2misc_fp(s, a, gen_helper_rints_exact);
}

-#define WRAP_FP_CMP0_FWD(WRAPNAME, FUNC) \
- static void WRAPNAME(TCGv_i32 d, TCGv_i32 m, TCGv_ptr fpst) \
- { \
- TCGv_i32 zero = tcg_const_i32(0); \
- FUNC(d, m, zero, fpst); \
- tcg_temp_free_i32(zero); \
- }
-#define WRAP_FP_CMP0_REV(WRAPNAME, FUNC) \
- static void WRAPNAME(TCGv_i32 d, TCGv_i32 m, TCGv_ptr fpst) \
- { \
- TCGv_i32 zero = tcg_const_i32(0); \
- FUNC(d, zero, m, fpst); \
- tcg_temp_free_i32(zero); \
- }
-
-#define DO_FP_CMP0(INSN, FUNC, REV) \
- WRAP_FP_CMP0_##REV(gen_##INSN, FUNC) \
- static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
- { \
- return do_2misc_fp(s, a, gen_##INSN); \
- }
-
-DO_FP_CMP0(VCGT0_F, gen_helper_neon_cgt_f32, FWD)
-DO_FP_CMP0(VCGE0_F, gen_helper_neon_cge_f32, FWD)
-DO_FP_CMP0(VCEQ0_F, gen_helper_neon_ceq_f32, FWD)
-DO_FP_CMP0(VCLE0_F, gen_helper_neon_cge_f32, REV)
-DO_FP_CMP0(VCLT0_F, gen_helper_neon_cgt_f32, REV)
-
static bool do_vrint(DisasContext *s, arg_2misc *a, int rmode)
{
/*
--
2.20.1

Sphinx 3.2 is pickier than earlier versions about the option:: markup,
and complains about our usage in qemu-option-trace.rst:

../../docs/qemu-option-trace.rst.inc:4:Malformed option description
'[enable=]PATTERN', should look like "opt", "-opt args", "--opt args",
"/opt args" or "+opt args"

In this file, we're really trying to document the different parts of
the top-level --trace option, which qemu-nbd.rst and qemu-img.rst
have already introduced with an option:: markup. So it's not right
to use option:: here anyway. Switch to a different markup
(definition lists) which gives about the same formatted output.

(Unlike option::, this markup doesn't produce index entries; but
at the moment we don't do anything much with indexes anyway, and
in any case I think it doesn't make much sense to have individual
index entries for the sub-parts of the --trace option.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20201030174700.7204-3-peter.maydell@linaro.org
---
docs/qemu-option-trace.rst.inc | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/qemu-option-trace.rst.inc b/docs/qemu-option-trace.rst.inc
index XXXXXXX..XXXXXXX 100644
--- a/docs/qemu-option-trace.rst.inc
+++ b/docs/qemu-option-trace.rst.inc
@@ -XXX,XX +XXX,XX @@

Specify tracing options.

-.. option:: [enable=]PATTERN
+``[enable=]PATTERN``

Immediately enable events matching *PATTERN*
(either event name or a globbing pattern). This option is only
@@ -XXX,XX +XXX,XX @@ Specify tracing options.

Use :option:`-trace help` to print a list of names of trace points.

-.. option:: events=FILE
+``events=FILE``

Immediately enable events listed in *FILE*.
The file must contain one event name (as listed in the ``trace-events-all``
@@ -XXX,XX +XXX,XX @@ Specify tracing options.
available if QEMU has been compiled with the ``simple``, ``log`` or
``ftrace`` tracing backend.

-.. option:: file=FILE
+``file=FILE``

Log output traces to *FILE*.
This option is only available if QEMU has been compiled with
--
2.20.1
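As a concrete illustration of the wrappers above, DO_2OP_CMP0(clt, cgt, REV)
goes through WRAP_CMP0_REV, so the generated fp16 scalar op swaps its
operand against zero, expanding (roughly) to:

    static float16 float16_clt0(float16 op, float_status *stat)
    {
        /* op < 0  is implemented as  0 > op */
        return float16_cgt(float16_zero, op, stat);
    }

which DO_2OP then vectorises as gvec_fclt0_h.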
Macroify creation of the trans functions for single and double
precision VFMA, VFMS, VFNMA, VFNMS. The repetition was OK for
two sizes, but we're about to add halfprec and it will get a bit
more than seems reasonable.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-6-peter.maydell@linaro.org
---
target/arm/translate-vfp.c.inc | 50 +++++++++-------------------------
1 file changed, 13 insertions(+), 37 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
return true;
}

-static bool trans_VFMA_sp(DisasContext *s, arg_VFMA_sp *a)
-{
- return do_vfm_sp(s, a, false, false);
-}
-
-static bool trans_VFMS_sp(DisasContext *s, arg_VFMS_sp *a)
-{
- return do_vfm_sp(s, a, true, false);
-}
-
-static bool trans_VFNMA_sp(DisasContext *s, arg_VFNMA_sp *a)
-{
- return do_vfm_sp(s, a, false, true);
-}
-
-static bool trans_VFNMS_sp(DisasContext *s, arg_VFNMS_sp *a)
-{
- return do_vfm_sp(s, a, true, true);
-}
-
static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
{
/*
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
return true;
}

-static bool trans_VFMA_dp(DisasContext *s, arg_VFMA_dp *a)
-{
- return do_vfm_dp(s, a, false, false);
-}
+#define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD) \
+ static bool trans_##INSN##_##PREC(DisasContext *s, \
+ arg_##INSN##_##PREC *a) \
+ { \
+ return do_vfm_##PREC(s, a, NEGN, NEGD); \
+ }

-static bool trans_VFMS_dp(DisasContext *s, arg_VFMS_dp *a)
-{
- return do_vfm_dp(s, a, true, false);
-}
+#define MAKE_VFM_TRANS_FNS(PREC) \
+ MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
+ MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
+ MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
+ MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)

-static bool trans_VFNMA_dp(DisasContext *s, arg_VFNMA_dp *a)
-{
- return do_vfm_dp(s, a, false, true);
-}
-
-static bool trans_VFNMS_dp(DisasContext *s, arg_VFNMS_dp *a)
-{
- return do_vfm_dp(s, a, true, true);
-}
+MAKE_VFM_TRANS_FNS(sp)
+MAKE_VFM_TRANS_FNS(dp)

static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
--
2.20.1

The randomness tests in the NPCM7xx RNG test fail intermittently
but fairly frequently. On my machine running the test in a loop:
while QTEST_QEMU_BINARY=./qemu-system-aarch64 ./tests/qtest/npcm7xx_rng-test; do true; done

will fail in less than a minute with an error like:
ERROR:../../tests/qtest/npcm7xx_rng-test.c:256:test_first_byte_runs:
assertion failed (calc_runs_p(buf.l, sizeof(buf) * BITS_PER_BYTE) > 0.01): (0.00286205989 > 0.01)

(Failures have been observed on all 4 of the randomness tests,
not just first_byte_runs.)

It's not clear why these tests are failing like this, but intermittent
failures make CI and merge testing awkward, so disable running them
unless a developer specifically sets QEMU_TEST_FLAKY_RNG_TESTS when
running the test suite, until we work out the cause.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20201102152454.8287-1-peter.maydell@linaro.org
Reviewed-by: Havard Skinnemoen <hskinnemoen@google.com>
---
tests/qtest/npcm7xx_rng-test.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/tests/qtest/npcm7xx_rng-test.c b/tests/qtest/npcm7xx_rng-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/qtest/npcm7xx_rng-test.c
+++ b/tests/qtest/npcm7xx_rng-test.c
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)

qtest_add_func("npcm7xx_rng/enable_disable", test_enable_disable);
qtest_add_func("npcm7xx_rng/rosel", test_rosel);
- qtest_add_func("npcm7xx_rng/continuous/monobit", test_continuous_monobit);
- qtest_add_func("npcm7xx_rng/continuous/runs", test_continuous_runs);
- qtest_add_func("npcm7xx_rng/first_byte/monobit", test_first_byte_monobit);
- qtest_add_func("npcm7xx_rng/first_byte/runs", test_first_byte_runs);
+ /*
+ * These tests fail intermittently; only run them on explicit
+ * request until we figure out why.
+ */
+ if (getenv("QEMU_TEST_FLAKY_RNG_TESTS")) {
+ qtest_add_func("npcm7xx_rng/continuous/monobit", test_continuous_monobit);
+ qtest_add_func("npcm7xx_rng/continuous/runs", test_continuous_runs);
+ qtest_add_func("npcm7xx_rng/first_byte/monobit", test_first_byte_monobit);
+ qtest_add_func("npcm7xx_rng/first_byte/runs", test_first_byte_runs);
+ }

qtest_start("-machine npcm750-evb");
ret = g_test_run();
--
2.20.1
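For anyone who wants to exercise the skipped RNG tests, the flag is a plain
environment variable, so something like:

    QEMU_TEST_FLAKY_RNG_TESTS=1 QTEST_QEMU_BINARY=./qemu-system-aarch64 ./tests/qtest/npcm7xx_rng-test

re-enables all four randomness tests (any non-empty value works, since the
code only checks getenv()).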
Deleted patch

Implement VFP fp16 support for the VMOV immediate insn.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-10-peter.maydell@linaro.org
---
target/arm/vfp.decode | 2 ++
target/arm/translate-vfp.c.inc | 22 ++++++++++++++++++++++
2 files changed, 24 insertions(+)

diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VFMS_dp ---- 1110 1.10 .... .... 1011 .1.0 .... @vfp_dnm_d
VFNMA_dp ---- 1110 1.01 .... .... 1011 .0.0 .... @vfp_dnm_d
VFNMS_dp ---- 1110 1.01 .... .... 1011 .1.0 .... @vfp_dnm_d

+VMOV_imm_hp ---- 1110 1.11 .... .... 1001 0000 .... \
+ vd=%vd_sp imm=%vmov_imm
VMOV_imm_sp ---- 1110 1.11 .... .... 1010 0000 .... \
vd=%vd_sp imm=%vmov_imm
VMOV_imm_dp ---- 1110 1.11 .... .... 1011 0000 .... \
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ MAKE_VFM_TRANS_FNS(hp)
MAKE_VFM_TRANS_FNS(sp)
MAKE_VFM_TRANS_FNS(dp)

+static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
+{
+ TCGv_i32 fd;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
+ neon_store_reg32(fd, a->vd);
+ tcg_temp_free_i32(fd);
+ return true;
+}
+
static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
uint32_t delta_d = 0;
--
2.20.1
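The VMOV-immediate encoding packs the constant into 8 bits, which
vfp_expand_imm() widens to the destination format. As a worked example
(based on the usual VFPExpandImm() pseudocode, so treat the exact bit layout
as a sketch): imm8 = 0x70 has sign 0, bit 6 set and low fraction bits
110000, which expands for fp16 to 0x3c00, i.e. 1.0, just as the same imm8
yields 0x3f800000 (1.0) in the fp32 case.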
Deleted patch

Implement fp16 version of VCMP.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-11-peter.maydell@linaro.org
---
target/arm/helper.h | 2 ++
target/arm/vfp.decode | 2 ++
target/arm/vfp_helper.c | 15 +++++++------
target/arm/translate-vfp.c.inc | 39 ++++++++++++++++++++++++++++++++++
4 files changed, 51 insertions(+), 7 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(vfp_absd, f64, f64)
DEF_HELPER_2(vfp_sqrth, f16, f16, env)
DEF_HELPER_2(vfp_sqrts, f32, f32, env)
DEF_HELPER_2(vfp_sqrtd, f64, f64, env)
+DEF_HELPER_3(vfp_cmph, void, f16, f16, env)
DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
+DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env)
DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
DEF_HELPER_3(vfp_cmped, void, f64, f64, env)

diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VSQRT_hp ---- 1110 1.11 0001 .... 1001 11.0 .... @vfp_dm_ss
VSQRT_sp ---- 1110 1.11 0001 .... 1010 11.0 .... @vfp_dm_ss
VSQRT_dp ---- 1110 1.11 0001 .... 1011 11.0 .... @vfp_dm_dd

+VCMP_hp ---- 1110 1.11 010 z:1 .... 1001 e:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
VCMP_sp ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCMP_dp ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp)
}

/* XXX: check quiet/signaling case */
-#define DO_VFP_cmp(p, type) \
-void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \
+#define DO_VFP_cmp(P, FLOATTYPE, ARGTYPE, FPST) \
+void VFP_HELPER(cmp, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
{ \
softfloat_to_vfp_compare(env, \
- type ## _compare_quiet(a, b, &env->vfp.fp_status)); \
+ FLOATTYPE ## _compare_quiet(a, b, &env->vfp.FPST)); \
} \
-void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
+void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
{ \
softfloat_to_vfp_compare(env, \
- type ## _compare(a, b, &env->vfp.fp_status)); \
+ FLOATTYPE ## _compare(a, b, &env->vfp.FPST)); \
}
-DO_VFP_cmp(s, float32)
-DO_VFP_cmp(d, float64)
+DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status_f16)
+DO_VFP_cmp(s, float32, float32, fp_status)
+DO_VFP_cmp(d, float64, float64, fp_status)
#undef DO_VFP_cmp

/* Integer to float and float to integer conversions */
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp)
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp)
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp)

+static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
+{
+ TCGv_i32 vd, vm;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ /* Vm/M bits must be zero for the Z variant */
+ if (a->z && a->vm != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vd = tcg_temp_new_i32();
+ vm = tcg_temp_new_i32();
+
+ neon_load_reg32(vd, a->vd);
+ if (a->z) {
+ tcg_gen_movi_i32(vm, 0);
+ } else {
+ neon_load_reg32(vm, a->vm);
+ }
+
+ if (a->e) {
+ gen_helper_vfp_cmpeh(vd, vm, cpu_env);
+ } else {
+ gen_helper_vfp_cmph(vd, vm, cpu_env);
+ }
+
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i32(vm);
+
+ return true;
+}
+
static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
TCGv_i32 vd, vm;
--
2.20.1
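Expanding the reworked macro for the new fp16 case shows why the extra
parameters are needed: DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status_f16)
produces (roughly):

    void VFP_HELPER(cmp, h)(dh_ctype_f16 a, dh_ctype_f16 b, CPUARMState *env)
    {
        softfloat_to_vfp_compare(env,
            float16_compare_quiet(a, b, &env->vfp.fp_status_f16));
    }

i.e. the softfloat type name (float16), the C argument type (dh_ctype_f16)
and the float_status field can now all vary independently.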
Deleted patch

Implement the fp16 versions of the VFP VLDR/VSTR (immediate).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-12-peter.maydell@linaro.org
---
target/arm/vfp.decode | 3 +--
target/arm/translate-vfp.c.inc | 35 ++++++++++++++++++++++++++++++++++
2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMOV_single ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 vn=%vn_sp
VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... vm=%vm_sp
VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... vm=%vm_dp

-# Note that the half-precision variants of VLDR and VSTR are
-# not part of this decodetree at all because they have bits [9:8] == 0b01
+VLDR_VSTR_hp ---- 1101 u:1 .0 l:1 rn:4 .... 1001 imm:8 vd=%vd_sp
VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 vd=%vd_sp
VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 vd=%vd_dp

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
return true;
}

+static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
+{
+ uint32_t offset;
+ TCGv_i32 addr, tmp;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
+ offset = a->imm << 1;
+ if (!a->u) {
+ offset = -offset;
+ }
+
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, offset);
+ tmp = tcg_temp_new_i32();
+ if (a->l) {
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ neon_store_reg32(tmp, a->vd);
+ } else {
+ neon_load_reg32(tmp, a->vd);
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
uint32_t offset;
--
2.20.1
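Note the offset scaling in the new trans function: the imm8 field counts
halfwords for fp16, so 'offset = a->imm << 1' means an encoded imm of 3 is a
byte offset of 6, whereas the fp32 and fp64 forms scale their imm8 by 4.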
Deleted patch

Implement the fp16 versions of the VFP VCVT instruction forms which
convert between floating point and integer.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-13-peter.maydell@linaro.org
---
target/arm/vfp.decode | 4 +++
target/arm/translate-vfp.c.inc | 65 ++++++++++++++++++++++++++++++++++
2 files changed, 69 insertions(+)

diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VCVT_sp ---- 1110 1.11 0111 .... 1010 11.0 .... @vfp_dm_ds
VCVT_dp ---- 1110 1.11 0111 .... 1011 11.0 .... @vfp_dm_sd

# VCVT from integer to floating point: Vm always single; Vd depends on size
+VCVT_int_hp ---- 1110 1.11 1000 .... 1001 s:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
VCVT_int_sp ---- 1110 1.11 1000 .... 1010 s:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \
@@ -XXX,XX +XXX,XX @@ VCVT_fix_dp ---- 1110 1.11 1.1. .... 1011 .1.0 .... \
vd=%vd_dp imm=%vm_sp opc=%vcvt_fix_op

# VCVT float to integer (VCVT and VCVTR): Vd always single; Vd depends on size
+VCVT_hp_int ---- 1110 1.11 110 s:1 .... 1001 rz:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
VCVT_sp_int ---- 1110 1.11 110 s:1 .... 1010 rz:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCVT_dp_int ---- 1110 1.11 110 s:1 .... 1011 rz:1 1.0 .... \
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
return true;
}

+static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
+{
+ TCGv_i32 vm;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i32();
+ neon_load_reg32(vm, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ if (a->s) {
+ /* i32 -> f16 */
+ gen_helper_vfp_sitoh(vm, vm, fpst);
+ } else {
+ /* u32 -> f16 */
+ gen_helper_vfp_uitoh(vm, vm, fpst);
+ }
+ neon_store_reg32(vm, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
TCGv_i32 vm;
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
return true;
}

+static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
+{
+ TCGv_i32 vm;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ vm = tcg_temp_new_i32();
+ neon_load_reg32(vm, a->vm);
+
+ if (a->s) {
+ if (a->rz) {
+ gen_helper_vfp_tosizh(vm, vm, fpst);
+ } else {
+ gen_helper_vfp_tosih(vm, vm, fpst);
+ }
+ } else {
+ if (a->rz) {
+ gen_helper_vfp_touizh(vm, vm, fpst);
+ } else {
+ gen_helper_vfp_touih(vm, vm, fpst);
+ }
+ }
+ neon_store_reg32(vm, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
TCGv_i32 vm;
--
2.20.1
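One detail worth flagging in the conversions above: they fetch the fp16
float_status with fpstatus_ptr(FPST_FPCR_F16) rather than reusing the
fp32/fp64 status, because AArch32 half-precision arithmetic has its own
flush-to-zero control (FPSCR.FZ16 rather than FZ), so the fp16 behaviour
must stay independent of the 32/64-bit settings.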
Deleted patch

Currently the VFP_CONV_FIX macros take a single fsz argument for the
size of the float type, which is used both to select the name of
the functions to call (eg float32_is_any_nan()) and also for the
type to use for the float inputs and outputs (eg float32).

Separate these into fsz and ftype arguments, so that we can use them
for fp16, which uses 'float16' in the function names but is still
passing inputs and outputs in a 32-bit sized type.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-14-peter.maydell@linaro.org
---
target/arm/vfp_helper.c | 46 ++++++++++++++++++++---------------------
1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
}

/* VFP3 fixed point conversion. */
-#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
-float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
+#define VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+ftype HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
void *fpstp) \
{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

-#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \
-uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
+#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, ROUND, suff) \
+uint##isz##_t HELPER(vfp_to##name##p##suff)(ftype x, uint32_t shift, \
void *fpst) \
{ \
if (unlikely(float##fsz##_is_any_nan(x))) { \
@@ -XXX,XX +XXX,XX @@ uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \
}

-#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
-VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
+#define VFP_CONV_FIX(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
float_round_to_zero, _round_to_zero) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
get_float_rounding_mode(fpst), )

-#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
-VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
+#define VFP_CONV_FIX_A64(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
get_float_rounding_mode(fpst), )

-VFP_CONV_FIX(sh, d, 64, 64, int16)
-VFP_CONV_FIX(sl, d, 64, 64, int32)
-VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
-VFP_CONV_FIX(uh, d, 64, 64, uint16)
-VFP_CONV_FIX(ul, d, 64, 64, uint32)
-VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
-VFP_CONV_FIX(sh, s, 32, 32, int16)
-VFP_CONV_FIX(sl, s, 32, 32, int32)
-VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
-VFP_CONV_FIX(uh, s, 32, 32, uint16)
-VFP_CONV_FIX(ul, s, 32, 32, uint32)
-VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
+VFP_CONV_FIX(sh, d, 64, float64, 64, int16)
+VFP_CONV_FIX(sl, d, 64, float64, 64, int32)
+VFP_CONV_FIX_A64(sq, d, 64, float64, 64, int64)
+VFP_CONV_FIX(uh, d, 64, float64, 64, uint16)
+VFP_CONV_FIX(ul, d, 64, float64, 64, uint32)
+VFP_CONV_FIX_A64(uq, d, 64, float64, 64, uint64)
+VFP_CONV_FIX(sh, s, 32, float32, 32, int16)
+VFP_CONV_FIX(sl, s, 32, float32, 32, int32)
+VFP_CONV_FIX_A64(sq, s, 32, float32, 64, int64)
+VFP_CONV_FIX(uh, s, 32, float32, 32, uint16)
+VFP_CONV_FIX(ul, s, 32, float32, 32, uint32)
+VFP_CONV_FIX_A64(uq, s, 32, float32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
--
2.20.1
diff view generated by jsdifflib
Deleted patch

Now that the VFP_CONV_FIX macros can handle fp16's distinction between
the width of the operation and the width of the type used to pass
operands, use the macros rather than the open-coded functions.

This creates an extra six helper functions, all of which we are going
to need for the AArch32 VFP fp16 instructions.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-15-peter.maydell@linaro.org
---
 target/arm/helper.h     |  6 +++
 target/arm/vfp_helper.c | 86 +++--------------------------------------
 2 files changed, 12 insertions(+), 80 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(vfp_tosizh, s32, f16, ptr)
 DEF_HELPER_2(vfp_tosizs, s32, f32, ptr)
 DEF_HELPER_2(vfp_tosizd, s32, f64, ptr)

+DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, ptr)
 DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr)
 DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr)
 DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr)
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr)
 DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr)
 DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr)
 DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_shtoh, f16, i32, i32, ptr)
+DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, ptr)
 DEF_HELPER_3(vfp_sltoh, f16, i32, i32, ptr)
 DEF_HELPER_3(vfp_ultoh, f16, i32, i32, ptr)
 DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, ptr)
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ VFP_CONV_FIX_A64(sq, s, 32, float32, 64, int64)
 VFP_CONV_FIX(uh, s, 32, float32, 32, uint16)
 VFP_CONV_FIX(ul, s, 32, float32, 32, uint32)
 VFP_CONV_FIX_A64(uq, s, 32, float32, 64, uint64)
+VFP_CONV_FIX(sh, h, 16, dh_ctype_f16, 32, int16)
+VFP_CONV_FIX(sl, h, 16, dh_ctype_f16, 32, int32)
+VFP_CONV_FIX_A64(sq, h, 16, dh_ctype_f16, 64, int64)
+VFP_CONV_FIX(uh, h, 16, dh_ctype_f16, 32, uint16)
+VFP_CONV_FIX(ul, h, 16, dh_ctype_f16, 32, uint32)
+VFP_CONV_FIX_A64(uq, h, 16, dh_ctype_f16, 64, uint64)

 #undef VFP_CONV_FIX
 #undef VFP_CONV_FIX_FLOAT
 #undef VFP_CONV_FLOAT_FIX_ROUND
 #undef VFP_CONV_FIX_A64

-uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    return int32_to_float16_scalbn(x, -shift, fpst);
-}
-
-uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    return uint32_to_float16_scalbn(x, -shift, fpst);
-}
-
-uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
-{
-    return int64_to_float16_scalbn(x, -shift, fpst);
-}
-
-uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
-{
-    return uint64_to_float16_scalbn(x, -shift, fpst);
-}
-
-uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    if (unlikely(float16_is_any_nan(x))) {
-        float_raise(float_flag_invalid, fpst);
-        return 0;
-    }
-    return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst),
-                                   shift, fpst);
-}
-
-uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    if (unlikely(float16_is_any_nan(x))) {
-        float_raise(float_flag_invalid, fpst);
-        return 0;
-    }
-    return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst),
-                                    shift, fpst);
-}
-
-uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    if (unlikely(float16_is_any_nan(x))) {
-        float_raise(float_flag_invalid, fpst);
-        return 0;
-    }
-    return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst),
-                                   shift, fpst);
-}
-
-uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    if (unlikely(float16_is_any_nan(x))) {
-        float_raise(float_flag_invalid, fpst);
-        return 0;
-    }
-    return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst),
-                                    shift, fpst);
-}
-
-uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    if (unlikely(float16_is_any_nan(x))) {
-        float_raise(float_flag_invalid, fpst);
-        return 0;
-    }
-    return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst),
-                                   shift, fpst);
-}
-
-uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    if (unlikely(float16_is_any_nan(x))) {
-        float_raise(float_flag_invalid, fpst);
-        return 0;
-    }
-    return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst),
-                                    shift, fpst);
-}
-
 /* Set the current fp rounding mode and return the old one.
  * The argument is a softfloat float_round_ value.
  */
--
2.20.1

Deleted patch

Implement the fp16 versions of the VFP VCVT instruction forms which
convert between floating point and fixed-point.

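(Editor's note on the decode below, illustrative only: the assembled opc
field packs op:U:sx, and bit 0 of it (sx) says whether the fixed-point
operand is 32-bit rather than 16-bit, which fixes the fraction width:)

    /* editor's sketch of the fraction-width derivation used in the patch */
    static int vcvt_frac_bits(int opc, int imm)
    {
        return (opc & 1) ? (32 - imm) : (16 - imm);
    }
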
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-16-peter.maydell@linaro.org
---
 target/arm/vfp.decode          |  2 ++
 target/arm/translate-vfp.c.inc | 59 ++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VJCVT        ---- 1110 1.11 1001 .... 1011 11.0 .... @vfp_dm_sd
 # We assemble bits 18 (op), 16 (u) and 7 (sx) into a single opc field
 # for the convenience of the trans_VCVT_fix functions.
 %vcvt_fix_op 18:1 16:1 7:1
+VCVT_fix_hp  ---- 1110 1.11 1.1. .... 1001 .1.0 .... \
+             vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op
 VCVT_fix_sp  ---- 1110 1.11 1.1. .... 1010 .1.0 .... \
              vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op
 VCVT_fix_dp  ---- 1110 1.11 1.1. .... 1011 .1.0 .... \
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
     return true;
 }

+static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
+{
+    TCGv_i32 vd, shift;
+    TCGv_ptr fpst;
+    int frac_bits;
+
+    if (!dc_isar_feature(aa32_fp16_arith, s)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
+
+    vd = tcg_temp_new_i32();
+    neon_load_reg32(vd, a->vd);
+
+    fpst = fpstatus_ptr(FPST_FPCR_F16);
+    shift = tcg_const_i32(frac_bits);
+
+    /* Switch on op:U:sx bits */
+    switch (a->opc) {
+    case 0:
+        gen_helper_vfp_shtoh(vd, vd, shift, fpst);
+        break;
+    case 1:
+        gen_helper_vfp_sltoh(vd, vd, shift, fpst);
+        break;
+    case 2:
+        gen_helper_vfp_uhtoh(vd, vd, shift, fpst);
+        break;
+    case 3:
+        gen_helper_vfp_ultoh(vd, vd, shift, fpst);
+        break;
+    case 4:
+        gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
+        break;
+    case 5:
+        gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
+        break;
+    case 6:
+        gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
+        break;
+    case 7:
+        gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    neon_store_reg32(vd, a->vd);
+    tcg_temp_free_i32(vd);
+    tcg_temp_free_i32(shift);
+    tcg_temp_free_ptr(fpst);
+    return true;
+}
+
 static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
 {
     TCGv_i32 vd, shift;
--
2.20.1

Deleted patch

Implement the fp16 version of the VFP VSEL instruction.

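(Editor's sketch of the per-result semantics, matching the sz handling in
the code below: the condition selects one source, and for the fp16 case
the top half of the single-precision register is zeroed:)

    /* illustrative only, not the patch itself */
    uint32_t vsel_result(bool cond, uint32_t vn, uint32_t vm, int sz)
    {
        uint32_t dest = cond ? vn : vm;
        if (sz == 1) {          /* fp16: top half is always zeroes */
            dest &= 0xffff;
        }
        return dest;
    }
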
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-18-peter.maydell@linaro.org
---
 target/arm/vfp-uncond.decode   |  6 ++++--
 target/arm/translate-vfp.c.inc | 16 ++++++++++++----
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp-uncond.decode
+++ b/target/arm/vfp-uncond.decode
@@ -XXX,XX +XXX,XX @@
 @vfp_dnm_s   ................................ vm=%vm_sp vn=%vn_sp vd=%vd_sp
 @vfp_dnm_d   ................................ vm=%vm_dp vn=%vn_dp vd=%vd_dp

+VSEL         1111 1110 0. cc:2 .... .... 1001 .0.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp sz=1
 VSEL         1111 1110 0. cc:2 .... .... 1010 .0.0 .... \
-             vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp sz=2
 VSEL         1111 1110 0. cc:2 .... .... 1011 .0.0 .... \
-             vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp sz=3

 VMAXNM_hp    1111 1110 1.00 .... .... 1001 .0.0 .... @vfp_dnm_s
 VMINNM_hp    1111 1110 1.00 .... .... 1001 .1.0 .... @vfp_dnm_s
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check(DisasContext *s)
 static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
 {
     uint32_t rd, rn, rm;
-    bool dp = a->dp;
+    int sz = a->sz;

     if (!dc_isar_feature(aa32_vsel, s)) {
         return false;
     }

-    if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
+    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
+        return false;
+    }
+
+    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
         return false;
     }

     /* UNDEF accesses to D16-D31 if they don't exist */
-    if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
+    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
         ((a->vm | a->vn | a->vd) & 0x10)) {
         return false;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
         return true;
     }

-    if (dp) {
+    if (sz == 3) {
         TCGv_i64 frn, frm, dest;
         TCGv_i64 tmp, zero, zf, nf, vf;

@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
             tcg_temp_free_i32(tmp);
             break;
         }
+        /* For fp16 the top half is always zeroes */
+        if (sz == 1) {
+            tcg_gen_andi_i32(dest, dest, 0xffff);
+        }
         neon_store_reg32(dest, rd);
         tcg_temp_free_i32(frn);
         tcg_temp_free_i32(frm);
--
2.20.1

diff view generated by jsdifflib
Deleted patch
1
The fp16 extension includes a new instruction VINS, which copies the
2
lower 16 bits of a 32-bit source VFP register into the upper 16 bits
3
of the destination. Implement it.
4
1
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20200828183354.27913-20-peter.maydell@linaro.org
8
---
9
target/arm/vfp-uncond.decode | 3 +++
10
target/arm/translate-vfp.c.inc | 28 ++++++++++++++++++++++++++++
11
2 files changed, 31 insertions(+)
12
13
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/vfp-uncond.decode
16
+++ b/target/arm/vfp-uncond.decode
17
@@ -XXX,XX +XXX,XX @@ VCVT 1111 1110 1.11 11 rm:2 .... 1010 op:1 1.0 .... \
18
vm=%vm_sp vd=%vd_sp sz=2
19
VCVT 1111 1110 1.11 11 rm:2 .... 1011 op:1 1.0 .... \
20
vm=%vm_dp vd=%vd_sp sz=3
21
+
22
+VINS 1111 1110 1.11 0000 .... 1010 11 . 0 .... \
23
+ vd=%vd_sp vm=%vm_sp
24
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/translate-vfp.c.inc
27
+++ b/target/arm/translate-vfp.c.inc
28
@@ -XXX,XX +XXX,XX @@ static bool trans_NOCP(DisasContext *s, arg_NOCP *a)
29
30
return false;
31
}
32
+
33
+static bool trans_VINS(DisasContext *s, arg_VINS *a)
34
+{
35
+ TCGv_i32 rd, rm;
36
+
37
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
38
+ return false;
39
+ }
40
+
41
+ if (s->vec_len != 0 || s->vec_stride != 0) {
42
+ return false;
43
+ }
44
+
45
+ if (!vfp_access_check(s)) {
46
+ return true;
47
+ }
48
+
49
+ /* Insert low half of Vm into high half of Vd */
50
+ rm = tcg_temp_new_i32();
51
+ rd = tcg_temp_new_i32();
52
+ neon_load_reg32(rm, a->vm);
53
+ neon_load_reg32(rd, a->vd);
54
+ tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
55
+ neon_store_reg32(rd, a->vd);
56
+ tcg_temp_free_i32(rm);
57
+ tcg_temp_free_i32(rd);
58
+ return true;
59
+}
60
--
61
2.20.1
62
63
diff view generated by jsdifflib
Deleted patch

The fp16 extension includes a new instruction VMOVX, which copies the
upper 16 bits of a 32-bit source VFP register into the lower 16
bits of the destination and zeroes the high half of the destination.
Implement it.

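(Editor's sketch of the semantics, mirroring the shift in the code below:)

    /* illustrative only: VMOVX Sd, Sm */
    uint32_t vmovx(uint32_t vm)
    {
        return vm >> 16;        /* Vd[15:0] = Vm[31:16]; Vd[31:16] = 0 */
    }
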
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-21-peter.maydell@linaro.org
---
 target/arm/vfp-uncond.decode   |  3 +++
 target/arm/translate-vfp.c.inc | 25 +++++++++++++++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp-uncond.decode
+++ b/target/arm/vfp-uncond.decode
@@ -XXX,XX +XXX,XX @@ VCVT         1111 1110 1.11 11 rm:2 .... 1010 op:1 1.0 .... \
 VCVT         1111 1110 1.11 11 rm:2 .... 1011 op:1 1.0 .... \
              vm=%vm_dp vd=%vd_sp sz=3

+VMOVX        1111 1110 1.11 0000 .... 1010 01 . 0 .... \
+             vd=%vd_sp vm=%vm_sp
+
 VINS         1111 1110 1.11 0000 .... 1010 11 . 0 .... \
              vd=%vd_sp vm=%vm_sp
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VINS(DisasContext *s, arg_VINS *a)
     tcg_temp_free_i32(rd);
     return true;
 }
+
+static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
+{
+    TCGv_i32 rm;
+
+    if (!dc_isar_feature(aa32_fp16_arith, s)) {
+        return false;
+    }
+
+    if (s->vec_len != 0 || s->vec_stride != 0) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    /* Set Vd to high half of Vm */
+    rm = tcg_temp_new_i32();
+    neon_load_reg32(rm, a->vm);
+    tcg_gen_shri_i32(rm, rm, 16);
+    neon_store_reg32(rm, a->vd);
+    tcg_temp_free_i32(rm);
+    return true;
+}
--
2.20.1

Deleted patch

Implement the VFP fp16 variant of VMOV that transfers a 16-bit
value between a general purpose register and a VFP register.

Note that Rt == 15 is UNPREDICTABLE; since this insn is v8 and later
only, we have no need to replicate the old "updates CPSR.NZCV"
behaviour that the single-precision version of this insn does.

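(Editor's sketch of the data movement, taken from the code below: the
16-bit value is zero-extended into the 32-bit transfer in both directions:)

    /* illustrative only: VMOV.F16 between Rt and Sn */
    uint32_t half_to_gp(uint32_t sn) { return sn & 0xffff; } /* Sn[15:0] -> Rt */
    uint32_t gp_to_half(uint32_t rt) { return rt & 0xffff; } /* Rt[15:0] -> Sn */
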
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-22-peter.maydell@linaro.org
---
 target/arm/vfp.decode          |  1 +
 target/arm/translate-vfp.c.inc | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VDUP         ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
              vn=%vn_dp

 VMSR_VMRS    ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
+VMOV_half    ---- 1110 000 l:1 .... rt:4 1001 . 001 0000 vn=%vn_sp
 VMOV_single  ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 vn=%vn_sp

 VMOV_64_sp   ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... vm=%vm_sp
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
     return true;
 }

+static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
+{
+    TCGv_i32 tmp;
+
+    if (!dc_isar_feature(aa32_fp16_arith, s)) {
+        return false;
+    }
+
+    if (a->rt == 15) {
+        /* UNPREDICTABLE; we choose to UNDEF */
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (a->l) {
+        /* VFP to general purpose register */
+        tmp = tcg_temp_new_i32();
+        neon_load_reg32(tmp, a->vn);
+        tcg_gen_andi_i32(tmp, tmp, 0xffff);
+        store_reg(s, a->rt, tmp);
+    } else {
+        /* general purpose register to VFP */
+        tmp = load_reg(s, a->rt);
+        tcg_gen_andi_i32(tmp, tmp, 0xffff);
+        neon_store_reg32(tmp, a->vn);
+        tcg_temp_free_i32(tmp);
+    }
+
+    return true;
+}
+
 static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
 {
     TCGv_i32 tmp;
--
2.20.1

Deleted patch

Rewrite Neon VABS/VNEG of floats to use gvec logical AND and XOR, so
that we can implement the fp16 version of the insns.

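(Editor's sketch: on IEEE formats, abs and neg are pure sign-bit
operations, which is what lets these lower to plain gvec AND/XOR with
the constants used in the patch:)

    /* illustrative only */
    uint16_t f16_abs_bits(uint16_t h) { return h & 0x7fff; }  /* clear sign bit */
    uint16_t f16_neg_bits(uint16_t h) { return h ^ 0x8000; }  /* flip sign bit */
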
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-26-peter.maydell@linaro.org
---
 target/arm/translate-neon.c.inc | 34 +++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VCNT(DisasContext *s, arg_2misc *a)
     return do_2misc(s, a, gen_helper_neon_cnt_u8);
 }

+static void gen_VABS_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                       uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_andi(vece, rd_ofs, rm_ofs,
+                      vece == MO_16 ? 0x7fff : 0x7fffffff,
+                      oprsz, maxsz);
+}
+
 static bool trans_VABS_F(DisasContext *s, arg_2misc *a)
 {
-    if (a->size != 2) {
+    if (a->size == MO_16) {
+        if (!dc_isar_feature(aa32_fp16_arith, s)) {
+            return false;
+        }
+    } else if (a->size != MO_32) {
         return false;
     }
-    /* TODO: FP16 : size == 1 */
-    return do_2misc(s, a, gen_helper_vfp_abss);
+    return do_2misc_vec(s, a, gen_VABS_F);
+}
+
+static void gen_VNEG_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                       uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_xori(vece, rd_ofs, rm_ofs,
+                      vece == MO_16 ? 0x8000 : 0x80000000,
+                      oprsz, maxsz);
 }

 static bool trans_VNEG_F(DisasContext *s, arg_2misc *a)
 {
-    if (a->size != 2) {
+    if (a->size == MO_16) {
+        if (!dc_isar_feature(aa32_fp16_arith, s)) {
+            return false;
+        }
+    } else if (a->size != MO_32) {
         return false;
     }
-    /* TODO: FP16 : size == 1 */
-    return do_2misc(s, a, gen_helper_vfp_negs);
+    return do_2misc_vec(s, a, gen_VNEG_F);
 }

 static bool trans_VRECPE(DisasContext *s, arg_2misc *a)
--
2.20.1

Deleted patch

Convert the Neon floating-point vector comparison ops VCEQ,
VCGE and VCGT over to using a gvec helper and use this to
implement the fp16 case.

(We put the float16_ceq() etc functions above the DO_2OP()
macro definition because later when we convert the
compare-against-zero instructions we'll want their
definitions to be visible at that point in the source file.)

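(Editor's sketch of the trick the helpers below rely on: the softfloat
predicates return 0 or 1, and negating that in an unsigned type yields
the all-zeroes/all-ones element Neon compares must produce; GE is
obtained by swapping the operands of LE:)

    /* illustrative only: 1 -> 0xffff, 0 -> 0x0000; a >= b <=> b <= a */
    uint16_t f16_cge_mask(float16 a, float16 b, float_status *st)
    {
        return -float16_le(b, a, st);
    }
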
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-27-peter.maydell@linaro.org
---
 target/arm/helper.h             |  9 +++++++
 target/arm/vec_helper.c         | 44 +++++++++++++++++++++++++++++++++
 target/arm/translate-neon.c.inc |  6 ++---
 3 files changed, 56 insertions(+), 3 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }

+/*
+ * Floating point comparisons producing an integer result (all 1s or all 0s).
+ * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
+ * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
+ */
+static uint16_t float16_ceq(float16 op1, float16 op2, float_status *stat)
+{
+    return -float16_eq_quiet(op1, op2, stat);
+}
+
+static uint32_t float32_ceq(float32 op1, float32 op2, float_status *stat)
+{
+    return -float32_eq_quiet(op1, op2, stat);
+}
+
+static uint16_t float16_cge(float16 op1, float16 op2, float_status *stat)
+{
+    return -float16_le(op2, op1, stat);
+}
+
+static uint32_t float32_cge(float32 op1, float32 op2, float_status *stat)
+{
+    return -float32_le(op2, op1, stat);
+}
+
+static uint16_t float16_cgt(float16 op1, float16 op2, float_status *stat)
+{
+    return -float16_lt(op2, op1, stat);
+}
+
+static uint32_t float32_cgt(float32 op1, float32 op2, float_status *stat)
+{
+    return -float32_lt(op2, op1, stat);
+}
+
 #define DO_2OP(NAME, FUNC, TYPE) \
 void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
 { \
@@ -XXX,XX +XXX,XX @@ DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)
 DO_3OP(gvec_fabd_h, float16_abd, float16)
 DO_3OP(gvec_fabd_s, float32_abd, float32)

+DO_3OP(gvec_fceq_h, float16_ceq, float16)
+DO_3OP(gvec_fceq_s, float32_ceq, float32)
+
+DO_3OP(gvec_fcge_h, float16_cge, float16)
+DO_3OP(gvec_fcge_s, float32_cge, float32)
+
+DO_3OP(gvec_fcgt_h, float16_cgt, float16)
+DO_3OP(gvec_fcgt_s, float32_cgt, float32)
+
 #ifdef TARGET_AARCH64

 DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s, gen_helper_gvec_fadd_h)
 DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s, gen_helper_gvec_fsub_h)
 DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s, gen_helper_gvec_fabd_h)
 DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s, gen_helper_gvec_fmul_h)
+DO_3S_FP_GVEC(VCEQ, gen_helper_gvec_fceq_s, gen_helper_gvec_fceq_h)
+DO_3S_FP_GVEC(VCGE, gen_helper_gvec_fcge_s, gen_helper_gvec_fcge_h)
+DO_3S_FP_GVEC(VCGT, gen_helper_gvec_fcgt_s, gen_helper_gvec_fcgt_h)

 /*
  * For all the functions using this macro, size == 1 means fp16,
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s, gen_helper_gvec_fmul_h)
     return do_3same_fp(s, a, FUNC, READS_VD); \
 }

-DO_3S_FP(VCEQ, gen_helper_neon_ceq_f32, false)
-DO_3S_FP(VCGE, gen_helper_neon_cge_f32, false)
-DO_3S_FP(VCGT, gen_helper_neon_cgt_f32, false)
 DO_3S_FP(VACGE, gen_helper_neon_acge_f32, false)
 DO_3S_FP(VACGT, gen_helper_neon_acgt_f32, false)
 DO_3S_FP(VMAX, gen_helper_vfp_maxs, false)
--
2.20.1

Deleted patch

Convert the Neon floating-point VMAX and VMIN insns over to using
a gvec helper, and use this to implement the fp16 case.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-29-peter.maydell@linaro.org
---
 target/arm/helper.h             | 6 ++++++
 target/arm/vec_helper.c         | 6 ++++++
 target/arm/translate-neon.c.inc | 5 ++---
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ DO_3OP(gvec_facge_s, float32_acge, float32)
 DO_3OP(gvec_facgt_h, float16_acgt, float16)
 DO_3OP(gvec_facgt_s, float32_acgt, float32)

+DO_3OP(gvec_fmax_h, float16_max, float16)
+DO_3OP(gvec_fmax_s, float32_max, float32)
+
+DO_3OP(gvec_fmin_h, float16_min, float16)
+DO_3OP(gvec_fmin_s, float32_min, float32)
+
 #ifdef TARGET_AARCH64

 DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VCGE, gen_helper_gvec_fcge_s, gen_helper_gvec_fcge_h)
 DO_3S_FP_GVEC(VCGT, gen_helper_gvec_fcgt_s, gen_helper_gvec_fcgt_h)
 DO_3S_FP_GVEC(VACGE, gen_helper_gvec_facge_s, gen_helper_gvec_facge_h)
 DO_3S_FP_GVEC(VACGT, gen_helper_gvec_facgt_s, gen_helper_gvec_facgt_h)
+DO_3S_FP_GVEC(VMAX, gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_h)
+DO_3S_FP_GVEC(VMIN, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_h)

 /*
  * For all the functions using this macro, size == 1 means fp16,
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VACGT, gen_helper_gvec_facgt_s, gen_helper_gvec_facgt_h)
     return do_3same_fp(s, a, FUNC, READS_VD); \
 }

-DO_3S_FP(VMAX, gen_helper_vfp_maxs, false)
-DO_3S_FP(VMIN, gen_helper_vfp_mins, false)
-
 static void gen_VMLA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
                            TCGv_ptr fpstatus)
 {
--
2.20.1

Deleted patch

Convert the Neon floating-point VMAXNM and VMINNM insns over to
using a gvec helper and use this to implement the fp16 case.

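(Illustrative aside, an editor's approximation rather than part of the
patch: WRAP_FP_GVEC, introduced earlier in this series, generates a small
expander per helper; for the fp16 case it should expand to roughly the
following, with FPST_STD_F16 selecting the Neon "standard FPSCR"
float_status at half-precision width:)

    static void gen_VMAXNM_fp16_3s(unsigned vece, uint32_t rd_ofs,
                                   uint32_t rn_ofs, uint32_t rm_ofs,
                                   uint32_t oprsz, uint32_t maxsz)
    {
        TCGv_ptr fpst = fpstatus_ptr(FPST_STD_F16);
        tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst,
                           oprsz, maxsz, 0, gen_helper_gvec_fmaxnum_h);
        tcg_temp_free_ptr(fpst);
    }
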
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-30-peter.maydell@linaro.org
---
 target/arm/helper.h             |  6 ++++++
 target/arm/vec_helper.c         |  6 ++++++
 target/arm/translate-neon.c.inc | 23 +++++++++++++++--------
 3 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ DO_3OP(gvec_fmax_s, float32_max, float32)
 DO_3OP(gvec_fmin_h, float16_min, float16)
 DO_3OP(gvec_fmin_s, float32_min, float32)

+DO_3OP(gvec_fmaxnum_h, float16_maxnum, float16)
+DO_3OP(gvec_fmaxnum_s, float32_maxnum, float32)
+
+DO_3OP(gvec_fminnum_h, float16_minnum, float16)
+DO_3OP(gvec_fminnum_s, float32_minnum, float32)
+
 #ifdef TARGET_AARCH64

 DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ static void gen_VMLS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
 DO_3S_FP(VMLA, gen_VMLA_fp_3s, true)
 DO_3S_FP(VMLS, gen_VMLS_fp_3s, true)

+WRAP_FP_GVEC(gen_VMAXNM_fp32_3s, FPST_STD, gen_helper_gvec_fmaxnum_s)
+WRAP_FP_GVEC(gen_VMAXNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fmaxnum_h)
+WRAP_FP_GVEC(gen_VMINNM_fp32_3s, FPST_STD, gen_helper_gvec_fminnum_s)
+WRAP_FP_GVEC(gen_VMINNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fminnum_h)
+
 static bool trans_VMAXNM_fp_3s(DisasContext *s, arg_3same *a)
 {
     if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VMAXNM_fp_3s(DisasContext *s, arg_3same *a)
     }

     if (a->size != 0) {
-        /* TODO fp16 support */
-        return false;
+        if (!dc_isar_feature(aa32_fp16_arith, s)) {
+            return false;
+        }
+        return do_3same(s, a, gen_VMAXNM_fp16_3s);
     }
-
-    return do_3same_fp(s, a, gen_helper_vfp_maxnums, false);
+    return do_3same(s, a, gen_VMAXNM_fp32_3s);
 }

 static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
     }

     if (a->size != 0) {
-        /* TODO fp16 support */
-        return false;
+        if (!dc_isar_feature(aa32_fp16_arith, s)) {
+            return false;
+        }
+        return do_3same(s, a, gen_VMINNM_fp16_3s);
     }
-
-    return do_3same_fp(s, a, gen_helper_vfp_minnums, false);
+    return do_3same(s, a, gen_VMINNM_fp32_3s);
 }

 WRAP_ENV_FN(gen_VRECPS_tramp, gen_helper_recps_f32)
--
2.20.1

Deleted patch

Convert the Neon floating-point VMLA and VMLS insns over to using a
gvec helper, and use this to implement the fp16 case.

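(Editor's note, grounded in the helpers below: Neon VMLA/VMLS are
deliberately not fused, so the product is rounded before the accumulate,
whereas float16_muladd() would round only once. A minimal sketch of the
fp16 case:)

    /* two roundings: round(d + round(n * m)) */
    static float16 f16_mla_nf(float16 d, float16 n, float16 m, float_status *st)
    {
        return float16_add(d, float16_mul(n, m, st), st);
    }
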
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-31-peter.maydell@linaro.org
---
 target/arm/helper.h             |  6 ++++++
 target/arm/vec_helper.c         | 42 +++++++++++++++++++++++++++++++++
 target/arm/translate-neon.c.inc | 33 ++------------------------
 3 files changed, 50 insertions(+), 31 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)
 #endif
 #undef DO_3OP

+/* Non-fused multiply-add (unlike float16_muladd etc, which are fused) */
+static float16 float16_muladd_nf(float16 dest, float16 op1, float16 op2,
+                                 float_status *stat)
+{
+    return float16_add(dest, float16_mul(op1, op2, stat), stat);
+}
+
+static float32 float32_muladd_nf(float32 dest, float32 op1, float32 op2,
+                                 float_status *stat)
+{
+    return float32_add(dest, float32_mul(op1, op2, stat), stat);
+}
+
+static float16 float16_mulsub_nf(float16 dest, float16 op1, float16 op2,
+                                 float_status *stat)
+{
+    return float16_sub(dest, float16_mul(op1, op2, stat), stat);
+}
+
+static float32 float32_mulsub_nf(float32 dest, float32 op1, float32 op2,
+                                 float_status *stat)
+{
+    return float32_sub(dest, float32_mul(op1, op2, stat), stat);
+}
+
+#define DO_MULADD(NAME, FUNC, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+{ \
+    intptr_t i, oprsz = simd_oprsz(desc); \
+    TYPE *d = vd, *n = vn, *m = vm; \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+        d[i] = FUNC(d[i], n[i], m[i], stat); \
+    } \
+    clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_MULADD(gvec_fmla_h, float16_muladd_nf, float16)
+DO_MULADD(gvec_fmla_s, float32_muladd_nf, float32)
+
+DO_MULADD(gvec_fmls_h, float16_mulsub_nf, float16)
+DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32)
+
 /* For the indexed ops, SVE applies the index per 128-bit vector segment.
  * For AdvSIMD, there is of course only one such vector segment.
  */
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VACGE, gen_helper_gvec_facge_s, gen_helper_gvec_facge_h)
 DO_3S_FP_GVEC(VACGT, gen_helper_gvec_facgt_s, gen_helper_gvec_facgt_h)
 DO_3S_FP_GVEC(VMAX, gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_h)
 DO_3S_FP_GVEC(VMIN, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_h)
-
-/*
- * For all the functions using this macro, size == 1 means fp16,
- * which is an architecture extension we don't implement yet.
- */
-#define DO_3S_FP(INSN,FUNC,READS_VD) \
-    static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
-    { \
-        if (a->size != 0) { \
-            /* TODO fp16 support */ \
-            return false; \
-        } \
-        return do_3same_fp(s, a, FUNC, READS_VD); \
-    }
-
-static void gen_VMLA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
-                           TCGv_ptr fpstatus)
-{
-    gen_helper_vfp_muls(vn, vn, vm, fpstatus);
-    gen_helper_vfp_adds(vd, vd, vn, fpstatus);
-}
-
-static void gen_VMLS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
-                           TCGv_ptr fpstatus)
-{
-    gen_helper_vfp_muls(vn, vn, vm, fpstatus);
-    gen_helper_vfp_subs(vd, vd, vn, fpstatus);
-}
-
-DO_3S_FP(VMLA, gen_VMLA_fp_3s, true)
-DO_3S_FP(VMLS, gen_VMLS_fp_3s, true)
+DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_s, gen_helper_gvec_fmla_h)
+DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_s, gen_helper_gvec_fmls_h)

 WRAP_FP_GVEC(gen_VMAXNM_fp32_3s, FPST_STD, gen_helper_gvec_fmaxnum_s)
 WRAP_FP_GVEC(gen_VMAXNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fmaxnum_h)
--
2.20.1

Deleted patch

Convert the Neon floating-point vector operations VFMA and VFMS
to use a gvec helper, and use this to implement the fp16 case.

This is the last use of do_3same_fp() so we can now delete
that function.

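(Editor's sketch, matching the fused helpers below: VFMA computes
round(d + n * m) with a single rounding, and VFMS negates n before
fusing:)

    static float16 f16_fma(float16 d, float16 n, float16 m, float_status *st)
    {
        return float16_muladd(n, m, d, 0, st);              /* d + n*m */
    }

    static float16 f16_fms(float16 d, float16 n, float16 m, float_status *st)
    {
        return float16_muladd(float16_chs(n), m, d, 0, st); /* d - n*m */
    }
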
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200828183354.27913-32-peter.maydell@linaro.org
---
 target/arm/helper.h             |  6 +++
 target/arm/vec_helper.c         | 33 +++++++++++-
 target/arm/translate-neon.c.inc | 92 +--------------------------------
 3 files changed, 40 insertions(+), 91 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ static float32 float32_mulsub_nf(float32 dest, float32 op1, float32 op2,
     return float32_sub(dest, float32_mul(op1, op2, stat), stat);
 }

-#define DO_MULADD(NAME, FUNC, TYPE) \
+/* Fused versions; these have the semantics Neon VFMA/VFMS want */
+static float16 float16_muladd_f(float16 dest, float16 op1, float16 op2,
+                                float_status *stat)
+{
+    return float16_muladd(op1, op2, dest, 0, stat);
+}
+
+static float32 float32_muladd_f(float32 dest, float32 op1, float32 op2,
+                                float_status *stat)
+{
+    return float32_muladd(op1, op2, dest, 0, stat);
+}
+
+static float16 float16_mulsub_f(float16 dest, float16 op1, float16 op2,
+                                float_status *stat)
+{
+    return float16_muladd(float16_chs(op1), op2, dest, 0, stat);
+}
+
+static float32 float32_mulsub_f(float32 dest, float32 op1, float32 op2,
+                                float_status *stat)
+{
+    return float32_muladd(float32_chs(op1), op2, dest, 0, stat);
+}
+
+#define DO_MULADD(NAME, FUNC, TYPE) \
 void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
 { \
     intptr_t i, oprsz = simd_oprsz(desc); \
@@ -XXX,XX +XXX,XX @@ DO_MULADD(gvec_fmla_s, float32_muladd_nf, float32)
 DO_MULADD(gvec_fmls_h, float16_mulsub_nf, float16)
 DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32)

+DO_MULADD(gvec_vfma_h, float16_muladd_f, float16)
+DO_MULADD(gvec_vfma_s, float32_muladd_f, float32)
+
+DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
+DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)
+
 /* For the indexed ops, SVE applies the index per 128-bit vector segment.
  * For AdvSIMD, there is of course only one such vector segment.
  */
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -XXX,XX +XXX,XX @@ DO_3SAME_PAIR(VPADD, padd_u)
 DO_3SAME_VQDMULH(VQDMULH, qdmulh)
 DO_3SAME_VQDMULH(VQRDMULH, qrdmulh)

-static bool do_3same_fp(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn,
-                        bool reads_vd)
-{
-    /*
-     * FP operations handled elementwise 32 bits at a time.
-     * If reads_vd is true then the old value of Vd will be
-     * loaded before calling the callback function. This is
-     * used for multiply-accumulate type operations.
-     */
-    TCGv_i32 tmp, tmp2;
-    int pass;
-
-    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
-        return false;
-    }
-
-    /* UNDEF accesses to D16-D31 if they don't exist. */
-    if (!dc_isar_feature(aa32_simd_r32, s) &&
-        ((a->vd | a->vn | a->vm) & 0x10)) {
-        return false;
-    }
-
-    if ((a->vn | a->vm | a->vd) & a->q) {
-        return false;
-    }
-
-    if (!vfp_access_check(s)) {
-        return true;
-    }
-
-    TCGv_ptr fpstatus = fpstatus_ptr(FPST_STD);
-    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
-        tmp = neon_load_reg(a->vn, pass);
-        tmp2 = neon_load_reg(a->vm, pass);
-        if (reads_vd) {
-            TCGv_i32 tmp_rd = neon_load_reg(a->vd, pass);
-            fn(tmp_rd, tmp, tmp2, fpstatus);
-            neon_store_reg(a->vd, pass, tmp_rd);
-            tcg_temp_free_i32(tmp);
-        } else {
-            fn(tmp, tmp, tmp2, fpstatus);
-            neon_store_reg(a->vd, pass, tmp);
-        }
-        tcg_temp_free_i32(tmp2);
-    }
-    tcg_temp_free_ptr(fpstatus);
-    return true;
-}
-
 #define WRAP_FP_GVEC(WRAPNAME, FPST, FUNC) \
     static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
                          uint32_t rn_ofs, uint32_t rm_ofs, \
@@ -XXX,XX +XXX,XX @@ DO_3S_FP_GVEC(VMAX, gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_h)
 DO_3S_FP_GVEC(VMIN, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_h)
 DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_s, gen_helper_gvec_fmla_h)
 DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_s, gen_helper_gvec_fmls_h)
+DO_3S_FP_GVEC(VFMA, gen_helper_gvec_vfma_s, gen_helper_gvec_vfma_h)
+DO_3S_FP_GVEC(VFMS, gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_h)

 WRAP_FP_GVEC(gen_VMAXNM_fp32_3s, FPST_STD, gen_helper_gvec_fmaxnum_s)
 WRAP_FP_GVEC(gen_VMAXNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fmaxnum_h)
@@ -XXX,XX +XXX,XX @@ static bool trans_VRSQRTS_fp_3s(DisasContext *s, arg_3same *a)
     return do_3same(s, a, gen_VRSQRTS_fp_3s);
 }

-static void gen_VFMA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
-                           TCGv_ptr fpstatus)
-{
-    gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
-}
-
-static bool trans_VFMA_fp_3s(DisasContext *s, arg_3same *a)
-{
-    if (!dc_isar_feature(aa32_simdfmac, s)) {
-        return false;
-    }
-
-    if (a->size != 0) {
-        /* TODO fp16 support */
-        return false;
-    }
-
-    return do_3same_fp(s, a, gen_VFMA_fp_3s, true);
-}
-
-static void gen_VFMS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
-                           TCGv_ptr fpstatus)
-{
-    gen_helper_vfp_negs(vn, vn);
-    gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
-}
-
-static bool trans_VFMS_fp_3s(DisasContext *s, arg_3same *a)
-{
-    if (!dc_isar_feature(aa32_simdfmac, s)) {
-        return false;
-    }
-
-    if (a->size != 0) {
-        /* TODO fp16 support */
-        return false;
-    }
-
-    return do_3same_fp(s, a, gen_VFMS_fp_3s, true);
-}
-
 static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
 {
     /* FP operations handled pairwise 32 bits at a time */
--
2.20.1