Small pile of bug fixes for rc1. I've included my patches to get
our docs building with Sphinx 3, just for convenience...

-- PMM

The following changes since commit b149dea55cce97cb226683d06af61984a1c11e96:

  Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20201102' into staging (2020-11-02 10:57:48 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20201102

for you to fetch changes up to ffb4fbf90a2f63c9cb33e4bb9f854c79bf04ca4a:

  tests/qtest/npcm7xx_rng-test: Disable randomness tests (2020-11-02 16:52:18 +0000)

----------------------------------------------------------------
target-arm queue:
 * target/arm: Fix Neon emulation bugs on big-endian hosts
 * target/arm: fix handling of HCR.FB
 * target/arm: fix LORID_EL1 access check
 * disas/capstone: Fix monitor disassembly of >32 bytes
 * hw/arm/smmuv3: Fix potential integer overflow (CID 1432363)
 * hw/arm/boot: fix SVE for EL3 direct kernel boot
 * hw/display/omap_lcdc: Fix potential NULL pointer dereference
 * hw/display/exynos4210_fimd: Fix potential NULL pointer dereference
 * target/arm: Get correct MMU index for other-security-state
 * configure: Test that gio libs from pkg-config work
 * hw/intc/arm_gicv3_cpuif: Make GIC maintenance interrupts work
 * docs: Fix building with Sphinx 3
 * tests/qtest/npcm7xx_rng-test: Disable randomness tests

----------------------------------------------------------------
AlexChen (2):
      hw/display/omap_lcdc: Fix potential NULL pointer dereference
      hw/display/exynos4210_fimd: Fix potential NULL pointer dereference

Peter Maydell (9):
      target/arm: Fix float16 pairwise Neon ops on big-endian hosts
      target/arm: Fix VUDOT/VSDOT (scalar) on big-endian hosts
      disas/capstone: Fix monitor disassembly of >32 bytes
      target/arm: Get correct MMU index for other-security-state
      configure: Test that gio libs from pkg-config work
      hw/intc/arm_gicv3_cpuif: Make GIC maintenance interrupts work
      scripts/kerneldoc: For Sphinx 3 use c:macro for macros with arguments
      qemu-option-trace.rst.inc: Don't use option:: markup
      tests/qtest/npcm7xx_rng-test: Disable randomness tests

Philippe Mathieu-Daudé (1):
      hw/arm/smmuv3: Fix potential integer overflow (CID 1432363)

Richard Henderson (11):
      target/arm: Introduce neon_full_reg_offset
      target/arm: Move neon_element_offset to translate.c
      target/arm: Use neon_element_offset in neon_load/store_reg
      target/arm: Use neon_element_offset in vfp_reg_offset
      target/arm: Add read/write_neon_element32
      target/arm: Expand read/write_neon_element32 to all MemOp
      target/arm: Rename neon_load_reg32 to vfp_load_reg32
      target/arm: Add read/write_neon_element64
      target/arm: Rename neon_load_reg64 to vfp_load_reg64
      target/arm: Simplify do_long_3d and do_2scalar_long
      target/arm: Improve do_prewiden_3d

Rémi Denis-Courmont (3):
      target/arm: fix handling of HCR.FB
      target/arm: fix LORID_EL1 access check
      hw/arm/boot: fix SVE for EL3 direct kernel boot

 docs/qemu-option-trace.rst.inc     |   6 +-
 configure                          |  10 +-
 include/hw/intc/arm_gicv3_common.h |   1 -
 disas/capstone.c                   |   2 +-
 hw/arm/boot.c                      |   3 +
 hw/arm/smmuv3.c                    |   3 +-
 hw/display/exynos4210_fimd.c       |   4 +-
 hw/display/omap_lcdc.c             |  10 +-
 hw/intc/arm_gicv3_cpuif.c          |   5 +-
 target/arm/helper.c                |  24 +-
 target/arm/m_helper.c              |   3 +-
 target/arm/translate.c             | 153 +++++++++---
 target/arm/vec_helper.c            |  12 +-
 tests/qtest/npcm7xx_rng-test.c     |  14 +-
 scripts/kernel-doc                 |  18 +-
 target/arm/translate-neon.c.inc    | 472 ++++++++++++++++++++-----------------
 target/arm/translate-vfp.c.inc     | 341 +++++++++++----------------
 17 files changed, 588 insertions(+), 493 deletions(-)
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

This function makes it clear that we're talking about the whole
register, and not the 32-bit piece at index 0. This fixes a bug
when running on a big-endian host.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          |  8 ++++++
 target/arm/translate-neon.c.inc | 44 ++++++++++++++++-----------------
 target/arm/translate-vfp.c.inc  |  2 +-
 3 files changed, 31 insertions(+), 23 deletions(-)
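
For reference, the helper introduced here is a one-liner; the sketch below
restates the hunk further down. It names the whole 64-bit Dreg rather than
the 32-bit piece at index 0, which is what callers were getting wrong on
big-endian hosts:

    /* Return the offset of a "full" NEON Dreg. */
    static long neon_full_reg_offset(unsigned reg)
    {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    }
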
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
17
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
19
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
20
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static const char * const regnames[] =
21
@@ -XXX,XX +XXX,XX @@ static inline void gen_hlt(DisasContext *s, int imm)
16
/* Function prototypes for gen_ functions calling Neon helpers. */
22
unallocated_encoding(s);
17
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
18
TCGv_i32, TCGv_i32);
19
+/* Function prototypes for gen_ functions for fix point conversions */
20
+typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
21
22
/* initialize TCG globals. */
23
void arm_translate_init(void)
24
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
25
return statusptr;
26
}
23
}
27
24
28
-#define VFP_GEN_FIX(name, round) \
25
+/*
29
-static inline void gen_vfp_##name(int dp, int shift, int neon) \
26
+ * Return the offset of a "full" NEON Dreg.
30
-{ \
27
+ */
31
- TCGv_i32 tmp_shift = tcg_const_i32(shift); \
28
+static long neon_full_reg_offset(unsigned reg)
32
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
29
+{
33
- if (dp) { \
30
+ return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
34
- gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
31
+}
35
- statusptr); \
32
+
36
- } else { \
37
- gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
38
- statusptr); \
39
- } \
40
- tcg_temp_free_i32(tmp_shift); \
41
- tcg_temp_free_ptr(statusptr); \
42
-}
43
-VFP_GEN_FIX(tosl, _round_to_zero)
44
-VFP_GEN_FIX(toul, _round_to_zero)
45
-VFP_GEN_FIX(slto, )
46
-VFP_GEN_FIX(ulto, )
47
-#undef VFP_GEN_FIX
48
-
49
static inline long vfp_reg_offset(bool dp, unsigned reg)
33
static inline long vfp_reg_offset(bool dp, unsigned reg)
50
{
34
{
51
if (dp) {
35
if (dp) {
52
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
36
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
53
}
37
index XXXXXXX..XXXXXXX 100644
54
} else if (op >= 14) {
38
--- a/target/arm/translate-neon.c.inc
55
/* VCVT fixed-point. */
39
+++ b/target/arm/translate-neon.c.inc
56
+ TCGv_ptr fpst;
40
@@ -XXX,XX +XXX,XX @@ neon_element_offset(int reg, int element, MemOp size)
57
+ TCGv_i32 shiftv;
41
ofs ^= 8 - element_size;
58
+ VFPGenFixPointFn *fn;
42
}
59
+
43
#endif
60
if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
44
- return neon_reg_offset(reg, 0) + ofs;
61
return 1;
45
+ return neon_full_reg_offset(reg) + ofs;
62
}
46
}
63
+
47
64
+ if (!(op & 1)) {
48
static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
65
+ if (u) {
49
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
66
+ fn = gen_helper_vfp_ultos;
50
* We cannot write 16 bytes at once because the
67
+ } else {
51
* destination is unaligned.
68
+ fn = gen_helper_vfp_sltos;
52
*/
69
+ }
53
- tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
70
+ } else {
54
+ tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
71
+ if (u) {
55
8, 8, tmp);
72
+ fn = gen_helper_vfp_touls_round_to_zero;
56
- tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
73
+ } else {
57
- neon_reg_offset(vd, 0), 8, 8);
74
+ fn = gen_helper_vfp_tosls_round_to_zero;
58
+ tcg_gen_gvec_mov(0, neon_full_reg_offset(vd + 1),
75
+ }
59
+ neon_full_reg_offset(vd), 8, 8);
76
+ }
60
} else {
77
+
61
- tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
78
/* We have already masked out the must-be-1 top bit of imm6,
62
+ tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
79
* hence this 32-shift where the ARM ARM has 64-imm6.
63
vec_size, vec_size, tmp);
80
*/
64
}
81
shift = 32 - shift;
65
tcg_gen_addi_i32(addr, addr, 1 << size);
82
+ fpst = get_fpstatus_ptr(1);
66
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
83
+ shiftv = tcg_const_i32(shift);
67
static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
84
for (pass = 0; pass < (q ? 4 : 2); pass++) {
68
{
85
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
69
int vec_size = a->q ? 16 : 8;
86
- if (!(op & 1)) {
70
- int rd_ofs = neon_reg_offset(a->vd, 0);
87
- if (u)
71
- int rn_ofs = neon_reg_offset(a->vn, 0);
88
- gen_vfp_ulto(0, shift, 1);
72
- int rm_ofs = neon_reg_offset(a->vm, 0);
89
- else
73
+ int rd_ofs = neon_full_reg_offset(a->vd);
90
- gen_vfp_slto(0, shift, 1);
74
+ int rn_ofs = neon_full_reg_offset(a->vn);
91
- } else {
75
+ int rm_ofs = neon_full_reg_offset(a->vm);
92
- if (u)
76
93
- gen_vfp_toul(0, shift, 1);
77
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
94
- else
78
return false;
95
- gen_vfp_tosl(0, shift, 1);
79
@@ -XXX,XX +XXX,XX @@ static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
96
- }
80
{
97
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
81
/* Handle a 2-reg-shift insn which can be vectorized. */
98
+ TCGv_i32 tmpf = neon_load_reg(rm, pass);
82
int vec_size = a->q ? 16 : 8;
99
+ fn(tmpf, tmpf, shiftv, fpst);
83
- int rd_ofs = neon_reg_offset(a->vd, 0);
100
+ neon_store_reg(rd, pass, tmpf);
84
- int rm_ofs = neon_reg_offset(a->vm, 0);
101
}
85
+ int rd_ofs = neon_full_reg_offset(a->vd);
102
+ tcg_temp_free_ptr(fpst);
86
+ int rm_ofs = neon_full_reg_offset(a->vm);
103
+ tcg_temp_free_i32(shiftv);
87
104
} else {
88
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
105
return 1;
89
return false;
106
}
90
@@ -XXX,XX +XXX,XX @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
91
{
92
/* FP operations in 2-reg-and-shift group */
93
int vec_size = a->q ? 16 : 8;
94
- int rd_ofs = neon_reg_offset(a->vd, 0);
95
- int rm_ofs = neon_reg_offset(a->vm, 0);
96
+ int rd_ofs = neon_full_reg_offset(a->vd);
97
+ int rm_ofs = neon_full_reg_offset(a->vm);
98
TCGv_ptr fpst;
99
100
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
101
@@ -XXX,XX +XXX,XX @@ static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
102
return true;
103
}
104
105
- reg_ofs = neon_reg_offset(a->vd, 0);
106
+ reg_ofs = neon_full_reg_offset(a->vd);
107
vec_size = a->q ? 16 : 8;
108
imm = asimd_imm_const(a->imm, a->cmode, a->op);
109
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
111
return true;
112
}
113
114
- tcg_gen_gvec_3_ool(neon_reg_offset(a->vd, 0),
115
- neon_reg_offset(a->vn, 0),
116
- neon_reg_offset(a->vm, 0),
117
+ tcg_gen_gvec_3_ool(neon_full_reg_offset(a->vd),
118
+ neon_full_reg_offset(a->vn),
119
+ neon_full_reg_offset(a->vm),
120
16, 16, 0, fn_gvec);
121
return true;
122
}
123
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
124
{
125
/* Two registers and a scalar, using gvec */
126
int vec_size = a->q ? 16 : 8;
127
- int rd_ofs = neon_reg_offset(a->vd, 0);
128
- int rn_ofs = neon_reg_offset(a->vn, 0);
129
+ int rd_ofs = neon_full_reg_offset(a->vd);
130
+ int rn_ofs = neon_full_reg_offset(a->vn);
131
int rm_ofs;
132
int idx;
133
TCGv_ptr fpstatus;
134
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
135
/* a->vm is M:Vm, which encodes both register and index */
136
idx = extract32(a->vm, a->size + 2, 2);
137
a->vm = extract32(a->vm, 0, a->size + 2);
138
- rm_ofs = neon_reg_offset(a->vm, 0);
139
+ rm_ofs = neon_full_reg_offset(a->vm);
140
141
fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD);
142
tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus,
143
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
144
return true;
145
}
146
147
- tcg_gen_gvec_dup_mem(a->size, neon_reg_offset(a->vd, 0),
148
+ tcg_gen_gvec_dup_mem(a->size, neon_full_reg_offset(a->vd),
149
neon_element_offset(a->vm, a->index, a->size),
150
a->q ? 16 : 8, a->q ? 16 : 8);
151
return true;
152
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
153
static bool do_2misc_vec(DisasContext *s, arg_2misc *a, GVecGen2Fn *fn)
154
{
155
int vec_size = a->q ? 16 : 8;
156
- int rd_ofs = neon_reg_offset(a->vd, 0);
157
- int rm_ofs = neon_reg_offset(a->vm, 0);
158
+ int rd_ofs = neon_full_reg_offset(a->vd);
159
+ int rm_ofs = neon_full_reg_offset(a->vm);
160
161
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
162
return false;
163
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
164
index XXXXXXX..XXXXXXX 100644
165
--- a/target/arm/translate-vfp.c.inc
166
+++ b/target/arm/translate-vfp.c.inc
167
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
168
}
169
170
tmp = load_reg(s, a->rt);
171
- tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
172
+ tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
173
vec_size, vec_size, tmp);
174
tcg_temp_free_i32(tmp);
175
107
--
176
--
108
2.20.1
177
2.20.1
109
178
110
179
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

This will shortly have users outside of translate-neon.c.inc.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          | 20 ++++++++++++++++++++
 target/arm/translate-neon.c.inc | 19 -------------------
 2 files changed, 20 insertions(+), 19 deletions(-)
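
As a quick reference, this is the function being moved, as it lands in
translate.c (assembled from the hunk below). The big-endian XOR is what keeps
an element index meaning "distance from the least significant end" regardless
of host byte order:

    /*
     * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
     * where 0 is the least significant end of the register.
     */
    static long neon_element_offset(int reg, int element, MemOp size)
    {
        int element_size = 1 << size;
        int ofs = element * element_size;
    #ifdef HOST_WORDS_BIGENDIAN
        /*
         * Calculate the offset assuming fully little-endian,
         * then XOR to account for the order of the 8-byte units.
         */
        if (element_size < 8) {
            ofs ^= 8 - element_size;
        }
    #endif
        return neon_full_reg_offset(reg) + ofs;
    }
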
20
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
21
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/translate.c
16
--- a/target/arm/translate.c
23
+++ b/target/arm/translate.c
17
+++ b/target/arm/translate.c
24
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
18
@@ -XXX,XX +XXX,XX @@ static long neon_full_reg_offset(unsigned reg)
25
return statusptr;
19
return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
26
}
20
}
27
21
28
-static inline void gen_vfp_abs(int dp)
22
+/*
23
+ * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
24
+ * where 0 is the least significant end of the register.
25
+ */
26
+static long neon_element_offset(int reg, int element, MemOp size)
27
+{
28
+ int element_size = 1 << size;
29
+ int ofs = element * element_size;
30
+#ifdef HOST_WORDS_BIGENDIAN
31
+ /*
32
+ * Calculate the offset assuming fully little-endian,
33
+ * then XOR to account for the order of the 8-byte units.
34
+ */
35
+ if (element_size < 8) {
36
+ ofs ^= 8 - element_size;
37
+ }
38
+#endif
39
+ return neon_full_reg_offset(reg) + ofs;
40
+}
41
+
42
static inline long vfp_reg_offset(bool dp, unsigned reg)
43
{
44
if (dp) {
45
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/translate-neon.c.inc
48
+++ b/target/arm/translate-neon.c.inc
49
@@ -XXX,XX +XXX,XX @@ static inline int neon_3same_fp_size(DisasContext *s, int x)
50
#include "decode-neon-ls.c.inc"
51
#include "decode-neon-shared.c.inc"
52
53
-/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
54
- * where 0 is the least significant end of the register.
55
- */
56
-static inline long
57
-neon_element_offset(int reg, int element, MemOp size)
29
-{
58
-{
30
- if (dp)
59
- int element_size = 1 << size;
31
- gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
60
- int ofs = element * element_size;
32
- else
61
-#ifdef HOST_WORDS_BIGENDIAN
33
- gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
62
- /* Calculate the offset assuming fully little-endian,
63
- * then XOR to account for the order of the 8-byte units.
64
- */
65
- if (element_size < 8) {
66
- ofs ^= 8 - element_size;
67
- }
68
-#endif
69
- return neon_full_reg_offset(reg) + ofs;
34
-}
70
-}
35
-
71
-
36
static inline void gen_vfp_neg(int dp)
72
static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
37
{
73
{
38
if (dp)
74
long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
39
@@ -XXX,XX +XXX,XX @@ static const uint8_t neon_3r_sizes[] = {
40
41
static int neon_2rm_is_float_op(int op)
42
{
43
- /* Return true if this neon 2reg-misc op is float-to-float */
44
- return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
45
+ /*
46
+ * Return true if this neon 2reg-misc op is float-to-float.
47
+ * This is not a property of the operation but of our code --
48
+ * what we are asking here is "does the code for this case in
49
+ * the Neon for-each-pass loop use cpu_F0s?".
50
+ */
51
+ return (op == NEON_2RM_VNEG_F ||
52
(op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
53
op == NEON_2RM_VRINTM ||
54
(op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
55
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
56
break;
57
}
58
case NEON_2RM_VABS_F:
59
- gen_vfp_abs(0);
60
+ gen_helper_vfp_abss(tmp, tmp);
61
break;
62
case NEON_2RM_VNEG_F:
63
gen_vfp_neg(0);
64
--
75
--
65
2.20.1
76
2.20.1
66
77
67
78
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

These are the only users of neon_reg_offset, so remove that.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)
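
The net effect, visible in the hunk below, is that the old 32-bit-only
neon_reg_offset helper disappears and the existing load/store wrappers are
expressed via neon_element_offset:

    static TCGv_i32 neon_load_reg(int reg, int pass)
    {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32));
        return tmp;
    }

    static void neon_store_reg(int reg, int pass, TCGv_i32 var)
    {
        tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32));
        tcg_temp_free_i32(var);
    }
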
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
15
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
16
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
17
@@ -XXX,XX +XXX,XX @@ static inline long vfp_reg_offset(bool dp, unsigned reg)
16
return statusptr;
18
}
17
}
19
}
18
20
19
-static inline void gen_vfp_neg(int dp)
21
-/* Return the offset of a 32-bit piece of a NEON register.
22
- zero is the least significant end of the register. */
23
-static inline long
24
-neon_reg_offset (int reg, int n)
20
-{
25
-{
21
- if (dp)
26
- int sreg;
22
- gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
27
- sreg = reg * 2 + n;
23
- else
28
- return vfp_reg_offset(0, sreg);
24
- gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
25
-}
29
-}
26
-
30
-
27
#define VFP_GEN_ITOF(name) \
31
static TCGv_i32 neon_load_reg(int reg, int pass)
28
static inline void gen_vfp_##name(int dp, int neon) \
32
{
29
{ \
33
TCGv_i32 tmp = tcg_temp_new_i32();
30
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
34
- tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
31
* what we are asking here is "does the code for this case in
35
+ tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32));
32
* the Neon for-each-pass loop use cpu_F0s?".
36
return tmp;
33
*/
37
}
34
- return (op == NEON_2RM_VNEG_F ||
38
35
- (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
39
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
36
+ return ((op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
40
{
37
op == NEON_2RM_VRINTM ||
41
- tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
38
(op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
42
+ tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32));
39
op >= NEON_2RM_VRECPE_F);
43
tcg_temp_free_i32(var);
40
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
44
}
41
gen_helper_vfp_abss(tmp, tmp);
45
42
break;
43
case NEON_2RM_VNEG_F:
44
- gen_vfp_neg(0);
45
+ gen_helper_vfp_negs(tmp, tmp);
46
break;
47
case NEON_2RM_VSWP:
48
tmp2 = neon_load_reg(rd, pass);
49
--
46
--
50
2.20.1
47
2.20.1
51
48
52
49
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

This seems a bit more readable than using offsetof CPU_DoubleU.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)
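
Expressed without the CPU_DoubleU arithmetic, the helper reduces to the form
below (this mirrors the replacement hunk that follows): a Dreg is element 0 of
size MO_64, and an Sreg is one of the two MO_32 elements of its containing Dreg.

    /* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
    static long vfp_reg_offset(bool dp, unsigned reg)
    {
        if (dp) {
            return neon_element_offset(reg, 0, MO_64);
        } else {
            return neon_element_offset(reg >> 1, reg & 1, MO_32);
        }
    }
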
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
15
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
16
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
17
@@ -XXX,XX +XXX,XX @@ static long neon_element_offset(int reg, int element, MemOp size)
16
* what we are asking here is "does the code for this case in
18
return neon_full_reg_offset(reg) + ofs;
17
* the Neon for-each-pass loop use cpu_F0s?".
18
*/
19
- return ((op >= NEON_2RM_VCVTAU && op <= NEON_2RM_VCVTMS) ||
20
- op >= NEON_2RM_VRECPE_F);
21
+ return op >= NEON_2RM_VRECPE_F;
22
}
19
}
23
20
24
static bool neon_2rm_is_v8_op(int op)
21
-static inline long vfp_reg_offset(bool dp, unsigned reg)
25
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
22
+/* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
26
cpu_env);
23
+static long vfp_reg_offset(bool dp, unsigned reg)
27
24
{
28
if (is_signed) {
25
if (dp) {
29
- gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
26
- return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
30
+ gen_helper_vfp_tosls(tmp, tmp,
27
+ return neon_element_offset(reg, 0, MO_64);
31
tcg_shift, fpst);
28
} else {
32
} else {
29
- long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
33
- gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
30
- if (reg & 1) {
34
+ gen_helper_vfp_touls(tmp, tmp,
31
- ofs += offsetof(CPU_DoubleU, l.upper);
35
tcg_shift, fpst);
32
- } else {
36
}
33
- ofs += offsetof(CPU_DoubleU, l.lower);
34
- }
35
- return ofs;
36
+ return neon_element_offset(reg >> 1, reg & 1, MO_32);
37
}
38
}
37
39
38
--
40
--
39
2.20.1
41
2.20.1
40
42
41
43
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

Model these off the aa64 read/write_vec_element functions.
Use it within translate-neon.c.inc. The new functions do
not allocate or free temps, so this rearranges the calling
code a bit.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-6-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          |  26 ++++
 target/arm/translate-neon.c.inc | 256 ++++++++++++++++++++------------
 2 files changed, 183 insertions(+), 99 deletions(-)
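
The new accessors are deliberately minimal; the 32-bit read side looks like the
sketch below (taken from the translate.c hunk), and callers now allocate and
free their own temporaries around it:

    static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
    {
        long off = neon_element_offset(reg, ele, size);

        switch (size) {
        case MO_32:
            tcg_gen_ld_i32(dest, cpu_env, off);
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Typical caller pattern after the conversion: */
    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vn, 0, MO_32);
    /* ... operate on tmp ... */
    write_neon_element32(tmp, a->vd, 0, MO_32);
    tcg_temp_free_i32(tmp);
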
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
diff --git a/target/arm/translate.c b/target/arm/translate.c
20
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
19
--- a/target/arm/translate.c
22
+++ b/target/arm/cpu.h
20
+++ b/target/arm/translate.c
23
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
21
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg32(TCGv_i32 var, int reg)
24
return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0;
22
tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
25
}
23
}
26
24
27
+static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id)
25
+static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
28
+{
26
+{
29
+ /* Return true if CPU supports double precision floating point */
27
+ long off = neon_element_offset(reg, ele, size);
30
+ return FIELD_EX64(id->mvfr0, MVFR0, FPDP) > 0;
28
+
29
+ switch (size) {
30
+ case MO_32:
31
+ tcg_gen_ld_i32(dest, cpu_env, off);
32
+ break;
33
+ default:
34
+ g_assert_not_reached();
35
+ }
31
+}
36
+}
32
+
37
+
33
/*
38
+static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp size)
34
* We always set the FP and SIMD FP16 fields to indicate identical
39
+{
35
* levels of support (assuming SIMD is implemented at all), so
40
+ long off = neon_element_offset(reg, ele, size);
36
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
41
+
42
+ switch (size) {
43
+ case MO_32:
44
+ tcg_gen_st_i32(src, cpu_env, off);
45
+ break;
46
+ default:
47
+ g_assert_not_reached();
48
+ }
49
+}
50
+
51
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
52
{
53
TCGv_ptr ret = tcg_temp_new_ptr();
54
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
37
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/translate-vfp.inc.c
56
--- a/target/arm/translate-neon.c.inc
39
+++ b/target/arm/translate-vfp.inc.c
57
+++ b/target/arm/translate-neon.c.inc
40
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
58
@@ -XXX,XX +XXX,XX @@ static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn)
41
((a->vm | a->vn | a->vd) & 0x10)) {
59
* early. Since Q is 0 there are always just two passes, so instead
60
* of a complicated loop over each pass we just unroll.
61
*/
62
- tmp = neon_load_reg(a->vn, 0);
63
- tmp2 = neon_load_reg(a->vn, 1);
64
+ tmp = tcg_temp_new_i32();
65
+ tmp2 = tcg_temp_new_i32();
66
+ tmp3 = tcg_temp_new_i32();
67
+
68
+ read_neon_element32(tmp, a->vn, 0, MO_32);
69
+ read_neon_element32(tmp2, a->vn, 1, MO_32);
70
fn(tmp, tmp, tmp2);
71
- tcg_temp_free_i32(tmp2);
72
73
- tmp3 = neon_load_reg(a->vm, 0);
74
- tmp2 = neon_load_reg(a->vm, 1);
75
+ read_neon_element32(tmp3, a->vm, 0, MO_32);
76
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
77
fn(tmp3, tmp3, tmp2);
78
- tcg_temp_free_i32(tmp2);
79
80
- neon_store_reg(a->vd, 0, tmp);
81
- neon_store_reg(a->vd, 1, tmp3);
82
+ write_neon_element32(tmp, a->vd, 0, MO_32);
83
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
84
+
85
+ tcg_temp_free_i32(tmp);
86
+ tcg_temp_free_i32(tmp2);
87
+ tcg_temp_free_i32(tmp3);
88
return true;
89
}
90
91
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
92
* 2-reg-and-shift operations, size < 3 case, where the
93
* helper needs to be passed cpu_env.
94
*/
95
- TCGv_i32 constimm;
96
+ TCGv_i32 constimm, tmp;
97
int pass;
98
99
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
100
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
101
* by immediate using the variable shift operations.
102
*/
103
constimm = tcg_const_i32(dup_const(a->size, a->shift));
104
+ tmp = tcg_temp_new_i32();
105
106
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
107
- TCGv_i32 tmp = neon_load_reg(a->vm, pass);
108
+ read_neon_element32(tmp, a->vm, pass, MO_32);
109
fn(tmp, cpu_env, tmp, constimm);
110
- neon_store_reg(a->vd, pass, tmp);
111
+ write_neon_element32(tmp, a->vd, pass, MO_32);
112
}
113
+ tcg_temp_free_i32(tmp);
114
tcg_temp_free_i32(constimm);
115
return true;
116
}
117
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
118
constimm = tcg_const_i64(-a->shift);
119
rm1 = tcg_temp_new_i64();
120
rm2 = tcg_temp_new_i64();
121
+ rd = tcg_temp_new_i32();
122
123
/* Load both inputs first to avoid potential overwrite if rm == rd */
124
neon_load_reg64(rm1, a->vm);
125
neon_load_reg64(rm2, a->vm + 1);
126
127
shiftfn(rm1, rm1, constimm);
128
- rd = tcg_temp_new_i32();
129
narrowfn(rd, cpu_env, rm1);
130
- neon_store_reg(a->vd, 0, rd);
131
+ write_neon_element32(rd, a->vd, 0, MO_32);
132
133
shiftfn(rm2, rm2, constimm);
134
- rd = tcg_temp_new_i32();
135
narrowfn(rd, cpu_env, rm2);
136
- neon_store_reg(a->vd, 1, rd);
137
+ write_neon_element32(rd, a->vd, 1, MO_32);
138
139
+ tcg_temp_free_i32(rd);
140
tcg_temp_free_i64(rm1);
141
tcg_temp_free_i64(rm2);
142
tcg_temp_free_i64(constimm);
143
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
144
constimm = tcg_const_i32(imm);
145
146
/* Load all inputs first to avoid potential overwrite */
147
- rm1 = neon_load_reg(a->vm, 0);
148
- rm2 = neon_load_reg(a->vm, 1);
149
- rm3 = neon_load_reg(a->vm + 1, 0);
150
- rm4 = neon_load_reg(a->vm + 1, 1);
151
+ rm1 = tcg_temp_new_i32();
152
+ rm2 = tcg_temp_new_i32();
153
+ rm3 = tcg_temp_new_i32();
154
+ rm4 = tcg_temp_new_i32();
155
+ read_neon_element32(rm1, a->vm, 0, MO_32);
156
+ read_neon_element32(rm2, a->vm, 1, MO_32);
157
+ read_neon_element32(rm3, a->vm, 2, MO_32);
158
+ read_neon_element32(rm4, a->vm, 3, MO_32);
159
rtmp = tcg_temp_new_i64();
160
161
shiftfn(rm1, rm1, constimm);
162
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
163
tcg_temp_free_i32(rm2);
164
165
narrowfn(rm1, cpu_env, rtmp);
166
- neon_store_reg(a->vd, 0, rm1);
167
+ write_neon_element32(rm1, a->vd, 0, MO_32);
168
+ tcg_temp_free_i32(rm1);
169
170
shiftfn(rm3, rm3, constimm);
171
shiftfn(rm4, rm4, constimm);
172
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
173
174
narrowfn(rm3, cpu_env, rtmp);
175
tcg_temp_free_i64(rtmp);
176
- neon_store_reg(a->vd, 1, rm3);
177
+ write_neon_element32(rm3, a->vd, 1, MO_32);
178
+ tcg_temp_free_i32(rm3);
179
return true;
180
}
181
182
@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
183
widen_mask = dup_const(a->size + 1, widen_mask);
184
}
185
186
- rm0 = neon_load_reg(a->vm, 0);
187
- rm1 = neon_load_reg(a->vm, 1);
188
+ rm0 = tcg_temp_new_i32();
189
+ rm1 = tcg_temp_new_i32();
190
+ read_neon_element32(rm0, a->vm, 0, MO_32);
191
+ read_neon_element32(rm1, a->vm, 1, MO_32);
192
tmp = tcg_temp_new_i64();
193
194
widenfn(tmp, rm0);
195
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
196
if (src1_wide) {
197
neon_load_reg64(rn0_64, a->vn);
198
} else {
199
- TCGv_i32 tmp = neon_load_reg(a->vn, 0);
200
+ TCGv_i32 tmp = tcg_temp_new_i32();
201
+ read_neon_element32(tmp, a->vn, 0, MO_32);
202
widenfn(rn0_64, tmp);
203
tcg_temp_free_i32(tmp);
204
}
205
- rm = neon_load_reg(a->vm, 0);
206
+ rm = tcg_temp_new_i32();
207
+ read_neon_element32(rm, a->vm, 0, MO_32);
208
209
widenfn(rm_64, rm);
210
tcg_temp_free_i32(rm);
211
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
212
if (src1_wide) {
213
neon_load_reg64(rn1_64, a->vn + 1);
214
} else {
215
- TCGv_i32 tmp = neon_load_reg(a->vn, 1);
216
+ TCGv_i32 tmp = tcg_temp_new_i32();
217
+ read_neon_element32(tmp, a->vn, 1, MO_32);
218
widenfn(rn1_64, tmp);
219
tcg_temp_free_i32(tmp);
220
}
221
- rm = neon_load_reg(a->vm, 1);
222
+ rm = tcg_temp_new_i32();
223
+ read_neon_element32(rm, a->vm, 1, MO_32);
224
225
neon_store_reg64(rn0_64, a->vd);
226
227
@@ -XXX,XX +XXX,XX @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
228
229
narrowfn(rd1, rn_64);
230
231
- neon_store_reg(a->vd, 0, rd0);
232
- neon_store_reg(a->vd, 1, rd1);
233
+ write_neon_element32(rd0, a->vd, 0, MO_32);
234
+ write_neon_element32(rd1, a->vd, 1, MO_32);
235
236
+ tcg_temp_free_i32(rd0);
237
+ tcg_temp_free_i32(rd1);
238
tcg_temp_free_i64(rn_64);
239
tcg_temp_free_i64(rm_64);
240
241
@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
242
rd0 = tcg_temp_new_i64();
243
rd1 = tcg_temp_new_i64();
244
245
- rn = neon_load_reg(a->vn, 0);
246
- rm = neon_load_reg(a->vm, 0);
247
+ rn = tcg_temp_new_i32();
248
+ rm = tcg_temp_new_i32();
249
+ read_neon_element32(rn, a->vn, 0, MO_32);
250
+ read_neon_element32(rm, a->vm, 0, MO_32);
251
opfn(rd0, rn, rm);
252
- tcg_temp_free_i32(rn);
253
- tcg_temp_free_i32(rm);
254
255
- rn = neon_load_reg(a->vn, 1);
256
- rm = neon_load_reg(a->vm, 1);
257
+ read_neon_element32(rn, a->vn, 1, MO_32);
258
+ read_neon_element32(rm, a->vm, 1, MO_32);
259
opfn(rd1, rn, rm);
260
tcg_temp_free_i32(rn);
261
tcg_temp_free_i32(rm);
262
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
263
264
static inline TCGv_i32 neon_get_scalar(int size, int reg)
265
{
266
- TCGv_i32 tmp;
267
- if (size == 1) {
268
- tmp = neon_load_reg(reg & 7, reg >> 4);
269
+ TCGv_i32 tmp = tcg_temp_new_i32();
270
+ if (size == MO_16) {
271
+ read_neon_element32(tmp, reg & 7, reg >> 4, MO_32);
272
if (reg & 8) {
273
gen_neon_dup_high16(tmp);
274
} else {
275
gen_neon_dup_low16(tmp);
276
}
277
} else {
278
- tmp = neon_load_reg(reg & 15, reg >> 4);
279
+ read_neon_element32(tmp, reg & 15, reg >> 4, MO_32);
280
}
281
return tmp;
282
}
283
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a,
284
* perform an accumulation operation of that result into the
285
* destination.
286
*/
287
- TCGv_i32 scalar;
288
+ TCGv_i32 scalar, tmp;
289
int pass;
290
291
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
292
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a,
293
}
294
295
scalar = neon_get_scalar(a->size, a->vm);
296
+ tmp = tcg_temp_new_i32();
297
298
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
299
- TCGv_i32 tmp = neon_load_reg(a->vn, pass);
300
+ read_neon_element32(tmp, a->vn, pass, MO_32);
301
opfn(tmp, tmp, scalar);
302
if (accfn) {
303
- TCGv_i32 rd = neon_load_reg(a->vd, pass);
304
+ TCGv_i32 rd = tcg_temp_new_i32();
305
+ read_neon_element32(rd, a->vd, pass, MO_32);
306
accfn(tmp, rd, tmp);
307
tcg_temp_free_i32(rd);
308
}
309
- neon_store_reg(a->vd, pass, tmp);
310
+ write_neon_element32(tmp, a->vd, pass, MO_32);
311
}
312
+ tcg_temp_free_i32(tmp);
313
tcg_temp_free_i32(scalar);
314
return true;
315
}
316
@@ -XXX,XX +XXX,XX @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
317
* performs a kind of fused op-then-accumulate using a helper
318
* function that takes all of rd, rn and the scalar at once.
319
*/
320
- TCGv_i32 scalar;
321
+ TCGv_i32 scalar, rn, rd;
322
int pass;
323
324
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
325
@@ -XXX,XX +XXX,XX @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
326
}
327
328
scalar = neon_get_scalar(a->size, a->vm);
329
+ rn = tcg_temp_new_i32();
330
+ rd = tcg_temp_new_i32();
331
332
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
333
- TCGv_i32 rn = neon_load_reg(a->vn, pass);
334
- TCGv_i32 rd = neon_load_reg(a->vd, pass);
335
+ read_neon_element32(rn, a->vn, pass, MO_32);
336
+ read_neon_element32(rd, a->vd, pass, MO_32);
337
opfn(rd, cpu_env, rn, scalar, rd);
338
- tcg_temp_free_i32(rn);
339
- neon_store_reg(a->vd, pass, rd);
340
+ write_neon_element32(rd, a->vd, pass, MO_32);
341
}
342
+ tcg_temp_free_i32(rn);
343
+ tcg_temp_free_i32(rd);
344
tcg_temp_free_i32(scalar);
345
346
return true;
347
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
348
scalar = neon_get_scalar(a->size, a->vm);
349
350
/* Load all inputs before writing any outputs, in case of overlap */
351
- rn = neon_load_reg(a->vn, 0);
352
+ rn = tcg_temp_new_i32();
353
+ read_neon_element32(rn, a->vn, 0, MO_32);
354
rn0_64 = tcg_temp_new_i64();
355
opfn(rn0_64, rn, scalar);
356
- tcg_temp_free_i32(rn);
357
358
- rn = neon_load_reg(a->vn, 1);
359
+ read_neon_element32(rn, a->vn, 1, MO_32);
360
rn1_64 = tcg_temp_new_i64();
361
opfn(rn1_64, rn, scalar);
362
tcg_temp_free_i32(rn);
363
@@ -XXX,XX +XXX,XX @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
42
return false;
364
return false;
43
}
365
}
44
+
366
n <<= 3;
45
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
367
+ tmp = tcg_temp_new_i32();
46
+ return false;
368
if (a->op) {
47
+ }
369
- tmp = neon_load_reg(a->vd, 0);
48
+
370
+ read_neon_element32(tmp, a->vd, 0, MO_32);
49
rd = a->vd;
371
} else {
50
rn = a->vn;
372
- tmp = tcg_temp_new_i32();
51
rm = a->vm;
373
tcg_gen_movi_i32(tmp, 0);
52
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
374
}
53
((a->vm | a->vn | a->vd) & 0x10)) {
375
- tmp2 = neon_load_reg(a->vm, 0);
376
+ tmp2 = tcg_temp_new_i32();
377
+ read_neon_element32(tmp2, a->vm, 0, MO_32);
378
ptr1 = vfp_reg_ptr(true, a->vn);
379
tmp4 = tcg_const_i32(n);
380
gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp4);
381
- tcg_temp_free_i32(tmp);
382
+
383
if (a->op) {
384
- tmp = neon_load_reg(a->vd, 1);
385
+ read_neon_element32(tmp, a->vd, 1, MO_32);
386
} else {
387
- tmp = tcg_temp_new_i32();
388
tcg_gen_movi_i32(tmp, 0);
389
}
390
- tmp3 = neon_load_reg(a->vm, 1);
391
+ tmp3 = tcg_temp_new_i32();
392
+ read_neon_element32(tmp3, a->vm, 1, MO_32);
393
gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp4);
394
+ tcg_temp_free_i32(tmp);
395
tcg_temp_free_i32(tmp4);
396
tcg_temp_free_ptr(ptr1);
397
- neon_store_reg(a->vd, 0, tmp2);
398
- neon_store_reg(a->vd, 1, tmp3);
399
- tcg_temp_free_i32(tmp);
400
+
401
+ write_neon_element32(tmp2, a->vd, 0, MO_32);
402
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
403
+ tcg_temp_free_i32(tmp2);
404
+ tcg_temp_free_i32(tmp3);
405
return true;
406
}
407
408
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
409
static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
410
{
411
int pass, half;
412
+ TCGv_i32 tmp[2];
413
414
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
54
return false;
415
return false;
55
}
416
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
56
+
57
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
58
+ return false;
59
+ }
60
+
61
rd = a->vd;
62
rn = a->vn;
63
rm = a->vm;
64
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
65
((a->vm | a->vd) & 0x10)) {
66
return false;
67
}
68
+
69
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
70
+ return false;
71
+ }
72
+
73
rd = a->vd;
74
rm = a->vm;
75
76
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
77
if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
78
return false;
79
}
80
+
81
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
82
+ return false;
83
+ }
84
+
85
rd = a->vd;
86
rm = a->vm;
87
88
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
89
return false;
90
}
91
92
+ if (!dc_isar_feature(aa32_fpdp, s)) {
93
+ return false;
94
+ }
95
+
96
if (!dc_isar_feature(aa32_fpshvec, s) &&
97
(veclen != 0 || s->vec_stride != 0)) {
98
return false;
99
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
100
return false;
101
}
102
103
+ if (!dc_isar_feature(aa32_fpdp, s)) {
104
+ return false;
105
+ }
106
+
107
if (!dc_isar_feature(aa32_fpshvec, s) &&
108
(veclen != 0 || s->vec_stride != 0)) {
109
return false;
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
111
return false;
112
}
113
114
+ if (!dc_isar_feature(aa32_fpdp, s)) {
115
+ return false;
116
+ }
117
+
118
if (!vfp_access_check(s)) {
119
return true;
417
return true;
120
}
418
}
121
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
419
122
return false;
420
- for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
123
}
421
- TCGv_i32 tmp[2];
124
422
+ tmp[0] = tcg_temp_new_i32();
125
+ if (!dc_isar_feature(aa32_fpdp, s)) {
423
+ tmp[1] = tcg_temp_new_i32();
126
+ return false;
424
127
+ }
425
+ for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
128
+
426
for (half = 0; half < 2; half++) {
129
if (!dc_isar_feature(aa32_fpshvec, s) &&
427
- tmp[half] = neon_load_reg(a->vm, pass * 2 + half);
130
(veclen != 0 || s->vec_stride != 0)) {
428
+ read_neon_element32(tmp[half], a->vm, pass * 2 + half, MO_32);
131
return false;
429
switch (a->size) {
132
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
430
case 0:
133
return false;
431
tcg_gen_bswap32_i32(tmp[half], tmp[half]);
134
}
432
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
135
433
g_assert_not_reached();
136
+ if (!dc_isar_feature(aa32_fpdp, s)) {
434
}
137
+ return false;
435
}
138
+ }
436
- neon_store_reg(a->vd, pass * 2, tmp[1]);
139
+
437
- neon_store_reg(a->vd, pass * 2 + 1, tmp[0]);
140
if (!vfp_access_check(s)) {
438
+ write_neon_element32(tmp[1], a->vd, pass * 2, MO_32);
439
+ write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32);
440
}
441
+
442
+ tcg_temp_free_i32(tmp[0]);
443
+ tcg_temp_free_i32(tmp[1]);
444
return true;
445
}
446
447
@@ -XXX,XX +XXX,XX @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
448
rm0_64 = tcg_temp_new_i64();
449
rm1_64 = tcg_temp_new_i64();
450
rd_64 = tcg_temp_new_i64();
451
- tmp = neon_load_reg(a->vm, pass * 2);
452
+
453
+ tmp = tcg_temp_new_i32();
454
+ read_neon_element32(tmp, a->vm, pass * 2, MO_32);
455
widenfn(rm0_64, tmp);
456
- tcg_temp_free_i32(tmp);
457
- tmp = neon_load_reg(a->vm, pass * 2 + 1);
458
+ read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32);
459
widenfn(rm1_64, tmp);
460
tcg_temp_free_i32(tmp);
461
+
462
opfn(rd_64, rm0_64, rm1_64);
463
tcg_temp_free_i64(rm0_64);
464
tcg_temp_free_i64(rm1_64);
465
@@ -XXX,XX +XXX,XX @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
466
narrowfn(rd0, cpu_env, rm);
467
neon_load_reg64(rm, a->vm + 1);
468
narrowfn(rd1, cpu_env, rm);
469
- neon_store_reg(a->vd, 0, rd0);
470
- neon_store_reg(a->vd, 1, rd1);
471
+ write_neon_element32(rd0, a->vd, 0, MO_32);
472
+ write_neon_element32(rd1, a->vd, 1, MO_32);
473
+ tcg_temp_free_i32(rd0);
474
+ tcg_temp_free_i32(rd1);
475
tcg_temp_free_i64(rm);
476
return true;
477
}
478
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
479
}
480
481
rd = tcg_temp_new_i64();
482
+ rm0 = tcg_temp_new_i32();
483
+ rm1 = tcg_temp_new_i32();
484
485
- rm0 = neon_load_reg(a->vm, 0);
486
- rm1 = neon_load_reg(a->vm, 1);
487
+ read_neon_element32(rm0, a->vm, 0, MO_32);
488
+ read_neon_element32(rm1, a->vm, 1, MO_32);
489
490
widenfn(rd, rm0);
491
tcg_gen_shli_i64(rd, rd, 8 << a->size);
492
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)
493
494
fpst = fpstatus_ptr(FPST_STD);
495
ahp = get_ahp_flag();
496
- tmp = neon_load_reg(a->vm, 0);
497
+ tmp = tcg_temp_new_i32();
498
+ read_neon_element32(tmp, a->vm, 0, MO_32);
499
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
500
- tmp2 = neon_load_reg(a->vm, 1);
501
+ tmp2 = tcg_temp_new_i32();
502
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
503
gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
504
tcg_gen_shli_i32(tmp2, tmp2, 16);
505
tcg_gen_or_i32(tmp2, tmp2, tmp);
506
- tcg_temp_free_i32(tmp);
507
- tmp = neon_load_reg(a->vm, 2);
508
+ read_neon_element32(tmp, a->vm, 2, MO_32);
509
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
510
- tmp3 = neon_load_reg(a->vm, 3);
511
- neon_store_reg(a->vd, 0, tmp2);
512
+ tmp3 = tcg_temp_new_i32();
513
+ read_neon_element32(tmp3, a->vm, 3, MO_32);
514
+ write_neon_element32(tmp2, a->vd, 0, MO_32);
515
+ tcg_temp_free_i32(tmp2);
516
gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
517
tcg_gen_shli_i32(tmp3, tmp3, 16);
518
tcg_gen_or_i32(tmp3, tmp3, tmp);
519
- neon_store_reg(a->vd, 1, tmp3);
520
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
521
+ tcg_temp_free_i32(tmp3);
522
tcg_temp_free_i32(tmp);
523
tcg_temp_free_i32(ahp);
524
tcg_temp_free_ptr(fpst);
525
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
526
fpst = fpstatus_ptr(FPST_STD);
527
ahp = get_ahp_flag();
528
tmp3 = tcg_temp_new_i32();
529
- tmp = neon_load_reg(a->vm, 0);
530
- tmp2 = neon_load_reg(a->vm, 1);
531
+ tmp2 = tcg_temp_new_i32();
532
+ tmp = tcg_temp_new_i32();
533
+ read_neon_element32(tmp, a->vm, 0, MO_32);
534
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
535
tcg_gen_ext16u_i32(tmp3, tmp);
536
gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
537
- neon_store_reg(a->vd, 0, tmp3);
538
+ write_neon_element32(tmp3, a->vd, 0, MO_32);
539
tcg_gen_shri_i32(tmp, tmp, 16);
540
gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
541
- neon_store_reg(a->vd, 1, tmp);
542
- tmp3 = tcg_temp_new_i32();
543
+ write_neon_element32(tmp, a->vd, 1, MO_32);
544
+ tcg_temp_free_i32(tmp);
545
tcg_gen_ext16u_i32(tmp3, tmp2);
546
gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
547
- neon_store_reg(a->vd, 2, tmp3);
548
+ write_neon_element32(tmp3, a->vd, 2, MO_32);
549
+ tcg_temp_free_i32(tmp3);
550
tcg_gen_shri_i32(tmp2, tmp2, 16);
551
gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
552
- neon_store_reg(a->vd, 3, tmp2);
553
+ write_neon_element32(tmp2, a->vd, 3, MO_32);
554
+ tcg_temp_free_i32(tmp2);
555
tcg_temp_free_i32(ahp);
556
tcg_temp_free_ptr(fpst);
557
558
@@ -XXX,XX +XXX,XX @@ DO_2M_CRYPTO(SHA256SU0, aa32_sha2, 2)
559
560
static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
561
{
562
+ TCGv_i32 tmp;
563
int pass;
564
565
/* Handle a 2-reg-misc operation by iterating 32 bits at a time */
566
@@ -XXX,XX +XXX,XX @@ static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
141
return true;
567
return true;
142
}
568
}
143
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
569
144
return false;
570
+ tmp = tcg_temp_new_i32();
145
}
571
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
146
572
- TCGv_i32 tmp = neon_load_reg(a->vm, pass);
147
+ if (!dc_isar_feature(aa32_fpdp, s)) {
573
+ read_neon_element32(tmp, a->vm, pass, MO_32);
148
+ return false;
574
fn(tmp, tmp);
149
+ }
575
- neon_store_reg(a->vd, pass, tmp);
150
+
576
+ write_neon_element32(tmp, a->vd, pass, MO_32);
151
if (!vfp_access_check(s)) {
577
}
578
+ tcg_temp_free_i32(tmp);
579
580
return true;
581
}
582
@@ -XXX,XX +XXX,XX @@ static bool trans_VTRN(DisasContext *s, arg_2misc *a)
152
return true;
583
return true;
153
}
584
}
154
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
585
155
return false;
586
- if (a->size == 2) {
156
}
587
+ tmp = tcg_temp_new_i32();
157
588
+ tmp2 = tcg_temp_new_i32();
158
+ if (!dc_isar_feature(aa32_fpdp, s)) {
589
+ if (a->size == MO_32) {
159
+ return false;
590
for (pass = 0; pass < (a->q ? 4 : 2); pass += 2) {
160
+ }
591
- tmp = neon_load_reg(a->vm, pass);
161
+
592
- tmp2 = neon_load_reg(a->vd, pass + 1);
162
if (!vfp_access_check(s)) {
593
- neon_store_reg(a->vm, pass, tmp2);
163
return true;
594
- neon_store_reg(a->vd, pass + 1, tmp);
164
}
595
+ read_neon_element32(tmp, a->vm, pass, MO_32);
165
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
596
+ read_neon_element32(tmp2, a->vd, pass + 1, MO_32);
166
return false;
597
+ write_neon_element32(tmp2, a->vm, pass, MO_32);
167
}
598
+ write_neon_element32(tmp, a->vd, pass + 1, MO_32);
168
599
}
169
+ if (!dc_isar_feature(aa32_fpdp, s)) {
600
} else {
170
+ return false;
601
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
171
+ }
602
- tmp = neon_load_reg(a->vm, pass);
172
+
603
- tmp2 = neon_load_reg(a->vd, pass);
173
if (!vfp_access_check(s)) {
604
- if (a->size == 0) {
174
return true;
605
+ read_neon_element32(tmp, a->vm, pass, MO_32);
175
}
606
+ read_neon_element32(tmp2, a->vd, pass, MO_32);
176
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
607
+ if (a->size == MO_8) {
177
return false;
608
gen_neon_trn_u8(tmp, tmp2);
178
}
609
} else {
179
610
gen_neon_trn_u16(tmp, tmp2);
180
+ if (!dc_isar_feature(aa32_fpdp, s)) {
611
}
181
+ return false;
612
- neon_store_reg(a->vm, pass, tmp2);
182
+ }
613
- neon_store_reg(a->vd, pass, tmp);
183
+
614
+ write_neon_element32(tmp2, a->vm, pass, MO_32);
184
if (!vfp_access_check(s)) {
615
+ write_neon_element32(tmp, a->vd, pass, MO_32);
185
return true;
616
}
186
}
617
}
187
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
618
+ tcg_temp_free_i32(tmp);
188
return false;
619
+ tcg_temp_free_i32(tmp2);
189
}
620
return true;
190
621
}
191
+ if (!dc_isar_feature(aa32_fpdp, s)) {
192
+ return false;
193
+ }
194
+
195
if (!vfp_access_check(s)) {
196
return true;
197
}
198
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
199
return false;
200
}
201
202
+ if (!dc_isar_feature(aa32_fpdp, s)) {
203
+ return false;
204
+ }
205
+
206
if (!vfp_access_check(s)) {
207
return true;
208
}
209
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
210
return false;
211
}
212
213
+ if (!dc_isar_feature(aa32_fpdp, s)) {
214
+ return false;
215
+ }
216
+
217
if (!vfp_access_check(s)) {
218
return true;
219
}
220
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
221
return false;
222
}
223
224
+ if (!dc_isar_feature(aa32_fpdp, s)) {
225
+ return false;
226
+ }
227
+
228
if (!vfp_access_check(s)) {
229
return true;
230
}
231
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
232
return false;
233
}
234
235
+ if (!dc_isar_feature(aa32_fpdp, s)) {
236
+ return false;
237
+ }
238
+
239
if (!vfp_access_check(s)) {
240
return true;
241
}
242
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
243
return false;
244
}
245
246
+ if (!dc_isar_feature(aa32_fpdp, s)) {
247
+ return false;
248
+ }
249
+
250
if (!vfp_access_check(s)) {
251
return true;
252
}
253
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
254
return false;
255
}
256
257
+ if (!dc_isar_feature(aa32_fpdp, s)) {
258
+ return false;
259
+ }
260
+
261
if (!vfp_access_check(s)) {
262
return true;
263
}
264
--
622
--
265
2.20.1
623
2.20.1
266
624
267
625
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

We can then use this to improve VMOV (scalar to gp) and
VMOV (gp to scalar) so that we simply perform the memory
operation that we wanted, rather than inserting or
extracting from a 32-bit quantity.

These were the last uses of neon_load/store_reg, so remove them.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-7-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c         | 50 +++++++++++++-----------
 target/arm/translate-vfp.c.inc | 71 +++++-----------------------------
 2 files changed, 37 insertions(+), 84 deletions(-)
13
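For readers skimming the hunks below, a minimal sketch of the VMOV change, using only identifiers that appear in this patch: the MemOp argument carries both the element width and the signedness, so the load itself performs the extension that the old code did with explicit shift/extend sequences.

    /* Sketch: VMOV (scalar to gp) after the conversion.  a->size is
     * MO_8/MO_16/MO_32, and a->u selects unsigned extraction; OR-ing in
     * MO_SIGN makes read_neon_element32() sign-extend as it loads.
     */
    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
    store_reg(s, a->rt, tmp);
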
diff --git a/target/arm/translate.c b/target/arm/translate.c
19
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
21
--- a/target/arm/translate.c
16
+++ b/target/arm/translate.c
22
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
23
@@ -XXX,XX +XXX,XX @@ static long neon_full_reg_offset(unsigned reg)
18
return statusptr;
24
* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
19
}
25
* where 0 is the least significant end of the register.
20
26
*/
21
-#define VFP_GEN_ITOF(name) \
27
-static long neon_element_offset(int reg, int element, MemOp size)
22
-static inline void gen_vfp_##name(int dp, int neon) \
28
+static long neon_element_offset(int reg, int element, MemOp memop)
23
-{ \
29
{
24
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
30
- int element_size = 1 << size;
25
- if (dp) { \
31
+ int element_size = 1 << (memop & MO_SIZE);
26
- gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
32
int ofs = element * element_size;
27
- } else { \
33
#ifdef HOST_WORDS_BIGENDIAN
28
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
34
/*
29
- } \
35
@@ -XXX,XX +XXX,XX @@ static long vfp_reg_offset(bool dp, unsigned reg)
30
- tcg_temp_free_ptr(statusptr); \
36
}
37
}
38
39
-static TCGv_i32 neon_load_reg(int reg, int pass)
40
-{
41
- TCGv_i32 tmp = tcg_temp_new_i32();
42
- tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32));
43
- return tmp;
31
-}
44
-}
32
-
45
-
33
-VFP_GEN_ITOF(uito)
46
-static void neon_store_reg(int reg, int pass, TCGv_i32 var)
34
-VFP_GEN_ITOF(sito)
47
-{
35
-#undef VFP_GEN_ITOF
48
- tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32));
36
-
49
- tcg_temp_free_i32(var);
37
-#define VFP_GEN_FTOI(name) \
38
-static inline void gen_vfp_##name(int dp, int neon) \
39
-{ \
40
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
41
- if (dp) { \
42
- gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
43
- } else { \
44
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
45
- } \
46
- tcg_temp_free_ptr(statusptr); \
47
-}
50
-}
48
-
51
-
49
-VFP_GEN_FTOI(touiz)
52
static inline void neon_load_reg64(TCGv_i64 var, int reg)
50
-VFP_GEN_FTOI(tosiz)
53
{
51
-#undef VFP_GEN_FTOI
54
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
52
-
55
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg32(TCGv_i32 var, int reg)
53
#define VFP_GEN_FIX(name, round) \
56
tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
54
static inline void gen_vfp_##name(int dp, int shift, int neon) \
57
}
55
{ \
58
56
@@ -XXX,XX +XXX,XX @@ static const uint8_t neon_3r_sizes[] = {
59
-static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
57
#define NEON_2RM_VCVT_SF 62
60
+static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
58
#define NEON_2RM_VCVT_UF 63
61
{
59
62
- long off = neon_element_offset(reg, ele, size);
60
-static int neon_2rm_is_float_op(int op)
63
+ long off = neon_element_offset(reg, ele, memop);
61
-{
64
62
- /*
65
- switch (size) {
63
- * Return true if this neon 2reg-misc op is float-to-float.
66
- case MO_32:
64
- * This is not a property of the operation but of our code --
67
+ switch (memop) {
65
- * what we are asking here is "does the code for this case in
68
+ case MO_SB:
66
- * the Neon for-each-pass loop use cpu_F0s?".
69
+ tcg_gen_ld8s_i32(dest, cpu_env, off);
67
- */
70
+ break;
68
- return op >= NEON_2RM_VCVT_FS;
71
+ case MO_UB:
69
-}
72
+ tcg_gen_ld8u_i32(dest, cpu_env, off);
70
-
73
+ break;
71
static bool neon_2rm_is_v8_op(int op)
74
+ case MO_SW:
72
{
75
+ tcg_gen_ld16s_i32(dest, cpu_env, off);
73
/* Return true if this neon 2reg-misc op is ARMv8 and up */
76
+ break;
74
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
77
+ case MO_UW:
75
default:
78
+ tcg_gen_ld16u_i32(dest, cpu_env, off);
76
elementwise:
79
+ break;
77
for (pass = 0; pass < (q ? 4 : 2); pass++) {
80
+ case MO_UL:
78
- if (neon_2rm_is_float_op(op)) {
81
+ case MO_SL:
79
- tcg_gen_ld_f32(cpu_F0s, cpu_env,
82
tcg_gen_ld_i32(dest, cpu_env, off);
80
- neon_reg_offset(rm, pass));
83
break;
81
- tmp = NULL;
84
default:
82
- } else {
85
@@ -XXX,XX +XXX,XX @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size)
83
- tmp = neon_load_reg(rm, pass);
86
}
84
- }
87
}
85
+ tmp = neon_load_reg(rm, pass);
88
86
switch (op) {
89
-static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp size)
87
case NEON_2RM_VREV32:
90
+static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
88
switch (size) {
91
{
89
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
92
- long off = neon_element_offset(reg, ele, size);
90
break;
93
+ long off = neon_element_offset(reg, ele, memop);
91
}
94
92
case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
95
- switch (size) {
93
- gen_vfp_sito(0, 1);
96
+ switch (memop) {
94
+ {
97
+ case MO_8:
95
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
98
+ tcg_gen_st8_i32(src, cpu_env, off);
96
+ gen_helper_vfp_sitos(tmp, tmp, fpstatus);
99
+ break;
97
+ tcg_temp_free_ptr(fpstatus);
100
+ case MO_16:
98
break;
101
+ tcg_gen_st16_i32(src, cpu_env, off);
99
+ }
102
+ break;
100
case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
103
case MO_32:
101
- gen_vfp_uito(0, 1);
104
tcg_gen_st_i32(src, cpu_env, off);
102
+ {
105
break;
103
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
106
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
104
+ gen_helper_vfp_uitos(tmp, tmp, fpstatus);
107
index XXXXXXX..XXXXXXX 100644
105
+ tcg_temp_free_ptr(fpstatus);
108
--- a/target/arm/translate-vfp.c.inc
106
break;
109
+++ b/target/arm/translate-vfp.c.inc
107
+ }
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
108
case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
111
{
109
- gen_vfp_tosiz(0, 1);
112
/* VMOV scalar to general purpose register */
110
+ {
113
TCGv_i32 tmp;
111
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
114
- int pass;
112
+ gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
115
- uint32_t offset;
113
+ tcg_temp_free_ptr(fpstatus);
116
114
break;
117
- /* SIZE == 2 is a VFP instruction; otherwise NEON. */
115
+ }
118
- if (a->size == 2
116
case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
119
+ /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
117
- gen_vfp_touiz(0, 1);
120
+ if (a->size == MO_32
118
+ {
121
? !dc_isar_feature(aa32_fpsp_v2, s)
119
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
122
: !arm_dc_feature(s, ARM_FEATURE_NEON)) {
120
+ gen_helper_vfp_touizs(tmp, tmp, fpstatus);
123
return false;
121
+ tcg_temp_free_ptr(fpstatus);
124
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
122
break;
125
return false;
123
+ }
126
}
124
default:
127
125
/* Reserved op values were caught by the
128
- offset = a->index << a->size;
126
* neon_2rm_sizes[] check earlier.
129
- pass = extract32(offset, 2, 1);
127
*/
130
- offset = extract32(offset, 0, 2) * 8;
128
abort();
131
-
129
}
132
if (!vfp_access_check(s)) {
130
- if (neon_2rm_is_float_op(op)) {
133
return true;
131
- tcg_gen_st_f32(cpu_F0s, cpu_env,
134
}
132
- neon_reg_offset(rd, pass));
135
133
- } else {
136
- tmp = neon_load_reg(a->vn, pass);
134
- neon_store_reg(rd, pass, tmp);
137
- switch (a->size) {
135
- }
138
- case 0:
136
+ neon_store_reg(rd, pass, tmp);
139
- if (offset) {
137
}
140
- tcg_gen_shri_i32(tmp, tmp, offset);
138
break;
141
- }
139
}
142
- if (a->u) {
143
- gen_uxtb(tmp);
144
- } else {
145
- gen_sxtb(tmp);
146
- }
147
- break;
148
- case 1:
149
- if (a->u) {
150
- if (offset) {
151
- tcg_gen_shri_i32(tmp, tmp, 16);
152
- } else {
153
- gen_uxth(tmp);
154
- }
155
- } else {
156
- if (offset) {
157
- tcg_gen_sari_i32(tmp, tmp, 16);
158
- } else {
159
- gen_sxth(tmp);
160
- }
161
- }
162
- break;
163
- case 2:
164
- break;
165
- }
166
+ tmp = tcg_temp_new_i32();
167
+ read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
168
store_reg(s, a->rt, tmp);
169
170
return true;
171
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
172
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
173
{
174
/* VMOV general purpose register to scalar */
175
- TCGv_i32 tmp, tmp2;
176
- int pass;
177
- uint32_t offset;
178
+ TCGv_i32 tmp;
179
180
- /* SIZE == 2 is a VFP instruction; otherwise NEON. */
181
- if (a->size == 2
182
+ /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
183
+ if (a->size == MO_32
184
? !dc_isar_feature(aa32_fpsp_v2, s)
185
: !arm_dc_feature(s, ARM_FEATURE_NEON)) {
186
return false;
187
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
188
return false;
189
}
190
191
- offset = a->index << a->size;
192
- pass = extract32(offset, 2, 1);
193
- offset = extract32(offset, 0, 2) * 8;
194
-
195
if (!vfp_access_check(s)) {
196
return true;
197
}
198
199
tmp = load_reg(s, a->rt);
200
- switch (a->size) {
201
- case 0:
202
- tmp2 = neon_load_reg(a->vn, pass);
203
- tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
204
- tcg_temp_free_i32(tmp2);
205
- break;
206
- case 1:
207
- tmp2 = neon_load_reg(a->vn, pass);
208
- tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
209
- tcg_temp_free_i32(tmp2);
210
- break;
211
- case 2:
212
- break;
213
- }
214
- neon_store_reg(a->vn, pass, tmp);
215
+ write_neon_element32(tmp, a->vn, a->index, a->size);
216
+ tcg_temp_free_i32(tmp);
217
218
return true;
219
}
140
--
2.20.1

Remove some old constructs from NEON_2RM_VCVT_F16_F32 code:
 * don't use cpu_F0s
 * don't use tcg_gen_st_f32

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-12-peter.maydell@linaro.org
---
 target/arm/translate.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

The only uses of this function are for loading VFP
single-precision values, and have nothing to do with NEON.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-8-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c         |   4 +-
 target/arm/translate-vfp.c.inc | 184 ++++++++++++++++-----------------
 2 files changed, 94 insertions(+), 94 deletions(-)

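To make the rename concrete, these are the two helpers as they read after the second patch (reproduced from the hunks below); the point is simply that they access VFP single-precision registers by register number and have nothing to do with Neon elements.

    static inline void vfp_load_reg32(TCGv_i32 var, int reg)
    {
        tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
    }

    static inline void vfp_store_reg32(TCGv_i32 var, int reg)
    {
        tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
    }
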
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
17
--- a/target/arm/translate.c
16
+++ b/target/arm/translate.c
18
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
19
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg64(TCGv_i64 var, int reg)
18
return ret;
20
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
19
}
21
}
20
22
21
-#define tcg_gen_st_f32 tcg_gen_st_i32
23
-static inline void neon_load_reg32(TCGv_i32 var, int reg)
22
-
24
+static inline void vfp_load_reg32(TCGv_i32 var, int reg)
23
#define ARM_CP_RW_BIT (1 << 20)
25
{
24
26
tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
25
/* Include the VFP decoder */
27
}
26
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
28
27
tmp = neon_load_reg(rm, 0);
29
-static inline void neon_store_reg32(TCGv_i32 var, int reg)
28
tmp2 = neon_load_reg(rm, 1);
30
+static inline void vfp_store_reg32(TCGv_i32 var, int reg)
29
tcg_gen_ext16u_i32(tmp3, tmp);
31
{
30
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
32
tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
31
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
33
}
32
- tcg_gen_shri_i32(tmp3, tmp, 16);
34
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
33
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
35
index XXXXXXX..XXXXXXX 100644
34
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
36
--- a/target/arm/translate-vfp.c.inc
35
- tcg_temp_free_i32(tmp);
37
+++ b/target/arm/translate-vfp.c.inc
36
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
37
+ neon_store_reg(rd, 0, tmp3);
39
frn = tcg_temp_new_i32();
38
+ tcg_gen_shri_i32(tmp, tmp, 16);
40
frm = tcg_temp_new_i32();
39
+ gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
41
dest = tcg_temp_new_i32();
40
+ neon_store_reg(rd, 1, tmp);
42
- neon_load_reg32(frn, rn);
41
+ tmp3 = tcg_temp_new_i32();
43
- neon_load_reg32(frm, rm);
42
tcg_gen_ext16u_i32(tmp3, tmp2);
44
+ vfp_load_reg32(frn, rn);
43
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
45
+ vfp_load_reg32(frm, rm);
44
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
46
switch (a->cc) {
45
- tcg_gen_shri_i32(tmp3, tmp2, 16);
47
case 0: /* eq: Z */
46
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
48
tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
47
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
49
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
48
- tcg_temp_free_i32(tmp2);
50
if (sz == 1) {
49
- tcg_temp_free_i32(tmp3);
51
tcg_gen_andi_i32(dest, dest, 0xffff);
50
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
52
}
51
+ neon_store_reg(rd, 2, tmp3);
53
- neon_store_reg32(dest, rd);
52
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
54
+ vfp_store_reg32(dest, rd);
53
+ gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
55
tcg_temp_free_i32(frn);
54
+ neon_store_reg(rd, 3, tmp2);
56
tcg_temp_free_i32(frm);
55
tcg_temp_free_i32(ahp);
57
tcg_temp_free_i32(dest);
56
tcg_temp_free_ptr(fpst);
58
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
57
break;
59
TCGv_i32 tcg_res;
60
tcg_op = tcg_temp_new_i32();
61
tcg_res = tcg_temp_new_i32();
62
- neon_load_reg32(tcg_op, rm);
63
+ vfp_load_reg32(tcg_op, rm);
64
if (sz == 1) {
65
gen_helper_rinth(tcg_res, tcg_op, fpst);
66
} else {
67
gen_helper_rints(tcg_res, tcg_op, fpst);
68
}
69
- neon_store_reg32(tcg_res, rd);
70
+ vfp_store_reg32(tcg_res, rd);
71
tcg_temp_free_i32(tcg_op);
72
tcg_temp_free_i32(tcg_res);
73
}
74
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
75
gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
76
}
77
tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
78
- neon_store_reg32(tcg_tmp, rd);
79
+ vfp_store_reg32(tcg_tmp, rd);
80
tcg_temp_free_i32(tcg_tmp);
81
tcg_temp_free_i64(tcg_res);
82
tcg_temp_free_i64(tcg_double);
83
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
84
TCGv_i32 tcg_single, tcg_res;
85
tcg_single = tcg_temp_new_i32();
86
tcg_res = tcg_temp_new_i32();
87
- neon_load_reg32(tcg_single, rm);
88
+ vfp_load_reg32(tcg_single, rm);
89
if (sz == 1) {
90
if (is_signed) {
91
gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
92
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
93
gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
94
}
95
}
96
- neon_store_reg32(tcg_res, rd);
97
+ vfp_store_reg32(tcg_res, rd);
98
tcg_temp_free_i32(tcg_res);
99
tcg_temp_free_i32(tcg_single);
100
}
101
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
102
if (a->l) {
103
/* VFP to general purpose register */
104
tmp = tcg_temp_new_i32();
105
- neon_load_reg32(tmp, a->vn);
106
+ vfp_load_reg32(tmp, a->vn);
107
tcg_gen_andi_i32(tmp, tmp, 0xffff);
108
store_reg(s, a->rt, tmp);
109
} else {
110
/* general purpose register to VFP */
111
tmp = load_reg(s, a->rt);
112
tcg_gen_andi_i32(tmp, tmp, 0xffff);
113
- neon_store_reg32(tmp, a->vn);
114
+ vfp_store_reg32(tmp, a->vn);
115
tcg_temp_free_i32(tmp);
116
}
117
118
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
119
if (a->l) {
120
/* VFP to general purpose register */
121
tmp = tcg_temp_new_i32();
122
- neon_load_reg32(tmp, a->vn);
123
+ vfp_load_reg32(tmp, a->vn);
124
if (a->rt == 15) {
125
/* Set the 4 flag bits in the CPSR. */
126
gen_set_nzcv(tmp);
127
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
128
} else {
129
/* general purpose register to VFP */
130
tmp = load_reg(s, a->rt);
131
- neon_store_reg32(tmp, a->vn);
132
+ vfp_store_reg32(tmp, a->vn);
133
tcg_temp_free_i32(tmp);
134
}
135
136
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
137
if (a->op) {
138
/* fpreg to gpreg */
139
tmp = tcg_temp_new_i32();
140
- neon_load_reg32(tmp, a->vm);
141
+ vfp_load_reg32(tmp, a->vm);
142
store_reg(s, a->rt, tmp);
143
tmp = tcg_temp_new_i32();
144
- neon_load_reg32(tmp, a->vm + 1);
145
+ vfp_load_reg32(tmp, a->vm + 1);
146
store_reg(s, a->rt2, tmp);
147
} else {
148
/* gpreg to fpreg */
149
tmp = load_reg(s, a->rt);
150
- neon_store_reg32(tmp, a->vm);
151
+ vfp_store_reg32(tmp, a->vm);
152
tcg_temp_free_i32(tmp);
153
tmp = load_reg(s, a->rt2);
154
- neon_store_reg32(tmp, a->vm + 1);
155
+ vfp_store_reg32(tmp, a->vm + 1);
156
tcg_temp_free_i32(tmp);
157
}
158
159
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
160
if (a->op) {
161
/* fpreg to gpreg */
162
tmp = tcg_temp_new_i32();
163
- neon_load_reg32(tmp, a->vm * 2);
164
+ vfp_load_reg32(tmp, a->vm * 2);
165
store_reg(s, a->rt, tmp);
166
tmp = tcg_temp_new_i32();
167
- neon_load_reg32(tmp, a->vm * 2 + 1);
168
+ vfp_load_reg32(tmp, a->vm * 2 + 1);
169
store_reg(s, a->rt2, tmp);
170
} else {
171
/* gpreg to fpreg */
172
tmp = load_reg(s, a->rt);
173
- neon_store_reg32(tmp, a->vm * 2);
174
+ vfp_store_reg32(tmp, a->vm * 2);
175
tcg_temp_free_i32(tmp);
176
tmp = load_reg(s, a->rt2);
177
- neon_store_reg32(tmp, a->vm * 2 + 1);
178
+ vfp_store_reg32(tmp, a->vm * 2 + 1);
179
tcg_temp_free_i32(tmp);
180
}
181
182
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
183
tmp = tcg_temp_new_i32();
184
if (a->l) {
185
gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
186
- neon_store_reg32(tmp, a->vd);
187
+ vfp_store_reg32(tmp, a->vd);
188
} else {
189
- neon_load_reg32(tmp, a->vd);
190
+ vfp_load_reg32(tmp, a->vd);
191
gen_aa32_st16(s, tmp, addr, get_mem_index(s));
192
}
193
tcg_temp_free_i32(tmp);
194
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
195
tmp = tcg_temp_new_i32();
196
if (a->l) {
197
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
198
- neon_store_reg32(tmp, a->vd);
199
+ vfp_store_reg32(tmp, a->vd);
200
} else {
201
- neon_load_reg32(tmp, a->vd);
202
+ vfp_load_reg32(tmp, a->vd);
203
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
204
}
205
tcg_temp_free_i32(tmp);
206
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
207
if (a->l) {
208
/* load */
209
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
210
- neon_store_reg32(tmp, a->vd + i);
211
+ vfp_store_reg32(tmp, a->vd + i);
212
} else {
213
/* store */
214
- neon_load_reg32(tmp, a->vd + i);
215
+ vfp_load_reg32(tmp, a->vd + i);
216
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
217
}
218
tcg_gen_addi_i32(addr, addr, offset);
219
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
220
fd = tcg_temp_new_i32();
221
fpst = fpstatus_ptr(FPST_FPCR);
222
223
- neon_load_reg32(f0, vn);
224
- neon_load_reg32(f1, vm);
225
+ vfp_load_reg32(f0, vn);
226
+ vfp_load_reg32(f1, vm);
227
228
for (;;) {
229
if (reads_vd) {
230
- neon_load_reg32(fd, vd);
231
+ vfp_load_reg32(fd, vd);
232
}
233
fn(fd, f0, f1, fpst);
234
- neon_store_reg32(fd, vd);
235
+ vfp_store_reg32(fd, vd);
236
237
if (veclen == 0) {
238
break;
239
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
240
veclen--;
241
vd = vfp_advance_sreg(vd, delta_d);
242
vn = vfp_advance_sreg(vn, delta_d);
243
- neon_load_reg32(f0, vn);
244
+ vfp_load_reg32(f0, vn);
245
if (delta_m) {
246
vm = vfp_advance_sreg(vm, delta_m);
247
- neon_load_reg32(f1, vm);
248
+ vfp_load_reg32(f1, vm);
249
}
250
}
251
252
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
253
fd = tcg_temp_new_i32();
254
fpst = fpstatus_ptr(FPST_FPCR_F16);
255
256
- neon_load_reg32(f0, vn);
257
- neon_load_reg32(f1, vm);
258
+ vfp_load_reg32(f0, vn);
259
+ vfp_load_reg32(f1, vm);
260
261
if (reads_vd) {
262
- neon_load_reg32(fd, vd);
263
+ vfp_load_reg32(fd, vd);
264
}
265
fn(fd, f0, f1, fpst);
266
- neon_store_reg32(fd, vd);
267
+ vfp_store_reg32(fd, vd);
268
269
tcg_temp_free_i32(f0);
270
tcg_temp_free_i32(f1);
271
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
272
f0 = tcg_temp_new_i32();
273
fd = tcg_temp_new_i32();
274
275
- neon_load_reg32(f0, vm);
276
+ vfp_load_reg32(f0, vm);
277
278
for (;;) {
279
fn(fd, f0);
280
- neon_store_reg32(fd, vd);
281
+ vfp_store_reg32(fd, vd);
282
283
if (veclen == 0) {
284
break;
285
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
286
/* single source one-many */
287
while (veclen--) {
288
vd = vfp_advance_sreg(vd, delta_d);
289
- neon_store_reg32(fd, vd);
290
+ vfp_store_reg32(fd, vd);
291
}
292
break;
293
}
294
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
295
veclen--;
296
vd = vfp_advance_sreg(vd, delta_d);
297
vm = vfp_advance_sreg(vm, delta_m);
298
- neon_load_reg32(f0, vm);
299
+ vfp_load_reg32(f0, vm);
300
}
301
302
tcg_temp_free_i32(f0);
303
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
304
}
305
306
f0 = tcg_temp_new_i32();
307
- neon_load_reg32(f0, vm);
308
+ vfp_load_reg32(f0, vm);
309
fn(f0, f0);
310
- neon_store_reg32(f0, vd);
311
+ vfp_store_reg32(f0, vd);
312
tcg_temp_free_i32(f0);
313
314
return true;
315
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
316
vm = tcg_temp_new_i32();
317
vd = tcg_temp_new_i32();
318
319
- neon_load_reg32(vn, a->vn);
320
- neon_load_reg32(vm, a->vm);
321
+ vfp_load_reg32(vn, a->vn);
322
+ vfp_load_reg32(vm, a->vm);
323
if (neg_n) {
324
/* VFNMS, VFMS */
325
gen_helper_vfp_negh(vn, vn);
326
}
327
- neon_load_reg32(vd, a->vd);
328
+ vfp_load_reg32(vd, a->vd);
329
if (neg_d) {
330
/* VFNMA, VFNMS */
331
gen_helper_vfp_negh(vd, vd);
332
}
333
fpst = fpstatus_ptr(FPST_FPCR_F16);
334
gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
335
- neon_store_reg32(vd, a->vd);
336
+ vfp_store_reg32(vd, a->vd);
337
338
tcg_temp_free_ptr(fpst);
339
tcg_temp_free_i32(vn);
340
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
341
vm = tcg_temp_new_i32();
342
vd = tcg_temp_new_i32();
343
344
- neon_load_reg32(vn, a->vn);
345
- neon_load_reg32(vm, a->vm);
346
+ vfp_load_reg32(vn, a->vn);
347
+ vfp_load_reg32(vm, a->vm);
348
if (neg_n) {
349
/* VFNMS, VFMS */
350
gen_helper_vfp_negs(vn, vn);
351
}
352
- neon_load_reg32(vd, a->vd);
353
+ vfp_load_reg32(vd, a->vd);
354
if (neg_d) {
355
/* VFNMA, VFNMS */
356
gen_helper_vfp_negs(vd, vd);
357
}
358
fpst = fpstatus_ptr(FPST_FPCR);
359
gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
360
- neon_store_reg32(vd, a->vd);
361
+ vfp_store_reg32(vd, a->vd);
362
363
tcg_temp_free_ptr(fpst);
364
tcg_temp_free_i32(vn);
365
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
366
}
367
368
fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
369
- neon_store_reg32(fd, a->vd);
370
+ vfp_store_reg32(fd, a->vd);
371
tcg_temp_free_i32(fd);
372
return true;
373
}
374
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
375
fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
376
377
for (;;) {
378
- neon_store_reg32(fd, vd);
379
+ vfp_store_reg32(fd, vd);
380
381
if (veclen == 0) {
382
break;
383
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
384
vd = tcg_temp_new_i32();
385
vm = tcg_temp_new_i32();
386
387
- neon_load_reg32(vd, a->vd);
388
+ vfp_load_reg32(vd, a->vd);
389
if (a->z) {
390
tcg_gen_movi_i32(vm, 0);
391
} else {
392
- neon_load_reg32(vm, a->vm);
393
+ vfp_load_reg32(vm, a->vm);
394
}
395
396
if (a->e) {
397
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
398
vd = tcg_temp_new_i32();
399
vm = tcg_temp_new_i32();
400
401
- neon_load_reg32(vd, a->vd);
402
+ vfp_load_reg32(vd, a->vd);
403
if (a->z) {
404
tcg_gen_movi_i32(vm, 0);
405
} else {
406
- neon_load_reg32(vm, a->vm);
407
+ vfp_load_reg32(vm, a->vm);
408
}
409
410
if (a->e) {
411
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
412
/* The T bit tells us if we want the low or high 16 bits of Vm */
413
tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
414
gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
415
- neon_store_reg32(tmp, a->vd);
416
+ vfp_store_reg32(tmp, a->vd);
417
tcg_temp_free_i32(ahp_mode);
418
tcg_temp_free_ptr(fpst);
419
tcg_temp_free_i32(tmp);
420
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
421
ahp_mode = get_ahp_flag();
422
tmp = tcg_temp_new_i32();
423
424
- neon_load_reg32(tmp, a->vm);
425
+ vfp_load_reg32(tmp, a->vm);
426
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
427
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
428
tcg_temp_free_i32(ahp_mode);
429
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
430
}
431
432
tmp = tcg_temp_new_i32();
433
- neon_load_reg32(tmp, a->vm);
434
+ vfp_load_reg32(tmp, a->vm);
435
fpst = fpstatus_ptr(FPST_FPCR_F16);
436
gen_helper_rinth(tmp, tmp, fpst);
437
- neon_store_reg32(tmp, a->vd);
438
+ vfp_store_reg32(tmp, a->vd);
439
tcg_temp_free_ptr(fpst);
440
tcg_temp_free_i32(tmp);
441
return true;
442
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
443
}
444
445
tmp = tcg_temp_new_i32();
446
- neon_load_reg32(tmp, a->vm);
447
+ vfp_load_reg32(tmp, a->vm);
448
fpst = fpstatus_ptr(FPST_FPCR);
449
gen_helper_rints(tmp, tmp, fpst);
450
- neon_store_reg32(tmp, a->vd);
451
+ vfp_store_reg32(tmp, a->vd);
452
tcg_temp_free_ptr(fpst);
453
tcg_temp_free_i32(tmp);
454
return true;
455
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
456
}
457
458
tmp = tcg_temp_new_i32();
459
- neon_load_reg32(tmp, a->vm);
460
+ vfp_load_reg32(tmp, a->vm);
461
fpst = fpstatus_ptr(FPST_FPCR_F16);
462
tcg_rmode = tcg_const_i32(float_round_to_zero);
463
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
464
gen_helper_rinth(tmp, tmp, fpst);
465
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
466
- neon_store_reg32(tmp, a->vd);
467
+ vfp_store_reg32(tmp, a->vd);
468
tcg_temp_free_ptr(fpst);
469
tcg_temp_free_i32(tcg_rmode);
470
tcg_temp_free_i32(tmp);
471
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
472
}
473
474
tmp = tcg_temp_new_i32();
475
- neon_load_reg32(tmp, a->vm);
476
+ vfp_load_reg32(tmp, a->vm);
477
fpst = fpstatus_ptr(FPST_FPCR);
478
tcg_rmode = tcg_const_i32(float_round_to_zero);
479
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
480
gen_helper_rints(tmp, tmp, fpst);
481
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
482
- neon_store_reg32(tmp, a->vd);
483
+ vfp_store_reg32(tmp, a->vd);
484
tcg_temp_free_ptr(fpst);
485
tcg_temp_free_i32(tcg_rmode);
486
tcg_temp_free_i32(tmp);
487
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
488
}
489
490
tmp = tcg_temp_new_i32();
491
- neon_load_reg32(tmp, a->vm);
492
+ vfp_load_reg32(tmp, a->vm);
493
fpst = fpstatus_ptr(FPST_FPCR_F16);
494
gen_helper_rinth_exact(tmp, tmp, fpst);
495
- neon_store_reg32(tmp, a->vd);
496
+ vfp_store_reg32(tmp, a->vd);
497
tcg_temp_free_ptr(fpst);
498
tcg_temp_free_i32(tmp);
499
return true;
500
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
501
}
502
503
tmp = tcg_temp_new_i32();
504
- neon_load_reg32(tmp, a->vm);
505
+ vfp_load_reg32(tmp, a->vm);
506
fpst = fpstatus_ptr(FPST_FPCR);
507
gen_helper_rints_exact(tmp, tmp, fpst);
508
- neon_store_reg32(tmp, a->vd);
509
+ vfp_store_reg32(tmp, a->vd);
510
tcg_temp_free_ptr(fpst);
511
tcg_temp_free_i32(tmp);
512
return true;
513
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
514
515
vm = tcg_temp_new_i32();
516
vd = tcg_temp_new_i64();
517
- neon_load_reg32(vm, a->vm);
518
+ vfp_load_reg32(vm, a->vm);
519
gen_helper_vfp_fcvtds(vd, vm, cpu_env);
520
neon_store_reg64(vd, a->vd);
521
tcg_temp_free_i32(vm);
522
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
523
vm = tcg_temp_new_i64();
524
neon_load_reg64(vm, a->vm);
525
gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
526
- neon_store_reg32(vd, a->vd);
527
+ vfp_store_reg32(vd, a->vd);
528
tcg_temp_free_i32(vd);
529
tcg_temp_free_i64(vm);
530
return true;
531
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
532
}
533
534
vm = tcg_temp_new_i32();
535
- neon_load_reg32(vm, a->vm);
536
+ vfp_load_reg32(vm, a->vm);
537
fpst = fpstatus_ptr(FPST_FPCR_F16);
538
if (a->s) {
539
/* i32 -> f16 */
540
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
541
/* u32 -> f16 */
542
gen_helper_vfp_uitoh(vm, vm, fpst);
543
}
544
- neon_store_reg32(vm, a->vd);
545
+ vfp_store_reg32(vm, a->vd);
546
tcg_temp_free_i32(vm);
547
tcg_temp_free_ptr(fpst);
548
return true;
549
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
550
}
551
552
vm = tcg_temp_new_i32();
553
- neon_load_reg32(vm, a->vm);
554
+ vfp_load_reg32(vm, a->vm);
555
fpst = fpstatus_ptr(FPST_FPCR);
556
if (a->s) {
557
/* i32 -> f32 */
558
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
559
/* u32 -> f32 */
560
gen_helper_vfp_uitos(vm, vm, fpst);
561
}
562
- neon_store_reg32(vm, a->vd);
563
+ vfp_store_reg32(vm, a->vd);
564
tcg_temp_free_i32(vm);
565
tcg_temp_free_ptr(fpst);
566
return true;
567
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
568
569
vm = tcg_temp_new_i32();
570
vd = tcg_temp_new_i64();
571
- neon_load_reg32(vm, a->vm);
572
+ vfp_load_reg32(vm, a->vm);
573
fpst = fpstatus_ptr(FPST_FPCR);
574
if (a->s) {
575
/* i32 -> f64 */
576
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
577
vd = tcg_temp_new_i32();
578
neon_load_reg64(vm, a->vm);
579
gen_helper_vjcvt(vd, vm, cpu_env);
580
- neon_store_reg32(vd, a->vd);
581
+ vfp_store_reg32(vd, a->vd);
582
tcg_temp_free_i64(vm);
583
tcg_temp_free_i32(vd);
584
return true;
585
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
586
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
587
588
vd = tcg_temp_new_i32();
589
- neon_load_reg32(vd, a->vd);
590
+ vfp_load_reg32(vd, a->vd);
591
592
fpst = fpstatus_ptr(FPST_FPCR_F16);
593
shift = tcg_const_i32(frac_bits);
594
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
595
g_assert_not_reached();
596
}
597
598
- neon_store_reg32(vd, a->vd);
599
+ vfp_store_reg32(vd, a->vd);
600
tcg_temp_free_i32(vd);
601
tcg_temp_free_i32(shift);
602
tcg_temp_free_ptr(fpst);
603
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
604
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
605
606
vd = tcg_temp_new_i32();
607
- neon_load_reg32(vd, a->vd);
608
+ vfp_load_reg32(vd, a->vd);
609
610
fpst = fpstatus_ptr(FPST_FPCR);
611
shift = tcg_const_i32(frac_bits);
612
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
613
g_assert_not_reached();
614
}
615
616
- neon_store_reg32(vd, a->vd);
617
+ vfp_store_reg32(vd, a->vd);
618
tcg_temp_free_i32(vd);
619
tcg_temp_free_i32(shift);
620
tcg_temp_free_ptr(fpst);
621
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
622
623
fpst = fpstatus_ptr(FPST_FPCR_F16);
624
vm = tcg_temp_new_i32();
625
- neon_load_reg32(vm, a->vm);
626
+ vfp_load_reg32(vm, a->vm);
627
628
if (a->s) {
629
if (a->rz) {
630
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
631
gen_helper_vfp_touih(vm, vm, fpst);
632
}
633
}
634
- neon_store_reg32(vm, a->vd);
635
+ vfp_store_reg32(vm, a->vd);
636
tcg_temp_free_i32(vm);
637
tcg_temp_free_ptr(fpst);
638
return true;
639
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
640
641
fpst = fpstatus_ptr(FPST_FPCR);
642
vm = tcg_temp_new_i32();
643
- neon_load_reg32(vm, a->vm);
644
+ vfp_load_reg32(vm, a->vm);
645
646
if (a->s) {
647
if (a->rz) {
648
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
649
gen_helper_vfp_touis(vm, vm, fpst);
650
}
651
}
652
- neon_store_reg32(vm, a->vd);
653
+ vfp_store_reg32(vm, a->vd);
654
tcg_temp_free_i32(vm);
655
tcg_temp_free_ptr(fpst);
656
return true;
657
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
658
gen_helper_vfp_touid(vd, vm, fpst);
659
}
660
}
661
- neon_store_reg32(vd, a->vd);
662
+ vfp_store_reg32(vd, a->vd);
663
tcg_temp_free_i32(vd);
664
tcg_temp_free_i64(vm);
665
tcg_temp_free_ptr(fpst);
666
@@ -XXX,XX +XXX,XX @@ static bool trans_VINS(DisasContext *s, arg_VINS *a)
667
/* Insert low half of Vm into high half of Vd */
668
rm = tcg_temp_new_i32();
669
rd = tcg_temp_new_i32();
670
- neon_load_reg32(rm, a->vm);
671
- neon_load_reg32(rd, a->vd);
672
+ vfp_load_reg32(rm, a->vm);
673
+ vfp_load_reg32(rd, a->vd);
674
tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
675
- neon_store_reg32(rd, a->vd);
676
+ vfp_store_reg32(rd, a->vd);
677
tcg_temp_free_i32(rm);
678
tcg_temp_free_i32(rd);
679
return true;
680
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
681
682
/* Set Vd to high half of Vm */
683
rm = tcg_temp_new_i32();
684
- neon_load_reg32(rm, a->vm);
685
+ vfp_load_reg32(rm, a->vm);
686
tcg_gen_shri_i32(rm, rm, 16);
687
- neon_store_reg32(rm, a->vd);
688
+ vfp_store_reg32(rm, a->vd);
689
tcg_temp_free_i32(rm);
690
return true;
691
}
58
--
2.20.1

We want to use vfp_expand_imm() in the AArch32 VFP decode;
move it from the a64-only header/source file to the
AArch32 one (which is always compiled even for AArch64).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-2-peter.maydell@linaro.org
---
 target/arm/translate-a64.h     |  1 -
 target/arm/translate.h         |  7 +++++++
 target/arm/translate-a64.c     | 32 --------------------------------
 target/arm/translate-vfp.inc.c | 33 +++++++++++++++++++++++++++++++++
 4 files changed, 40 insertions(+), 33 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Replace all uses of neon_load/store_reg64 within translate-neon.c.inc.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-9-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c          | 26 +++++++++
 target/arm/translate-neon.c.inc | 94 ++++++++++++++++-----------------
 2 files changed, 73 insertions(+), 47 deletions(-)

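Since the first patch is about where vfp_expand_imm() lives rather than what it does, a quick worked example may help (not part of the patch; it follows the MO_32 case of the function as it appears in the hunks below):

    /*
     * For a single-precision immediate with imm8 = 0x70:
     *   bit 7 = 0, bit 6 = 1, bits [5:0] = 0x30
     *   imm = 0x3e00 | (0x30 << 3) = 0x3f80, then imm <<= 16 gives 0x3f800000
     * which is the IEEE-754 encoding of 1.0f, so
     * vfp_expand_imm(MO_32, 0x70) returns 0x3f800000.
     */
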
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
16
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
17
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-a64.h
16
--- a/target/arm/translate.c
19
+++ b/target/arm/translate-a64.h
17
+++ b/target/arm/translate.c
20
@@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
18
@@ -XXX,XX +XXX,XX @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
21
TCGv_ptr get_fpstatus_ptr(bool);
22
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
23
unsigned int imms, unsigned int immr);
24
-uint64_t vfp_expand_imm(int size, uint8_t imm8);
25
bool sve_access_check(DisasContext *s);
26
27
/* We should have at some point before trying to access an FP register
28
diff --git a/target/arm/translate.h b/target/arm/translate.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate.h
31
+++ b/target/arm/translate.h
32
@@ -XXX,XX +XXX,XX @@ static inline void gen_ss_advance(DisasContext *s)
33
}
19
}
34
}
20
}
35
21
36
+/*
22
+static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
37
+ * Given a VFP floating point constant encoded into an 8 bit immediate in an
23
+{
38
+ * instruction, expand it to the actual constant value of the specified
24
+ long off = neon_element_offset(reg, ele, memop);
39
+ * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
40
+ */
41
+uint64_t vfp_expand_imm(int size, uint8_t imm8);
42
+
25
+
43
/* Vector operations shared between ARM and AArch64. */
26
+ switch (memop) {
44
extern const GVecGen3 mla_op[4];
27
+ case MO_Q:
45
extern const GVecGen3 mls_op[4];
28
+ tcg_gen_ld_i64(dest, cpu_env, off);
46
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/translate-a64.c
49
+++ b/target/arm/translate-a64.c
50
@@ -XXX,XX +XXX,XX @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
51
}
52
}
53
54
-/* The imm8 encodes the sign bit, enough bits to represent an exponent in
55
- * the range 01....1xx to 10....0xx, and the most significant 4 bits of
56
- * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
57
- */
58
-uint64_t vfp_expand_imm(int size, uint8_t imm8)
59
-{
60
- uint64_t imm;
61
-
62
- switch (size) {
63
- case MO_64:
64
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
65
- (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
66
- extract32(imm8, 0, 6);
67
- imm <<= 48;
68
- break;
69
- case MO_32:
70
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
71
- (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
72
- (extract32(imm8, 0, 6) << 3);
73
- imm <<= 16;
74
- break;
75
- case MO_16:
76
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
77
- (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
78
- (extract32(imm8, 0, 6) << 6);
79
- break;
80
- default:
81
- g_assert_not_reached();
82
- }
83
- return imm;
84
-}
85
-
86
/* Floating point immediate
87
* 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
88
* +---+---+---+-----------+------+---+------------+-------+------+------+
89
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/arm/translate-vfp.inc.c
92
+++ b/target/arm/translate-vfp.inc.c
93
@@ -XXX,XX +XXX,XX @@
94
#include "decode-vfp.inc.c"
95
#include "decode-vfp-uncond.inc.c"
96
97
+/*
98
+ * The imm8 encodes the sign bit, enough bits to represent an exponent in
99
+ * the range 01....1xx to 10....0xx, and the most significant 4 bits of
100
+ * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
101
+ */
102
+uint64_t vfp_expand_imm(int size, uint8_t imm8)
103
+{
104
+ uint64_t imm;
105
+
106
+ switch (size) {
107
+ case MO_64:
108
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
109
+ (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
110
+ extract32(imm8, 0, 6);
111
+ imm <<= 48;
112
+ break;
113
+ case MO_32:
114
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
115
+ (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
116
+ (extract32(imm8, 0, 6) << 3);
117
+ imm <<= 16;
118
+ break;
119
+ case MO_16:
120
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
121
+ (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
122
+ (extract32(imm8, 0, 6) << 6);
123
+ break;
29
+ break;
124
+ default:
30
+ default:
125
+ g_assert_not_reached();
31
+ g_assert_not_reached();
126
+ }
32
+ }
127
+ return imm;
128
+}
33
+}
129
+
34
+
130
/*
35
static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
131
* Return the offset of a 16-bit half of the specified VFP single-precision
36
{
132
* register. If top is true, returns the top 16 bits; otherwise the bottom
37
long off = neon_element_offset(reg, ele, memop);
38
@@ -XXX,XX +XXX,XX @@ static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
39
}
40
}
41
42
+static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
43
+{
44
+ long off = neon_element_offset(reg, ele, memop);
45
+
46
+ switch (memop) {
47
+ case MO_64:
48
+ tcg_gen_st_i64(src, cpu_env, off);
49
+ break;
50
+ default:
51
+ g_assert_not_reached();
52
+ }
53
+}
54
+
55
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
56
{
57
TCGv_ptr ret = tcg_temp_new_ptr();
58
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/translate-neon.c.inc
61
+++ b/target/arm/translate-neon.c.inc
62
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
63
for (pass = 0; pass < a->q + 1; pass++) {
64
TCGv_i64 tmp = tcg_temp_new_i64();
65
66
- neon_load_reg64(tmp, a->vm + pass);
67
+ read_neon_element64(tmp, a->vm, pass, MO_64);
68
fn(tmp, cpu_env, tmp, constimm);
69
- neon_store_reg64(tmp, a->vd + pass);
70
+ write_neon_element64(tmp, a->vd, pass, MO_64);
71
tcg_temp_free_i64(tmp);
72
}
73
tcg_temp_free_i64(constimm);
74
@@ -XXX,XX +XXX,XX @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
75
rd = tcg_temp_new_i32();
76
77
/* Load both inputs first to avoid potential overwrite if rm == rd */
78
- neon_load_reg64(rm1, a->vm);
79
- neon_load_reg64(rm2, a->vm + 1);
80
+ read_neon_element64(rm1, a->vm, 0, MO_64);
81
+ read_neon_element64(rm2, a->vm, 1, MO_64);
82
83
shiftfn(rm1, rm1, constimm);
84
narrowfn(rd, cpu_env, rm1);
85
@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
86
tcg_gen_shli_i64(tmp, tmp, a->shift);
87
tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
88
}
89
- neon_store_reg64(tmp, a->vd);
90
+ write_neon_element64(tmp, a->vd, 0, MO_64);
91
92
widenfn(tmp, rm1);
93
tcg_temp_free_i32(rm1);
94
@@ -XXX,XX +XXX,XX @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
95
tcg_gen_shli_i64(tmp, tmp, a->shift);
96
tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
97
}
98
- neon_store_reg64(tmp, a->vd + 1);
99
+ write_neon_element64(tmp, a->vd, 1, MO_64);
100
tcg_temp_free_i64(tmp);
101
return true;
102
}
103
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
104
rm_64 = tcg_temp_new_i64();
105
106
if (src1_wide) {
107
- neon_load_reg64(rn0_64, a->vn);
108
+ read_neon_element64(rn0_64, a->vn, 0, MO_64);
109
} else {
110
TCGv_i32 tmp = tcg_temp_new_i32();
111
read_neon_element32(tmp, a->vn, 0, MO_32);
112
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
113
* avoid incorrect results if a narrow input overlaps with the result.
114
*/
115
if (src1_wide) {
116
- neon_load_reg64(rn1_64, a->vn + 1);
117
+ read_neon_element64(rn1_64, a->vn, 1, MO_64);
118
} else {
119
TCGv_i32 tmp = tcg_temp_new_i32();
120
read_neon_element32(tmp, a->vn, 1, MO_32);
121
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
122
rm = tcg_temp_new_i32();
123
read_neon_element32(rm, a->vm, 1, MO_32);
124
125
- neon_store_reg64(rn0_64, a->vd);
126
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
127
128
widenfn(rm_64, rm);
129
tcg_temp_free_i32(rm);
130
opfn(rn1_64, rn1_64, rm_64);
131
- neon_store_reg64(rn1_64, a->vd + 1);
132
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
133
134
tcg_temp_free_i64(rn0_64);
135
tcg_temp_free_i64(rn1_64);
136
@@ -XXX,XX +XXX,XX @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
137
rd0 = tcg_temp_new_i32();
138
rd1 = tcg_temp_new_i32();
139
140
- neon_load_reg64(rn_64, a->vn);
141
- neon_load_reg64(rm_64, a->vm);
142
+ read_neon_element64(rn_64, a->vn, 0, MO_64);
143
+ read_neon_element64(rm_64, a->vm, 0, MO_64);
144
145
opfn(rn_64, rn_64, rm_64);
146
147
narrowfn(rd0, rn_64);
148
149
- neon_load_reg64(rn_64, a->vn + 1);
150
- neon_load_reg64(rm_64, a->vm + 1);
151
+ read_neon_element64(rn_64, a->vn, 1, MO_64);
152
+ read_neon_element64(rm_64, a->vm, 1, MO_64);
153
154
opfn(rn_64, rn_64, rm_64);
155
156
@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
157
/* Don't store results until after all loads: they might overlap */
158
if (accfn) {
159
tmp = tcg_temp_new_i64();
160
- neon_load_reg64(tmp, a->vd);
161
+ read_neon_element64(tmp, a->vd, 0, MO_64);
162
accfn(tmp, tmp, rd0);
163
- neon_store_reg64(tmp, a->vd);
164
- neon_load_reg64(tmp, a->vd + 1);
165
+ write_neon_element64(tmp, a->vd, 0, MO_64);
166
+ read_neon_element64(tmp, a->vd, 1, MO_64);
167
accfn(tmp, tmp, rd1);
168
- neon_store_reg64(tmp, a->vd + 1);
169
+ write_neon_element64(tmp, a->vd, 1, MO_64);
170
tcg_temp_free_i64(tmp);
171
} else {
172
- neon_store_reg64(rd0, a->vd);
173
- neon_store_reg64(rd1, a->vd + 1);
174
+ write_neon_element64(rd0, a->vd, 0, MO_64);
175
+ write_neon_element64(rd1, a->vd, 1, MO_64);
176
}
177
178
tcg_temp_free_i64(rd0);
179
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
180
181
if (accfn) {
182
TCGv_i64 t64 = tcg_temp_new_i64();
183
- neon_load_reg64(t64, a->vd);
184
+ read_neon_element64(t64, a->vd, 0, MO_64);
185
accfn(t64, t64, rn0_64);
186
- neon_store_reg64(t64, a->vd);
187
- neon_load_reg64(t64, a->vd + 1);
188
+ write_neon_element64(t64, a->vd, 0, MO_64);
189
+ read_neon_element64(t64, a->vd, 1, MO_64);
190
accfn(t64, t64, rn1_64);
191
- neon_store_reg64(t64, a->vd + 1);
192
+ write_neon_element64(t64, a->vd, 1, MO_64);
193
tcg_temp_free_i64(t64);
194
} else {
195
- neon_store_reg64(rn0_64, a->vd);
196
- neon_store_reg64(rn1_64, a->vd + 1);
197
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
198
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
199
}
200
tcg_temp_free_i64(rn0_64);
201
tcg_temp_free_i64(rn1_64);
202
@@ -XXX,XX +XXX,XX @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
203
right = tcg_temp_new_i64();
204
dest = tcg_temp_new_i64();
205
206
- neon_load_reg64(right, a->vn);
207
- neon_load_reg64(left, a->vm);
208
+ read_neon_element64(right, a->vn, 0, MO_64);
209
+ read_neon_element64(left, a->vm, 0, MO_64);
210
tcg_gen_extract2_i64(dest, right, left, a->imm * 8);
211
- neon_store_reg64(dest, a->vd);
212
+ write_neon_element64(dest, a->vd, 0, MO_64);
213
214
tcg_temp_free_i64(left);
215
tcg_temp_free_i64(right);
216
@@ -XXX,XX +XXX,XX @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
217
destright = tcg_temp_new_i64();
218
219
if (a->imm < 8) {
220
- neon_load_reg64(right, a->vn);
221
- neon_load_reg64(middle, a->vn + 1);
222
+ read_neon_element64(right, a->vn, 0, MO_64);
223
+ read_neon_element64(middle, a->vn, 1, MO_64);
224
tcg_gen_extract2_i64(destright, right, middle, a->imm * 8);
225
- neon_load_reg64(left, a->vm);
226
+ read_neon_element64(left, a->vm, 0, MO_64);
227
tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8);
228
} else {
229
- neon_load_reg64(right, a->vn + 1);
230
- neon_load_reg64(middle, a->vm);
231
+ read_neon_element64(right, a->vn, 1, MO_64);
232
+ read_neon_element64(middle, a->vm, 0, MO_64);
233
tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8);
234
- neon_load_reg64(left, a->vm + 1);
235
+ read_neon_element64(left, a->vm, 1, MO_64);
236
tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8);
237
}
238
239
- neon_store_reg64(destright, a->vd);
240
- neon_store_reg64(destleft, a->vd + 1);
241
+ write_neon_element64(destright, a->vd, 0, MO_64);
242
+ write_neon_element64(destleft, a->vd, 1, MO_64);
243
244
tcg_temp_free_i64(destright);
245
tcg_temp_free_i64(destleft);
246
@@ -XXX,XX +XXX,XX @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
247
248
if (accfn) {
249
TCGv_i64 tmp64 = tcg_temp_new_i64();
250
- neon_load_reg64(tmp64, a->vd + pass);
251
+ read_neon_element64(tmp64, a->vd, pass, MO_64);
252
accfn(rd_64, tmp64, rd_64);
253
tcg_temp_free_i64(tmp64);
254
}
255
- neon_store_reg64(rd_64, a->vd + pass);
256
+ write_neon_element64(rd_64, a->vd, pass, MO_64);
257
tcg_temp_free_i64(rd_64);
258
}
259
return true;
260
@@ -XXX,XX +XXX,XX @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
261
rd0 = tcg_temp_new_i32();
262
rd1 = tcg_temp_new_i32();
263
264
- neon_load_reg64(rm, a->vm);
265
+ read_neon_element64(rm, a->vm, 0, MO_64);
266
narrowfn(rd0, cpu_env, rm);
267
- neon_load_reg64(rm, a->vm + 1);
268
+ read_neon_element64(rm, a->vm, 1, MO_64);
269
narrowfn(rd1, cpu_env, rm);
270
write_neon_element32(rd0, a->vd, 0, MO_32);
271
write_neon_element32(rd1, a->vd, 1, MO_32);
272
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
273
274
widenfn(rd, rm0);
275
tcg_gen_shli_i64(rd, rd, 8 << a->size);
276
- neon_store_reg64(rd, a->vd);
277
+ write_neon_element64(rd, a->vd, 0, MO_64);
278
widenfn(rd, rm1);
279
tcg_gen_shli_i64(rd, rd, 8 << a->size);
280
- neon_store_reg64(rd, a->vd + 1);
281
+ write_neon_element64(rd, a->vd, 1, MO_64);
282
283
tcg_temp_free_i64(rd);
284
tcg_temp_free_i32(rm0);
285
@@ -XXX,XX +XXX,XX @@ static bool trans_VSWP(DisasContext *s, arg_2misc *a)
286
rm = tcg_temp_new_i64();
287
rd = tcg_temp_new_i64();
288
for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
289
- neon_load_reg64(rm, a->vm + pass);
290
- neon_load_reg64(rd, a->vd + pass);
291
- neon_store_reg64(rm, a->vd + pass);
292
- neon_store_reg64(rd, a->vm + pass);
293
+ read_neon_element64(rm, a->vm, pass, MO_64);
294
+ read_neon_element64(rd, a->vd, pass, MO_64);
295
+ write_neon_element64(rm, a->vd, pass, MO_64);
296
+ write_neon_element64(rd, a->vm, pass, MO_64);
297
}
298
tcg_temp_free_i64(rm);
299
tcg_temp_free_i64(rd);
133
--
2.20.1

Switch NEON_2RM_VRINT* away from using cpu_F0s.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-6-peter.maydell@linaro.org
---
 target/arm/translate.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

The only uses of this function are for loading VFP
double-precision values, and have nothing to do with NEON.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201030022618.785675-10-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c         |  8 ++--
 target/arm/translate-vfp.c.inc | 84 +++++++++++++++++-----------------
 2 files changed, 46 insertions(+), 46 deletions(-)

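As a pointer to what the first patch touches, a trimmed sketch of the 2-reg-misc VRINT handling after the conversion (identifiers taken from the hunk below; the rounding-mode setup around it is omitted):

    /* The per-pass value is now rounded in place in 'tmp' rather than
     * being bounced through the old cpu_F0s global.
     */
    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
    gen_helper_rints(tmp, tmp, fpstatus);
    tcg_temp_free_ptr(fpstatus);
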
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
17
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
18
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
19
@@ -XXX,XX +XXX,XX @@ static long vfp_reg_offset(bool dp, unsigned reg)
16
* what we are asking here is "does the code for this case in
20
}
17
* the Neon for-each-pass loop use cpu_F0s?".
18
*/
19
- return ((op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
20
- op == NEON_2RM_VRINTM ||
21
- (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
22
+ return ((op >= NEON_2RM_VCVTAU && op <= NEON_2RM_VCVTMS) ||
23
op >= NEON_2RM_VRECPE_F);
24
}
21
}
25
22
26
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
23
-static inline void neon_load_reg64(TCGv_i64 var, int reg)
27
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
24
+static inline void vfp_load_reg64(TCGv_i64 var, int reg)
28
gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
25
{
29
cpu_env);
26
- tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
30
- gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
27
+ tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
31
+ gen_helper_rints(tmp, tmp, fpstatus);
28
}
32
gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
29
33
cpu_env);
30
-static inline void neon_store_reg64(TCGv_i64 var, int reg)
34
tcg_temp_free_ptr(fpstatus);
31
+static inline void vfp_store_reg64(TCGv_i64 var, int reg)
35
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
32
{
36
case NEON_2RM_VRINTX:
33
- tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
37
{
34
+ tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
38
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
35
}
39
- gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
36
40
+ gen_helper_rints_exact(tmp, tmp, fpstatus);
37
static inline void vfp_load_reg32(TCGv_i32 var, int reg)
41
tcg_temp_free_ptr(fpstatus);
38
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
42
break;
39
index XXXXXXX..XXXXXXX 100644
43
}
40
--- a/target/arm/translate-vfp.c.inc
41
+++ b/target/arm/translate-vfp.c.inc
42
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
43
tcg_gen_ext_i32_i64(nf, cpu_NF);
44
tcg_gen_ext_i32_i64(vf, cpu_VF);
45
46
- neon_load_reg64(frn, rn);
47
- neon_load_reg64(frm, rm);
48
+ vfp_load_reg64(frn, rn);
49
+ vfp_load_reg64(frm, rm);
50
switch (a->cc) {
51
case 0: /* eq: Z */
52
tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
53
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
54
tcg_temp_free_i64(tmp);
55
break;
56
}
57
- neon_store_reg64(dest, rd);
58
+ vfp_store_reg64(dest, rd);
59
tcg_temp_free_i64(frn);
60
tcg_temp_free_i64(frm);
61
tcg_temp_free_i64(dest);
62
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
63
TCGv_i64 tcg_res;
64
tcg_op = tcg_temp_new_i64();
65
tcg_res = tcg_temp_new_i64();
66
- neon_load_reg64(tcg_op, rm);
67
+ vfp_load_reg64(tcg_op, rm);
68
gen_helper_rintd(tcg_res, tcg_op, fpst);
69
- neon_store_reg64(tcg_res, rd);
70
+ vfp_store_reg64(tcg_res, rd);
71
tcg_temp_free_i64(tcg_op);
72
tcg_temp_free_i64(tcg_res);
73
} else {
74
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
75
tcg_double = tcg_temp_new_i64();
76
tcg_res = tcg_temp_new_i64();
77
tcg_tmp = tcg_temp_new_i32();
78
- neon_load_reg64(tcg_double, rm);
79
+ vfp_load_reg64(tcg_double, rm);
80
if (is_signed) {
81
gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
82
} else {
83
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
84
tmp = tcg_temp_new_i64();
85
if (a->l) {
86
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
87
- neon_store_reg64(tmp, a->vd);
88
+ vfp_store_reg64(tmp, a->vd);
89
} else {
90
- neon_load_reg64(tmp, a->vd);
91
+ vfp_load_reg64(tmp, a->vd);
92
gen_aa32_st64(s, tmp, addr, get_mem_index(s));
93
}
94
tcg_temp_free_i64(tmp);
95
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
96
if (a->l) {
97
/* load */
98
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
99
- neon_store_reg64(tmp, a->vd + i);
100
+ vfp_store_reg64(tmp, a->vd + i);
101
} else {
102
/* store */
103
- neon_load_reg64(tmp, a->vd + i);
104
+ vfp_load_reg64(tmp, a->vd + i);
105
gen_aa32_st64(s, tmp, addr, get_mem_index(s));
106
}
107
tcg_gen_addi_i32(addr, addr, offset);
108
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
109
fd = tcg_temp_new_i64();
110
fpst = fpstatus_ptr(FPST_FPCR);
111
112
- neon_load_reg64(f0, vn);
113
- neon_load_reg64(f1, vm);
114
+ vfp_load_reg64(f0, vn);
115
+ vfp_load_reg64(f1, vm);
116
117
for (;;) {
118
if (reads_vd) {
119
- neon_load_reg64(fd, vd);
120
+ vfp_load_reg64(fd, vd);
121
}
122
fn(fd, f0, f1, fpst);
123
- neon_store_reg64(fd, vd);
124
+ vfp_store_reg64(fd, vd);
125
126
if (veclen == 0) {
127
break;
128
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
129
veclen--;
130
vd = vfp_advance_dreg(vd, delta_d);
131
vn = vfp_advance_dreg(vn, delta_d);
132
- neon_load_reg64(f0, vn);
133
+ vfp_load_reg64(f0, vn);
134
if (delta_m) {
135
vm = vfp_advance_dreg(vm, delta_m);
136
- neon_load_reg64(f1, vm);
137
+ vfp_load_reg64(f1, vm);
138
}
139
}
140
141
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
142
f0 = tcg_temp_new_i64();
143
fd = tcg_temp_new_i64();
144
145
- neon_load_reg64(f0, vm);
146
+ vfp_load_reg64(f0, vm);
147
148
for (;;) {
149
fn(fd, f0);
150
- neon_store_reg64(fd, vd);
151
+ vfp_store_reg64(fd, vd);
152
153
if (veclen == 0) {
154
break;
155
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
156
/* single source one-many */
157
while (veclen--) {
158
vd = vfp_advance_dreg(vd, delta_d);
159
- neon_store_reg64(fd, vd);
160
+ vfp_store_reg64(fd, vd);
161
}
162
break;
163
}
164
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
165
veclen--;
166
vd = vfp_advance_dreg(vd, delta_d);
167
vd = vfp_advance_dreg(vm, delta_m);
168
- neon_load_reg64(f0, vm);
169
+ vfp_load_reg64(f0, vm);
170
}
171
172
tcg_temp_free_i64(f0);
173
@@ -XXX,XX +XXX,XX @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
174
vm = tcg_temp_new_i64();
175
vd = tcg_temp_new_i64();
176
177
- neon_load_reg64(vn, a->vn);
178
- neon_load_reg64(vm, a->vm);
179
+ vfp_load_reg64(vn, a->vn);
180
+ vfp_load_reg64(vm, a->vm);
181
if (neg_n) {
182
/* VFNMS, VFMS */
183
gen_helper_vfp_negd(vn, vn);
184
}
185
- neon_load_reg64(vd, a->vd);
186
+ vfp_load_reg64(vd, a->vd);
187
if (neg_d) {
188
/* VFNMA, VFNMS */
189
gen_helper_vfp_negd(vd, vd);
190
}
191
fpst = fpstatus_ptr(FPST_FPCR);
192
gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
193
- neon_store_reg64(vd, a->vd);
194
+ vfp_store_reg64(vd, a->vd);
195
196
tcg_temp_free_ptr(fpst);
197
tcg_temp_free_i64(vn);
198
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
199
fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
200
201
for (;;) {
202
- neon_store_reg64(fd, vd);
203
+ vfp_store_reg64(fd, vd);
204
205
if (veclen == 0) {
206
break;
207
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
208
vd = tcg_temp_new_i64();
209
vm = tcg_temp_new_i64();
210
211
- neon_load_reg64(vd, a->vd);
212
+ vfp_load_reg64(vd, a->vd);
213
if (a->z) {
214
tcg_gen_movi_i64(vm, 0);
215
} else {
216
- neon_load_reg64(vm, a->vm);
217
+ vfp_load_reg64(vm, a->vm);
218
}
219
220
if (a->e) {
221
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
222
tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
223
vd = tcg_temp_new_i64();
224
gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
225
- neon_store_reg64(vd, a->vd);
226
+ vfp_store_reg64(vd, a->vd);
227
tcg_temp_free_i32(ahp_mode);
228
tcg_temp_free_ptr(fpst);
229
tcg_temp_free_i32(tmp);
230
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
231
tmp = tcg_temp_new_i32();
232
vm = tcg_temp_new_i64();
233
234
- neon_load_reg64(vm, a->vm);
235
+ vfp_load_reg64(vm, a->vm);
236
gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
237
tcg_temp_free_i64(vm);
238
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
239
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
240
}
241
242
tmp = tcg_temp_new_i64();
243
- neon_load_reg64(tmp, a->vm);
244
+ vfp_load_reg64(tmp, a->vm);
245
fpst = fpstatus_ptr(FPST_FPCR);
246
gen_helper_rintd(tmp, tmp, fpst);
247
- neon_store_reg64(tmp, a->vd);
248
+ vfp_store_reg64(tmp, a->vd);
249
tcg_temp_free_ptr(fpst);
250
tcg_temp_free_i64(tmp);
251
return true;
252
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
253
}
254
255
tmp = tcg_temp_new_i64();
256
- neon_load_reg64(tmp, a->vm);
257
+ vfp_load_reg64(tmp, a->vm);
258
fpst = fpstatus_ptr(FPST_FPCR);
259
tcg_rmode = tcg_const_i32(float_round_to_zero);
260
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
261
gen_helper_rintd(tmp, tmp, fpst);
262
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
263
- neon_store_reg64(tmp, a->vd);
264
+ vfp_store_reg64(tmp, a->vd);
265
tcg_temp_free_ptr(fpst);
266
tcg_temp_free_i64(tmp);
267
tcg_temp_free_i32(tcg_rmode);
268
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
269
}
270
271
tmp = tcg_temp_new_i64();
272
- neon_load_reg64(tmp, a->vm);
273
+ vfp_load_reg64(tmp, a->vm);
274
fpst = fpstatus_ptr(FPST_FPCR);
275
gen_helper_rintd_exact(tmp, tmp, fpst);
276
- neon_store_reg64(tmp, a->vd);
277
+ vfp_store_reg64(tmp, a->vd);
278
tcg_temp_free_ptr(fpst);
279
tcg_temp_free_i64(tmp);
280
return true;
281
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
282
vd = tcg_temp_new_i64();
283
vfp_load_reg32(vm, a->vm);
284
gen_helper_vfp_fcvtds(vd, vm, cpu_env);
285
- neon_store_reg64(vd, a->vd);
286
+ vfp_store_reg64(vd, a->vd);
287
tcg_temp_free_i32(vm);
288
tcg_temp_free_i64(vd);
289
return true;
290
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
291
292
vd = tcg_temp_new_i32();
293
vm = tcg_temp_new_i64();
294
- neon_load_reg64(vm, a->vm);
295
+ vfp_load_reg64(vm, a->vm);
296
gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
297
vfp_store_reg32(vd, a->vd);
298
tcg_temp_free_i32(vd);
299
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
300
/* u32 -> f64 */
301
gen_helper_vfp_uitod(vd, vm, fpst);
302
}
303
- neon_store_reg64(vd, a->vd);
304
+ vfp_store_reg64(vd, a->vd);
305
tcg_temp_free_i32(vm);
306
tcg_temp_free_i64(vd);
307
tcg_temp_free_ptr(fpst);
308
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
309
310
vm = tcg_temp_new_i64();
311
vd = tcg_temp_new_i32();
312
- neon_load_reg64(vm, a->vm);
313
+ vfp_load_reg64(vm, a->vm);
314
gen_helper_vjcvt(vd, vm, cpu_env);
315
vfp_store_reg32(vd, a->vd);
316
tcg_temp_free_i64(vm);
317
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
318
frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
319
320
vd = tcg_temp_new_i64();
321
- neon_load_reg64(vd, a->vd);
322
+ vfp_load_reg64(vd, a->vd);
323
324
fpst = fpstatus_ptr(FPST_FPCR);
325
shift = tcg_const_i32(frac_bits);
326
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
327
g_assert_not_reached();
328
}
329
330
- neon_store_reg64(vd, a->vd);
331
+ vfp_store_reg64(vd, a->vd);
332
tcg_temp_free_i64(vd);
333
tcg_temp_free_i32(shift);
334
tcg_temp_free_ptr(fpst);
335
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
336
fpst = fpstatus_ptr(FPST_FPCR);
337
vm = tcg_temp_new_i64();
338
vd = tcg_temp_new_i32();
339
- neon_load_reg64(vm, a->vm);
340
+ vfp_load_reg64(vm, a->vm);
341
342
if (a->s) {
343
if (a->rz) {
44
--
2.20.1
1
We currently put the initrd at the smaller of:
 * 128MB into RAM
 * halfway into the RAM
(with the dtb following it).

However for large kernels this might mean that the kernel
overlaps the initrd. For some kinds of kernel (self-decompressing
32-bit kernels, and ELF images with a BSS section at the end)
we don't know the exact size, but even there we have a
minimum size. Put the initrd at least further into RAM than
that. For image formats that can give us an exact kernel size, this
will mean that we definitely avoid overlaying kernel and initrd.

From: Richard Henderson <richard.henderson@linaro.org>

In both cases, we can sink the write-back and perform
the accumulate into the normal destination temps.
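
As an illustration of the initrd placement rule described in the boot.c
message above (not part of the patch; a hypothetical standalone helper,
whereas the real code in hw/arm/boot.c also page-aligns the result and
checks it against the end of RAM):

    #include <stdint.h>

    #define MIB        (1024 * 1024)
    #define MIN(a, b)  ((a) < (b) ? (a) : (b))
    #define MAX(a, b)  ((a) > (b) ? (a) : (b))

    /*
     * Put the initrd at least 128MB (or half of RAM on small boards) past
     * the start of RAM, but never below the end of the loaded kernel.
     */
    static uint64_t pick_initrd_start(uint64_t loader_start, uint64_t ram_size,
                                      uint64_t kernel_size)
    {
        return loader_start + MAX(MIN(ram_size / 2, 128 * MIB), kernel_size);
    }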
13
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20201030022618.785675-11-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
16
Tested-by: Mark Rutland <mark.rutland@arm.com>
17
Message-id: 20190516144733.32399-4-peter.maydell@linaro.org
18
---
10
---
19
hw/arm/boot.c | 34 ++++++++++++++++++++--------------
11
target/arm/translate-neon.c.inc | 23 +++++++++--------------
20
1 file changed, 20 insertions(+), 14 deletions(-)
12
1 file changed, 9 insertions(+), 14 deletions(-)
21
13
22
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
14
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
23
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
24
--- a/hw/arm/boot.c
16
--- a/target/arm/translate-neon.c.inc
25
+++ b/hw/arm/boot.c
17
+++ b/target/arm/translate-neon.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
18
@@ -XXX,XX +XXX,XX @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
27
if (info->nb_cpus == 0)
19
if (accfn) {
28
info->nb_cpus = 1;
20
tmp = tcg_temp_new_i64();
29
21
read_neon_element64(tmp, a->vd, 0, MO_64);
30
- /*
22
- accfn(tmp, tmp, rd0);
31
- * We want to put the initrd far enough into RAM that when the
23
- write_neon_element64(tmp, a->vd, 0, MO_64);
32
- * kernel is uncompressed it will not clobber the initrd. However
24
+ accfn(rd0, tmp, rd0);
33
- * on boards without much RAM we must ensure that we still leave
25
read_neon_element64(tmp, a->vd, 1, MO_64);
34
- * enough room for a decent sized initrd, and on boards with large
26
- accfn(tmp, tmp, rd1);
35
- * amounts of RAM we must avoid the initrd being so far up in RAM
27
- write_neon_element64(tmp, a->vd, 1, MO_64);
36
- * that it is outside lowmem and inaccessible to the kernel.
28
+ accfn(rd1, tmp, rd1);
37
- * So for boards with less than 256MB of RAM we put the initrd
29
tcg_temp_free_i64(tmp);
38
- * halfway into RAM, and for boards with 256MB of RAM or more we put
30
- } else {
39
- * the initrd at 128MB.
31
- write_neon_element64(rd0, a->vd, 0, MO_64);
40
- */
32
- write_neon_element64(rd1, a->vd, 1, MO_64);
41
- info->initrd_start = info->loader_start +
42
- MIN(info->ram_size / 2, 128 * 1024 * 1024);
43
-
44
/* Assume that raw images are linux kernels, and ELF images are not. */
45
kernel_size = arm_load_elf(info, &elf_entry, &elf_low_addr,
46
&elf_high_addr, elf_machine, as);
47
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
48
}
33
}
49
34
50
info->entry = entry;
35
+ write_neon_element64(rd0, a->vd, 0, MO_64);
36
+ write_neon_element64(rd1, a->vd, 1, MO_64);
37
tcg_temp_free_i64(rd0);
38
tcg_temp_free_i64(rd1);
39
40
@@ -XXX,XX +XXX,XX @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
41
if (accfn) {
42
TCGv_i64 t64 = tcg_temp_new_i64();
43
read_neon_element64(t64, a->vd, 0, MO_64);
44
- accfn(t64, t64, rn0_64);
45
- write_neon_element64(t64, a->vd, 0, MO_64);
46
+ accfn(rn0_64, t64, rn0_64);
47
read_neon_element64(t64, a->vd, 1, MO_64);
48
- accfn(t64, t64, rn1_64);
49
- write_neon_element64(t64, a->vd, 1, MO_64);
50
+ accfn(rn1_64, t64, rn1_64);
51
tcg_temp_free_i64(t64);
52
- } else {
53
- write_neon_element64(rn0_64, a->vd, 0, MO_64);
54
- write_neon_element64(rn1_64, a->vd, 1, MO_64);
55
}
51
+
56
+
52
+ /*
57
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
53
+ * We want to put the initrd far enough into RAM that when the
58
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
54
+ * kernel is uncompressed it will not clobber the initrd. However
59
tcg_temp_free_i64(rn0_64);
55
+ * on boards without much RAM we must ensure that we still leave
60
tcg_temp_free_i64(rn1_64);
56
+ * enough room for a decent sized initrd, and on boards with large
61
return true;
57
+ * amounts of RAM we must avoid the initrd being so far up in RAM
58
+ * that it is outside lowmem and inaccessible to the kernel.
59
+ * So for boards with less than 256MB of RAM we put the initrd
60
+ * halfway into RAM, and for boards with 256MB of RAM or more we put
61
+ * the initrd at 128MB.
62
+ * We also refuse to put the initrd somewhere that will definitely
63
+ * overlay the kernel we just loaded, though for kernel formats which
64
+ * don't tell us their exact size (eg self-decompressing 32-bit kernels)
65
+ * we might still make a bad choice here.
66
+ */
67
+ info->initrd_start = info->loader_start +
68
+ MAX(MIN(info->ram_size / 2, 128 * 1024 * 1024), kernel_size);
69
+ info->initrd_start = TARGET_PAGE_ALIGN(info->initrd_start);
70
+
71
if (is_linux) {
72
uint32_t fixupcontext[FIXUP_MAX];
73
74
--
62
--
75
2.20.1
63
2.20.1
76
64
77
65
1
Remove some old constructs from NEON_2RM_VCVT_F16_F32 code:
1
From: Richard Henderson <richard.henderson@linaro.org>
2
* don't use cpu_F0s
3
* don't use tcg_gen_ld_f32
4
2
3
We can use proper widening loads to extend 32-bit inputs,
4
and skip the "widenfn" step.
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20201030022618.785675-12-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-id: 20190613163917.28589-11-peter.maydell@linaro.org
9
---
10
---
10
target/arm/translate.c | 27 ++++++++++++---------------
11
target/arm/translate.c | 6 +++
11
1 file changed, 12 insertions(+), 15 deletions(-)
12
target/arm/translate-neon.c.inc | 66 ++++++++++++++++++---------------
13
2 files changed, 43 insertions(+), 29 deletions(-)
12
14
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
17
--- a/target/arm/translate.c
16
+++ b/target/arm/translate.c
18
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
19
@@ -XXX,XX +XXX,XX @@ static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
18
return ret;
20
long off = neon_element_offset(reg, ele, memop);
21
22
switch (memop) {
23
+ case MO_SL:
24
+ tcg_gen_ld32s_i64(dest, cpu_env, off);
25
+ break;
26
+ case MO_UL:
27
+ tcg_gen_ld32u_i64(dest, cpu_env, off);
28
+ break;
29
case MO_Q:
30
tcg_gen_ld_i64(dest, cpu_env, off);
31
break;
32
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/translate-neon.c.inc
35
+++ b/target/arm/translate-neon.c.inc
36
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
37
static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
38
NeonGenWidenFn *widenfn,
39
NeonGenTwo64OpFn *opfn,
40
- bool src1_wide)
41
+ int src1_mop, int src2_mop)
42
{
43
/* 3-regs different lengths, prewidening case (VADDL/VSUBL/VAADW/VSUBW) */
44
TCGv_i64 rn0_64, rn1_64, rm_64;
45
- TCGv_i32 rm;
46
47
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
48
return false;
49
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
50
return false;
51
}
52
53
- if (!widenfn || !opfn) {
54
+ if (!opfn) {
55
/* size == 3 case, which is an entirely different insn group */
56
return false;
57
}
58
59
- if ((a->vd & 1) || (src1_wide && (a->vn & 1))) {
60
+ if ((a->vd & 1) || (src1_mop == MO_Q && (a->vn & 1))) {
61
return false;
62
}
63
64
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
65
rn1_64 = tcg_temp_new_i64();
66
rm_64 = tcg_temp_new_i64();
67
68
- if (src1_wide) {
69
- read_neon_element64(rn0_64, a->vn, 0, MO_64);
70
+ if (src1_mop >= 0) {
71
+ read_neon_element64(rn0_64, a->vn, 0, src1_mop);
72
} else {
73
TCGv_i32 tmp = tcg_temp_new_i32();
74
read_neon_element32(tmp, a->vn, 0, MO_32);
75
widenfn(rn0_64, tmp);
76
tcg_temp_free_i32(tmp);
77
}
78
- rm = tcg_temp_new_i32();
79
- read_neon_element32(rm, a->vm, 0, MO_32);
80
+ if (src2_mop >= 0) {
81
+ read_neon_element64(rm_64, a->vm, 0, src2_mop);
82
+ } else {
83
+ TCGv_i32 tmp = tcg_temp_new_i32();
84
+ read_neon_element32(tmp, a->vm, 0, MO_32);
85
+ widenfn(rm_64, tmp);
86
+ tcg_temp_free_i32(tmp);
87
+ }
88
89
- widenfn(rm_64, rm);
90
- tcg_temp_free_i32(rm);
91
opfn(rn0_64, rn0_64, rm_64);
92
93
/*
94
* Load second pass inputs before storing the first pass result, to
95
* avoid incorrect results if a narrow input overlaps with the result.
96
*/
97
- if (src1_wide) {
98
- read_neon_element64(rn1_64, a->vn, 1, MO_64);
99
+ if (src1_mop >= 0) {
100
+ read_neon_element64(rn1_64, a->vn, 1, src1_mop);
101
} else {
102
TCGv_i32 tmp = tcg_temp_new_i32();
103
read_neon_element32(tmp, a->vn, 1, MO_32);
104
widenfn(rn1_64, tmp);
105
tcg_temp_free_i32(tmp);
106
}
107
- rm = tcg_temp_new_i32();
108
- read_neon_element32(rm, a->vm, 1, MO_32);
109
+ if (src2_mop >= 0) {
110
+ read_neon_element64(rm_64, a->vm, 1, src2_mop);
111
+ } else {
112
+ TCGv_i32 tmp = tcg_temp_new_i32();
113
+ read_neon_element32(tmp, a->vm, 1, MO_32);
114
+ widenfn(rm_64, tmp);
115
+ tcg_temp_free_i32(tmp);
116
+ }
117
118
write_neon_element64(rn0_64, a->vd, 0, MO_64);
119
120
- widenfn(rm_64, rm);
121
- tcg_temp_free_i32(rm);
122
opfn(rn1_64, rn1_64, rm_64);
123
write_neon_element64(rn1_64, a->vd, 1, MO_64);
124
125
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
126
return true;
19
}
127
}
20
128
21
-#define tcg_gen_ld_f32 tcg_gen_ld_i32
129
-#define DO_PREWIDEN(INSN, S, EXT, OP, SRC1WIDE) \
22
#define tcg_gen_st_f32 tcg_gen_st_i32
130
+#define DO_PREWIDEN(INSN, S, OP, SRC1WIDE, SIGN) \
23
131
static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
24
#define ARM_CP_RW_BIT (1 << 20)
132
{ \
25
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
133
static NeonGenWidenFn * const widenfn[] = { \
26
q || (rm & 1)) {
134
gen_helper_neon_widen_##S##8, \
27
return 1;
135
gen_helper_neon_widen_##S##16, \
28
}
136
- tcg_gen_##EXT##_i32_i64, \
29
- tmp = tcg_temp_new_i32();
137
- NULL, \
30
- tmp2 = tcg_temp_new_i32();
138
+ NULL, NULL, \
31
fpst = get_fpstatus_ptr(true);
139
}; \
32
ahp = get_ahp_flag();
140
static NeonGenTwo64OpFn * const addfn[] = { \
33
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
141
gen_helper_neon_##OP##l_u16, \
34
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
142
@@ -XXX,XX +XXX,XX @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
35
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
143
tcg_gen_##OP##_i64, \
36
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
144
NULL, \
37
+ tmp = neon_load_reg(rm, 0);
145
}; \
38
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
146
- return do_prewiden_3d(s, a, widenfn[a->size], \
39
+ tmp2 = neon_load_reg(rm, 1);
147
- addfn[a->size], SRC1WIDE); \
40
+ gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
148
+ int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \
41
tcg_gen_shli_i32(tmp2, tmp2, 16);
149
+ return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \
42
tcg_gen_or_i32(tmp2, tmp2, tmp);
150
+ SRC1WIDE ? MO_Q : narrow_mop, \
43
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
151
+ narrow_mop); \
44
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
152
}
45
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
153
46
+ tcg_temp_free_i32(tmp);
154
-DO_PREWIDEN(VADDL_S, s, ext, add, false)
47
+ tmp = neon_load_reg(rm, 2);
155
-DO_PREWIDEN(VADDL_U, u, extu, add, false)
48
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
156
-DO_PREWIDEN(VSUBL_S, s, ext, sub, false)
49
+ tmp3 = neon_load_reg(rm, 3);
157
-DO_PREWIDEN(VSUBL_U, u, extu, sub, false)
50
neon_store_reg(rd, 0, tmp2);
158
-DO_PREWIDEN(VADDW_S, s, ext, add, true)
51
- tmp2 = tcg_temp_new_i32();
159
-DO_PREWIDEN(VADDW_U, u, extu, add, true)
52
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
160
-DO_PREWIDEN(VSUBW_S, s, ext, sub, true)
53
- tcg_gen_shli_i32(tmp2, tmp2, 16);
161
-DO_PREWIDEN(VSUBW_U, u, extu, sub, true)
54
- tcg_gen_or_i32(tmp2, tmp2, tmp);
162
+DO_PREWIDEN(VADDL_S, s, add, false, MO_SIGN)
55
- neon_store_reg(rd, 1, tmp2);
163
+DO_PREWIDEN(VADDL_U, u, add, false, 0)
56
+ gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
164
+DO_PREWIDEN(VSUBL_S, s, sub, false, MO_SIGN)
57
+ tcg_gen_shli_i32(tmp3, tmp3, 16);
165
+DO_PREWIDEN(VSUBL_U, u, sub, false, 0)
58
+ tcg_gen_or_i32(tmp3, tmp3, tmp);
166
+DO_PREWIDEN(VADDW_S, s, add, true, MO_SIGN)
59
+ neon_store_reg(rd, 1, tmp3);
167
+DO_PREWIDEN(VADDW_U, u, add, true, 0)
60
tcg_temp_free_i32(tmp);
168
+DO_PREWIDEN(VSUBW_S, s, sub, true, MO_SIGN)
61
tcg_temp_free_i32(ahp);
169
+DO_PREWIDEN(VSUBW_U, u, sub, true, 0)
62
tcg_temp_free_ptr(fpst);
170
171
static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
172
NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
63
--
173
--
64
2.20.1
174
2.20.1
65
175
66
176
1
The AArch32 VMOV (immediate) instruction uses the same VFP encoded
1
In the neon_padd/pmax/pmin helpers for float16, a cut-and-paste error
2
immediate format we already handle in vfp_expand_imm(). Use that
2
meant we were using the H4() address swizzler macro rather than the
3
function rather than hand-decoding it.
3
H2() which is required for 2-byte data. This had no effect on
4
little-endian hosts but meant we put the result data into the
5
destination Dreg in the wrong order on big-endian hosts.
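
As an aside (not part of the patch): a simplified sketch of what these
host-endianness swizzlers look like, assuming the usual QEMU-style
definitions. On a big-endian host the elements within each 64-bit chunk
are stored in reverse order, so the element index has to be flipped, and
the width of the flip depends on the element size, which is why 16-bit
data needs H2() rather than H4():

    #ifdef HOST_WORDS_BIGENDIAN
    #define H2(x)  ((x) ^ 3)   /* index of a 16-bit element within 64 bits */
    #define H4(x)  ((x) ^ 1)   /* index of a 32-bit element within 64 bits */
    #else
    #define H2(x)  (x)         /* little-endian host: identity mapping */
    #define H4(x)  (x)
    #endif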
4
6
5
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Message-id: 20190613163917.28589-3-peter.maydell@linaro.org
10
Message-id: 20201028191712.4910-2-peter.maydell@linaro.org
10
---
11
---
11
target/arm/translate-vfp.inc.c | 28 ++++------------------------
12
target/arm/vec_helper.c | 8 ++++----
12
target/arm/vfp.decode | 10 ++++++----
13
1 file changed, 4 insertions(+), 4 deletions(-)
13
2 files changed, 10 insertions(+), 28 deletions(-)
14
14
15
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
15
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-vfp.inc.c
17
--- a/target/arm/vec_helper.c
18
+++ b/target/arm/translate-vfp.inc.c
18
+++ b/target/arm/vec_helper.c
19
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
19
@@ -XXX,XX +XXX,XX @@ DO_ABA(gvec_uaba_d, uint64_t)
20
uint32_t delta_d = 0;
20
r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst); \
21
int veclen = s->vec_len;
21
r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst); \
22
TCGv_i32 fd;
22
\
23
- uint32_t n, i, vd;
23
- d[H4(0)] = r0; \
24
+ uint32_t vd;
24
- d[H4(1)] = r1; \
25
25
- d[H4(2)] = r2; \
26
vd = a->vd;
26
- d[H4(3)] = r3; \
27
27
+ d[H2(0)] = r0; \
28
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
28
+ d[H2(1)] = r1; \
29
}
29
+ d[H2(2)] = r2; \
30
+ d[H2(3)] = r3; \
30
}
31
}
31
32
32
- n = (a->imm4h << 28) & 0x80000000;
33
DO_NEON_PAIRWISE(neon_padd, add)
33
- i = ((a->imm4h << 4) & 0x70) | a->imm4l;
34
- if (i & 0x40) {
35
- i |= 0x780;
36
- } else {
37
- i |= 0x800;
38
- }
39
- n |= i << 19;
40
-
41
- fd = tcg_temp_new_i32();
42
- tcg_gen_movi_i32(fd, n);
43
+ fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
44
45
for (;;) {
46
neon_store_reg32(fd, vd);
47
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
48
uint32_t delta_d = 0;
49
int veclen = s->vec_len;
50
TCGv_i64 fd;
51
- uint32_t n, i, vd;
52
+ uint32_t vd;
53
54
vd = a->vd;
55
56
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
57
}
58
}
59
60
- n = (a->imm4h << 28) & 0x80000000;
61
- i = ((a->imm4h << 4) & 0x70) | a->imm4l;
62
- if (i & 0x40) {
63
- i |= 0x3f80;
64
- } else {
65
- i |= 0x4000;
66
- }
67
- n |= i << 16;
68
-
69
- fd = tcg_temp_new_i64();
70
- tcg_gen_movi_i64(fd, ((uint64_t)n) << 32);
71
+ fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
72
73
for (;;) {
74
neon_store_reg64(fd, vd);
75
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
76
index XXXXXXX..XXXXXXX 100644
77
--- a/target/arm/vfp.decode
78
+++ b/target/arm/vfp.decode
79
@@ -XXX,XX +XXX,XX @@
80
%vmov_idx_b 21:1 5:2
81
%vmov_idx_h 21:1 6:1
82
83
+%vmov_imm 16:4 0:4
84
+
85
# VMOV scalar to general-purpose register; note that this does
86
# include some Neon cases.
87
VMOV_to_gp ---- 1110 u:1 1. 1 .... rt:4 1011 ... 1 0000 \
88
@@ -XXX,XX +XXX,XX @@ VFM_sp ---- 1110 1.10 .... .... 1010 . o2:1 . 0 .... \
89
VFM_dp ---- 1110 1.10 .... .... 1011 . o2:1 . 0 .... \
90
vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=2
91
92
-VMOV_imm_sp ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \
93
- vd=%vd_sp
94
-VMOV_imm_dp ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \
95
- vd=%vd_dp
96
+VMOV_imm_sp ---- 1110 1.11 .... .... 1010 0000 .... \
97
+ vd=%vd_sp imm=%vmov_imm
98
+VMOV_imm_dp ---- 1110 1.11 .... .... 1011 0000 .... \
99
+ vd=%vd_dp imm=%vmov_imm
100
101
VMOV_reg_sp ---- 1110 1.11 0000 .... 1010 01.0 .... \
102
vd=%vd_sp vm=%vm_sp
103
--
34
--
104
2.20.1
35
2.20.1
105
36
106
37
1
We calculate the locations in memory where we want to put the
1
The helper functions for performing the udot/sdot operations against
2
initrd and the DTB based on the size of the kernel, since they
2
a scalar were not using an address-swizzling macro when converting
3
come after it. Add some explicit checks that these aren't off the
3
the index of the scalar element into a pointer into the vm array.
4
end of RAM entirely.
4
This had no effect on little-endian hosts but meant we generated
5
incorrect results on big-endian hosts.
5
6
6
(At the moment the way we calculate the initrd_start means that
7
For these insns, the index is indexing over group of 4 8-bit values,
7
it can't ever be off the end of RAM, but that will change with
8
so 32 bits per indexed entity, and H4() is therefore what we want.
8
the next commit.)
9
(For Neon the only possible input indexes are 0 and 1.)
9
10
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Tested-by: Mark Rutland <mark.rutland@arm.com>
13
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
13
Message-id: 20190516144733.32399-3-peter.maydell@linaro.org
14
Message-id: 20201028191712.4910-3-peter.maydell@linaro.org
14
---
15
---
15
hw/arm/boot.c | 23 +++++++++++++++++++++++
16
target/arm/vec_helper.c | 4 ++--
16
1 file changed, 23 insertions(+)
17
1 file changed, 2 insertions(+), 2 deletions(-)
17
18
18
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
19
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
19
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/arm/boot.c
21
--- a/target/arm/vec_helper.c
21
+++ b/hw/arm/boot.c
22
+++ b/target/arm/vec_helper.c
22
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
23
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
23
error_report("could not load kernel '%s'", info->kernel_filename);
24
intptr_t index = simd_data(desc);
24
exit(1);
25
uint32_t *d = vd;
25
}
26
int8_t *n = vn;
26
+
27
- int8_t *m_indexed = (int8_t *)vm + index * 4;
27
+ if (kernel_size > info->ram_size) {
28
+ int8_t *m_indexed = (int8_t *)vm + H4(index) * 4;
28
+ error_report("kernel '%s' is too large to fit in RAM "
29
29
+ "(kernel size %d, RAM size %" PRId64 ")",
30
/* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
30
+ info->kernel_filename, kernel_size, info->ram_size);
31
* Otherwise opr_sz is a multiple of 16.
31
+ exit(1);
32
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
32
+ }
33
intptr_t index = simd_data(desc);
33
+
34
uint32_t *d = vd;
34
info->entry = entry;
35
uint8_t *n = vn;
35
if (is_linux) {
36
- uint8_t *m_indexed = (uint8_t *)vm + index * 4;
36
uint32_t fixupcontext[FIXUP_MAX];
37
+ uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4;
37
38
38
if (info->initrd_filename) {
39
/* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
39
+
40
* Otherwise opr_sz is a multiple of 16.
40
+ if (info->initrd_start >= ram_end) {
41
+ error_report("not enough space after kernel to load initrd");
42
+ exit(1);
43
+ }
44
+
45
initrd_size = load_ramdisk_as(info->initrd_filename,
46
info->initrd_start,
47
ram_end - info->initrd_start, as);
48
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
49
info->initrd_filename);
50
exit(1);
51
}
52
+ if (info->initrd_start + initrd_size > info->ram_size) {
53
+ error_report("could not load initrd '%s': "
54
+ "too big to fit into RAM after the kernel",
55
+ info->initrd_filename);
56
+ }
57
} else {
58
initrd_size = 0;
59
}
60
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
61
/* Place the DTB after the initrd in memory with alignment. */
62
info->dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size,
63
align);
64
+ if (info->dtb_start >= ram_end) {
65
+ error_report("Not enough space for DTB after kernel/initrd");
66
+ exit(1);
67
+ }
68
fixupcontext[FIXUP_ARGPTR_LO] = info->dtb_start;
69
fixupcontext[FIXUP_ARGPTR_HI] = info->dtb_start >> 32;
70
} else {
71
--
41
--
72
2.20.1
42
2.20.1
73
43
74
44
New patch
1
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
1
2
3
HCR should be applied when NS is set, not when it is cleared.
4
5
Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
9
target/arm/helper.c | 5 ++---
10
1 file changed, 2 insertions(+), 3 deletions(-)
11
12
diff --git a/target/arm/helper.c b/target/arm/helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/helper.c
15
+++ b/target/arm/helper.c
16
@@ -XXX,XX +XXX,XX @@ static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
17
18
/*
19
* Non-IS variants of TLB operations are upgraded to
20
- * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
21
+ * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
22
* force broadcast of these operations.
23
*/
24
static bool tlb_force_broadcast(CPUARMState *env)
25
{
26
- return (env->cp15.hcr_el2 & HCR_FB) &&
27
- arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
28
+ return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
29
}
30
31
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
32
--
33
2.20.1
34
35
1
In several places cut and paste errors meant we were using the wrong
1
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
2
type for the 'arg' struct in trans_ functions called by the
3
decodetree decoder, because we were using the _sp version of the
4
struct in the _dp function. These were harmless, because the two
5
structs were identical and so decodetree made them typedefs of the
6
same underlying structure (and we'd have had a compile error if they
7
were not harmless), but we should clean them up anyway.
8
2
3
Secure mode is not exempted from checking SCR_EL3.TLOR, and in the
4
future HCR_EL2.TLOR when S-EL2 is enabled.
5
6
Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
11
Message-id: 20190614104457.24703-2-peter.maydell@linaro.org
12
---
9
---
13
target/arm/translate-vfp.inc.c | 28 ++++++++++++++--------------
10
target/arm/helper.c | 19 +++++--------------
14
1 file changed, 14 insertions(+), 14 deletions(-)
11
1 file changed, 5 insertions(+), 14 deletions(-)
15
12
16
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
13
diff --git a/target/arm/helper.c b/target/arm/helper.c
17
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-vfp.inc.c
15
--- a/target/arm/helper.c
19
+++ b/target/arm/translate-vfp.inc.c
16
+++ b/target/arm/helper.c
20
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
17
@@ -XXX,XX +XXX,XX @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
21
return true;
18
#endif
19
20
/* Shared logic between LORID and the rest of the LOR* registers.
21
- * Secure state has already been delt with.
22
+ * Secure state exclusion has already been dealt with.
23
*/
24
-static CPAccessResult access_lor_ns(CPUARMState *env)
25
+static CPAccessResult access_lor_ns(CPUARMState *env,
26
+ const ARMCPRegInfo *ri, bool isread)
27
{
28
int el = arm_current_el(env);
29
30
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_lor_ns(CPUARMState *env)
31
return CP_ACCESS_OK;
22
}
32
}
23
33
24
-static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
34
-static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
25
+static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
35
- bool isread)
36
-{
37
- if (arm_is_secure_below_el3(env)) {
38
- /* Access ok in secure mode. */
39
- return CP_ACCESS_OK;
40
- }
41
- return access_lor_ns(env);
42
-}
43
-
44
static CPAccessResult access_lor_other(CPUARMState *env,
45
const ARMCPRegInfo *ri, bool isread)
26
{
46
{
27
TCGv_i32 tmp;
47
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_lor_other(CPUARMState *env,
28
48
/* Access denied in secure mode. */
29
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
49
return CP_ACCESS_TRAP;
30
return true;
50
}
51
- return access_lor_ns(env);
52
+ return access_lor_ns(env, ri, isread);
31
}
53
}
32
54
33
-static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
55
/*
34
+static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
56
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo lor_reginfo[] = {
35
{
57
.type = ARM_CP_CONST, .resetvalue = 0 },
36
uint32_t offset;
58
{ .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
37
TCGv_i32 addr;
59
.opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
38
@@ -XXX,XX +XXX,XX @@ static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
60
- .access = PL1_R, .accessfn = access_lorid,
39
tcg_temp_free_i64(tmp);
61
+ .access = PL1_R, .accessfn = access_lor_ns,
40
}
62
.type = ARM_CP_CONST, .resetvalue = 0 },
41
63
REGINFO_SENTINEL
42
-static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a)
64
};
43
+static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
44
{
45
return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
46
}
47
@@ -XXX,XX +XXX,XX @@ static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
48
tcg_temp_free_i64(tmp);
49
}
50
51
-static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a)
52
+static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
53
{
54
return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
55
}
56
@@ -XXX,XX +XXX,XX @@ static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
57
tcg_temp_free_i64(tmp);
58
}
59
60
-static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
61
+static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
62
{
63
return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
64
}
65
@@ -XXX,XX +XXX,XX @@ static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
66
tcg_temp_free_i64(tmp);
67
}
68
69
-static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
70
+static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
71
{
72
return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
73
}
74
@@ -XXX,XX +XXX,XX @@ static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
75
return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
76
}
77
78
-static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a)
79
+static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
80
{
81
return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
82
}
83
@@ -XXX,XX +XXX,XX @@ static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
84
gen_helper_vfp_negd(vd, vd);
85
}
86
87
-static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a)
88
+static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
89
{
90
return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
91
}
92
@@ -XXX,XX +XXX,XX @@ static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
93
return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
94
}
95
96
-static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a)
97
+static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
98
{
99
return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
100
}
101
@@ -XXX,XX +XXX,XX @@ static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
102
return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
103
}
104
105
-static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a)
106
+static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
107
{
108
return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
109
}
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
111
return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
112
}
113
114
-static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a)
115
+static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
116
{
117
return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
118
}
119
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
120
return true;
121
}
122
123
-static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a)
124
+static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
125
{
126
/*
127
* VFNMA : fd = muladd(-fd, fn, fm)
128
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
129
return true;
130
}
131
132
-static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_sp *a)
133
+static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
134
{
135
TCGv_ptr fpst;
136
TCGv_i64 tmp;
137
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
138
return true;
139
}
140
141
-static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_sp *a)
142
+static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
143
{
144
TCGv_ptr fpst;
145
TCGv_i64 tmp;
146
--
65
--
147
2.20.1
66
2.20.1
148
67
149
68
New patch
1
If we're using the capstone disassembler, disassembly of a run of
2
instructions more than 32 bytes long disassembles the wrong data for
3
instructions beyond the 32 byte mark:
1
4
5
(qemu) xp /16x 0x100
6
0000000000000100: 0x00000005 0x54410001 0x00000001 0x00001000
7
0000000000000110: 0x00000000 0x00000004 0x54410002 0x3c000000
8
0000000000000120: 0x00000000 0x00000004 0x54410009 0x74736574
9
0000000000000130: 0x00000000 0x00000000 0x00000000 0x00000000
10
(qemu) xp /16i 0x100
11
0x00000100: 00000005 andeq r0, r0, r5
12
0x00000104: 54410001 strbpl r0, [r1], #-1
13
0x00000108: 00000001 andeq r0, r0, r1
14
0x0000010c: 00001000 andeq r1, r0, r0
15
0x00000110: 00000000 andeq r0, r0, r0
16
0x00000114: 00000004 andeq r0, r0, r4
17
0x00000118: 54410002 strbpl r0, [r1], #-2
18
0x0000011c: 3c000000 .byte 0x00, 0x00, 0x00, 0x3c
19
0x00000120: 54410001 strbpl r0, [r1], #-1
20
0x00000124: 00000001 andeq r0, r0, r1
21
0x00000128: 00001000 andeq r1, r0, r0
22
0x0000012c: 00000000 andeq r0, r0, r0
23
0x00000130: 00000004 andeq r0, r0, r4
24
0x00000134: 54410002 strbpl r0, [r1], #-2
25
0x00000138: 3c000000 .byte 0x00, 0x00, 0x00, 0x3c
26
0x0000013c: 00000000 andeq r0, r0, r0
27
28
Here the disassembly of 0x120..0x13f is using the data that is in
29
0x104..0x123.
30
31
This is caused by passing the wrong value to the read_memory_func().
32
The intention is that at this point in the loop the 'cap_buf' buffer
33
already contains 'csize' bytes of data for the instruction at guest
34
addr 'pc', and we want to read in an extra 'tsize' bytes. Those
35
extra bytes are therefore at 'pc + csize', not 'pc'. On the first
36
time through the loop 'csize' happens to be zero, so the initial read
37
of 32 bytes into cap_buf is correct and as long as the disassembly
38
never needs to read more data we return the correct information.
39
40
Use the correct guest address in the call to read_memory_func().
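
As an illustration of the buffering rule (hypothetical names, not the
actual disas/capstone.c variables): once 'have' bytes for guest address
'addr' are already buffered, the next chunk must be fetched from
addr + have and appended at buf + have:

    #include <stddef.h>
    #include <stdint.h>

    typedef void (*read_mem_fn)(uint64_t addr, uint8_t *dst, size_t len);

    /* Append 'want' more guest bytes to a buffer that already holds 'have'. */
    static size_t refill(read_mem_fn read_mem, uint64_t addr,
                         uint8_t *buf, size_t have, size_t want)
    {
        read_mem(addr + have, buf + have, want);   /* not read_mem(addr, ...) */
        return have + want;
    }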
41
42
Cc: qemu-stable@nongnu.org
43
Fixes: https://bugs.launchpad.net/qemu/+bug/1900779
44
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
45
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
46
Message-id: 20201022132445.25039-1-peter.maydell@linaro.org
47
---
48
disas/capstone.c | 2 +-
49
1 file changed, 1 insertion(+), 1 deletion(-)
50
51
diff --git a/disas/capstone.c b/disas/capstone.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/disas/capstone.c
54
+++ b/disas/capstone.c
55
@@ -XXX,XX +XXX,XX @@ bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count)
56
57
/* Make certain that we can make progress. */
58
assert(tsize != 0);
59
- info->read_memory_func(pc, cap_buf + csize, tsize, info);
60
+ info->read_memory_func(pc + csize, cap_buf + csize, tsize, info);
61
csize += tsize;
62
63
if (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) {
64
--
65
2.20.1
66
67
1
Allow the DSP extension to be disabled via a CPU property for
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
M-profile CPUs. (A and R-profile CPUs don't have this extension
3
as a defined separate optional architecture extension, so
4
they don't need the property.)
5
2
3
Use the BIT_ULL() macro to ensure we use 64-bit arithmetic.
4
This fixes the following Coverity issue (OVERFLOW_BEFORE_WIDEN):
5
6
CID 1432363 (#1 of 1): Unintentional integer overflow:
7
8
overflow_before_widen:
9
Potentially overflowing expression 1 << scale with type int
10
(32 bits, signed) is evaluated using 32-bit arithmetic, and
11
then used in a context that expects an expression of type
12
hwaddr (64 bits, unsigned).
13
14
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
15
Acked-by: Eric Auger <eric.auger@redhat.com>
16
Message-id: 20201030144617.1535064-1-philmd@redhat.com
17
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Message-id: 20190517174046.11146-3-peter.maydell@linaro.org
10
---
19
---
11
target/arm/cpu.h | 2 ++
20
hw/arm/smmuv3.c | 3 ++-
12
target/arm/cpu.c | 29 +++++++++++++++++++++++++++++
21
1 file changed, 2 insertions(+), 1 deletion(-)
13
2 files changed, 31 insertions(+)
14
22
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
23
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
16
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
25
--- a/hw/arm/smmuv3.c
18
+++ b/target/arm/cpu.h
26
+++ b/hw/arm/smmuv3.c
19
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
27
@@ -XXX,XX +XXX,XX @@
20
bool has_vfp;
28
*/
21
/* CPU has Neon */
29
22
bool has_neon;
30
#include "qemu/osdep.h"
23
+ /* CPU has M-profile DSP extension */
31
+#include "qemu/bitops.h"
24
+ bool has_dsp;
32
#include "hw/irq.h"
25
33
#include "hw/sysbus.h"
26
/* CPU has memory protection unit */
34
#include "migration/vmstate.h"
27
bool has_mpu;
35
@@ -XXX,XX +XXX,XX @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
28
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
36
scale = CMD_SCALE(cmd);
29
index XXXXXXX..XXXXXXX 100644
37
num = CMD_NUM(cmd);
30
--- a/target/arm/cpu.c
38
ttl = CMD_TTL(cmd);
31
+++ b/target/arm/cpu.c
39
- num_pages = (num + 1) * (1 << (scale));
32
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_has_vfp_property =
40
+ num_pages = (num + 1) * BIT_ULL(scale);
33
static Property arm_cpu_has_neon_property =
34
DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
35
36
+static Property arm_cpu_has_dsp_property =
37
+ DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
38
+
39
static Property arm_cpu_has_mpu_property =
40
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
41
42
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
43
}
44
}
41
}
45
42
46
+ if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
43
if (type == SMMU_CMD_TLBI_NH_VA) {
47
+ arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
48
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property,
49
+ &error_abort);
50
+ }
51
+
52
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
53
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
54
&error_abort);
55
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
56
cpu->isar.mvfr0 = u;
57
}
58
59
+ if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
60
+ uint32_t u;
61
+
62
+ unset_feature(env, ARM_FEATURE_THUMB_DSP);
63
+
64
+ u = cpu->isar.id_isar1;
65
+ u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
66
+ cpu->isar.id_isar1 = u;
67
+
68
+ u = cpu->isar.id_isar2;
69
+ u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
70
+ u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
71
+ cpu->isar.id_isar2 = u;
72
+
73
+ u = cpu->isar.id_isar3;
74
+ u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
75
+ u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
76
+ cpu->isar.id_isar3 = u;
77
+ }
78
+
79
/* Some features automatically imply others: */
80
if (arm_feature(env, ARM_FEATURE_V8)) {
81
if (arm_feature(env, ARM_FEATURE_M)) {
82
--
44
--
83
2.20.1
45
2.20.1
84
46
85
47
1
In the Arm kernel/initrd loading code, in some places we make the
1
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
2
incorrect assumption that info->ram_size can be treated as the
3
address of the end of RAM, as for instance when we calculate the
4
available space for the initrd using "info->ram_size - info->initrd_start".
5
This is wrong, because many Arm boards (including "virt") specify
6
a non-zero info->loader_start to indicate that their RAM area
7
starts at a non-zero physical address.
8
2
9
Correct the places which make this incorrect assumption.
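
A trivial sketch of the rule being applied (illustration only): the end
of RAM is loader_start + ram_size, and free space has to be measured
against that, not against ram_size alone, because RAM may start at a
non-zero physical address:

    #include <stdint.h>

    static uint64_t arm_ram_end(uint64_t loader_start, uint64_t ram_size)
    {
        /* e.g. the "virt" board puts RAM at 0x40000000, not at 0 */
        return loader_start + ram_size;
    }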
3
When booting a CPU with EL3 using the -kernel flag, set up CPTR_EL3 so
4
that SVE will not trap to EL3.
10
5
6
Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20201030151541.11976-1-remi@remlab.net
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Tested-by: Mark Rutland <mark.rutland@arm.com>
14
Message-id: 20190516144733.32399-2-peter.maydell@linaro.org
15
---
10
---
16
hw/arm/boot.c | 9 ++++-----
11
hw/arm/boot.c | 3 +++
17
1 file changed, 4 insertions(+), 5 deletions(-)
12
1 file changed, 3 insertions(+)
18
13
19
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
14
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
20
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/arm/boot.c
16
--- a/hw/arm/boot.c
22
+++ b/hw/arm/boot.c
17
+++ b/hw/arm/boot.c
23
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
18
@@ -XXX,XX +XXX,XX @@ static void do_cpu_reset(void *opaque)
24
int elf_machine;
19
if (cpu_isar_feature(aa64_mte, cpu)) {
25
hwaddr entry;
20
env->cp15.scr_el3 |= SCR_ATA;
26
static const ARMInsnFixup *primary_loader;
21
}
27
+ uint64_t ram_end = info->loader_start + info->ram_size;
22
+ if (cpu_isar_feature(aa64_sve, cpu)) {
28
23
+ env->cp15.cptr_el[3] |= CPTR_EZ;
29
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
24
+ }
30
primary_loader = bootloader_aarch64;
25
/* AArch64 kernels never boot in secure mode */
31
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
26
assert(!info->secure_boot);
32
/* 32-bit ARM */
27
/* This hook is only supported for AArch32 currently:
33
entry = info->loader_start + KERNEL_LOAD_ADDR;
34
kernel_size = load_image_targphys_as(info->kernel_filename, entry,
35
- info->ram_size - KERNEL_LOAD_ADDR,
36
- as);
37
+ ram_end - KERNEL_LOAD_ADDR, as);
38
is_linux = 1;
39
}
40
if (kernel_size < 0) {
41
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
42
if (info->initrd_filename) {
43
initrd_size = load_ramdisk_as(info->initrd_filename,
44
info->initrd_start,
45
- info->ram_size - info->initrd_start,
46
- as);
47
+ ram_end - info->initrd_start, as);
48
if (initrd_size < 0) {
49
initrd_size = load_image_targphys_as(info->initrd_filename,
50
info->initrd_start,
51
- info->ram_size -
52
+ ram_end -
53
info->initrd_start,
54
as);
55
}
56
--
28
--
57
2.20.1
29
2.20.1
58
30
59
31
1
Allow VFP and neon to be disabled via a CPU property. As with
the "pmu" property, we only allow these features to be removed
from CPUs which have it by default, not added to CPUs which
don't have it.

The primary motivation here is to be able to optionally
create Cortex-M33 CPUs with no FPU, but we provide switches
for both VFP and Neon because the two interact:
 * AArch64 can't have one without the other
 * Some ID register fields only change if both are disabled

From: AlexChen <alex.chen@huawei.com>

In omap_lcd_interrupts(), the pointer omap_lcd is dereferenced before
being checked for validity, which may lead to a NULL pointer dereference.
So move the assignment to surface after checking that omap_lcd is valid,
and move surface_bits_per_pixel(surface) to after the surface assignment.
11
7
8
Reported-by: Euler Robot <euler.robot@huawei.com>
9
Signed-off-by: AlexChen <alex.chen@huawei.com>
10
Message-id: 5F9CDB8A.9000001@huawei.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
15
Message-id: 20190517174046.11146-2-peter.maydell@linaro.org
16
---
13
---
17
target/arm/cpu.h | 4 ++
14
hw/display/omap_lcdc.c | 10 +++++++---
18
target/arm/cpu.c | 150 +++++++++++++++++++++++++++++++++++++++++++++--
15
1 file changed, 7 insertions(+), 3 deletions(-)
19
2 files changed, 148 insertions(+), 6 deletions(-)
20
16
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
diff --git a/hw/display/omap_lcdc.c b/hw/display/omap_lcdc.c
22
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/cpu.h
19
--- a/hw/display/omap_lcdc.c
24
+++ b/target/arm/cpu.h
20
+++ b/hw/display/omap_lcdc.c
25
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
21
@@ -XXX,XX +XXX,XX @@ static void omap_lcd_interrupts(struct omap_lcd_panel_s *s)
26
bool has_el3;
22
static void omap_update_display(void *opaque)
27
/* CPU has PMU (Performance Monitor Unit) */
23
{
28
bool has_pmu;
24
struct omap_lcd_panel_s *omap_lcd = (struct omap_lcd_panel_s *) opaque;
29
+ /* CPU has VFP */
25
- DisplaySurface *surface = qemu_console_surface(omap_lcd->con);
30
+ bool has_vfp;
26
+ DisplaySurface *surface;
31
+ /* CPU has Neon */
27
draw_line_func draw_line;
32
+ bool has_neon;
28
int size, height, first, last;
33
29
int width, linesize, step, bpp, frame_offset;
34
/* CPU has memory protection unit */
30
hwaddr frame_base;
35
bool has_mpu;
31
36
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
32
- if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable ||
37
index XXXXXXX..XXXXXXX 100644
33
- !surface_bits_per_pixel(surface)) {
38
--- a/target/arm/cpu.c
34
+ if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable) {
39
+++ b/target/arm/cpu.c
40
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_cfgend_property =
41
static Property arm_cpu_has_pmu_property =
42
DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
43
44
+static Property arm_cpu_has_vfp_property =
45
+ DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
46
+
47
+static Property arm_cpu_has_neon_property =
48
+ DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
49
+
50
static Property arm_cpu_has_mpu_property =
51
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
52
53
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
54
if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
55
set_feature(&cpu->env, ARM_FEATURE_PMSA);
56
}
57
+ /* Similarly for the VFP feature bits */
58
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP4)) {
59
+ set_feature(&cpu->env, ARM_FEATURE_VFP3);
60
+ }
61
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP3)) {
62
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
63
+ }
64
65
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
66
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
67
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
68
&error_abort);
69
}
70
71
+ /*
72
+ * Allow user to turn off VFP and Neon support, but only for TCG --
73
+ * KVM does not currently allow us to lie to the guest about its
74
+ * ID/feature registers, so the guest always sees what the host has.
75
+ */
76
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
77
+ cpu->has_vfp = true;
78
+ if (!kvm_enabled()) {
79
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property,
80
+ &error_abort);
81
+ }
82
+ }
83
+
84
+ if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
85
+ cpu->has_neon = true;
86
+ if (!kvm_enabled()) {
87
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property,
88
+ &error_abort);
89
+ }
90
+ }
91
+
92
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
93
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
94
&error_abort);
95
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
96
return;
97
}
98
99
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
100
+ cpu->has_vfp != cpu->has_neon) {
101
+ /*
102
+ * This is an architectural requirement for AArch64; AArch32 is
103
+ * more flexible and permits VFP-no-Neon and Neon-no-VFP.
104
+ */
105
+ error_setg(errp,
106
+ "AArch64 CPUs must have both VFP and Neon or neither");
107
+ return;
35
+ return;
108
+ }
36
+ }
109
+
37
+
110
+ if (!cpu->has_vfp) {
38
+ surface = qemu_console_surface(omap_lcd->con);
111
+ uint64_t t;
39
+ if (!surface_bits_per_pixel(surface)) {
112
+ uint32_t u;
40
return;
113
+
114
+ unset_feature(env, ARM_FEATURE_VFP);
115
+ unset_feature(env, ARM_FEATURE_VFP3);
116
+ unset_feature(env, ARM_FEATURE_VFP4);
117
+
118
+ t = cpu->isar.id_aa64isar1;
119
+ t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
120
+ cpu->isar.id_aa64isar1 = t;
121
+
122
+ t = cpu->isar.id_aa64pfr0;
123
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
124
+ cpu->isar.id_aa64pfr0 = t;
125
+
126
+ u = cpu->isar.id_isar6;
127
+ u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
128
+ cpu->isar.id_isar6 = u;
129
+
130
+ u = cpu->isar.mvfr0;
131
+ u = FIELD_DP32(u, MVFR0, FPSP, 0);
132
+ u = FIELD_DP32(u, MVFR0, FPDP, 0);
133
+ u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
134
+ u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
135
+ u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
136
+ u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
137
+ u = FIELD_DP32(u, MVFR0, FPROUND, 0);
138
+ cpu->isar.mvfr0 = u;
139
+
140
+ u = cpu->isar.mvfr1;
141
+ u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
142
+ u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
143
+ u = FIELD_DP32(u, MVFR1, FPHP, 0);
144
+ cpu->isar.mvfr1 = u;
145
+
146
+ u = cpu->isar.mvfr2;
147
+ u = FIELD_DP32(u, MVFR2, FPMISC, 0);
148
+ cpu->isar.mvfr2 = u;
149
+ }
150
+
151
+ if (!cpu->has_neon) {
152
+ uint64_t t;
153
+ uint32_t u;
154
+
155
+ unset_feature(env, ARM_FEATURE_NEON);
156
+
157
+ t = cpu->isar.id_aa64isar0;
158
+ t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
159
+ cpu->isar.id_aa64isar0 = t;
160
+
161
+ t = cpu->isar.id_aa64isar1;
162
+ t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
163
+ cpu->isar.id_aa64isar1 = t;
164
+
165
+ t = cpu->isar.id_aa64pfr0;
166
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
167
+ cpu->isar.id_aa64pfr0 = t;
168
+
169
+ u = cpu->isar.id_isar5;
170
+ u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
171
+ u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
172
+ cpu->isar.id_isar5 = u;
173
+
174
+ u = cpu->isar.id_isar6;
175
+ u = FIELD_DP32(u, ID_ISAR6, DP, 0);
176
+ u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
177
+ cpu->isar.id_isar6 = u;
178
+
179
+ u = cpu->isar.mvfr1;
180
+ u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
181
+ u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
182
+ u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
183
+ u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
184
+ u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
185
+ cpu->isar.mvfr1 = u;
186
+
187
+ u = cpu->isar.mvfr2;
188
+ u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
189
+ cpu->isar.mvfr2 = u;
190
+ }
191
+
192
+ if (!cpu->has_neon && !cpu->has_vfp) {
193
+ uint64_t t;
194
+ uint32_t u;
195
+
196
+ t = cpu->isar.id_aa64isar0;
197
+ t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
198
+ cpu->isar.id_aa64isar0 = t;
199
+
200
+ t = cpu->isar.id_aa64isar1;
201
+ t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
202
+ cpu->isar.id_aa64isar1 = t;
203
+
204
+ u = cpu->isar.mvfr0;
205
+ u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
206
+ cpu->isar.mvfr0 = u;
207
+ }
208
+
209
/* Some features automatically imply others: */
210
if (arm_feature(env, ARM_FEATURE_V8)) {
211
if (arm_feature(env, ARM_FEATURE_M)) {
212
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
213
if (arm_feature(env, ARM_FEATURE_V5)) {
214
set_feature(env, ARM_FEATURE_V4T);
215
}
41
}
216
- if (arm_feature(env, ARM_FEATURE_VFP4)) {
42
217
- set_feature(env, ARM_FEATURE_VFP3);
218
- }
219
- if (arm_feature(env, ARM_FEATURE_VFP3)) {
220
- set_feature(env, ARM_FEATURE_VFP);
221
- }
222
if (arm_feature(env, ARM_FEATURE_LPAE)) {
223
set_feature(env, ARM_FEATURE_V7MP);
224
set_feature(env, ARM_FEATURE_PXN);
225
--
43
--
226
2.20.1
44
2.20.1
227
45
228
46
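When VFP or Neon is disabled, the arm_cpu_realizefn() hunks above zero out individual ID-register fields with FIELD_DP32()/FIELD_DP64(). Under the hood that is a bit-deposit into a fixed-width field; the following standalone sketch shows the operation on an MVFR0-like value (the helper name, field position and register value are illustrative assumptions, not QEMU's registerfields.h):

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for a deposit operation: replace the 'length'-bit field that
 * starts at bit 'start' in 'value' with 'fieldval'.
 */
static uint32_t deposit32(uint32_t value, int start, int length,
                          uint32_t fieldval)
{
    uint32_t mask = (length == 32 ? ~0u : (1u << length) - 1) << start;

    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    /* Assume a 4-bit double-precision field at bits [11:8] of MVFR0 */
    uint32_t mvfr0 = 0x10110222;                  /* example reset value */
    uint32_t no_fpdp = deposit32(mvfr0, 8, 4, 0); /* report "no FP double" */

    printf("before 0x%08x, after 0x%08x\n", mvfr0, no_fpdp);
    return 0;
}

Clearing a field to 0 (or to 0xf for the signed AArch64 PFR fields, as the hunk does for FP and ADVSIMD) is how the realize code tells the guest that the corresponding feature is absent.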
1
The SSE-200 hardware has configurable integration settings which
1
From: AlexChen <alex.chen@huawei.com>
2
determine whether its two CPUs have the FPU and DSP:
3
* CPU0_FPU (default 0)
4
* CPU0_DSP (default 0)
5
* CPU1_FPU (default 1)
6
* CPU1_DSP (default 1)
7
2
8
Similarly, the IoTKit has settings for its single CPU:
3
In exynos4210_fimd_update(), the pointer s is dereferenced before
9
* CPU0_FPU (default 1)
4
being checked for validity, which may lead to a NULL pointer dereference.
10
* CPU0_DSP (default 1)
5
So move the assignment to global_width after checking that s is valid.
11
6
12
Of our four boards that use either the IoTKit or the SSE-200:
7
Reported-by: Euler Robot <euler.robot@huawei.com>
13
* mps2-an505, mps2-an521 and musca-a use the default settings
8
Signed-off-by: Alex Chen <alex.chen@huawei.com>
14
* musca-b1 enables FPU and DSP on both CPUs
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Message-id: 5F9F8D88.9030102@huawei.com
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
hw/display/exynos4210_fimd.c | 4 +++-
14
1 file changed, 3 insertions(+), 1 deletion(-)
15
15
16
Currently QEMU models all these boards using CPUs with
16
diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c
17
both FPU and DSP enabled. This means that we are incorrect
18
for mps2-an521 and musca-a, which should not have FPU or DSP
19
on CPU0.
20
21
Create QOM properties on the ARMSSE devices corresponding to the
22
default h/w integration settings, and make the Musca-B1 board
23
enable FPU and DSP on both CPUs. This fixes the mps2-an521
24
and musca-a behaviour, and leaves the musca-b1 and mps2-an505
25
behaviour unchanged.
26
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
29
Message-id: 20190517174046.11146-5-peter.maydell@linaro.org
30
---
31
include/hw/arm/armsse.h | 7 +++++
32
hw/arm/armsse.c | 58 ++++++++++++++++++++++++++++++++---------
33
hw/arm/musca.c | 8 ++++++
34
3 files changed, 61 insertions(+), 12 deletions(-)
35
36
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
37
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
38
--- a/include/hw/arm/armsse.h
18
--- a/hw/display/exynos4210_fimd.c
39
+++ b/include/hw/arm/armsse.h
19
+++ b/hw/display/exynos4210_fimd.c
40
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static void exynos4210_fimd_update(void *opaque)
41
* address of each SRAM bank (and thus the total amount of internal SRAM)
21
bool blend = false;
42
* + QOM property "init-svtor" sets the initial value of the CPU SVTOR register
22
uint8_t *host_fb_addr;
43
* (where it expects to load the PC and SP from the vector table on reset)
23
bool is_dirty = false;
44
+ * + QOM properties "CPU0_FPU", "CPU0_DSP", "CPU1_FPU" and "CPU1_DSP" which
24
- const int global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1;
45
+ * set whether the CPUs have the FPU and DSP features present. The default
25
+ int global_width;
46
+ * (matching the hardware) is that for CPU0 in an IoTKit and CPU1 in an
26
47
+ * SSE-200 both are present; CPU0 in an SSE-200 has neither.
27
if (!s || !s->console || !s->enabled ||
48
+ * Since the IoTKit has only one CPU, it does not have the CPU1_* properties.
28
surface_bits_per_pixel(qemu_console_surface(s->console)) == 0) {
49
* + Named GPIO inputs "EXP_IRQ" 0..n are the expansion interrupts for CPU 0,
29
return;
50
* which are wired to its NVIC lines 32 .. n+32
30
}
51
* + Named GPIO inputs "EXP_CPU1_IRQ" 0..n are the expansion interrupts for
52
@@ -XXX,XX +XXX,XX @@ typedef struct ARMSSE {
53
uint32_t mainclk_frq;
54
uint32_t sram_addr_width;
55
uint32_t init_svtor;
56
+ bool cpu_fpu[SSE_MAX_CPUS];
57
+ bool cpu_dsp[SSE_MAX_CPUS];
58
} ARMSSE;
59
60
typedef struct ARMSSEInfo ARMSSEInfo;
61
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/hw/arm/armsse.c
64
+++ b/hw/arm/armsse.c
65
@@ -XXX,XX +XXX,XX @@ struct ARMSSEInfo {
66
bool has_cachectrl;
67
bool has_cpusecctrl;
68
bool has_cpuid;
69
+ Property *props;
70
+};
71
+
31
+
72
+static Property iotkit_properties[] = {
32
+ global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1;
73
+ DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
33
exynos4210_update_resolution(s);
74
+ MemoryRegion *),
34
surface = qemu_console_surface(s->console);
75
+ DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
76
+ DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
77
+ DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
78
+ DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
79
+ DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true),
80
+ DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
81
+ DEFINE_PROP_END_OF_LIST()
82
+};
83
+
84
+static Property armsse_properties[] = {
85
+ DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
86
+ MemoryRegion *),
87
+ DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
88
+ DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
89
+ DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
90
+ DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
91
+ DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], false),
92
+ DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], false),
93
+ DEFINE_PROP_BOOL("CPU1_FPU", ARMSSE, cpu_fpu[1], true),
94
+ DEFINE_PROP_BOOL("CPU1_DSP", ARMSSE, cpu_dsp[1], true),
95
+ DEFINE_PROP_END_OF_LIST()
96
};
97
98
static const ARMSSEInfo armsse_variants[] = {
99
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
100
.has_cachectrl = false,
101
.has_cpusecctrl = false,
102
.has_cpuid = false,
103
+ .props = iotkit_properties,
104
},
105
{
106
.name = TYPE_SSE200,
107
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
108
.has_cachectrl = true,
109
.has_cpusecctrl = true,
110
.has_cpuid = true,
111
+ .props = armsse_properties,
112
},
113
};
114
115
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
116
return;
117
}
118
}
119
+ if (!s->cpu_fpu[i]) {
120
+ object_property_set_bool(cpuobj, false, "vfp", &err);
121
+ if (err) {
122
+ error_propagate(errp, err);
123
+ return;
124
+ }
125
+ }
126
+ if (!s->cpu_dsp[i]) {
127
+ object_property_set_bool(cpuobj, false, "dsp", &err);
128
+ if (err) {
129
+ error_propagate(errp, err);
130
+ return;
131
+ }
132
+ }
133
134
if (i > 0) {
135
memory_region_add_subregion_overlap(&s->cpu_container[i], 0,
136
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription armsse_vmstate = {
137
}
138
};
139
140
-static Property armsse_properties[] = {
141
- DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
142
- MemoryRegion *),
143
- DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
144
- DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
145
- DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
146
- DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
147
- DEFINE_PROP_END_OF_LIST()
148
-};
149
-
150
static void armsse_reset(DeviceState *dev)
151
{
152
ARMSSE *s = ARMSSE(dev);
153
@@ -XXX,XX +XXX,XX @@ static void armsse_class_init(ObjectClass *klass, void *data)
154
DeviceClass *dc = DEVICE_CLASS(klass);
155
IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(klass);
156
ARMSSEClass *asc = ARMSSE_CLASS(klass);
157
+ const ARMSSEInfo *info = data;
158
159
dc->realize = armsse_realize;
160
dc->vmsd = &armsse_vmstate;
161
- dc->props = armsse_properties;
162
+ dc->props = info->props;
163
dc->reset = armsse_reset;
164
iic->check = armsse_idau_check;
165
- asc->info = data;
166
+ asc->info = info;
167
}
168
169
static const TypeInfo armsse_info = {
170
diff --git a/hw/arm/musca.c b/hw/arm/musca.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/hw/arm/musca.c
173
+++ b/hw/arm/musca.c
174
@@ -XXX,XX +XXX,XX @@ static void musca_init(MachineState *machine)
175
qdev_prop_set_uint32(ssedev, "init-svtor", mmc->init_svtor);
176
qdev_prop_set_uint32(ssedev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width);
177
qdev_prop_set_uint32(ssedev, "MAINCLK", SYSCLK_FRQ);
178
+ /*
179
+ * Musca-A takes the default SSE-200 FPU/DSP settings (ie no for
180
+ * CPU0 and yes for CPU1); Musca-B1 explicitly enables them for CPU0.
181
+ */
182
+ if (mmc->type == MUSCA_B1) {
183
+ qdev_prop_set_bit(ssedev, "CPU0_FPU", true);
184
+ qdev_prop_set_bit(ssedev, "CPU0_DSP", true);
185
+ }
186
object_property_set_bool(OBJECT(&mms->sse), true, "realized",
187
&error_fatal);
188
35
189
--
36
--
190
2.20.1
37
2.20.1
191
38
192
39
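The exynos4210_fimd change above is the usual ordering fix for this class of defect report: derive nothing from a pointer until it has been validated. A minimal standalone sketch of the before/after shapes (hypothetical struct and mask, not the real device state):

#include <stdint.h>

struct fimd_state {
    uint32_t vidtcon[4];
    int enabled;
};

#define SIZE_MASK 0x7ff   /* assumed width mask, standing in for FIMD_VIDTCON2_SIZE_MASK */

/* Buggy shape: 's' is dereferenced before it is validated. */
static int width_buggy(const struct fimd_state *s)
{
    const int width = (s->vidtcon[2] & SIZE_MASK) + 1;  /* faults if s == NULL */

    if (!s || !s->enabled) {
        return 0;
    }
    return width;
}

/* Fixed shape: validate first, then read the register. */
static int width_fixed(const struct fimd_state *s)
{
    if (!s || !s->enabled) {
        return 0;
    }
    return (s->vidtcon[2] & SIZE_MASK) + 1;
}

int main(void)
{
    struct fimd_state st = { .vidtcon = { 0, 0, 639, 0 }, .enabled = 1 };

    return width_fixed(&st) == width_buggy(&st) ? 0 : 1;
}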
1
Remove the now unused TCG globals cpu_F0s, cpu_F0d, cpu_F1s, cpu_F1d.
1
In arm_v7m_mmu_idx_for_secstate() we get the 'priv' level to pass to
2
armv7m_mmu_idx_for_secstate_and_priv() by calling arm_current_el().
3
This is incorrect when the security state being queried is not the
4
current one, because arm_current_el() uses the current security state
5
to determine which of the banked CONTROL.nPRIV bits to look at.
6
The effect was that if (for instance) Secure state was in privileged
7
mode but Non-Secure was not then we would return the wrong MMU index.
2
8
3
cpu_M0 is still used by the iwmmxt code, and cpu_V0 and
9
The only places where we are using this function in a way that could
4
cpu_V1 are used by both iwmmxt and Neon.
10
trigger this bug are for the stack loads during a v8M function-return
11
and for the instruction fetch of a v8M SG insn.
12
13
Fix the bug by expanding out the M-profile version of the
14
arm_current_el() logic inline so it can use the passed in secstate
15
rather than env->v7m.secure.
5
16
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
19
Message-id: 20201022164408.13214-1-peter.maydell@linaro.org
9
Message-id: 20190613163917.28589-13-peter.maydell@linaro.org
10
---
20
---
11
target/arm/translate.c | 12 ++----------
21
target/arm/m_helper.c | 3 ++-
12
1 file changed, 2 insertions(+), 10 deletions(-)
22
1 file changed, 2 insertions(+), 1 deletion(-)
13
23
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
24
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
15
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.c
26
--- a/target/arm/m_helper.c
17
+++ b/target/arm/translate.c
27
+++ b/target/arm/m_helper.c
18
@@ -XXX,XX +XXX,XX @@ TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
28
@@ -XXX,XX +XXX,XX @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
19
TCGv_i64 cpu_exclusive_addr;
29
/* Return the MMU index for a v7M CPU in the specified security state */
20
TCGv_i64 cpu_exclusive_val;
30
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
21
31
{
22
-/* FIXME: These should be removed. */
32
- bool priv = arm_current_el(env) != 0;
23
-static TCGv_i32 cpu_F0s, cpu_F1s;
33
+ bool priv = arm_v7m_is_handler_mode(env) ||
24
-static TCGv_i64 cpu_F0d, cpu_F1d;
34
+ !(env->v7m.control[secstate] & 1);
25
-
35
26
#include "exec/gen-icount.h"
36
return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
27
28
static const char * const regnames[] =
29
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
30
dc->base.max_insns = MIN(dc->base.max_insns, bound);
31
}
32
33
- cpu_F0s = tcg_temp_new_i32();
34
- cpu_F1s = tcg_temp_new_i32();
35
- cpu_F0d = tcg_temp_new_i64();
36
- cpu_F1d = tcg_temp_new_i64();
37
- cpu_V0 = cpu_F0d;
38
- cpu_V1 = cpu_F1d;
39
+ cpu_V0 = tcg_temp_new_i64();
40
+ cpu_V1 = tcg_temp_new_i64();
41
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
42
cpu_M0 = tcg_temp_new_i64();
43
}
37
}
44
--
38
--
45
2.20.1
39
2.20.1
46
40
47
41
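The one-liner above matters because CONTROL is banked by security state on v8M: asking "is this security state privileged?" has to look at that state's own nPRIV bit, not the bit belonging to whichever state is currently executing. A rough standalone model of the distinction (field names simplified from the real CPUARMState; the NS/S bank indices of 0 and 1 are assumptions of the sketch):

#include <stdbool.h>
#include <stdio.h>

enum { NS = 0, S = 1 };          /* index into banked registers */

struct v7m_state {
    bool handler_mode;           /* true while in an exception handler */
    bool secure;                 /* current security state */
    unsigned control[2];         /* banked CONTROL; bit 0 is nPRIV */
};

/* Buggy shape: always consults the *current* security state's CONTROL. */
static bool priv_current(const struct v7m_state *env)
{
    return env->handler_mode || !(env->control[env->secure] & 1);
}

/* Fixed shape: consult the CONTROL bank of the state being queried. */
static bool priv_for_secstate(const struct v7m_state *env, bool secstate)
{
    return env->handler_mode || !(env->control[secstate] & 1);
}

int main(void)
{
    /* Secure is privileged, Non-Secure is not, currently executing Secure. */
    struct v7m_state env = {
        .handler_mode = false,
        .secure = true,
        .control = { [NS] = 1, [S] = 0 },
    };

    printf("query NS: buggy=%d fixed=%d\n",
           priv_current(&env), priv_for_secstate(&env, NS));
    return 0;
}

With Secure privileged and Non-Secure unprivileged, the old logic reports the Non-Secure query as privileged too, which is exactly the wrong-MMU-index case the commit message describes.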
1
The GICv3 specification says that the GICD_TYPER.SecurityExtn bit
1
On some hosts (eg Ubuntu Bionic) pkg-config returns a set of
2
is RAZ if GICD_CTLR.DS is 1. We were incorrectly making it RAZ
2
libraries for gio-2.0 which don't actually work when compiling
3
if the security extension is unsupported. "Security extension
3
statically. (Specifically, the returned library string includes
4
unsupported" always implies GICD_CTLR.DS == 1, but the guest can
4
-lmount, but not -lblkid which -lmount depends upon, so linking
5
also set DS on a GIC which does support the security extension.
5
fails due to missing symbols.)
6
Fix the condition to correctly check the GICD_CTLR.DS bit.
6
7
Check that the libraries work, and don't enable gio if they don't,
8
in the same way we do for gnutls.
7
9
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20190524124248.28394-3-peter.maydell@linaro.org
11
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
12
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
13
Message-id: 20200928160402.7961-1-peter.maydell@linaro.org
10
---
14
---
11
hw/intc/arm_gicv3_dist.c | 8 +++++++-
15
configure | 10 +++++++++-
12
1 file changed, 7 insertions(+), 1 deletion(-)
16
1 file changed, 9 insertions(+), 1 deletion(-)
13
17
14
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
18
diff --git a/configure b/configure
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100755
16
--- a/hw/intc/arm_gicv3_dist.c
20
--- a/configure
17
+++ b/hw/intc/arm_gicv3_dist.c
21
+++ b/configure
18
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
22
@@ -XXX,XX +XXX,XX @@ if test "$static" = yes && test "$mingw32" = yes; then
19
* ITLinesNumber == (num external irqs / 32) - 1
23
fi
20
*/
24
21
int itlinesnumber = ((s->num_irq - GIC_INTERNAL) / 32) - 1;
25
if $pkg_config --atleast-version=$glib_req_ver gio-2.0; then
22
+ /*
26
- gio=yes
23
+ * SecurityExtn must be RAZ if GICD_CTLR.DS == 1, and
27
gio_cflags=$($pkg_config --cflags gio-2.0)
24
+ * "security extensions not supported" always implies DS == 1,
28
gio_libs=$($pkg_config --libs gio-2.0)
25
+ * so we only need to check the DS bit.
29
gdbus_codegen=$($pkg_config --variable=gdbus_codegen gio-2.0)
26
+ */
30
if [ ! -x "$gdbus_codegen" ]; then
27
+ bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS);
31
gdbus_codegen=
28
32
fi
29
- *data = (1 << 25) | (1 << 24) | (s->security_extn << 10) |
33
+ # Check that the libraries actually work -- Ubuntu 18.04 ships
30
+ *data = (1 << 25) | (1 << 24) | (sec_extn << 10) |
34
+ # with pkg-config --static --libs data for gio-2.0 that is missing
31
(0xf << 19) | itlinesnumber;
35
+ # -lblkid and will give a link error.
32
return MEMTX_OK;
36
+ write_c_skeleton
33
}
37
+ if compile_prog "" "gio_libs" ; then
38
+ gio=yes
39
+ else
40
+ gio=no
41
+ fi
42
else
43
gio=no
44
fi
34
--
45
--
35
2.20.1
46
2.20.1
36
47
37
48
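For reference, the register value being assembled in gicd_readl() can be modelled as below; the point of the change is that the SecurityExtn bit tracks GICD_CTLR.DS rather than the compile-time presence of the security extension (the DS bit position and IRQ count here are assumptions of the sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GICD_CTLR_DS (1u << 6)      /* assumed bit position of GICD_CTLR.DS */

/*
 * Sketch of the GICD_TYPER composition from the hunk above:
 * SecurityExtn (bit 10) must read as zero whenever GICD_CTLR.DS == 1,
 * even on an implementation that does support two security states.
 */
static uint32_t gicd_typer(uint32_t gicd_ctlr, int num_irq)
{
    int itlinesnumber = ((num_irq - 32) / 32) - 1;   /* 32 internal IRQs */
    bool sec_extn = !(gicd_ctlr & GICD_CTLR_DS);

    return (1u << 25) | (1u << 24) | ((uint32_t)sec_extn << 10) |
           (0xfu << 19) | itlinesnumber;
}

int main(void)
{
    printf("DS=0: 0x%08x\n", gicd_typer(0, 288));
    printf("DS=1: 0x%08x\n", gicd_typer(GICD_CTLR_DS, 288));
    return 0;
}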
1
Since Linux v3.17, the kernel's Image header includes a field image_size,
1
In gicv3_init_cpuif() we copy the ARMCPU gicv3_maintenance_interrupt
2
which gives the total size of the kernel including unpopulated data
2
into the GICv3CPUState struct's maintenance_irq field. This will
3
sections (such as the BSS). If this is present, then return it from
3
only work if the board happens to have already wired up the CPU
4
load_aarch64_image() as the true size of the kernel rather than
4
maintenance IRQ before the GIC was realized. Unfortunately this is
5
just using the size of the Image file itself. This allows the code
5
not the case for the 'virt' board, and so the value that gets copied
6
which calculates where to put the initrd to avoid putting it in
6
is NULL (since a qemu_irq is really a pointer to an IRQState struct
7
the kernel's BSS area.
7
under the hood). The effect is that the CPU interface code never
8
actually raises the maintenance interrupt line.
8
9
9
This means that we should be able to reliably load kernel images
10
Instead, since the GICv3CPUState has a pointer to the CPUState, make
10
which are larger than 128MB without accidentally putting the
11
the dereference at the point where we want to raise the interrupt, to
11
initrd or dtb in locations that clash with the kernel itself.
12
avoid an implicit requirement on board code to wire things up in a
13
particular order.
12
14
13
Fixes: https://bugs.launchpad.net/qemu/+bug/1823998
15
Reported-by: Jose Martins <josemartins90@gmail.com>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
17
Message-id: 20201009153904.28529-1-peter.maydell@linaro.org
16
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
18
Reviewed-by: Luc Michel <luc@lmichel.fr>
17
Tested-by: Mark Rutland <mark.rutland@arm.com>
18
Message-id: 20190516144733.32399-5-peter.maydell@linaro.org
19
---
19
---
20
hw/arm/boot.c | 17 +++++++++++++++--
20
include/hw/intc/arm_gicv3_common.h | 1 -
21
1 file changed, 15 insertions(+), 2 deletions(-)
21
hw/intc/arm_gicv3_cpuif.c | 5 ++---
22
2 files changed, 2 insertions(+), 4 deletions(-)
22
23
23
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
24
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
24
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
25
--- a/hw/arm/boot.c
26
--- a/include/hw/intc/arm_gicv3_common.h
26
+++ b/hw/arm/boot.c
27
+++ b/include/hw/intc/arm_gicv3_common.h
27
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
28
@@ -XXX,XX +XXX,XX @@ struct GICv3CPUState {
28
hwaddr *entry, AddressSpace *as)
29
qemu_irq parent_fiq;
29
{
30
qemu_irq parent_virq;
30
hwaddr kernel_load_offset = KERNEL64_LOAD_ADDR;
31
qemu_irq parent_vfiq;
31
+ uint64_t kernel_size = 0;
32
- qemu_irq maintenance_irq;
32
uint8_t *buffer;
33
33
int size;
34
/* Redistributor */
34
35
uint32_t level; /* Current IRQ level */
35
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
36
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
36
* is only valid if the image_size is non-zero.
37
index XXXXXXX..XXXXXXX 100644
37
*/
38
--- a/hw/intc/arm_gicv3_cpuif.c
38
memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
39
+++ b/hw/intc/arm_gicv3_cpuif.c
39
- if (hdrvals[1] != 0) {
40
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
40
+
41
int irqlevel = 0;
41
+ kernel_size = le64_to_cpu(hdrvals[1]);
42
int fiqlevel = 0;
42
+
43
int maintlevel = 0;
43
+ if (kernel_size != 0) {
44
+ ARMCPU *cpu = ARM_CPU(cs->cpu);
44
kernel_load_offset = le64_to_cpu(hdrvals[0]);
45
45
46
idx = hppvi_index(cs);
46
/*
47
trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
47
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
48
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
48
}
49
49
}
50
qemu_set_irq(cs->parent_vfiq, fiqlevel);
50
51
qemu_set_irq(cs->parent_virq, irqlevel);
51
+ /*
52
- qemu_set_irq(cs->maintenance_irq, maintlevel);
52
+ * Kernels before v3.17 don't populate the image_size field, and
53
+ qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
53
+ * raw images have no header. For those our best guess at the size
54
+ * is the size of the Image file itself.
55
+ */
56
+ if (kernel_size == 0) {
57
+ kernel_size = size;
58
+ }
59
+
60
*entry = mem_base + kernel_load_offset;
61
rom_add_blob_fixed_as(filename, buffer, size, *entry, as);
62
63
g_free(buffer);
64
65
- return size;
66
+ return kernel_size;
67
}
54
}
68
55
69
static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
56
static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
57
@@ -XXX,XX +XXX,XX @@ void gicv3_init_cpuif(GICv3State *s)
58
&& cpu->gic_num_lrs) {
59
int j;
60
61
- cs->maintenance_irq = cpu->gicv3_maintenance_interrupt;
62
-
63
cs->num_list_regs = cpu->gic_num_lrs;
64
cs->vpribits = cpu->gic_vpribits;
65
cs->vprebits = cpu->gic_vprebits;
70
--
66
--
71
2.20.1
67
2.20.1
72
68
73
69
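The loader change relies on the arm64 Image header layout, where text_offset and image_size are little-endian 64-bit fields at byte offsets 8 and 16. A standalone sketch of that parse with the same zero-means-unknown fallback (illustrative only, not QEMU's load_aarch64_image()):

#include <stdint.h>
#include <stdio.h>

/* Little-endian u64 load that doesn't care about host endianness. */
static uint64_t le64(const uint8_t *p)
{
    uint64_t v = 0;

    for (int i = 7; i >= 0; i--) {
        v = (v << 8) | p[i];
    }
    return v;
}

/*
 * Extract text_offset and the kernel's own idea of its size from an
 * arm64 Image header (fields at byte offsets 8 and 16).  image_size is
 * zero for kernels older than v3.17 and for raw images, in which case
 * the caller falls back to the file size.
 */
static uint64_t image_kernel_size(const uint8_t *buf, uint64_t file_size,
                                  uint64_t *text_offset)
{
    uint64_t image_size = le64(buf + 16);

    *text_offset = le64(buf + 8);
    return image_size ? image_size : file_size;
}

int main(void)
{
    uint8_t hdr[64] = { 0 };
    uint64_t text_offset;

    hdr[19] = 0x02;   /* image_size = 0x02000000, stored little-endian at offset 16 */

    printf("kernel size: 0x%llx\n",
           (unsigned long long)image_kernel_size(hdr, 0x123456, &text_offset));
    return 0;
}

Using the reported image_size rather than the file size is what lets the boot code keep the initrd and DTB clear of the kernel's BSS.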
1
Create "vfp" and "dsp" properties on the armv7m container object
1
The kerneldoc script currently emits Sphinx markup for a macro with
2
which will be forwarded to its CPU object, so that SoCs can
2
arguments that uses the c:function directive. This is correct for
3
configure whether the CPU has these features.
3
Sphinx versions earlier than Sphinx 3, where c:macro doesn't allow
4
documentation of macros with arguments and c:function is not picky
5
about the syntax of what it is passed. However, in Sphinx 3 the
6
c:macro directive was enhanced to support macros with arguments,
7
and c:function was made more picky about what syntax it accepted.
8
9
When kerneldoc is told that it needs to produce output for Sphinx
10
3 or later, make it emit c:function only for functions and c:macro
11
for macros with arguments. We assume that anything with a return
12
type is a function and anything without is a macro.
13
14
This fixes the Sphinx error:
15
16
/home/petmay01/linaro/qemu-from-laptop/qemu/docs/../include/qom/object.h:155:Error in declarator
17
If declarator-id with parameters (e.g., 'void f(int arg)'):
18
Invalid C declaration: Expected identifier in nested name. [error at 25]
19
DECLARE_INSTANCE_CHECKER ( InstanceType, OBJ_NAME, TYPENAME)
20
-------------------------^
21
If parenthesis in noptr-declarator (e.g., 'void (*f(int arg))(double)'):
22
Error in declarator or parameters
23
Invalid C declaration: Expecting "(" in parameters. [error at 39]
24
DECLARE_INSTANCE_CHECKER ( InstanceType, OBJ_NAME, TYPENAME)
25
---------------------------------------^
4
26
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
28
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
29
Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Message-id: 20190517174046.11146-4-peter.maydell@linaro.org
30
Message-id: 20201030174700.7204-2-peter.maydell@linaro.org
9
---
31
---
10
include/hw/arm/armv7m.h | 4 ++++
32
scripts/kernel-doc | 18 +++++++++++++++++-
11
hw/arm/armv7m.c | 18 ++++++++++++++++++
33
1 file changed, 17 insertions(+), 1 deletion(-)
12
2 files changed, 22 insertions(+)
13
34
14
diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h
35
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
15
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100755
16
--- a/include/hw/arm/armv7m.h
37
--- a/scripts/kernel-doc
17
+++ b/include/hw/arm/armv7m.h
38
+++ b/scripts/kernel-doc
18
@@ -XXX,XX +XXX,XX @@ typedef struct {
39
@@ -XXX,XX +XXX,XX @@ sub output_function_rst(%) {
19
* devices will be automatically layered on top of this view.)
40
    output_highlight_rst($args{'purpose'});
20
* + Property "idau": IDAU interface (forwarded to CPU object)
41
    $start = "\n\n**Syntax**\n\n ``";
21
* + Property "init-svtor": secure VTOR reset value (forwarded to CPU object)
42
} else {
22
+ * + Property "vfp": enable VFP (forwarded to CPU object)
43
-    print ".. c:function:: ";
23
+ * + Property "dsp": enable DSP (forwarded to CPU object)
44
+ if ((split(/\./, $sphinx_version))[0] >= 3) {
24
* + Property "enable-bitband": expose bitbanded IO
45
+ # Sphinx 3 and later distinguish macros and functions and
25
*/
46
+ # complain if you use c:function with something that's not
26
typedef struct ARMv7MState {
47
+ # syntactically valid as a function declaration.
27
@@ -XXX,XX +XXX,XX @@ typedef struct ARMv7MState {
48
+ # We assume that anything with a return type is a function
28
uint32_t init_svtor;
49
+ # and anything without is a macro.
29
bool enable_bitband;
50
+ if ($args{'functiontype'} ne "") {
30
bool start_powered_off;
51
+ print ".. c:function:: ";
31
+ bool vfp;
52
+ } else {
32
+ bool dsp;
53
+ print ".. c:macro:: ";
33
} ARMv7MState;
54
+ }
34
55
+ } else {
35
#endif
56
+ # Older Sphinx don't support documenting macros that take
36
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
57
+ # arguments with c:macro, and don't complain about the use
37
index XXXXXXX..XXXXXXX 100644
58
+ # of c:function for this.
38
--- a/hw/arm/armv7m.c
59
+ print ".. c:function:: ";
39
+++ b/hw/arm/armv7m.c
60
+ }
40
@@ -XXX,XX +XXX,XX @@ static void armv7m_realize(DeviceState *dev, Error **errp)
41
return;
42
}
43
}
61
}
44
+ if (object_property_find(OBJECT(s->cpu), "vfp", NULL)) {
62
if ($args{'functiontype'} ne "") {
45
+ object_property_set_bool(OBJECT(s->cpu), s->vfp,
63
    $start .= $args{'functiontype'} . " " . $args{'function'} . " (";
46
+ "vfp", &err);
47
+ if (err != NULL) {
48
+ error_propagate(errp, err);
49
+ return;
50
+ }
51
+ }
52
+ if (object_property_find(OBJECT(s->cpu), "dsp", NULL)) {
53
+ object_property_set_bool(OBJECT(s->cpu), s->dsp,
54
+ "dsp", &err);
55
+ if (err != NULL) {
56
+ error_propagate(errp, err);
57
+ return;
58
+ }
59
+ }
60
61
/*
62
* Tell the CPU where the NVIC is; it will fail realize if it doesn't
63
@@ -XXX,XX +XXX,XX @@ static Property armv7m_properties[] = {
64
DEFINE_PROP_BOOL("enable-bitband", ARMv7MState, enable_bitband, false),
65
DEFINE_PROP_BOOL("start-powered-off", ARMv7MState, start_powered_off,
66
false),
67
+ DEFINE_PROP_BOOL("vfp", ARMv7MState, vfp, true),
68
+ DEFINE_PROP_BOOL("dsp", ARMv7MState, dsp, true),
69
DEFINE_PROP_END_OF_LIST(),
70
};
71
72
--
64
--
73
2.20.1
65
2.20.1
74
66
75
67
1
Stop using cpu_F0s for NEON_2RM_VRECPE_F and NEON_2RM_VRSQRTE_F.
1
Sphinx 3.2 is pickier than earlier versions about the option:: markup,
2
and complains about our usage in qemu-option-trace.rst:
3
4
../../docs/qemu-option-trace.rst.inc:4:Malformed option description
5
'[enable=]PATTERN', should look like "opt", "-opt args", "--opt args",
6
"/opt args" or "+opt args"
7
8
In this file, we're really trying to document the different parts of
9
the top-level --trace option, which qemu-nbd.rst and qemu-img.rst
10
have already introduced with an option:: markup. So it's not right
11
to use option:: here anyway. Switch to a different markup
12
(definition lists) which gives about the same formatted output.
13
14
(Unlike option::, this markup doesn't produce index entries; but
15
at the moment we don't do anything much with indexes anyway, and
16
in any case I think it doesn't make much sense to have individual
17
index entries for the sub-parts of the --trace option.)
2
18
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
20
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
21
Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
6
Message-id: 20190613163917.28589-8-peter.maydell@linaro.org
22
Message-id: 20201030174700.7204-3-peter.maydell@linaro.org
7
---
23
---
8
target/arm/translate.c | 6 +++---
24
docs/qemu-option-trace.rst.inc | 6 +++---
9
1 file changed, 3 insertions(+), 3 deletions(-)
25
1 file changed, 3 insertions(+), 3 deletions(-)
10
26
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
27
diff --git a/docs/qemu-option-trace.rst.inc b/docs/qemu-option-trace.rst.inc
12
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
29
--- a/docs/qemu-option-trace.rst.inc
14
+++ b/target/arm/translate.c
30
+++ b/docs/qemu-option-trace.rst.inc
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
31
@@ -XXX,XX +XXX,XX @@
16
* what we are asking here is "does the code for this case in
32
17
* the Neon for-each-pass loop use cpu_F0s?".
33
Specify tracing options.
18
*/
34
19
- return op >= NEON_2RM_VRECPE_F;
35
-.. option:: [enable=]PATTERN
20
+ return op >= NEON_2RM_VCVT_FS;
36
+``[enable=]PATTERN``
21
}
37
22
38
Immediately enable events matching *PATTERN*
23
static bool neon_2rm_is_v8_op(int op)
39
(either event name or a globbing pattern). This option is only
24
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
40
@@ -XXX,XX +XXX,XX @@ Specify tracing options.
25
case NEON_2RM_VRECPE_F:
41
26
{
42
Use :option:`-trace help` to print a list of names of trace points.
27
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
43
28
- gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
44
-.. option:: events=FILE
29
+ gen_helper_recpe_f32(tmp, tmp, fpstatus);
45
+``events=FILE``
30
tcg_temp_free_ptr(fpstatus);
46
31
break;
47
Immediately enable events listed in *FILE*.
32
}
48
The file must contain one event name (as listed in the ``trace-events-all``
33
case NEON_2RM_VRSQRTE_F:
49
@@ -XXX,XX +XXX,XX @@ Specify tracing options.
34
{
50
available if QEMU has been compiled with the ``simple``, ``log`` or
35
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
51
``ftrace`` tracing backend.
36
- gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
52
37
+ gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
53
-.. option:: file=FILE
38
tcg_temp_free_ptr(fpstatus);
54
+``file=FILE``
39
break;
55
40
}
56
Log output traces to *FILE*.
57
This option is only available if QEMU has been compiled with
41
--
58
--
42
2.20.1
59
2.20.1
43
60
44
61
1
The GIC ID registers cover an area 0x30 bytes in size
1
The randomness tests in the NPCM7xx RNG test fail intermittently
2
(12 registers, 4 bytes each). We were incorrectly decoding
2
but fairly frequently. On my machine running the test in a loop:
3
only the first 0x20 bytes.
3
while QTEST_QEMU_BINARY=./qemu-system-aarch64 ./tests/qtest/npcm7xx_rng-test; do true; done
4
5
will fail in less than a minute with an error like:
6
ERROR:../../tests/qtest/npcm7xx_rng-test.c:256:test_first_byte_runs:
7
assertion failed (calc_runs_p(buf.l, sizeof(buf) * BITS_PER_BYTE) > 0.01): (0.00286205989 > 0.01)
8
9
(Failures have been observed on all 4 of the randomness tests,
10
not just first_byte_runs.)
11
12
It's not clear why these tests are failing like this, but intermittent
13
failures make CI and merge testing awkward, so disable running them
14
unless a developer specifically sets QEMU_TEST_FLAKY_RNG_TESTS when
15
running the test suite, until we work out the cause.
4
16
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
18
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Message-id: 20190524124248.28394-2-peter.maydell@linaro.org
19
Message-id: 20201102152454.8287-1-peter.maydell@linaro.org
20
Reviewed-by: Havard Skinnemoen <hskinnemoen@google.com>
8
---
21
---
9
hw/intc/arm_gicv3_dist.c | 4 ++--
22
tests/qtest/npcm7xx_rng-test.c | 14 ++++++++++----
10
hw/intc/arm_gicv3_redist.c | 4 ++--
23
1 file changed, 10 insertions(+), 4 deletions(-)
11
2 files changed, 4 insertions(+), 4 deletions(-)
12
24
13
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
25
diff --git a/tests/qtest/npcm7xx_rng-test.c b/tests/qtest/npcm7xx_rng-test.c
14
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/intc/arm_gicv3_dist.c
27
--- a/tests/qtest/npcm7xx_rng-test.c
16
+++ b/hw/intc/arm_gicv3_dist.c
28
+++ b/tests/qtest/npcm7xx_rng-test.c
17
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
29
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
18
}
30
19
return MEMTX_OK;
31
qtest_add_func("npcm7xx_rng/enable_disable", test_enable_disable);
20
}
32
qtest_add_func("npcm7xx_rng/rosel", test_rosel);
21
- case GICD_IDREGS ... GICD_IDREGS + 0x1f:
33
- qtest_add_func("npcm7xx_rng/continuous/monobit", test_continuous_monobit);
22
+ case GICD_IDREGS ... GICD_IDREGS + 0x2f:
34
- qtest_add_func("npcm7xx_rng/continuous/runs", test_continuous_runs);
23
/* ID registers */
35
- qtest_add_func("npcm7xx_rng/first_byte/monobit", test_first_byte_monobit);
24
*data = gicv3_idreg(offset - GICD_IDREGS);
36
- qtest_add_func("npcm7xx_rng/first_byte/runs", test_first_byte_runs);
25
return MEMTX_OK;
37
+ /*
26
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset,
38
+ * These tests fail intermittently; only run them on explicit
27
gicd_write_irouter(s, attrs, irq, r);
39
+ * request until we figure out why.
28
return MEMTX_OK;
40
+ */
29
}
41
+ if (getenv("QEMU_TEST_FLAKY_RNG_TESTS")) {
30
- case GICD_IDREGS ... GICD_IDREGS + 0x1f:
42
+ qtest_add_func("npcm7xx_rng/continuous/monobit", test_continuous_monobit);
31
+ case GICD_IDREGS ... GICD_IDREGS + 0x2f:
43
+ qtest_add_func("npcm7xx_rng/continuous/runs", test_continuous_runs);
32
case GICD_TYPER:
44
+ qtest_add_func("npcm7xx_rng/first_byte/monobit", test_first_byte_monobit);
33
case GICD_IIDR:
45
+ qtest_add_func("npcm7xx_rng/first_byte/runs", test_first_byte_runs);
34
/* RO registers, ignore the write */
46
+ }
35
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
47
36
index XXXXXXX..XXXXXXX 100644
48
qtest_start("-machine npcm750-evb");
37
--- a/hw/intc/arm_gicv3_redist.c
49
ret = g_test_run();
38
+++ b/hw/intc/arm_gicv3_redist.c
39
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
40
}
41
*data = cs->gicr_nsacr;
42
return MEMTX_OK;
43
- case GICR_IDREGS ... GICR_IDREGS + 0x1f:
44
+ case GICR_IDREGS ... GICR_IDREGS + 0x2f:
45
*data = gicv3_idreg(offset - GICR_IDREGS);
46
return MEMTX_OK;
47
default:
48
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
49
return MEMTX_OK;
50
case GICR_IIDR:
51
case GICR_TYPER:
52
- case GICR_IDREGS ... GICR_IDREGS + 0x1f:
53
+ case GICR_IDREGS ... GICR_IDREGS + 0x2f:
54
/* RO registers, ignore the write */
55
qemu_log_mask(LOG_GUEST_ERROR,
56
"%s: invalid guest write to RO register at offset "
57
--
50
--
58
2.20.1
51
2.20.1
59
52
60
53
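The disabled checks look like the usual SP 800-22 style statistical tests with a pass criterion of p > 0.01; by construction even an ideal random source fails such a test roughly 1% of the time, which is part of what makes them awkward as always-on CI gates. A standalone sketch of the simplest of them, the monobit (frequency) test (illustrative, not the qtest code; link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Frequency (monobit) test: count +1 for each set bit and -1 for each
 * clear bit, normalise by sqrt(number of bits), and convert to a p-value
 * with the complementary error function.  p < 0.01 => "not random".
 */
static double monobit_p(const uint8_t *buf, size_t len)
{
    long sum = 0;

    for (size_t i = 0; i < len; i++) {
        for (int b = 0; b < 8; b++) {
            sum += (buf[i] >> b & 1) ? 1 : -1;
        }
    }

    double s_obs = fabs((double)sum) / sqrt((double)len * 8);
    return erfc(s_obs / sqrt(2.0));
}

int main(void)
{
    uint8_t buf[256];

    srand(42);
    for (size_t i = 0; i < sizeof(buf); i++) {
        buf[i] = rand() & 0xff;
    }
    printf("p = %f (fails if below 0.01)\n", monobit_p(buf, sizeof(buf)));
    return 0;
}

With the patch applied the qtest checks can still be exercised on demand, e.g. QEMU_TEST_FLAKY_RNG_TESTS=1 QTEST_QEMU_BINARY=./qemu-system-aarch64 ./tests/qtest/npcm7xx_rng-test.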