The following changes since commit c52d69e7dbaaed0ffdef8125e79218672c30161d:

  Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20211027' into staging (2021-10-27 11:45:18 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211027

for you to fetch changes up to 820c025f0dcacf2f3c12735b1f162893fbfa7bc6:

  tcg/optimize: Propagate sign info for shifting (2021-10-27 17:11:23 -0700)

----------------------------------------------------------------
Improvements to qemu/int128
Fixes for 128/64 division.
Cleanup tcg/optimize.c
Optimize redundant sign extensions

----------------------------------------------------------------
Frédéric Pétrot (1):
      qemu/int128: Add int128_{not,xor}

Luis Pires (4):
      host-utils: move checks out of divu128/divs128
      host-utils: move udiv_qrnnd() to host-utils
      host-utils: add 128-bit quotient support to divu128/divs128
      host-utils: add unit tests for divu128/divs128

Richard Henderson (51):
      tcg/optimize: Rename "mask" to "z_mask"
      tcg/optimize: Split out OptContext
      tcg/optimize: Remove do_default label
      tcg/optimize: Change tcg_opt_gen_{mov,movi} interface
      tcg/optimize: Move prev_mb into OptContext
      tcg/optimize: Split out init_arguments
      tcg/optimize: Split out copy_propagate
      tcg/optimize: Split out fold_call
      tcg/optimize: Drop nb_oargs, nb_iargs locals
      tcg/optimize: Change fail return for do_constant_folding_cond*
      tcg/optimize: Return true from tcg_opt_gen_{mov,movi}
      tcg/optimize: Split out finish_folding
      tcg/optimize: Use a boolean to avoid a mass of continues
      tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}
      tcg/optimize: Split out fold_const{1,2}
      tcg/optimize: Split out fold_setcond2
      tcg/optimize: Split out fold_brcond2
      tcg/optimize: Split out fold_brcond
      tcg/optimize: Split out fold_setcond
      tcg/optimize: Split out fold_mulu2_i32
      tcg/optimize: Split out fold_addsub2_i32
      tcg/optimize: Split out fold_movcond
      tcg/optimize: Split out fold_extract2
      tcg/optimize: Split out fold_extract, fold_sextract
      tcg/optimize: Split out fold_deposit
      tcg/optimize: Split out fold_count_zeros
      tcg/optimize: Split out fold_bswap
      tcg/optimize: Split out fold_dup, fold_dup2
      tcg/optimize: Split out fold_mov
      tcg/optimize: Split out fold_xx_to_i
      tcg/optimize: Split out fold_xx_to_x
      tcg/optimize: Split out fold_xi_to_i
      tcg/optimize: Add type to OptContext
      tcg/optimize: Split out fold_to_not
      tcg/optimize: Split out fold_sub_to_neg
      tcg/optimize: Split out fold_xi_to_x
      tcg/optimize: Split out fold_ix_to_i
      tcg/optimize: Split out fold_masks
      tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
      tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops
      tcg/optimize: Sink commutative operand swapping into fold functions
      tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values
      tcg/optimize: Use fold_xx_to_i for orc
      tcg/optimize: Use fold_xi_to_x for mul
      tcg/optimize: Use fold_xi_to_x for div
      tcg/optimize: Use fold_xx_to_i for rem
      tcg/optimize: Optimize sign extensions
      tcg/optimize: Propagate sign info for logical operations
      tcg/optimize: Propagate sign info for setcond
      tcg/optimize: Propagate sign info for bit counting
      tcg/optimize: Propagate sign info for shifting

 include/fpu/softfloat-macros.h |   82 --
 include/hw/clock.h             |    5 +-
 include/qemu/host-utils.h      |  121 +-
 include/qemu/int128.h          |   20 +
 target/ppc/int_helper.c        |   23 +-
 tcg/optimize.c                 | 2644 ++++++++++++++++++++++++----------------
 tests/unit/test-div128.c       |  197 +++
 util/host-utils.c              |  147 ++-
 tests/unit/meson.build         |    1 +
 9 files changed, 2053 insertions(+), 1187 deletions(-)
 create mode 100644 tests/unit/test-div128.c
From: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>

Addition of not and xor on 128-bit integers.

Signed-off-by: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
Co-authored-by: Fabien Portas <fabien.portas@grenoble-inp.org>
Message-Id: <20211025122818.168890-3-frederic.petrot@univ-grenoble-alpes.fr>
[rth: Split out logical operations.]
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/int128.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
     return a;
 }
 
+static inline Int128 int128_not(Int128 a)
+{
+    return ~a;
+}
+
 static inline Int128 int128_and(Int128 a, Int128 b)
 {
     return a & b;
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
     return a | b;
 }
 
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+    return a ^ b;
+}
+
 static inline Int128 int128_rshift(Int128 a, int n)
 {
     return a >> n;
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
     return int128_make128(a, (a < 0) ? -1 : 0);
 }
 
+static inline Int128 int128_not(Int128 a)
+{
+    return int128_make128(~a.lo, ~a.hi);
+}
+
 static inline Int128 int128_and(Int128 a, Int128 b)
 {
     return int128_make128(a.lo & b.lo, a.hi & b.hi);
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
     return int128_make128(a.lo | b.lo, a.hi | b.hi);
 }
 
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+    return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
+}
+
 static inline Int128 int128_rshift(Int128 a, int n)
 {
     int64_t h;
-- 
2.25.1
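
A quick illustration of the two new helpers (a sketch, not part of the patch;
the int128_toggle() wrapper name is made up for illustration). It works the
same with or without CONFIG_INT128:

    #include "qemu/int128.h"

    /* Hypothetical helper: flip the bits of @value selected by @mask,
     * using the int128_xor() added above. */
    static inline Int128 int128_toggle(Int128 value, Int128 mask)
    {
        return int128_xor(value, mask);
    }

    /* int128_not(a) behaves like xoring with an all-ones value:
     * int128_not(a) == int128_xor(a, int128_make128(-1, -1)). */
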
New patch
1
1
From: Luis Pires <luis.pires@eldorado.org.br>
2
3
In preparation for changing the divu128/divs128 implementations
4
to allow for quotients larger than 64 bits, move the div-by-zero
5
and overflow checks to the callers.
6
7
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-Id: <20211025191154.350831-2-luis.pires@eldorado.org.br>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
include/hw/clock.h | 5 +++--
13
include/qemu/host-utils.h | 34 ++++++++++++---------------------
14
target/ppc/int_helper.c | 14 +++++++++-----
15
util/host-utils.c | 40 ++++++++++++++++++---------------------
16
4 files changed, 42 insertions(+), 51 deletions(-)
17
18
diff --git a/include/hw/clock.h b/include/hw/clock.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/include/hw/clock.h
21
+++ b/include/hw/clock.h
22
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
23
return 0;
24
}
25
/*
26
- * Ignore divu128() return value as we've caught div-by-zero and don't
27
- * need different behaviour for overflow.
28
+ * BUG: when CONFIG_INT128 is not defined, the current implementation of
29
+ * divu128 does not return a valid truncated quotient, so the result will
30
+ * be wrong.
31
*/
32
divu128(&lo, &hi, clk->period);
33
return lo;
34
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/include/qemu/host-utils.h
37
+++ b/include/qemu/host-utils.h
38
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
39
return (__int128_t)a * b / c;
40
}
41
42
-static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
43
+static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
44
{
45
- if (divisor == 0) {
46
- return 1;
47
- } else {
48
- __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
49
- __uint128_t result = dividend / divisor;
50
- *plow = result;
51
- *phigh = dividend % divisor;
52
- return result > UINT64_MAX;
53
- }
54
+ __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
55
+ __uint128_t result = dividend / divisor;
56
+ *plow = result;
57
+ *phigh = dividend % divisor;
58
}
59
60
-static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
61
+static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
62
{
63
- if (divisor == 0) {
64
- return 1;
65
- } else {
66
- __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
67
- __int128_t result = dividend / divisor;
68
- *plow = result;
69
- *phigh = dividend % divisor;
70
- return result != *plow;
71
- }
72
+ __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
73
+ __int128_t result = dividend / divisor;
74
+ *plow = result;
75
+ *phigh = dividend % divisor;
76
}
77
#else
78
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
79
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
80
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
81
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
82
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
83
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
84
85
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
86
{
87
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/target/ppc/int_helper.c
90
+++ b/target/ppc/int_helper.c
91
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
92
uint64_t rt = 0;
93
int overflow = 0;
94
95
- overflow = divu128(&rt, &ra, rb);
96
-
97
- if (unlikely(overflow)) {
98
+ if (unlikely(rb == 0 || ra >= rb)) {
99
+ overflow = 1;
100
rt = 0; /* Undefined */
101
+ } else {
102
+ divu128(&rt, &ra, rb);
103
}
104
105
if (oe) {
106
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
107
int64_t rt = 0;
108
int64_t ra = (int64_t)rau;
109
int64_t rb = (int64_t)rbu;
110
- int overflow = divs128(&rt, &ra, rb);
111
+ int overflow = 0;
112
113
- if (unlikely(overflow)) {
114
+ if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
115
+ overflow = 1;
116
rt = 0; /* Undefined */
117
+ } else {
118
+ divs128(&rt, &ra, rb);
119
}
120
121
if (oe) {
122
diff --git a/util/host-utils.c b/util/host-utils.c
123
index XXXXXXX..XXXXXXX 100644
124
--- a/util/host-utils.c
125
+++ b/util/host-utils.c
126
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
127
*phigh = rh;
128
}
129
130
-/* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */
131
-/* quotient exceeds 64 bits). Otherwise returns quotient via plow and */
132
-/* remainder via phigh. */
133
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
134
+/*
135
+ * Unsigned 128-by-64 division. Returns quotient via plow and
136
+ * remainder via phigh.
137
+ * The result must fit in 64 bits (plow) - otherwise, the result
138
+ * is undefined.
139
+ * This function will cause a division by zero if passed a zero divisor.
140
+ */
141
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
142
{
143
uint64_t dhi = *phigh;
144
uint64_t dlo = *plow;
145
unsigned i;
146
uint64_t carry = 0;
147
148
- if (divisor == 0) {
149
- return 1;
150
- } else if (dhi == 0) {
151
+ if (divisor == 0 || dhi == 0) {
152
*plow = dlo / divisor;
153
*phigh = dlo % divisor;
154
- return 0;
155
- } else if (dhi >= divisor) {
156
- return 1;
157
} else {
158
159
for (i = 0; i < 64; i++) {
160
@@ -XXX,XX +XXX,XX @@ int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
161
162
*plow = dlo;
163
*phigh = dhi;
164
- return 0;
165
}
166
}
167
168
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
169
+/*
170
+ * Signed 128-by-64 division. Returns quotient via plow and
171
+ * remainder via phigh.
172
+ * The result must fit in 64 bits (plow) - otherwise, the result
173
+ * is undefined.
174
+ * This function will cause a division by zero if passed a zero divisor.
175
+ */
176
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
177
{
178
int sgn_dvdnd = *phigh < 0;
179
int sgn_divsr = divisor < 0;
180
- int overflow = 0;
181
182
if (sgn_dvdnd) {
183
*plow = ~(*plow);
184
@@ -XXX,XX +XXX,XX @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
185
divisor = 0 - divisor;
186
}
187
188
- overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
189
+ divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
190
191
if (sgn_dvdnd ^ sgn_divsr) {
192
*plow = 0 - *plow;
193
}
194
-
195
- if (!overflow) {
196
- if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) {
197
- overflow = 1;
198
- }
199
- }
200
-
201
- return overflow;
202
}
203
#endif
204
205
--
206
2.25.1
207
208
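
Since the checks now live with the callers, code that previously relied on
divu128()'s return value needs an explicit guard of its own. A caller-side
sketch (the checked_divu128() name is hypothetical), mirroring what
helper_divdeu() does in the patch above:

    #include "qemu/osdep.h"
    #include "qemu/host-utils.h"

    /* Illustration only: reproduce the old "report overflow" behaviour on
     * top of the new void divu128(). */
    static bool checked_divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
    {
        if (divisor == 0 || *phigh >= divisor) {
            return false;  /* division by zero, or quotient does not fit in 64 bits */
        }
        divu128(plow, phigh, divisor);  /* quotient in *plow, remainder in *phigh */
        return true;
    }
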
New patch
1
1
From: Luis Pires <luis.pires@eldorado.org.br>
2
3
Move udiv_qrnnd() from include/fpu/softfloat-macros.h to host-utils,
4
so it can be reused by divu128().
5
6
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-Id: <20211025191154.350831-3-luis.pires@eldorado.org.br>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
include/fpu/softfloat-macros.h | 82 ----------------------------------
12
include/qemu/host-utils.h | 81 +++++++++++++++++++++++++++++++++
13
2 files changed, 81 insertions(+), 82 deletions(-)
14
15
diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/fpu/softfloat-macros.h
18
+++ b/include/fpu/softfloat-macros.h
19
@@ -XXX,XX +XXX,XX @@
20
* so some portions are provided under:
21
* the SoftFloat-2a license
22
* the BSD license
23
- * GPL-v2-or-later
24
*
25
* Any future contributions to this file after December 1st 2014 will be
26
* taken to be licensed under the Softfloat-2a license unless specifically
27
@@ -XXX,XX +XXX,XX @@ this code that are retained.
28
* THE POSSIBILITY OF SUCH DAMAGE.
29
*/
30
31
-/* Portions of this work are licensed under the terms of the GNU GPL,
32
- * version 2 or later. See the COPYING file in the top-level directory.
33
- */
34
-
35
#ifndef FPU_SOFTFLOAT_MACROS_H
36
#define FPU_SOFTFLOAT_MACROS_H
37
38
@@ -XXX,XX +XXX,XX @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
39
40
}
41
42
-/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
43
- * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
44
- *
45
- * Licensed under the GPLv2/LGPLv3
46
- */
47
-static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
48
- uint64_t n0, uint64_t d)
49
-{
50
-#if defined(__x86_64__)
51
- uint64_t q;
52
- asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
53
- return q;
54
-#elif defined(__s390x__) && !defined(__clang__)
55
- /* Need to use a TImode type to get an even register pair for DLGR. */
56
- unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
57
- asm("dlgr %0, %1" : "+r"(n) : "r"(d));
58
- *r = n >> 64;
59
- return n;
60
-#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
61
- /* From Power ISA 2.06, programming note for divdeu. */
62
- uint64_t q1, q2, Q, r1, r2, R;
63
- asm("divdeu %0,%2,%4; divdu %1,%3,%4"
64
- : "=&r"(q1), "=r"(q2)
65
- : "r"(n1), "r"(n0), "r"(d));
66
- r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
67
- r2 = n0 - (q2 * d);
68
- Q = q1 + q2;
69
- R = r1 + r2;
70
- if (R >= d || R < r2) { /* overflow implies R > d */
71
- Q += 1;
72
- R -= d;
73
- }
74
- *r = R;
75
- return Q;
76
-#else
77
- uint64_t d0, d1, q0, q1, r1, r0, m;
78
-
79
- d0 = (uint32_t)d;
80
- d1 = d >> 32;
81
-
82
- r1 = n1 % d1;
83
- q1 = n1 / d1;
84
- m = q1 * d0;
85
- r1 = (r1 << 32) | (n0 >> 32);
86
- if (r1 < m) {
87
- q1 -= 1;
88
- r1 += d;
89
- if (r1 >= d) {
90
- if (r1 < m) {
91
- q1 -= 1;
92
- r1 += d;
93
- }
94
- }
95
- }
96
- r1 -= m;
97
-
98
- r0 = r1 % d1;
99
- q0 = r1 / d1;
100
- m = q0 * d0;
101
- r0 = (r0 << 32) | (uint32_t)n0;
102
- if (r0 < m) {
103
- q0 -= 1;
104
- r0 += d;
105
- if (r0 >= d) {
106
- if (r0 < m) {
107
- q0 -= 1;
108
- r0 += d;
109
- }
110
- }
111
- }
112
- r0 -= m;
113
-
114
- *r = r0;
115
- return (q1 << 32) | q0;
116
-#endif
117
-}
118
-
119
/*----------------------------------------------------------------------------
120
| Returns an approximation to the square root of the 32-bit significand given
121
| by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
122
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
123
index XXXXXXX..XXXXXXX 100644
124
--- a/include/qemu/host-utils.h
125
+++ b/include/qemu/host-utils.h
126
@@ -XXX,XX +XXX,XX @@
127
* THE SOFTWARE.
128
*/
129
130
+/* Portions of this work are licensed under the terms of the GNU GPL,
131
+ * version 2 or later. See the COPYING file in the top-level directory.
132
+ */
133
+
134
#ifndef HOST_UTILS_H
135
#define HOST_UTILS_H
136
137
@@ -XXX,XX +XXX,XX @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
138
*/
139
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
140
141
+/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
142
+ * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
143
+ *
144
+ * Licensed under the GPLv2/LGPLv3
145
+ */
146
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
147
+ uint64_t n0, uint64_t d)
148
+{
149
+#if defined(__x86_64__)
150
+ uint64_t q;
151
+ asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
152
+ return q;
153
+#elif defined(__s390x__) && !defined(__clang__)
154
+ /* Need to use a TImode type to get an even register pair for DLGR. */
155
+ unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
156
+ asm("dlgr %0, %1" : "+r"(n) : "r"(d));
157
+ *r = n >> 64;
158
+ return n;
159
+#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
160
+ /* From Power ISA 2.06, programming note for divdeu. */
161
+ uint64_t q1, q2, Q, r1, r2, R;
162
+ asm("divdeu %0,%2,%4; divdu %1,%3,%4"
163
+ : "=&r"(q1), "=r"(q2)
164
+ : "r"(n1), "r"(n0), "r"(d));
165
+ r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
166
+ r2 = n0 - (q2 * d);
167
+ Q = q1 + q2;
168
+ R = r1 + r2;
169
+ if (R >= d || R < r2) { /* overflow implies R > d */
170
+ Q += 1;
171
+ R -= d;
172
+ }
173
+ *r = R;
174
+ return Q;
175
+#else
176
+ uint64_t d0, d1, q0, q1, r1, r0, m;
177
+
178
+ d0 = (uint32_t)d;
179
+ d1 = d >> 32;
180
+
181
+ r1 = n1 % d1;
182
+ q1 = n1 / d1;
183
+ m = q1 * d0;
184
+ r1 = (r1 << 32) | (n0 >> 32);
185
+ if (r1 < m) {
186
+ q1 -= 1;
187
+ r1 += d;
188
+ if (r1 >= d) {
189
+ if (r1 < m) {
190
+ q1 -= 1;
191
+ r1 += d;
192
+ }
193
+ }
194
+ }
195
+ r1 -= m;
196
+
197
+ r0 = r1 % d1;
198
+ q0 = r1 / d1;
199
+ m = q0 * d0;
200
+ r0 = (r0 << 32) | (uint32_t)n0;
201
+ if (r0 < m) {
202
+ q0 -= 1;
203
+ r0 += d;
204
+ if (r0 >= d) {
205
+ if (r0 < m) {
206
+ q0 -= 1;
207
+ r0 += d;
208
+ }
209
+ }
210
+ }
211
+ r0 -= m;
212
+
213
+ *r = r0;
214
+ return (q1 << 32) | q0;
215
+#endif
216
+}
217
+
218
#endif
219
--
220
2.25.1
221
222
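
For reference, udiv_qrnnd() divides the 128-bit numerator (n1:n0) by a 64-bit
divisor and requires the quotient to fit in 64 bits, i.e. n1 < d. A minimal
caller sketch (the div128by64() wrapper is hypothetical, shown only to make
the precondition explicit):

    #include "qemu/osdep.h"
    #include "qemu/host-utils.h"

    /* Illustration only: divide ((n1 << 64) | n0) by d, assuming n1 < d. */
    static uint64_t div128by64(uint64_t n1, uint64_t n0, uint64_t d, uint64_t *rem)
    {
        g_assert(n1 < d);               /* otherwise the quotient overflows 64 bits */
        return udiv_qrnnd(rem, n1, n0, d);
    }
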
From: Luis Pires <luis.pires@eldorado.org.br>

These will be used to implement new decimal floating point
instructions from Power ISA 3.1.

The remainder is now returned directly by divu128/divs128,
freeing up phigh to receive the high 64 bits of the quotient.

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-4-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/clock.h        |   6 +-
 include/qemu/host-utils.h |  20 ++++--
 target/ppc/int_helper.c   |   9 +--
 util/host-utils.c         | 133 +++++++++++++++++++++++++-------------
 4 files changed, 108 insertions(+), 60 deletions(-)

diff --git a/include/hw/clock.h b/include/hw/clock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
     if (clk->period == 0) {
         return 0;
     }
-    /*
-     * BUG: when CONFIG_INT128 is not defined, the current implementation of
-     * divu128 does not return a valid truncated quotient, so the result will
-     * be wrong.
-     */
+
     divu128(&lo, &hi, clk->period);
     return lo;
 }
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     return (__int128_t)a * b / c;
 }
 
-static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
+                               uint64_t divisor)
 {
     __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
     __uint128_t result = dividend / divisor;
+
     *plow = result;
-    *phigh = dividend % divisor;
+    *phigh = result >> 64;
+    return dividend % divisor;
 }
 
-static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
+                              int64_t divisor)
 {
-    __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
     __int128_t result = dividend / divisor;
+
     *plow = result;
-    *phigh = dividend % divisor;
+    *phigh = result >> 64;
+    return dividend % divisor;
 }
 #else
 void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
 void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
 
 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
 
 uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
 {
-    int64_t rt = 0;
+    uint64_t rt = 0;
     int64_t ra = (int64_t)rau;
     int64_t rb = (int64_t)rbu;
     int overflow = 0;
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
     int cr;
     uint64_t lo_value;
     uint64_t hi_value;
+    uint64_t rem;
     ppc_avr_t ret = { .u64 = { 0, 0 } };
 
     if (b->VsrSD(0) < 0) {
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
          * In that case, we leave r unchanged.
          */
     } else {
-        divu128(&lo_value, &hi_value, 1000000000000000ULL);
+        rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);
 
-        for (i = 1; i < 16; hi_value /= 10, i++) {
-            bcd_put_digit(&ret, hi_value % 10, i);
+        for (i = 1; i < 16; rem /= 10, i++) {
+            bcd_put_digit(&ret, rem % 10, i);
         }
 
         for (; i < 32; lo_value /= 10, i++) {
diff --git a/util/host-utils.c b/util/host-utils.c
index XXXXXXX..XXXXXXX 100644
--- a/util/host-utils.c
+++ b/util/host-utils.c
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
 }
 
 /*
- * Unsigned 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Unsigned 128-by-64 division.
+ * Returns the remainder.
+ * Returns quotient via plow and phigh.
+ * Also returns the remainder via the function return value.
  */
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
     uint64_t dhi = *phigh;
     uint64_t dlo = *plow;
-    unsigned i;
-    uint64_t carry = 0;
+    uint64_t rem, dhighest;
+    int sh;
 
     if (divisor == 0 || dhi == 0) {
         *plow  = dlo / divisor;
-        *phigh = dlo % divisor;
+        *phigh = 0;
+        return dlo % divisor;
     } else {
+        sh = clz64(divisor);
 
-        for (i = 0; i < 64; i++) {
-            carry = dhi >> 63;
-            dhi = (dhi << 1) | (dlo >> 63);
-            if (carry || (dhi >= divisor)) {
-                dhi -= divisor;
-                carry = 1;
-            } else {
-                carry = 0;
+        if (dhi < divisor) {
+            if (sh != 0) {
+                /* normalize the divisor, shifting the dividend accordingly */
+                divisor <<= sh;
+                dhi = (dhi << sh) | (dlo >> (64 - sh));
+                dlo <<= sh;
             }
-            dlo = (dlo << 1) | carry;
+
+            *phigh = 0;
+            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
+        } else {
+            if (sh != 0) {
+                /* normalize the divisor, shifting the dividend accordingly */
+                divisor <<= sh;
+                dhighest = dhi >> (64 - sh);
+                dhi = (dhi << sh) | (dlo >> (64 - sh));
+                dlo <<= sh;
+
+                *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
+            } else {
+                /**
+                 * dhi >= divisor
+                 * Since the MSB of divisor is set (sh == 0),
+                 * (dhi - divisor) < divisor
+                 *
+                 * Thus, the high part of the quotient is 1, and we can
+                 * calculate the low part with a single call to udiv_qrnnd
+                 * after subtracting divisor from dhi
+                 */
+                dhi -= divisor;
+                *phigh = 1;
+            }
+
+            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
         }
 
-        *plow = dlo;
-        *phigh = dhi;
+        /*
+         * since the dividend/divisor might have been normalized,
+         * the remainder might also have to be shifted back
+         */
+        return rem >> sh;
     }
 }
 
 /*
- * Signed 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Signed 128-by-64 division.
+ * Returns quotient via plow and phigh.
+ * Also returns the remainder via the function return value.
  */
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
 {
-    int sgn_dvdnd = *phigh < 0;
-    int sgn_divsr = divisor < 0;
+    bool neg_quotient = false, neg_remainder = false;
+    uint64_t unsig_hi = *phigh, unsig_lo = *plow;
+    uint64_t rem;
 
-    if (sgn_dvdnd) {
-        *plow = ~(*plow);
-        *phigh = ~(*phigh);
-        if (*plow == (int64_t)-1) {
+    if (*phigh < 0) {
+        neg_quotient = !neg_quotient;
+        neg_remainder = !neg_remainder;
+
+        if (unsig_lo == 0) {
+            unsig_hi = -unsig_hi;
+        } else {
+            unsig_hi = ~unsig_hi;
+            unsig_lo = -unsig_lo;
+        }
+    }
+
+    if (divisor < 0) {
+        neg_quotient = !neg_quotient;
+
+        divisor = -divisor;
+    }
+
+    rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);
+
+    if (neg_quotient) {
+        if (unsig_lo == 0) {
+            *phigh = -unsig_hi;
             *plow = 0;
-            (*phigh)++;
-        } else {
-            (*plow)++;
-        }
+        } else {
+            *phigh = ~unsig_hi;
+            *plow = -unsig_lo;
+        }
+    } else {
+        *phigh = unsig_hi;
+        *plow = unsig_lo;
     }
 
-    if (sgn_divsr) {
-        divisor = 0 - divisor;
-    }
-
-    divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
-
-    if (sgn_dvdnd ^ sgn_divsr) {
-        *plow = 0 - *plow;
+    if (neg_remainder) {
+        return -rem;
+    } else {
+        return rem;
     }
 }
 #endif
-- 
2.25.1
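
With this change divu128() returns the full 128-bit quotient through
plow/phigh and hands the remainder back as its return value. A small usage
sketch (not part of the patch; the function name is made up), valid for both
the CONFIG_INT128 and the generic implementation:

    #include "qemu/osdep.h"
    #include "qemu/host-utils.h"

    /* Illustration only: (2^64 + 10) / 3 = 0x5555555555555558, remainder 2. */
    static void divu128_example(void)
    {
        uint64_t lo = 10, hi = 1;               /* dividend = (hi << 64) | lo */
        uint64_t rem = divu128(&lo, &hi, 3);

        g_assert(hi == 0 && lo == 0x5555555555555558ULL);
        g_assert(rem == 2);
    }
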
1
Move this code from tcg/tcg.h to its own header.
1
From: Luis Pires <luis.pires@eldorado.org.br>
2
2
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20211025191154.350831-5-luis.pires@eldorado.org.br>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
7
---
6
include/exec/memopidx.h | 55 +++++++++++++++++++++++++++++++++++++++++
8
tests/unit/test-div128.c | 197 +++++++++++++++++++++++++++++++++++++++
7
include/tcg/tcg.h | 39 +----------------------------
9
tests/unit/meson.build | 1 +
8
2 files changed, 56 insertions(+), 38 deletions(-)
10
2 files changed, 198 insertions(+)
9
create mode 100644 include/exec/memopidx.h
11
create mode 100644 tests/unit/test-div128.c
10
12
11
diff --git a/include/exec/memopidx.h b/include/exec/memopidx.h
13
diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c
12
new file mode 100644
14
new file mode 100644
13
index XXXXXXX..XXXXXXX
15
index XXXXXXX..XXXXXXX
14
--- /dev/null
16
--- /dev/null
15
+++ b/include/exec/memopidx.h
17
+++ b/tests/unit/test-div128.c
16
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@
17
+/*
19
+/*
18
+ * Combine the MemOp and mmu_idx parameters into a single value.
20
+ * Test 128-bit division functions
19
+ *
21
+ *
20
+ * Authors:
22
+ * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
21
+ * Richard Henderson <rth@twiddle.net>
23
+ *
22
+ *
24
+ * This library is free software; you can redistribute it and/or
23
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
25
+ * modify it under the terms of the GNU Lesser General Public
24
+ * See the COPYING file in the top-level directory.
26
+ * License as published by the Free Software Foundation; either
27
+ * version 2.1 of the License, or (at your option) any later version.
28
+ *
29
+ * This library is distributed in the hope that it will be useful,
30
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
31
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
32
+ * Lesser General Public License for more details.
33
+ *
34
+ * You should have received a copy of the GNU Lesser General Public
35
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25
+ */
36
+ */
26
+
37
+
27
+#ifndef EXEC_MEMOPIDX_H
38
+#include "qemu/osdep.h"
28
+#define EXEC_MEMOPIDX_H 1
39
+#include "qemu/host-utils.h"
29
+
40
+
30
+#include "exec/memop.h"
41
+typedef struct {
31
+
42
+ uint64_t high;
32
+typedef uint32_t MemOpIdx;
43
+ uint64_t low;
33
+
44
+ uint64_t rhigh;
34
+/**
45
+ uint64_t rlow;
35
+ * make_memop_idx
46
+ uint64_t divisor;
36
+ * @op: memory operation
47
+ uint64_t remainder;
37
+ * @idx: mmu index
48
+} test_data_unsigned;
38
+ *
49
+
39
+ * Encode these values into a single parameter.
50
+typedef struct {
40
+ */
51
+ int64_t high;
41
+static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
52
+ uint64_t low;
53
+ int64_t rhigh;
54
+ uint64_t rlow;
55
+ int64_t divisor;
56
+ int64_t remainder;
57
+} test_data_signed;
58
+
59
+static const test_data_unsigned test_table_unsigned[] = {
60
+ /* Dividend fits in 64 bits */
61
+ { 0x0000000000000000ULL, 0x0000000000000000ULL,
62
+ 0x0000000000000000ULL, 0x0000000000000000ULL,
63
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
64
+ { 0x0000000000000000ULL, 0x0000000000000001ULL,
65
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
66
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
67
+ { 0x0000000000000000ULL, 0x0000000000000003ULL,
68
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
69
+ 0x0000000000000002ULL, 0x0000000000000001ULL},
70
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
71
+ 0x0000000000000000ULL, 0x8000000000000000ULL,
72
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
73
+ { 0x0000000000000000ULL, 0xa000000000000000ULL,
74
+ 0x0000000000000000ULL, 0x0000000000000002ULL,
75
+ 0x4000000000000000ULL, 0x2000000000000000ULL},
76
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
77
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
78
+ 0x8000000000000000ULL, 0x0000000000000000ULL},
79
+
80
+ /* Dividend > 64 bits, with MSB 0 */
81
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
82
+ 0x123456789abcdefeULL, 0xefedcba987654321ULL,
83
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
84
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
85
+ 0x0000000000000001ULL, 0x000000000000000dULL,
86
+ 0x123456789abcdefeULL, 0x03456789abcdf03bULL},
87
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
88
+ 0x0123456789abcdefULL, 0xeefedcba98765432ULL,
89
+ 0x0000000000000010ULL, 0x0000000000000001ULL},
90
+
91
+ /* Dividend > 64 bits, with MSB 1 */
92
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
93
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
94
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
95
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
96
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
97
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
98
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
99
+ 0x0feeddccbbaa9988ULL, 0x7766554433221100ULL,
100
+ 0x0000000000000010ULL, 0x000000000000000fULL},
101
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
102
+ 0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL,
103
+ 0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL},
104
+
105
+ /**
106
+ * Divisor == 64 bits, with MSB 1
107
+ * and high 64 bits of dividend >= divisor
108
+ * (for testing normalization)
109
+ */
110
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
111
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
112
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
113
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
114
+ 0x0000000000000001ULL, 0xfddbb9977553310aULL,
115
+ 0x8000000000000001ULL, 0x78899aabbccddf05ULL},
116
+
117
+ /* Dividend > 64 bits, divisor almost as big */
118
+ { 0x0000000000000001ULL, 0x23456789abcdef01ULL,
119
+ 0x0000000000000000ULL, 0x000000000000000fULL,
120
+ 0x123456789abcdefeULL, 0x123456789abcde1fULL},
121
+};
122
+
123
+static const test_data_signed test_table_signed[] = {
124
+ /* Positive dividend, positive/negative divisors */
125
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
126
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
127
+ 0x0000000000000001LL, 0x0000000000000000LL},
128
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
129
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
130
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
131
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
132
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
133
+ 0x0000000000000002LL, 0x0000000000000000LL},
134
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
135
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
136
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
137
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
+ 0x0000000000000008LL, 0x0000000000000006LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
+ 0xfffffffffffffff8LL, 0x0000000000000006LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x000000000000550dULL,
+ 0x0000000000000237LL, 0x0000000000000183LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
+ 0xfffffffffffffdc9LL, 0x0000000000000183LL},
+
+ /* Negative dividend, positive/negative divisors */
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000001LL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
+ 0x0000000000000002LL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
+ 0x0000000000000008LL, 0xfffffffffffffffaLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
+ 0xfffffffffffffff8LL, 0xfffffffffffffffaLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
+ 0x0000000000000237LL, 0xfffffffffffffe7dLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x000000000000550dULL,
+ 0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL},
+};
+
+static void test_divu128(void)
+{
+ int i;
+ uint64_t rem;
+ test_data_unsigned tmp;
+
+ for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) {
+ tmp = test_table_unsigned[i];
+
+ rem = divu128(&tmp.low, &tmp.high, tmp.divisor);
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
+ g_assert_cmpuint(rem, ==, tmp.remainder);
+ }
+}
+
+static void test_divs128(void)
+{
+ int i;
+ int64_t rem;
+ test_data_signed tmp;
+
+ for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) {
+ tmp = test_table_signed[i];
+
+ rem = divs128(&tmp.low, &tmp.high, tmp.divisor);
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
+ g_assert_cmpuint(rem, ==, tmp.remainder);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/host-utils/test_divu128", test_divu128);
+ g_test_add_func("/host-utils/test_divs128", test_divs128);
+ return g_test_run();
+}
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -XXX,XX +XXX,XX @@ tests = {
# all code tested by test-x86-cpuid is inside topology.h
'test-x86-cpuid': [],
'test-cutils': [],
+ 'test-div128': [],
'test-shift128': [],
'test-mul64': [],
# all code tested by test-int128 is inside int128.h
--
2.25.1

+{
+#ifdef CONFIG_DEBUG_TCG
+ assert(idx <= 15);
+#endif
+ return (op << 4) | idx;
+}
+
+/**
+ * get_memop
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory operation from the combined value.
+ */
+static inline MemOp get_memop(MemOpIdx oi)
+{
+ return oi >> 4;
+}
+
+/**
+ * get_mmuidx
+ * @oi: combined op/idx parameter
+ *
+ * Extract the mmu index from the combined value.
+ */
+static inline unsigned get_mmuidx(MemOpIdx oi)
+{
+ return oi & 15;
+}
+
+#endif
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@

#include "cpu.h"
#include "exec/memop.h"
+#include "exec/memopidx.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
@@ -XXX,XX +XXX,XX @@ static inline size_t tcg_current_code_size(TCGContext *s)
return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

-/* Combine the MemOp and mmu_idx parameters into a single value. */
-typedef uint32_t MemOpIdx;
-
-/**
- * make_memop_idx
- * @op: memory operation
- * @idx: mmu index
- *
- * Encode these values into a single parameter.
- */
-static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
-{
- tcg_debug_assert(idx <= 15);
- return (op << 4) | idx;
-}
-
-/**
- * get_memop
- * @oi: combined op/idx parameter
- *
- * Extract the memory operation from the combined value.
- */
-static inline MemOp get_memop(MemOpIdx oi)
-{
- return oi >> 4;
-}
-
-/**
- * get_mmuidx
- * @oi: combined op/idx parameter
- *
- * Extract the mmu index from the combined value.
- */
-static inline unsigned get_mmuidx(MemOpIdx oi)
-{
- return oi & 15;
-}
-
/**
* tcg_qemu_tb_exec:
* @env: pointer to CPUArchState for the CPU
--
2.25.1

diff view generated by jsdifflib
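For readers who want to try the new interface by hand, here is a minimal standalone sketch (not part of the series) of calling divu128() the way the test above does. The expected values are the first vector from the table (12345678 / 8), and the include path for the prototype is assumed to be qemu/host-utils.h.

#include <assert.h>
#include <stdint.h>
#include "qemu/host-utils.h"    /* assumed header for divu128() */

static void divu128_example(void)
{
    uint64_t low = 0xbc614eULL;     /* 12345678 */
    uint64_t high = 0;
    uint64_t rem = divu128(&low, &high, 8);

    /* The quotient comes back in place, the remainder as the return value. */
    assert(high == 0);
    assert(low == 0x178c29ULL);     /* 1543209 */
    assert(rem == 6);
}
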
New patch

Prepare for tracking different masks by renaming this one.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 142 +++++++++++++++++++++++++------------------------
1 file changed, 72 insertions(+), 70 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
16
TCGTemp *prev_copy;
17
TCGTemp *next_copy;
18
uint64_t val;
19
- uint64_t mask;
20
+ uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
21
} TempOptInfo;
22
23
static inline TempOptInfo *ts_info(TCGTemp *ts)
24
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
25
ti->next_copy = ts;
26
ti->prev_copy = ts;
27
ti->is_const = false;
28
- ti->mask = -1;
29
+ ti->z_mask = -1;
30
}
31
32
static void reset_temp(TCGArg arg)
33
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
34
if (ts->kind == TEMP_CONST) {
35
ti->is_const = true;
36
ti->val = ts->val;
37
- ti->mask = ts->val;
38
+ ti->z_mask = ts->val;
39
if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
40
/* High bits of a 32-bit quantity are garbage. */
41
- ti->mask |= ~0xffffffffull;
42
+ ti->z_mask |= ~0xffffffffull;
43
}
44
} else {
45
ti->is_const = false;
46
- ti->mask = -1;
47
+ ti->z_mask = -1;
48
}
49
}
50
51
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
52
const TCGOpDef *def;
53
TempOptInfo *di;
54
TempOptInfo *si;
55
- uint64_t mask;
56
+ uint64_t z_mask;
57
TCGOpcode new_op;
58
59
if (ts_are_copies(dst_ts, src_ts)) {
60
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
61
op->args[0] = dst;
62
op->args[1] = src;
63
64
- mask = si->mask;
65
+ z_mask = si->z_mask;
66
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
67
/* High bits of the destination are now garbage. */
68
- mask |= ~0xffffffffull;
69
+ z_mask |= ~0xffffffffull;
70
}
71
- di->mask = mask;
72
+ di->z_mask = z_mask;
73
74
if (src_ts->type == dst_ts->type) {
75
TempOptInfo *ni = ts_info(si->next_copy);
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
77
}
78
79
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
80
- uint64_t mask, partmask, affected, tmp;
81
+ uint64_t z_mask, partmask, affected, tmp;
82
int nb_oargs, nb_iargs;
83
TCGOpcode opc = op->opc;
84
const TCGOpDef *def = &tcg_op_defs[opc];
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
87
/* Simplify using known-zero bits. Currently only ops with a single
88
output argument is supported. */
89
- mask = -1;
90
+ z_mask = -1;
91
affected = -1;
92
switch (opc) {
93
CASE_OP_32_64(ext8s):
94
- if ((arg_info(op->args[1])->mask & 0x80) != 0) {
95
+ if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
96
break;
97
}
98
QEMU_FALLTHROUGH;
99
CASE_OP_32_64(ext8u):
100
- mask = 0xff;
101
+ z_mask = 0xff;
102
goto and_const;
103
CASE_OP_32_64(ext16s):
104
- if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
105
+ if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
106
break;
107
}
108
QEMU_FALLTHROUGH;
109
CASE_OP_32_64(ext16u):
110
- mask = 0xffff;
111
+ z_mask = 0xffff;
112
goto and_const;
113
case INDEX_op_ext32s_i64:
114
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
115
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
116
break;
117
}
118
QEMU_FALLTHROUGH;
119
case INDEX_op_ext32u_i64:
120
- mask = 0xffffffffU;
121
+ z_mask = 0xffffffffU;
122
goto and_const;
123
124
CASE_OP_32_64(and):
125
- mask = arg_info(op->args[2])->mask;
126
+ z_mask = arg_info(op->args[2])->z_mask;
127
if (arg_is_const(op->args[2])) {
128
and_const:
129
- affected = arg_info(op->args[1])->mask & ~mask;
130
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
131
}
132
- mask = arg_info(op->args[1])->mask & mask;
133
+ z_mask = arg_info(op->args[1])->z_mask & z_mask;
134
break;
135
136
case INDEX_op_ext_i32_i64:
137
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
138
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
139
break;
140
}
141
QEMU_FALLTHROUGH;
142
case INDEX_op_extu_i32_i64:
143
/* We do not compute affected as it is a size changing op. */
144
- mask = (uint32_t)arg_info(op->args[1])->mask;
145
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
146
break;
147
148
CASE_OP_32_64(andc):
149
/* Known-zeros does not imply known-ones. Therefore unless
150
op->args[2] is constant, we can't infer anything from it. */
151
if (arg_is_const(op->args[2])) {
152
- mask = ~arg_info(op->args[2])->mask;
153
+ z_mask = ~arg_info(op->args[2])->z_mask;
154
goto and_const;
155
}
156
/* But we certainly know nothing outside args[1] may be set. */
157
- mask = arg_info(op->args[1])->mask;
158
+ z_mask = arg_info(op->args[1])->z_mask;
159
break;
160
161
case INDEX_op_sar_i32:
162
if (arg_is_const(op->args[2])) {
163
tmp = arg_info(op->args[2])->val & 31;
164
- mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
165
+ z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
166
}
167
break;
168
case INDEX_op_sar_i64:
169
if (arg_is_const(op->args[2])) {
170
tmp = arg_info(op->args[2])->val & 63;
171
- mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
172
+ z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
173
}
174
break;
175
176
case INDEX_op_shr_i32:
177
if (arg_is_const(op->args[2])) {
178
tmp = arg_info(op->args[2])->val & 31;
179
- mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
180
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
181
}
182
break;
183
case INDEX_op_shr_i64:
184
if (arg_is_const(op->args[2])) {
185
tmp = arg_info(op->args[2])->val & 63;
186
- mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
187
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
188
}
189
break;
190
191
case INDEX_op_extrl_i64_i32:
192
- mask = (uint32_t)arg_info(op->args[1])->mask;
193
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
194
break;
195
case INDEX_op_extrh_i64_i32:
196
- mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
197
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
198
break;
199
200
CASE_OP_32_64(shl):
201
if (arg_is_const(op->args[2])) {
202
tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
203
- mask = arg_info(op->args[1])->mask << tmp;
204
+ z_mask = arg_info(op->args[1])->z_mask << tmp;
205
}
206
break;
207
208
CASE_OP_32_64(neg):
209
/* Set to 1 all bits to the left of the rightmost. */
210
- mask = -(arg_info(op->args[1])->mask
211
- & -arg_info(op->args[1])->mask);
212
+ z_mask = -(arg_info(op->args[1])->z_mask
213
+ & -arg_info(op->args[1])->z_mask);
214
break;
215
216
CASE_OP_32_64(deposit):
217
- mask = deposit64(arg_info(op->args[1])->mask,
218
- op->args[3], op->args[4],
219
- arg_info(op->args[2])->mask);
220
+ z_mask = deposit64(arg_info(op->args[1])->z_mask,
221
+ op->args[3], op->args[4],
222
+ arg_info(op->args[2])->z_mask);
223
break;
224
225
CASE_OP_32_64(extract):
226
- mask = extract64(arg_info(op->args[1])->mask,
227
- op->args[2], op->args[3]);
228
+ z_mask = extract64(arg_info(op->args[1])->z_mask,
229
+ op->args[2], op->args[3]);
230
if (op->args[2] == 0) {
231
- affected = arg_info(op->args[1])->mask & ~mask;
232
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
233
}
234
break;
235
CASE_OP_32_64(sextract):
236
- mask = sextract64(arg_info(op->args[1])->mask,
237
- op->args[2], op->args[3]);
238
- if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
239
- affected = arg_info(op->args[1])->mask & ~mask;
240
+ z_mask = sextract64(arg_info(op->args[1])->z_mask,
241
+ op->args[2], op->args[3]);
242
+ if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
243
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
244
}
245
break;
246
247
CASE_OP_32_64(or):
248
CASE_OP_32_64(xor):
249
- mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
250
+ z_mask = arg_info(op->args[1])->z_mask
251
+ | arg_info(op->args[2])->z_mask;
252
break;
253
254
case INDEX_op_clz_i32:
255
case INDEX_op_ctz_i32:
256
- mask = arg_info(op->args[2])->mask | 31;
257
+ z_mask = arg_info(op->args[2])->z_mask | 31;
258
break;
259
260
case INDEX_op_clz_i64:
261
case INDEX_op_ctz_i64:
262
- mask = arg_info(op->args[2])->mask | 63;
263
+ z_mask = arg_info(op->args[2])->z_mask | 63;
264
break;
265
266
case INDEX_op_ctpop_i32:
267
- mask = 32 | 31;
268
+ z_mask = 32 | 31;
269
break;
270
case INDEX_op_ctpop_i64:
271
- mask = 64 | 63;
272
+ z_mask = 64 | 63;
273
break;
274
275
CASE_OP_32_64(setcond):
276
case INDEX_op_setcond2_i32:
277
- mask = 1;
278
+ z_mask = 1;
279
break;
280
281
CASE_OP_32_64(movcond):
282
- mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
283
+ z_mask = arg_info(op->args[3])->z_mask
284
+ | arg_info(op->args[4])->z_mask;
285
break;
286
287
CASE_OP_32_64(ld8u):
288
- mask = 0xff;
289
+ z_mask = 0xff;
290
break;
291
CASE_OP_32_64(ld16u):
292
- mask = 0xffff;
293
+ z_mask = 0xffff;
294
break;
295
case INDEX_op_ld32u_i64:
296
- mask = 0xffffffffu;
297
+ z_mask = 0xffffffffu;
298
break;
299
300
CASE_OP_32_64(qemu_ld):
301
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
302
MemOpIdx oi = op->args[nb_oargs + nb_iargs];
303
MemOp mop = get_memop(oi);
304
if (!(mop & MO_SIGN)) {
305
- mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
306
+ z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
307
}
308
}
309
break;
310
311
CASE_OP_32_64(bswap16):
312
- mask = arg_info(op->args[1])->mask;
313
- if (mask <= 0xffff) {
314
+ z_mask = arg_info(op->args[1])->z_mask;
315
+ if (z_mask <= 0xffff) {
316
op->args[2] |= TCG_BSWAP_IZ;
317
}
318
- mask = bswap16(mask);
319
+ z_mask = bswap16(z_mask);
320
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
321
case TCG_BSWAP_OZ:
322
break;
323
case TCG_BSWAP_OS:
324
- mask = (int16_t)mask;
325
+ z_mask = (int16_t)z_mask;
326
break;
327
default: /* undefined high bits */
328
- mask |= MAKE_64BIT_MASK(16, 48);
329
+ z_mask |= MAKE_64BIT_MASK(16, 48);
330
break;
331
}
332
break;
333
334
case INDEX_op_bswap32_i64:
335
- mask = arg_info(op->args[1])->mask;
336
- if (mask <= 0xffffffffu) {
337
+ z_mask = arg_info(op->args[1])->z_mask;
338
+ if (z_mask <= 0xffffffffu) {
339
op->args[2] |= TCG_BSWAP_IZ;
340
}
341
- mask = bswap32(mask);
342
+ z_mask = bswap32(z_mask);
343
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
344
case TCG_BSWAP_OZ:
345
break;
346
case TCG_BSWAP_OS:
347
- mask = (int32_t)mask;
348
+ z_mask = (int32_t)z_mask;
349
break;
350
default: /* undefined high bits */
351
- mask |= MAKE_64BIT_MASK(32, 32);
352
+ z_mask |= MAKE_64BIT_MASK(32, 32);
353
break;
354
}
355
break;
356
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
357
/* 32-bit ops generate 32-bit results. For the result is zero test
358
below, we can ignore high bits, but for further optimizations we
359
need to record that the high bits contain garbage. */
360
- partmask = mask;
361
+ partmask = z_mask;
362
if (!(def->flags & TCG_OPF_64BIT)) {
363
- mask |= ~(tcg_target_ulong)0xffffffffu;
364
+ z_mask |= ~(tcg_target_ulong)0xffffffffu;
365
partmask &= 0xffffffffu;
366
affected &= 0xffffffffu;
367
}
368
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
369
vs the high word of the input. */
370
do_setcond_high:
371
reset_temp(op->args[0]);
372
- arg_info(op->args[0])->mask = 1;
373
+ arg_info(op->args[0])->z_mask = 1;
374
op->opc = INDEX_op_setcond_i32;
375
op->args[1] = op->args[2];
376
op->args[2] = op->args[4];
377
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
378
}
379
do_setcond_low:
380
reset_temp(op->args[0]);
381
- arg_info(op->args[0])->mask = 1;
382
+ arg_info(op->args[0])->z_mask = 1;
383
op->opc = INDEX_op_setcond_i32;
384
op->args[2] = op->args[3];
385
op->args[3] = op->args[5];
386
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
387
/* Default case: we know nothing about operation (or were unable
388
to compute the operation result) so no propagation is done.
389
We trash everything if the operation is the end of a basic
390
- block, otherwise we only trash the output args. "mask" is
391
+ block, otherwise we only trash the output args. "z_mask" is
392
the non-zero bits mask for the first output arg. */
393
if (def->flags & TCG_OPF_BB_END) {
394
memset(&temps_used, 0, sizeof(temps_used));
395
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
396
/* Save the corresponding known-zero bits mask for the
397
first output argument (only one supported so far). */
398
if (i == 0) {
399
- arg_info(op->args[i])->mask = mask;
400
+ arg_info(op->args[i])->z_mask = z_mask;
401
}
402
}
403
}
404
--
405
2.25.1
406
407
diff view generated by jsdifflib
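As a reading aid, and not part of the patch itself: the renamed field obeys a simple rule, namely that a bit clear in z_mask is known to be zero in the value. The two derived quantities used in the and/and_const cases above reduce to the following self-contained helpers.

#include <stdint.h>

/* Known-zero bits of (x & c) for constant c: a result bit can only be set
 * if it may be set in both the input and the constant. */
static uint64_t and_z_mask(uint64_t z_in, uint64_t c)
{
    return z_in & c;
}

/* "affected": input bits the AND can actually clear; when this is zero the
 * operation changes nothing and can be folded to a plain mov. */
static uint64_t and_affected(uint64_t z_in, uint64_t c)
{
    return z_in & ~c;
}
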
New patch

Provide what will become a larger context for splitting
the very large tcg_optimize function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 77 ++++++++++++++++++++++++++------------------------
1 file changed, 40 insertions(+), 37 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
17
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
18
} TempOptInfo;
19
20
+typedef struct OptContext {
21
+ TCGTempSet temps_used;
22
+} OptContext;
23
+
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
25
{
26
return ts->state_ptr;
27
@@ -XXX,XX +XXX,XX @@ static void reset_temp(TCGArg arg)
28
}
29
30
/* Initialize and activate a temporary. */
31
-static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
32
+static void init_ts_info(OptContext *ctx, TCGTemp *ts)
33
{
34
size_t idx = temp_idx(ts);
35
TempOptInfo *ti;
36
37
- if (test_bit(idx, temps_used->l)) {
38
+ if (test_bit(idx, ctx->temps_used.l)) {
39
return;
40
}
41
- set_bit(idx, temps_used->l);
42
+ set_bit(idx, ctx->temps_used.l);
43
44
ti = ts->state_ptr;
45
if (ti == NULL) {
46
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
47
}
48
}
49
50
-static void init_arg_info(TCGTempSet *temps_used, TCGArg arg)
51
+static void init_arg_info(OptContext *ctx, TCGArg arg)
52
{
53
- init_ts_info(temps_used, arg_temp(arg));
54
+ init_ts_info(ctx, arg_temp(arg));
55
}
56
57
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
58
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
59
}
60
}
61
62
-static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
63
+static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
64
TCGOp *op, TCGArg dst, uint64_t val)
65
{
66
const TCGOpDef *def = &tcg_op_defs[op->opc];
67
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
68
69
/* Convert movi to mov with constant temp. */
70
tv = tcg_constant_internal(type, val);
71
- init_ts_info(temps_used, tv);
72
+ init_ts_info(ctx, tv);
73
tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
74
}
75
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
77
{
78
int nb_temps, nb_globals, i;
79
TCGOp *op, *op_next, *prev_mb = NULL;
80
- TCGTempSet temps_used;
81
+ OptContext ctx = {};
82
83
/* Array VALS has an element for each temp.
84
If this temp holds a constant then its value is kept in VALS' element.
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
nb_temps = s->nb_temps;
87
nb_globals = s->nb_globals;
88
89
- memset(&temps_used, 0, sizeof(temps_used));
90
for (i = 0; i < nb_temps; ++i) {
91
s->temps[i].state_ptr = NULL;
92
}
93
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
94
for (i = 0; i < nb_oargs + nb_iargs; i++) {
95
TCGTemp *ts = arg_temp(op->args[i]);
96
if (ts) {
97
- init_ts_info(&temps_used, ts);
98
+ init_ts_info(&ctx, ts);
99
}
100
}
101
} else {
102
nb_oargs = def->nb_oargs;
103
nb_iargs = def->nb_iargs;
104
for (i = 0; i < nb_oargs + nb_iargs; i++) {
105
- init_arg_info(&temps_used, op->args[i]);
106
+ init_arg_info(&ctx, op->args[i]);
107
}
108
}
109
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
CASE_OP_32_64(rotr):
112
if (arg_is_const(op->args[1])
113
&& arg_info(op->args[1])->val == 0) {
114
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
115
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
116
continue;
117
}
118
break;
119
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
120
121
if (partmask == 0) {
122
tcg_debug_assert(nb_oargs == 1);
123
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
124
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
125
continue;
126
}
127
if (affected == 0) {
128
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
129
CASE_OP_32_64(mulsh):
130
if (arg_is_const(op->args[2])
131
&& arg_info(op->args[2])->val == 0) {
132
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
133
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
134
continue;
135
}
136
break;
137
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
138
CASE_OP_32_64_VEC(sub):
139
CASE_OP_32_64_VEC(xor):
140
if (args_are_copies(op->args[1], op->args[2])) {
141
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
142
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
143
continue;
144
}
145
break;
146
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
147
if (arg_is_const(op->args[1])) {
148
tmp = arg_info(op->args[1])->val;
149
tmp = dup_const(TCGOP_VECE(op), tmp);
150
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
151
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
152
break;
153
}
154
goto do_default;
155
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
156
case INDEX_op_dup2_vec:
157
assert(TCG_TARGET_REG_BITS == 32);
158
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
159
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0],
160
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0],
161
deposit64(arg_info(op->args[1])->val, 32, 32,
162
arg_info(op->args[2])->val));
163
break;
164
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
165
case INDEX_op_extrh_i64_i32:
166
if (arg_is_const(op->args[1])) {
167
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
168
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
169
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
170
break;
171
}
172
goto do_default;
173
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
174
if (arg_is_const(op->args[1])) {
175
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
176
op->args[2]);
177
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
178
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
179
break;
180
}
181
goto do_default;
182
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
183
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
184
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
185
arg_info(op->args[2])->val);
186
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
187
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
188
break;
189
}
190
goto do_default;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
TCGArg v = arg_info(op->args[1])->val;
193
if (v != 0) {
194
tmp = do_constant_folding(opc, v, 0);
195
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
196
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
} else {
198
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
199
}
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
tmp = deposit64(arg_info(op->args[1])->val,
202
op->args[3], op->args[4],
203
arg_info(op->args[2])->val);
204
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
205
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
206
break;
207
}
208
goto do_default;
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
if (arg_is_const(op->args[1])) {
211
tmp = extract64(arg_info(op->args[1])->val,
212
op->args[2], op->args[3]);
213
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
214
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
215
break;
216
}
217
goto do_default;
218
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
219
if (arg_is_const(op->args[1])) {
220
tmp = sextract64(arg_info(op->args[1])->val,
221
op->args[2], op->args[3]);
222
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
223
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
224
break;
225
}
226
goto do_default;
227
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
228
tmp = (int32_t)(((uint32_t)v1 >> shr) |
229
((uint32_t)v2 << (32 - shr)));
230
}
231
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
232
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
233
break;
234
}
235
goto do_default;
236
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
237
tmp = do_constant_folding_cond(opc, op->args[1],
238
op->args[2], op->args[3]);
239
if (tmp != 2) {
240
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
241
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
242
break;
243
}
244
goto do_default;
245
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
246
op->args[1], op->args[2]);
247
if (tmp != 2) {
248
if (tmp) {
249
- memset(&temps_used, 0, sizeof(temps_used));
250
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
251
op->opc = INDEX_op_br;
252
op->args[0] = op->args[3];
253
} else {
254
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
255
256
rl = op->args[0];
257
rh = op->args[1];
258
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a);
259
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32));
260
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
261
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
262
break;
263
}
264
goto do_default;
265
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
266
267
rl = op->args[0];
268
rh = op->args[1];
269
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r);
270
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32));
271
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
272
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
273
break;
274
}
275
goto do_default;
276
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
277
if (tmp != 2) {
278
if (tmp) {
279
do_brcond_true:
280
- memset(&temps_used, 0, sizeof(temps_used));
281
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
282
op->opc = INDEX_op_br;
283
op->args[0] = op->args[5];
284
} else {
285
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
286
/* Simplify LT/GE comparisons vs zero to a single compare
287
vs the high word of the input. */
288
do_brcond_high:
289
- memset(&temps_used, 0, sizeof(temps_used));
290
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
op->opc = INDEX_op_brcond_i32;
292
op->args[0] = op->args[1];
293
op->args[1] = op->args[3];
294
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
295
goto do_default;
296
}
297
do_brcond_low:
298
- memset(&temps_used, 0, sizeof(temps_used));
299
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
300
op->opc = INDEX_op_brcond_i32;
301
op->args[1] = op->args[2];
302
op->args[2] = op->args[4];
303
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
304
op->args[5]);
305
if (tmp != 2) {
306
do_setcond_const:
307
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
308
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
309
} else if ((op->args[5] == TCG_COND_LT
310
|| op->args[5] == TCG_COND_GE)
311
&& arg_is_const(op->args[3])
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
313
if (!(tcg_call_flags(op)
314
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
315
for (i = 0; i < nb_globals; i++) {
316
- if (test_bit(i, temps_used.l)) {
317
+ if (test_bit(i, ctx.temps_used.l)) {
318
reset_ts(&s->temps[i]);
319
}
320
}
321
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
322
block, otherwise we only trash the output args. "z_mask" is
323
the non-zero bits mask for the first output arg. */
324
if (def->flags & TCG_OPF_BB_END) {
325
- memset(&temps_used, 0, sizeof(temps_used));
326
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
327
} else {
328
do_reset_output:
329
for (i = 0; i < nb_oargs; i++) {
330
--
331
2.25.1
332
333
diff view generated by jsdifflib
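A self-contained analogue of the design choice in the patch above (illustrative only; none of these names are QEMU's): once per-pass state lives in one context struct, helpers split out of a large function need only the context and the item being processed, and new state can be added later without touching their signatures.

#include <stdbool.h>
#include <stdint.h>

typedef struct ExampleContext {
    uint64_t temps_used;    /* stands in for the TCGTempSet in OptContext */
    bool changed;
} ExampleContext;

/* Everything the helper needs arrives through the context (idx < 64 assumed). */
static void mark_temp_used(ExampleContext *ctx, unsigned idx)
{
    ctx->temps_used |= UINT64_C(1) << idx;
    ctx->changed = true;
}
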
New patch

Break the final cleanup clause out of the main switch
statement. When fully folding an opcode to mov/movi,
use "continue" to process the next opcode, else break
to fall into the final cleanup.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 190 ++++++++++++++++++++++++-------------------------
1 file changed, 94 insertions(+), 96 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
19
switch (opc) {
20
CASE_OP_32_64_VEC(mov):
21
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
22
- break;
23
+ continue;
24
25
case INDEX_op_dup_vec:
26
if (arg_is_const(op->args[1])) {
27
tmp = arg_info(op->args[1])->val;
28
tmp = dup_const(TCGOP_VECE(op), tmp);
29
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
30
- break;
31
+ continue;
32
}
33
- goto do_default;
34
+ break;
35
36
case INDEX_op_dup2_vec:
37
assert(TCG_TARGET_REG_BITS == 32);
38
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
tcg_opt_gen_movi(s, &ctx, op, op->args[0],
40
deposit64(arg_info(op->args[1])->val, 32, 32,
41
arg_info(op->args[2])->val));
42
- break;
43
+ continue;
44
} else if (args_are_copies(op->args[1], op->args[2])) {
45
op->opc = INDEX_op_dup_vec;
46
TCGOP_VECE(op) = MO_32;
47
nb_iargs = 1;
48
}
49
- goto do_default;
50
+ break;
51
52
CASE_OP_32_64(not):
53
CASE_OP_32_64(neg):
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
if (arg_is_const(op->args[1])) {
56
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
57
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
58
- break;
59
+ continue;
60
}
61
- goto do_default;
62
+ break;
63
64
CASE_OP_32_64(bswap16):
65
CASE_OP_32_64(bswap32):
66
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
67
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
68
op->args[2]);
69
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
70
- break;
71
+ continue;
72
}
73
- goto do_default;
74
+ break;
75
76
CASE_OP_32_64(add):
77
CASE_OP_32_64(sub):
78
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
79
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
80
arg_info(op->args[2])->val);
81
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
82
- break;
83
+ continue;
84
}
85
- goto do_default;
86
+ break;
87
88
CASE_OP_32_64(clz):
89
CASE_OP_32_64(ctz):
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
} else {
92
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
93
}
94
- break;
95
+ continue;
96
}
97
- goto do_default;
98
+ break;
99
100
CASE_OP_32_64(deposit):
101
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
102
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
103
op->args[3], op->args[4],
104
arg_info(op->args[2])->val);
105
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
106
- break;
107
+ continue;
108
}
109
- goto do_default;
110
+ break;
111
112
CASE_OP_32_64(extract):
113
if (arg_is_const(op->args[1])) {
114
tmp = extract64(arg_info(op->args[1])->val,
115
op->args[2], op->args[3]);
116
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
117
- break;
118
+ continue;
119
}
120
- goto do_default;
121
+ break;
122
123
CASE_OP_32_64(sextract):
124
if (arg_is_const(op->args[1])) {
125
tmp = sextract64(arg_info(op->args[1])->val,
126
op->args[2], op->args[3]);
127
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
128
- break;
129
+ continue;
130
}
131
- goto do_default;
132
+ break;
133
134
CASE_OP_32_64(extract2):
135
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
136
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
137
((uint32_t)v2 << (32 - shr)));
138
}
139
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
140
- break;
141
+ continue;
142
}
143
- goto do_default;
144
+ break;
145
146
CASE_OP_32_64(setcond):
147
tmp = do_constant_folding_cond(opc, op->args[1],
148
op->args[2], op->args[3]);
149
if (tmp != 2) {
150
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
151
- break;
152
+ continue;
153
}
154
- goto do_default;
155
+ break;
156
157
CASE_OP_32_64(brcond):
158
tmp = do_constant_folding_cond(opc, op->args[0],
159
op->args[1], op->args[2]);
160
- if (tmp != 2) {
161
- if (tmp) {
162
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
163
- op->opc = INDEX_op_br;
164
- op->args[0] = op->args[3];
165
- } else {
166
- tcg_op_remove(s, op);
167
- }
168
+ switch (tmp) {
169
+ case 0:
170
+ tcg_op_remove(s, op);
171
+ continue;
172
+ case 1:
173
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
174
+ op->opc = opc = INDEX_op_br;
175
+ op->args[0] = op->args[3];
176
break;
177
}
178
- goto do_default;
179
+ break;
180
181
CASE_OP_32_64(movcond):
182
tmp = do_constant_folding_cond(opc, op->args[1],
183
op->args[2], op->args[5]);
184
if (tmp != 2) {
185
tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
186
- break;
187
+ continue;
188
}
189
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
190
uint64_t tv = arg_info(op->args[3])->val;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
if (fv == 1 && tv == 0) {
193
cond = tcg_invert_cond(cond);
194
} else if (!(tv == 1 && fv == 0)) {
195
- goto do_default;
196
+ break;
197
}
198
op->args[3] = cond;
199
op->opc = opc = (opc == INDEX_op_movcond_i32
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
: INDEX_op_setcond_i64);
202
nb_iargs = 2;
203
}
204
- goto do_default;
205
+ break;
206
207
case INDEX_op_add2_i32:
208
case INDEX_op_sub2_i32:
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
rh = op->args[1];
211
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
212
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
213
- break;
214
+ continue;
215
}
216
- goto do_default;
217
+ break;
218
219
case INDEX_op_mulu2_i32:
220
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
221
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
222
rh = op->args[1];
223
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
224
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
225
- break;
226
+ continue;
227
}
228
- goto do_default;
229
+ break;
230
231
case INDEX_op_brcond2_i32:
232
tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
233
op->args[4]);
234
- if (tmp != 2) {
235
- if (tmp) {
236
- do_brcond_true:
237
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
238
- op->opc = INDEX_op_br;
239
- op->args[0] = op->args[5];
240
- } else {
241
+ if (tmp == 0) {
242
do_brcond_false:
243
- tcg_op_remove(s, op);
244
- }
245
- } else if ((op->args[4] == TCG_COND_LT
246
- || op->args[4] == TCG_COND_GE)
247
- && arg_is_const(op->args[2])
248
- && arg_info(op->args[2])->val == 0
249
- && arg_is_const(op->args[3])
250
- && arg_info(op->args[3])->val == 0) {
251
+ tcg_op_remove(s, op);
252
+ continue;
253
+ }
254
+ if (tmp == 1) {
255
+ do_brcond_true:
256
+ op->opc = opc = INDEX_op_br;
257
+ op->args[0] = op->args[5];
258
+ break;
259
+ }
260
+ if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
261
+ && arg_is_const(op->args[2])
262
+ && arg_info(op->args[2])->val == 0
263
+ && arg_is_const(op->args[3])
264
+ && arg_info(op->args[3])->val == 0) {
265
/* Simplify LT/GE comparisons vs zero to a single compare
266
vs the high word of the input. */
267
do_brcond_high:
268
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
269
- op->opc = INDEX_op_brcond_i32;
270
+ op->opc = opc = INDEX_op_brcond_i32;
271
op->args[0] = op->args[1];
272
op->args[1] = op->args[3];
273
op->args[2] = op->args[4];
274
op->args[3] = op->args[5];
275
- } else if (op->args[4] == TCG_COND_EQ) {
276
+ break;
277
+ }
278
+ if (op->args[4] == TCG_COND_EQ) {
279
/* Simplify EQ comparisons where one of the pairs
280
can be simplified. */
281
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
282
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
283
if (tmp == 0) {
284
goto do_brcond_false;
285
} else if (tmp != 1) {
286
- goto do_default;
287
+ break;
288
}
289
do_brcond_low:
290
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
292
op->args[1] = op->args[2];
293
op->args[2] = op->args[4];
294
op->args[3] = op->args[5];
295
- } else if (op->args[4] == TCG_COND_NE) {
296
+ break;
297
+ }
298
+ if (op->args[4] == TCG_COND_NE) {
299
/* Simplify NE comparisons where one of the pairs
300
can be simplified. */
301
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
302
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
303
} else if (tmp == 1) {
304
goto do_brcond_true;
305
}
306
- goto do_default;
307
- } else {
308
- goto do_default;
309
}
310
break;
311
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
313
if (tmp != 2) {
314
do_setcond_const:
315
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
316
- } else if ((op->args[5] == TCG_COND_LT
317
- || op->args[5] == TCG_COND_GE)
318
- && arg_is_const(op->args[3])
319
- && arg_info(op->args[3])->val == 0
320
- && arg_is_const(op->args[4])
321
- && arg_info(op->args[4])->val == 0) {
322
+ continue;
323
+ }
324
+ if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
325
+ && arg_is_const(op->args[3])
326
+ && arg_info(op->args[3])->val == 0
327
+ && arg_is_const(op->args[4])
328
+ && arg_info(op->args[4])->val == 0) {
329
/* Simplify LT/GE comparisons vs zero to a single compare
330
vs the high word of the input. */
331
do_setcond_high:
332
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
333
op->args[1] = op->args[2];
334
op->args[2] = op->args[4];
335
op->args[3] = op->args[5];
336
- } else if (op->args[5] == TCG_COND_EQ) {
337
+ break;
338
+ }
339
+ if (op->args[5] == TCG_COND_EQ) {
340
/* Simplify EQ comparisons where one of the pairs
341
can be simplified. */
342
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
343
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
344
if (tmp == 0) {
345
goto do_setcond_high;
346
} else if (tmp != 1) {
347
- goto do_default;
348
+ break;
349
}
350
do_setcond_low:
351
reset_temp(op->args[0]);
352
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
353
op->opc = INDEX_op_setcond_i32;
354
op->args[2] = op->args[3];
355
op->args[3] = op->args[5];
356
- } else if (op->args[5] == TCG_COND_NE) {
357
+ break;
358
+ }
359
+ if (op->args[5] == TCG_COND_NE) {
360
/* Simplify NE comparisons where one of the pairs
361
can be simplified. */
362
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
363
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
364
} else if (tmp == 1) {
365
goto do_setcond_const;
366
}
367
- goto do_default;
368
- } else {
369
- goto do_default;
370
}
371
break;
372
373
- case INDEX_op_call:
374
- if (!(tcg_call_flags(op)
375
+ default:
376
+ break;
377
+ }
378
+
379
+ /* Some of the folding above can change opc. */
380
+ opc = op->opc;
381
+ def = &tcg_op_defs[opc];
382
+ if (def->flags & TCG_OPF_BB_END) {
383
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
384
+ } else {
385
+ if (opc == INDEX_op_call &&
386
+ !(tcg_call_flags(op)
387
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
388
for (i = 0; i < nb_globals; i++) {
389
if (test_bit(i, ctx.temps_used.l)) {
390
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
391
}
392
}
393
}
394
- goto do_reset_output;
395
396
- default:
397
- do_default:
398
- /* Default case: we know nothing about operation (or were unable
399
- to compute the operation result) so no propagation is done.
400
- We trash everything if the operation is the end of a basic
401
- block, otherwise we only trash the output args. "z_mask" is
402
- the non-zero bits mask for the first output arg. */
403
- if (def->flags & TCG_OPF_BB_END) {
404
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
405
- } else {
406
- do_reset_output:
407
- for (i = 0; i < nb_oargs; i++) {
408
- reset_temp(op->args[i]);
409
- /* Save the corresponding known-zero bits mask for the
410
- first output argument (only one supported so far). */
411
- if (i == 0) {
412
- arg_info(op->args[i])->z_mask = z_mask;
413
- }
414
+ for (i = 0; i < nb_oargs; i++) {
415
+ reset_temp(op->args[i]);
416
+ /* Save the corresponding known-zero bits mask for the
417
+ first output argument (only one supported so far). */
418
+ if (i == 0) {
419
+ arg_info(op->args[i])->z_mask = z_mask;
420
}
421
}
422
- break;
423
}
424
425
/* Eliminate duplicate and redundant fence instructions. */
426
--
427
2.25.1
428
429
diff view generated by jsdifflib
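The control-flow idea of the patch above in a self-contained form (plain C, not QEMU code): cases that fully handle an item "continue" to the next iteration, while everything else breaks out of the switch and falls into the shared cleanup at the bottom of the loop.

#include <stdio.h>

int main(void)
{
    for (int i = 0; i < 3; i++) {
        switch (i) {
        case 1:
            printf("op %d fully folded, skip cleanup\n", i);
            continue;           /* straight on to the next op */
        default:
            break;              /* fall through to the shared cleanup */
        }
        printf("shared cleanup for op %d\n", i);
    }
    return 0;
}
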
New patch

Adjust the interface to take the OptContext parameter instead
of TCGContext or both.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 67 +++++++++++++++++++++++++-------------------------
1 file changed, 34 insertions(+), 33 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
16
} TempOptInfo;
17
18
typedef struct OptContext {
19
+ TCGContext *tcg;
20
TCGTempSet temps_used;
21
} OptContext;
22
23
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
24
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
25
}
26
27
-static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
28
+static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
{
30
TCGTemp *dst_ts = arg_temp(dst);
31
TCGTemp *src_ts = arg_temp(src);
32
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
33
TCGOpcode new_op;
34
35
if (ts_are_copies(dst_ts, src_ts)) {
36
- tcg_op_remove(s, op);
37
+ tcg_op_remove(ctx->tcg, op);
38
return;
39
}
40
41
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
42
}
43
}
44
45
-static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
46
- TCGOp *op, TCGArg dst, uint64_t val)
47
+static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
48
+ TCGArg dst, uint64_t val)
49
{
50
const TCGOpDef *def = &tcg_op_defs[op->opc];
51
TCGType type;
52
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
53
/* Convert movi to mov with constant temp. */
54
tv = tcg_constant_internal(type, val);
55
init_ts_info(ctx, tv);
56
- tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
57
+ tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
58
}
59
60
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
62
{
63
int nb_temps, nb_globals, i;
64
TCGOp *op, *op_next, *prev_mb = NULL;
65
- OptContext ctx = {};
66
+ OptContext ctx = { .tcg = s };
67
68
/* Array VALS has an element for each temp.
69
If this temp holds a constant then its value is kept in VALS' element.
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
CASE_OP_32_64(rotr):
72
if (arg_is_const(op->args[1])
73
&& arg_info(op->args[1])->val == 0) {
74
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
75
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
76
continue;
77
}
78
break;
79
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
80
if (!arg_is_const(op->args[1])
81
&& arg_is_const(op->args[2])
82
&& arg_info(op->args[2])->val == 0) {
83
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
84
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
85
continue;
86
}
87
break;
88
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
89
if (!arg_is_const(op->args[1])
90
&& arg_is_const(op->args[2])
91
&& arg_info(op->args[2])->val == -1) {
92
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
93
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
94
continue;
95
}
96
break;
97
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
98
99
if (partmask == 0) {
100
tcg_debug_assert(nb_oargs == 1);
101
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
102
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
103
continue;
104
}
105
if (affected == 0) {
106
tcg_debug_assert(nb_oargs == 1);
107
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
108
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
109
continue;
110
}
111
112
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
113
CASE_OP_32_64(mulsh):
114
if (arg_is_const(op->args[2])
115
&& arg_info(op->args[2])->val == 0) {
116
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
117
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
118
continue;
119
}
120
break;
121
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
122
CASE_OP_32_64_VEC(or):
123
CASE_OP_32_64_VEC(and):
124
if (args_are_copies(op->args[1], op->args[2])) {
125
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
126
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
127
continue;
128
}
129
break;
130
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
131
CASE_OP_32_64_VEC(sub):
132
CASE_OP_32_64_VEC(xor):
133
if (args_are_copies(op->args[1], op->args[2])) {
134
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
135
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
136
continue;
137
}
138
break;
139
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
140
allocator where needed and possible. Also detect copies. */
141
switch (opc) {
142
CASE_OP_32_64_VEC(mov):
143
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
144
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
145
continue;
146
147
case INDEX_op_dup_vec:
148
if (arg_is_const(op->args[1])) {
149
tmp = arg_info(op->args[1])->val;
150
tmp = dup_const(TCGOP_VECE(op), tmp);
151
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
152
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
153
continue;
154
}
155
break;
156
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
157
case INDEX_op_dup2_vec:
158
assert(TCG_TARGET_REG_BITS == 32);
159
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
160
- tcg_opt_gen_movi(s, &ctx, op, op->args[0],
161
+ tcg_opt_gen_movi(&ctx, op, op->args[0],
162
deposit64(arg_info(op->args[1])->val, 32, 32,
163
arg_info(op->args[2])->val));
164
continue;
165
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
166
case INDEX_op_extrh_i64_i32:
167
if (arg_is_const(op->args[1])) {
168
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
169
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
170
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
171
continue;
172
}
173
break;
174
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
175
if (arg_is_const(op->args[1])) {
176
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
177
op->args[2]);
178
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
179
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
180
continue;
181
}
182
break;
183
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
184
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
185
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
186
arg_info(op->args[2])->val);
187
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
188
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
189
continue;
190
}
191
break;
192
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
193
TCGArg v = arg_info(op->args[1])->val;
194
if (v != 0) {
195
tmp = do_constant_folding(opc, v, 0);
196
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
198
} else {
199
- tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
200
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
201
}
202
continue;
203
}
204
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
205
tmp = deposit64(arg_info(op->args[1])->val,
206
op->args[3], op->args[4],
207
arg_info(op->args[2])->val);
208
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
209
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
210
continue;
211
}
212
break;
213
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
214
if (arg_is_const(op->args[1])) {
215
tmp = extract64(arg_info(op->args[1])->val,
216
op->args[2], op->args[3]);
217
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
218
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
219
continue;
220
}
221
break;
222
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
223
if (arg_is_const(op->args[1])) {
224
tmp = sextract64(arg_info(op->args[1])->val,
225
op->args[2], op->args[3]);
226
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
227
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
228
continue;
229
}
230
break;
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
tmp = (int32_t)(((uint32_t)v1 >> shr) |
233
((uint32_t)v2 << (32 - shr)));
234
}
235
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
236
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
237
continue;
238
}
239
break;
240
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
241
tmp = do_constant_folding_cond(opc, op->args[1],
242
op->args[2], op->args[3]);
243
if (tmp != 2) {
244
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
245
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
246
continue;
247
}
248
break;
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
250
tmp = do_constant_folding_cond(opc, op->args[1],
251
op->args[2], op->args[5]);
252
if (tmp != 2) {
253
- tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
254
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
255
continue;
256
}
257
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
258
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
259
260
rl = op->args[0];
261
rh = op->args[1];
262
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
263
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
264
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
265
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
266
continue;
267
}
268
break;
269
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
270
271
rl = op->args[0];
272
rh = op->args[1];
273
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
274
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
275
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
276
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
277
continue;
278
}
279
break;
280
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
281
op->args[5]);
282
if (tmp != 2) {
283
do_setcond_const:
284
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
285
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
286
continue;
287
}
288
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
289
--
290
2.25.1
291
292
diff view generated by jsdifflib
New patch

This will expose the variable to subroutines that
will be broken out of tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
17
18
typedef struct OptContext {
19
TCGContext *tcg;
20
+ TCGOp *prev_mb;
21
TCGTempSet temps_used;
22
} OptContext;
23
24
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
25
void tcg_optimize(TCGContext *s)
26
{
27
int nb_temps, nb_globals, i;
28
- TCGOp *op, *op_next, *prev_mb = NULL;
29
+ TCGOp *op, *op_next;
30
OptContext ctx = { .tcg = s };
31
32
/* Array VALS has an element for each temp.
33
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
}
35
36
/* Eliminate duplicate and redundant fence instructions. */
37
- if (prev_mb) {
38
+ if (ctx.prev_mb) {
39
switch (opc) {
40
case INDEX_op_mb:
41
/* Merge two barriers of the same type into one,
42
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
43
* barrier. This is stricter than specified but for
44
* the purposes of TCG is better than not optimizing.
45
*/
46
- prev_mb->args[0] |= op->args[0];
47
+ ctx.prev_mb->args[0] |= op->args[0];
48
tcg_op_remove(s, op);
49
break;
50
51
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
52
case INDEX_op_qemu_st_i64:
53
case INDEX_op_call:
54
/* Opcodes that touch guest memory stop the optimization. */
55
- prev_mb = NULL;
56
+ ctx.prev_mb = NULL;
57
break;
58
}
59
} else if (opc == INDEX_op_mb) {
60
- prev_mb = op;
61
+ ctx.prev_mb = op;
62
}
63
}
64
}
65
--
66
2.25.1
67
68
diff view generated by jsdifflib
New patch

There was no real reason for calls to have separate code here.
Unify init for calls vs non-calls using the call path, which
handles TCG_CALL_DUMMY_ARG.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 25 +++++++++++--------------
1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
18
}
19
}
20
21
-static void init_arg_info(OptContext *ctx, TCGArg arg)
22
-{
23
- init_ts_info(ctx, arg_temp(arg));
24
-}
25
-
26
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
27
{
28
TCGTemp *i, *g, *l;
29
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
30
return false;
31
}
32
33
+static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
34
+{
35
+ for (int i = 0; i < nb_args; i++) {
36
+ TCGTemp *ts = arg_temp(op->args[i]);
37
+ if (ts) {
38
+ init_ts_info(ctx, ts);
39
+ }
40
+ }
41
+}
42
+
43
/* Propagate constants and copies, fold constant expressions. */
44
void tcg_optimize(TCGContext *s)
45
{
46
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
47
if (opc == INDEX_op_call) {
48
nb_oargs = TCGOP_CALLO(op);
49
nb_iargs = TCGOP_CALLI(op);
50
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
51
- TCGTemp *ts = arg_temp(op->args[i]);
52
- if (ts) {
53
- init_ts_info(&ctx, ts);
54
- }
55
- }
56
} else {
57
nb_oargs = def->nb_oargs;
58
nb_iargs = def->nb_iargs;
59
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
60
- init_arg_info(&ctx, op->args[i]);
61
- }
62
}
63
+ init_arguments(&ctx, op, nb_oargs + nb_iargs);
64
65
/* Do copy propagation */
66
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
67
--
68
2.25.1
69
70
diff view generated by jsdifflib
New patch

Continue splitting tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
16
}
17
}
18
19
+static void copy_propagate(OptContext *ctx, TCGOp *op,
20
+ int nb_oargs, int nb_iargs)
21
+{
22
+ TCGContext *s = ctx->tcg;
23
+
24
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
25
+ TCGTemp *ts = arg_temp(op->args[i]);
26
+ if (ts && ts_is_copy(ts)) {
27
+ op->args[i] = temp_arg(find_better_copy(s, ts));
28
+ }
29
+ }
30
+}
31
+
32
/* Propagate constants and copies, fold constant expressions. */
33
void tcg_optimize(TCGContext *s)
34
{
35
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
36
nb_iargs = def->nb_iargs;
37
}
38
init_arguments(&ctx, op, nb_oargs + nb_iargs);
39
-
40
- /* Do copy propagation */
41
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
42
- TCGTemp *ts = arg_temp(op->args[i]);
43
- if (ts && ts_is_copy(ts)) {
44
- op->args[i] = temp_arg(find_better_copy(s, ts));
45
- }
46
- }
47
+ copy_propagate(&ctx, op, nb_oargs, nb_iargs);
48
49
/* For commutative operations make constant second argument */
50
switch (opc) {
51
--
52
2.25.1
53
54
diff view generated by jsdifflib
New patch

Calls are special in that they have a variable number
of arguments, and need to be able to clobber globals.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------
1 file changed, 41 insertions(+), 22 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
16
}
17
}
18
19
+static bool fold_call(OptContext *ctx, TCGOp *op)
20
+{
21
+ TCGContext *s = ctx->tcg;
22
+ int nb_oargs = TCGOP_CALLO(op);
23
+ int nb_iargs = TCGOP_CALLI(op);
24
+ int flags, i;
25
+
26
+ init_arguments(ctx, op, nb_oargs + nb_iargs);
27
+ copy_propagate(ctx, op, nb_oargs, nb_iargs);
28
+
29
+ /* If the function reads or writes globals, reset temp data. */
30
+ flags = tcg_call_flags(op);
31
+ if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
32
+ int nb_globals = s->nb_globals;
33
+
34
+ for (i = 0; i < nb_globals; i++) {
35
+ if (test_bit(i, ctx->temps_used.l)) {
36
+ reset_ts(&ctx->tcg->temps[i]);
37
+ }
38
+ }
39
+ }
40
+
41
+ /* Reset temp data for outputs. */
42
+ for (i = 0; i < nb_oargs; i++) {
43
+ reset_temp(op->args[i]);
44
+ }
45
+
46
+ /* Stop optimizing MB across calls. */
47
+ ctx->prev_mb = NULL;
48
+ return true;
49
+}
50
+
51
/* Propagate constants and copies, fold constant expressions. */
52
void tcg_optimize(TCGContext *s)
53
{
54
- int nb_temps, nb_globals, i;
55
+ int nb_temps, i;
56
TCGOp *op, *op_next;
57
OptContext ctx = { .tcg = s };
58
59
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
60
available through the doubly linked circular list. */
61
62
nb_temps = s->nb_temps;
63
- nb_globals = s->nb_globals;
64
-
65
for (i = 0; i < nb_temps; ++i) {
66
s->temps[i].state_ptr = NULL;
67
}
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
69
uint64_t z_mask, partmask, affected, tmp;
70
int nb_oargs, nb_iargs;
71
TCGOpcode opc = op->opc;
72
- const TCGOpDef *def = &tcg_op_defs[opc];
73
+ const TCGOpDef *def;
74
75
- /* Count the arguments, and initialize the temps that are
76
- going to be used */
77
+ /* Calls are special. */
78
if (opc == INDEX_op_call) {
79
- nb_oargs = TCGOP_CALLO(op);
80
- nb_iargs = TCGOP_CALLI(op);
81
- } else {
82
- nb_oargs = def->nb_oargs;
83
- nb_iargs = def->nb_iargs;
84
+ fold_call(&ctx, op);
85
+ continue;
86
}
87
+
88
+ def = &tcg_op_defs[opc];
89
+ nb_oargs = def->nb_oargs;
90
+ nb_iargs = def->nb_iargs;
91
init_arguments(&ctx, op, nb_oargs + nb_iargs);
92
copy_propagate(&ctx, op, nb_oargs, nb_iargs);
93
94
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
95
if (def->flags & TCG_OPF_BB_END) {
96
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
97
} else {
98
- if (opc == INDEX_op_call &&
99
- !(tcg_call_flags(op)
100
- & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
101
- for (i = 0; i < nb_globals; i++) {
102
- if (test_bit(i, ctx.temps_used.l)) {
103
- reset_ts(&s->temps[i]);
104
- }
105
- }
106
- }
107
-
108
for (i = 0; i < nb_oargs; i++) {
109
reset_temp(op->args[i]);
110
/* Save the corresponding known-zero bits mask for the
111
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
112
case INDEX_op_qemu_st_i32:
113
case INDEX_op_qemu_st8_i32:
114
case INDEX_op_qemu_st_i64:
115
- case INDEX_op_call:
116
/* Opcodes that touch guest memory stop the optimization. */
117
ctx.prev_mb = NULL;
118
break;
119
--
120
2.25.1
121
122
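The globals-clobber test that fold_call() carries over from the old loop can be read as the small predicate sketched below; the CALL_NO_* bits are placeholders for illustration, the real TCG_CALL_* flag values live in tcg.h:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder flag bits, for illustration only. */
#define CALL_NO_READ_GLOBALS   (1u << 0)
#define CALL_NO_WRITE_GLOBALS  (1u << 1)

/* Mirrors the condition in fold_call(): tracked state for global temps is
 * discarded unless the call carries either no-read-globals or
 * no-write-globals. */
static bool call_clobbers_globals(unsigned flags)
{
    return !(flags & (CALL_NO_READ_GLOBALS | CALL_NO_WRITE_GLOBALS));
}

int main(void)
{
    printf("plain call: %d\n", call_clobbers_globals(0));                    /* 1 */
    printf("no-read-globals call: %d\n",
           call_clobbers_globals(CALL_NO_READ_GLOBALS));                     /* 0 */
    return 0;
}
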
1
We're about to move this out of tcg.h, so rename it
1
Rather than try to keep these up-to-date across folding,
2
as we did when moving MemOp.
2
re-read nb_oargs at the end, after re-reading the opcode.
3
3
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
A couple of asserts need dropping, but that will take care
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
of itself as we split the function further.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
10
---
8
accel/tcg/atomic_template.h | 24 +++++------
11
tcg/optimize.c | 14 ++++----------
9
include/tcg/tcg.h | 74 ++++++++++++++++-----------------
12
1 file changed, 4 insertions(+), 10 deletions(-)
10
accel/tcg/cputlb.c | 78 +++++++++++++++++------------------
11
accel/tcg/user-exec.c | 2 +-
12
target/arm/helper-a64.c | 16 +++----
13
target/arm/m_helper.c | 2 +-
14
target/i386/tcg/mem_helper.c | 4 +-
15
target/m68k/op_helper.c | 2 +-
16
target/mips/tcg/msa_helper.c | 6 +--
17
target/s390x/tcg/mem_helper.c | 20 ++++-----
18
target/sparc/ldst_helper.c | 2 +-
19
tcg/optimize.c | 2 +-
20
tcg/tcg-op.c | 12 +++---
21
tcg/tcg.c | 2 +-
22
tcg/tci.c | 14 +++----
23
accel/tcg/atomic_common.c.inc | 6 +--
24
tcg/aarch64/tcg-target.c.inc | 14 +++----
25
tcg/arm/tcg-target.c.inc | 10 ++---
26
tcg/i386/tcg-target.c.inc | 10 ++---
27
tcg/mips/tcg-target.c.inc | 12 +++---
28
tcg/ppc/tcg-target.c.inc | 10 ++---
29
tcg/riscv/tcg-target.c.inc | 16 +++----
30
tcg/s390/tcg-target.c.inc | 10 ++---
31
tcg/sparc/tcg-target.c.inc | 4 +-
32
tcg/tcg-ldst.c.inc | 2 +-
33
25 files changed, 177 insertions(+), 177 deletions(-)
34
13
35
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/accel/tcg/atomic_template.h
38
+++ b/accel/tcg/atomic_template.h
39
@@ -XXX,XX +XXX,XX @@
40
41
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
42
ABI_TYPE cmpv, ABI_TYPE newv,
43
- TCGMemOpIdx oi, uintptr_t retaddr)
44
+ MemOpIdx oi, uintptr_t retaddr)
45
{
46
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
47
PAGE_READ | PAGE_WRITE, retaddr);
48
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
49
#if DATA_SIZE >= 16
50
#if HAVE_ATOMIC128
51
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
52
- TCGMemOpIdx oi, uintptr_t retaddr)
53
+ MemOpIdx oi, uintptr_t retaddr)
54
{
55
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
56
PAGE_READ, retaddr);
57
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
58
}
59
60
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
61
- TCGMemOpIdx oi, uintptr_t retaddr)
62
+ MemOpIdx oi, uintptr_t retaddr)
63
{
64
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
65
PAGE_WRITE, retaddr);
66
@@ -XXX,XX +XXX,XX @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
67
#endif
68
#else
69
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
70
- TCGMemOpIdx oi, uintptr_t retaddr)
71
+ MemOpIdx oi, uintptr_t retaddr)
72
{
73
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
74
PAGE_READ | PAGE_WRITE, retaddr);
75
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
76
77
#define GEN_ATOMIC_HELPER(X) \
78
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
79
- ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
80
+ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
81
{ \
82
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
83
PAGE_READ | PAGE_WRITE, retaddr); \
84
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER(xor_fetch)
85
*/
86
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
87
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
88
- ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
89
+ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
90
{ \
91
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
92
PAGE_READ | PAGE_WRITE, retaddr); \
93
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
94
95
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
96
ABI_TYPE cmpv, ABI_TYPE newv,
97
- TCGMemOpIdx oi, uintptr_t retaddr)
98
+ MemOpIdx oi, uintptr_t retaddr)
99
{
100
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
101
PAGE_READ | PAGE_WRITE, retaddr);
102
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
103
#if DATA_SIZE >= 16
104
#if HAVE_ATOMIC128
105
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
106
- TCGMemOpIdx oi, uintptr_t retaddr)
107
+ MemOpIdx oi, uintptr_t retaddr)
108
{
109
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
110
PAGE_READ, retaddr);
111
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
112
}
113
114
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
115
- TCGMemOpIdx oi, uintptr_t retaddr)
116
+ MemOpIdx oi, uintptr_t retaddr)
117
{
118
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
119
PAGE_WRITE, retaddr);
120
@@ -XXX,XX +XXX,XX @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
121
#endif
122
#else
123
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
124
- TCGMemOpIdx oi, uintptr_t retaddr)
125
+ MemOpIdx oi, uintptr_t retaddr)
126
{
127
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
128
PAGE_READ | PAGE_WRITE, retaddr);
129
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
130
131
#define GEN_ATOMIC_HELPER(X) \
132
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
133
- ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
134
+ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
135
{ \
136
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
137
PAGE_READ | PAGE_WRITE, retaddr); \
138
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER(xor_fetch)
139
*/
140
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
141
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
142
- ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
143
+ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
144
{ \
145
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
146
PAGE_READ | PAGE_WRITE, retaddr); \
147
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/include/tcg/tcg.h
150
+++ b/include/tcg/tcg.h
151
@@ -XXX,XX +XXX,XX @@ static inline size_t tcg_current_code_size(TCGContext *s)
152
}
153
154
/* Combine the MemOp and mmu_idx parameters into a single value. */
155
-typedef uint32_t TCGMemOpIdx;
156
+typedef uint32_t MemOpIdx;
157
158
/**
159
* make_memop_idx
160
@@ -XXX,XX +XXX,XX @@ typedef uint32_t TCGMemOpIdx;
161
*
162
* Encode these values into a single parameter.
163
*/
164
-static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
165
+static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
166
{
167
tcg_debug_assert(idx <= 15);
168
return (op << 4) | idx;
169
@@ -XXX,XX +XXX,XX @@ static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
170
*
171
* Extract the memory operation from the combined value.
172
*/
173
-static inline MemOp get_memop(TCGMemOpIdx oi)
174
+static inline MemOp get_memop(MemOpIdx oi)
175
{
176
return oi >> 4;
177
}
178
@@ -XXX,XX +XXX,XX @@ static inline MemOp get_memop(TCGMemOpIdx oi)
179
*
180
* Extract the mmu index from the combined value.
181
*/
182
-static inline unsigned get_mmuidx(TCGMemOpIdx oi)
183
+static inline unsigned get_mmuidx(MemOpIdx oi)
184
{
185
return oi & 15;
186
}
187
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
188
#ifdef CONFIG_SOFTMMU
189
/* Value zero-extended to tcg register size. */
190
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
191
- TCGMemOpIdx oi, uintptr_t retaddr);
192
+ MemOpIdx oi, uintptr_t retaddr);
193
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
194
- TCGMemOpIdx oi, uintptr_t retaddr);
195
+ MemOpIdx oi, uintptr_t retaddr);
196
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
197
- TCGMemOpIdx oi, uintptr_t retaddr);
198
+ MemOpIdx oi, uintptr_t retaddr);
199
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
200
- TCGMemOpIdx oi, uintptr_t retaddr);
201
+ MemOpIdx oi, uintptr_t retaddr);
202
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
203
- TCGMemOpIdx oi, uintptr_t retaddr);
204
+ MemOpIdx oi, uintptr_t retaddr);
205
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
206
- TCGMemOpIdx oi, uintptr_t retaddr);
207
+ MemOpIdx oi, uintptr_t retaddr);
208
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
209
- TCGMemOpIdx oi, uintptr_t retaddr);
210
+ MemOpIdx oi, uintptr_t retaddr);
211
212
/* Value sign-extended to tcg register size. */
213
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
214
- TCGMemOpIdx oi, uintptr_t retaddr);
215
+ MemOpIdx oi, uintptr_t retaddr);
216
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
217
- TCGMemOpIdx oi, uintptr_t retaddr);
218
+ MemOpIdx oi, uintptr_t retaddr);
219
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
220
- TCGMemOpIdx oi, uintptr_t retaddr);
221
+ MemOpIdx oi, uintptr_t retaddr);
222
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
223
- TCGMemOpIdx oi, uintptr_t retaddr);
224
+ MemOpIdx oi, uintptr_t retaddr);
225
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
226
- TCGMemOpIdx oi, uintptr_t retaddr);
227
+ MemOpIdx oi, uintptr_t retaddr);
228
229
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
230
- TCGMemOpIdx oi, uintptr_t retaddr);
231
+ MemOpIdx oi, uintptr_t retaddr);
232
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
233
- TCGMemOpIdx oi, uintptr_t retaddr);
234
+ MemOpIdx oi, uintptr_t retaddr);
235
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
236
- TCGMemOpIdx oi, uintptr_t retaddr);
237
+ MemOpIdx oi, uintptr_t retaddr);
238
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
239
- TCGMemOpIdx oi, uintptr_t retaddr);
240
+ MemOpIdx oi, uintptr_t retaddr);
241
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
242
- TCGMemOpIdx oi, uintptr_t retaddr);
243
+ MemOpIdx oi, uintptr_t retaddr);
244
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
245
- TCGMemOpIdx oi, uintptr_t retaddr);
246
+ MemOpIdx oi, uintptr_t retaddr);
247
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
248
- TCGMemOpIdx oi, uintptr_t retaddr);
249
+ MemOpIdx oi, uintptr_t retaddr);
250
251
/* Temporary aliases until backends are converted. */
252
#ifdef TARGET_WORDS_BIGENDIAN
253
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
254
255
uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
256
uint32_t cmpv, uint32_t newv,
257
- TCGMemOpIdx oi, uintptr_t retaddr);
258
+ MemOpIdx oi, uintptr_t retaddr);
259
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
260
uint32_t cmpv, uint32_t newv,
261
- TCGMemOpIdx oi, uintptr_t retaddr);
262
+ MemOpIdx oi, uintptr_t retaddr);
263
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
264
uint32_t cmpv, uint32_t newv,
265
- TCGMemOpIdx oi, uintptr_t retaddr);
266
+ MemOpIdx oi, uintptr_t retaddr);
267
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
268
uint64_t cmpv, uint64_t newv,
269
- TCGMemOpIdx oi, uintptr_t retaddr);
270
+ MemOpIdx oi, uintptr_t retaddr);
271
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
272
uint32_t cmpv, uint32_t newv,
273
- TCGMemOpIdx oi, uintptr_t retaddr);
274
+ MemOpIdx oi, uintptr_t retaddr);
275
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
276
uint32_t cmpv, uint32_t newv,
277
- TCGMemOpIdx oi, uintptr_t retaddr);
278
+ MemOpIdx oi, uintptr_t retaddr);
279
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
280
uint64_t cmpv, uint64_t newv,
281
- TCGMemOpIdx oi, uintptr_t retaddr);
282
+ MemOpIdx oi, uintptr_t retaddr);
283
284
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
285
TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
286
(CPUArchState *env, target_ulong addr, TYPE val, \
287
- TCGMemOpIdx oi, uintptr_t retaddr);
288
+ MemOpIdx oi, uintptr_t retaddr);
289
290
#ifdef CONFIG_ATOMIC64
291
#define GEN_ATOMIC_HELPER_ALL(NAME) \
292
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER_ALL(xchg)
293
294
Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
295
Int128 cmpv, Int128 newv,
296
- TCGMemOpIdx oi, uintptr_t retaddr);
297
+ MemOpIdx oi, uintptr_t retaddr);
298
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
299
Int128 cmpv, Int128 newv,
300
- TCGMemOpIdx oi, uintptr_t retaddr);
301
+ MemOpIdx oi, uintptr_t retaddr);
302
303
Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
304
- TCGMemOpIdx oi, uintptr_t retaddr);
305
+ MemOpIdx oi, uintptr_t retaddr);
306
Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
307
- TCGMemOpIdx oi, uintptr_t retaddr);
308
+ MemOpIdx oi, uintptr_t retaddr);
309
void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
310
- TCGMemOpIdx oi, uintptr_t retaddr);
311
+ MemOpIdx oi, uintptr_t retaddr);
312
void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
313
- TCGMemOpIdx oi, uintptr_t retaddr);
314
+ MemOpIdx oi, uintptr_t retaddr);
315
316
#ifdef CONFIG_DEBUG_TCG
317
void tcg_assert_listed_vecop(TCGOpcode);
318
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
319
index XXXXXXX..XXXXXXX 100644
320
--- a/accel/tcg/cputlb.c
321
+++ b/accel/tcg/cputlb.c
322
@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
323
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
324
*/
325
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
326
- TCGMemOpIdx oi, int size, int prot,
327
+ MemOpIdx oi, int size, int prot,
328
uintptr_t retaddr)
329
{
330
size_t mmu_idx = get_mmuidx(oi);
331
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
332
*/
333
334
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
335
- TCGMemOpIdx oi, uintptr_t retaddr);
336
+ MemOpIdx oi, uintptr_t retaddr);
337
338
static inline uint64_t QEMU_ALWAYS_INLINE
339
load_memop(const void *haddr, MemOp op)
340
@@ -XXX,XX +XXX,XX @@ load_memop(const void *haddr, MemOp op)
341
}
342
343
static inline uint64_t QEMU_ALWAYS_INLINE
344
-load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
345
+load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
346
uintptr_t retaddr, MemOp op, bool code_read,
347
FullLoadHelper *full_load)
348
{
349
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
350
*/
351
352
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
353
- TCGMemOpIdx oi, uintptr_t retaddr)
354
+ MemOpIdx oi, uintptr_t retaddr)
355
{
356
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
357
}
358
359
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
360
- TCGMemOpIdx oi, uintptr_t retaddr)
361
+ MemOpIdx oi, uintptr_t retaddr)
362
{
363
return full_ldub_mmu(env, addr, oi, retaddr);
364
}
365
366
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
367
- TCGMemOpIdx oi, uintptr_t retaddr)
368
+ MemOpIdx oi, uintptr_t retaddr)
369
{
370
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
371
full_le_lduw_mmu);
372
}
373
374
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
375
- TCGMemOpIdx oi, uintptr_t retaddr)
376
+ MemOpIdx oi, uintptr_t retaddr)
377
{
378
return full_le_lduw_mmu(env, addr, oi, retaddr);
379
}
380
381
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
382
- TCGMemOpIdx oi, uintptr_t retaddr)
383
+ MemOpIdx oi, uintptr_t retaddr)
384
{
385
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
386
full_be_lduw_mmu);
387
}
388
389
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
390
- TCGMemOpIdx oi, uintptr_t retaddr)
391
+ MemOpIdx oi, uintptr_t retaddr)
392
{
393
return full_be_lduw_mmu(env, addr, oi, retaddr);
394
}
395
396
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
397
- TCGMemOpIdx oi, uintptr_t retaddr)
398
+ MemOpIdx oi, uintptr_t retaddr)
399
{
400
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
401
full_le_ldul_mmu);
402
}
403
404
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
405
- TCGMemOpIdx oi, uintptr_t retaddr)
406
+ MemOpIdx oi, uintptr_t retaddr)
407
{
408
return full_le_ldul_mmu(env, addr, oi, retaddr);
409
}
410
411
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
412
- TCGMemOpIdx oi, uintptr_t retaddr)
413
+ MemOpIdx oi, uintptr_t retaddr)
414
{
415
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
416
full_be_ldul_mmu);
417
}
418
419
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
420
- TCGMemOpIdx oi, uintptr_t retaddr)
421
+ MemOpIdx oi, uintptr_t retaddr)
422
{
423
return full_be_ldul_mmu(env, addr, oi, retaddr);
424
}
425
426
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
427
- TCGMemOpIdx oi, uintptr_t retaddr)
428
+ MemOpIdx oi, uintptr_t retaddr)
429
{
430
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
431
helper_le_ldq_mmu);
432
}
433
434
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
435
- TCGMemOpIdx oi, uintptr_t retaddr)
436
+ MemOpIdx oi, uintptr_t retaddr)
437
{
438
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
439
helper_be_ldq_mmu);
440
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
441
442
443
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
444
- TCGMemOpIdx oi, uintptr_t retaddr)
445
+ MemOpIdx oi, uintptr_t retaddr)
446
{
447
return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
448
}
449
450
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
451
- TCGMemOpIdx oi, uintptr_t retaddr)
452
+ MemOpIdx oi, uintptr_t retaddr)
453
{
454
return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
455
}
456
457
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
458
- TCGMemOpIdx oi, uintptr_t retaddr)
459
+ MemOpIdx oi, uintptr_t retaddr)
460
{
461
return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
462
}
463
464
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
465
- TCGMemOpIdx oi, uintptr_t retaddr)
466
+ MemOpIdx oi, uintptr_t retaddr)
467
{
468
return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
469
}
470
471
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
472
- TCGMemOpIdx oi, uintptr_t retaddr)
473
+ MemOpIdx oi, uintptr_t retaddr)
474
{
475
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
476
}
477
@@ -XXX,XX +XXX,XX @@ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
478
MemOp op, FullLoadHelper *full_load)
479
{
480
uint16_t meminfo;
481
- TCGMemOpIdx oi;
482
+ MemOpIdx oi;
483
uint64_t ret;
484
485
meminfo = trace_mem_get_info(op, mmu_idx, false);
486
@@ -XXX,XX +XXX,XX @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
487
uintptr_t index, index2;
488
CPUTLBEntry *entry, *entry2;
489
target_ulong page2, tlb_addr, tlb_addr2;
490
- TCGMemOpIdx oi;
491
+ MemOpIdx oi;
492
size_t size2;
493
int i;
494
495
@@ -XXX,XX +XXX,XX @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
496
497
static inline void QEMU_ALWAYS_INLINE
498
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
499
- TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
500
+ MemOpIdx oi, uintptr_t retaddr, MemOp op)
501
{
502
uintptr_t mmu_idx = get_mmuidx(oi);
503
uintptr_t index = tlb_index(env, mmu_idx, addr);
504
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
505
506
void __attribute__((noinline))
507
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
508
- TCGMemOpIdx oi, uintptr_t retaddr)
509
+ MemOpIdx oi, uintptr_t retaddr)
510
{
511
store_helper(env, addr, val, oi, retaddr, MO_UB);
512
}
513
514
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
515
- TCGMemOpIdx oi, uintptr_t retaddr)
516
+ MemOpIdx oi, uintptr_t retaddr)
517
{
518
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
519
}
520
521
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
522
- TCGMemOpIdx oi, uintptr_t retaddr)
523
+ MemOpIdx oi, uintptr_t retaddr)
524
{
525
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
526
}
527
528
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
529
- TCGMemOpIdx oi, uintptr_t retaddr)
530
+ MemOpIdx oi, uintptr_t retaddr)
531
{
532
store_helper(env, addr, val, oi, retaddr, MO_LEUL);
533
}
534
535
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
536
- TCGMemOpIdx oi, uintptr_t retaddr)
537
+ MemOpIdx oi, uintptr_t retaddr)
538
{
539
store_helper(env, addr, val, oi, retaddr, MO_BEUL);
540
}
541
542
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
543
- TCGMemOpIdx oi, uintptr_t retaddr)
544
+ MemOpIdx oi, uintptr_t retaddr)
545
{
546
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
547
}
548
549
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
550
- TCGMemOpIdx oi, uintptr_t retaddr)
551
+ MemOpIdx oi, uintptr_t retaddr)
552
{
553
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
554
}
555
@@ -XXX,XX +XXX,XX @@ static inline void QEMU_ALWAYS_INLINE
556
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
557
int mmu_idx, uintptr_t retaddr, MemOp op)
558
{
559
- TCGMemOpIdx oi;
560
+ MemOpIdx oi;
561
uint16_t meminfo;
562
563
meminfo = trace_mem_get_info(op, mmu_idx, true);
564
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
565
/* Code access functions. */
566
567
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
568
- TCGMemOpIdx oi, uintptr_t retaddr)
569
+ MemOpIdx oi, uintptr_t retaddr)
570
{
571
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
572
}
573
574
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
575
{
576
- TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
577
+ MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
578
return full_ldub_code(env, addr, oi, 0);
579
}
580
581
static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
582
- TCGMemOpIdx oi, uintptr_t retaddr)
583
+ MemOpIdx oi, uintptr_t retaddr)
584
{
585
return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
586
}
587
588
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
589
{
590
- TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
591
+ MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
592
return full_lduw_code(env, addr, oi, 0);
593
}
594
595
static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
596
- TCGMemOpIdx oi, uintptr_t retaddr)
597
+ MemOpIdx oi, uintptr_t retaddr)
598
{
599
return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
600
}
601
602
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
603
{
604
- TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
605
+ MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
606
return full_ldl_code(env, addr, oi, 0);
607
}
608
609
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
610
- TCGMemOpIdx oi, uintptr_t retaddr)
611
+ MemOpIdx oi, uintptr_t retaddr)
612
{
613
return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
614
}
615
616
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
617
{
618
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
619
+ MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
620
return full_ldq_code(env, addr, oi, 0);
621
}
622
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
623
index XXXXXXX..XXXXXXX 100644
624
--- a/accel/tcg/user-exec.c
625
+++ b/accel/tcg/user-exec.c
626
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
627
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
628
*/
629
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
630
- TCGMemOpIdx oi, int size, int prot,
631
+ MemOpIdx oi, int size, int prot,
632
uintptr_t retaddr)
633
{
634
/* Enforce qemu required alignment. */
635
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
636
index XXXXXXX..XXXXXXX 100644
637
--- a/target/arm/helper-a64.c
638
+++ b/target/arm/helper-a64.c
639
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
640
clear_helper_retaddr();
641
#else
642
int mem_idx = cpu_mmu_index(env, false);
643
- TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
644
- TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
645
+ MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
646
+ MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
647
648
o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
649
o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
650
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
651
uintptr_t ra = GETPC();
652
bool success;
653
int mem_idx;
654
- TCGMemOpIdx oi;
655
+ MemOpIdx oi;
656
657
assert(HAVE_CMPXCHG128);
658
659
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
660
clear_helper_retaddr();
661
#else
662
int mem_idx = cpu_mmu_index(env, false);
663
- TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
664
- TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
665
+ MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
666
+ MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
667
668
o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
669
o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
670
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
671
uintptr_t ra = GETPC();
672
bool success;
673
int mem_idx;
674
- TCGMemOpIdx oi;
675
+ MemOpIdx oi;
676
677
assert(HAVE_CMPXCHG128);
678
679
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
680
Int128 oldv, cmpv, newv;
681
uintptr_t ra = GETPC();
682
int mem_idx;
683
- TCGMemOpIdx oi;
684
+ MemOpIdx oi;
685
686
assert(HAVE_CMPXCHG128);
687
688
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
689
Int128 oldv, cmpv, newv;
690
uintptr_t ra = GETPC();
691
int mem_idx;
692
- TCGMemOpIdx oi;
693
+ MemOpIdx oi;
694
695
assert(HAVE_CMPXCHG128);
696
697
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
698
index XXXXXXX..XXXXXXX 100644
699
--- a/target/arm/m_helper.c
700
+++ b/target/arm/m_helper.c
701
@@ -XXX,XX +XXX,XX @@ static bool do_v7m_function_return(ARMCPU *cpu)
702
703
{
704
bool threadmode, spsel;
705
- TCGMemOpIdx oi;
706
+ MemOpIdx oi;
707
ARMMMUIdx mmu_idx;
708
uint32_t *frame_sp_p;
709
uint32_t frameptr;
710
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
711
index XXXXXXX..XXXXXXX 100644
712
--- a/target/i386/tcg/mem_helper.c
713
+++ b/target/i386/tcg/mem_helper.c
714
@@ -XXX,XX +XXX,XX @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
715
{
716
uintptr_t ra = GETPC();
717
int mem_idx = cpu_mmu_index(env, false);
718
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
719
+ MemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
720
oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
721
}
722
723
@@ -XXX,XX +XXX,XX @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
724
Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
725
726
int mem_idx = cpu_mmu_index(env, false);
727
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
728
+ MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
729
Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);
730
731
if (int128_eq(oldv, cmpv)) {
732
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
733
index XXXXXXX..XXXXXXX 100644
734
--- a/target/m68k/op_helper.c
735
+++ b/target/m68k/op_helper.c
736
@@ -XXX,XX +XXX,XX @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
737
uintptr_t ra = GETPC();
738
#if defined(CONFIG_ATOMIC64)
739
int mmu_idx = cpu_mmu_index(env, 0);
740
- TCGMemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
741
+ MemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
742
#endif
743
744
if (parallel) {
745
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
746
index XXXXXXX..XXXXXXX 100644
747
--- a/target/mips/tcg/msa_helper.c
748
+++ b/target/mips/tcg/msa_helper.c
749
@@ -XXX,XX +XXX,XX @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
750
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
751
752
#if !defined(CONFIG_USER_ONLY)
753
-#define MEMOP_IDX(DF) \
754
- TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
755
- cpu_mmu_index(env, false));
756
+#define MEMOP_IDX(DF) \
757
+ MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
758
+ cpu_mmu_index(env, false));
759
#else
760
#define MEMOP_IDX(DF)
761
#endif
762
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
763
index XXXXXXX..XXXXXXX 100644
764
--- a/target/s390x/tcg/mem_helper.c
765
+++ b/target/s390x/tcg/mem_helper.c
766
@@ -XXX,XX +XXX,XX @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
767
g_assert(haddr);
768
memset(haddr, byte, size);
769
#else
770
- TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
771
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
772
int i;
773
774
if (likely(haddr)) {
775
@@ -XXX,XX +XXX,XX @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
776
#ifdef CONFIG_USER_ONLY
777
return ldub_p(*haddr + offset);
778
#else
779
- TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
780
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
781
uint8_t byte;
782
783
if (likely(*haddr)) {
784
@@ -XXX,XX +XXX,XX @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
785
#ifdef CONFIG_USER_ONLY
786
stb_p(*haddr + offset, byte);
787
#else
788
- TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
789
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
790
791
if (likely(*haddr)) {
792
stb_p(*haddr + offset, byte);
793
@@ -XXX,XX +XXX,XX @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
794
Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
795
Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
796
int mem_idx;
797
- TCGMemOpIdx oi;
798
+ MemOpIdx oi;
799
Int128 oldv;
800
bool fail;
801
802
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
803
uint32_t *haddr = g2h(env_cpu(env), a1);
804
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
805
#else
806
- TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
807
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
808
ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
809
#endif
810
} else {
811
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
812
813
if (parallel) {
814
#ifdef CONFIG_ATOMIC64
815
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
816
+ MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
817
ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
818
#else
819
/* Note that we asserted !parallel above. */
820
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
821
cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
822
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
823
} else if (HAVE_CMPXCHG128) {
824
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
825
+ MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
826
ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
827
cc = !int128_eq(ov, cv);
828
} else {
829
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
830
cpu_stq_data_ra(env, a2 + 0, svh, ra);
831
cpu_stq_data_ra(env, a2 + 8, svl, ra);
832
} else if (HAVE_ATOMIC128) {
833
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
834
+ MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
835
Int128 sv = int128_make128(svl, svh);
836
cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
837
} else {
838
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
839
uintptr_t ra = GETPC();
840
uint64_t hi, lo;
841
int mem_idx;
842
- TCGMemOpIdx oi;
843
+ MemOpIdx oi;
844
Int128 v;
845
846
assert(HAVE_ATOMIC128);
847
@@ -XXX,XX +XXX,XX @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
848
{
849
uintptr_t ra = GETPC();
850
int mem_idx;
851
- TCGMemOpIdx oi;
852
+ MemOpIdx oi;
853
Int128 v;
854
855
assert(HAVE_ATOMIC128);
856
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
857
index XXXXXXX..XXXXXXX 100644
858
--- a/target/sparc/ldst_helper.c
859
+++ b/target/sparc/ldst_helper.c
860
@@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
861
case ASI_SNF:
862
case ASI_SNFL:
863
{
864
- TCGMemOpIdx oi;
865
+ MemOpIdx oi;
866
int idx = (env->pstate & PS_PRIV
867
? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
868
: (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
869
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
870
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
871
--- a/tcg/optimize.c
16
--- a/tcg/optimize.c
872
+++ b/tcg/optimize.c
17
+++ b/tcg/optimize.c
873
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
18
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
874
19
20
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
21
uint64_t z_mask, partmask, affected, tmp;
22
- int nb_oargs, nb_iargs;
23
TCGOpcode opc = op->opc;
24
const TCGOpDef *def;
25
26
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
27
}
28
29
def = &tcg_op_defs[opc];
30
- nb_oargs = def->nb_oargs;
31
- nb_iargs = def->nb_iargs;
32
- init_arguments(&ctx, op, nb_oargs + nb_iargs);
33
- copy_propagate(&ctx, op, nb_oargs, nb_iargs);
34
+ init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
35
+ copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
36
37
/* For commutative operations make constant second argument */
38
switch (opc) {
39
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
40
875
CASE_OP_32_64(qemu_ld):
41
CASE_OP_32_64(qemu_ld):
876
{
42
{
877
- TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs];
43
- MemOpIdx oi = op->args[nb_oargs + nb_iargs];
878
+ MemOpIdx oi = op->args[nb_oargs + nb_iargs];
44
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
879
MemOp mop = get_memop(oi);
45
MemOp mop = get_memop(oi);
880
if (!(mop & MO_SIGN)) {
46
if (!(mop & MO_SIGN)) {
881
mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
47
z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
882
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
883
index XXXXXXX..XXXXXXX 100644
49
}
884
--- a/tcg/tcg-op.c
50
885
+++ b/tcg/tcg-op.c
51
if (partmask == 0) {
886
@@ -XXX,XX +XXX,XX @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
52
- tcg_debug_assert(nb_oargs == 1);
887
static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
53
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
888
MemOp memop, TCGArg idx)
54
continue;
889
{
55
}
890
- TCGMemOpIdx oi = make_memop_idx(memop, idx);
56
if (affected == 0) {
891
+ MemOpIdx oi = make_memop_idx(memop, idx);
57
- tcg_debug_assert(nb_oargs == 1);
892
#if TARGET_LONG_BITS == 32
58
tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
893
tcg_gen_op3i_i32(opc, val, addr, oi);
59
continue;
894
#else
60
}
895
@@ -XXX,XX +XXX,XX @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
896
static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
62
} else if (args_are_copies(op->args[1], op->args[2])) {
897
MemOp memop, TCGArg idx)
63
op->opc = INDEX_op_dup_vec;
898
{
64
TCGOP_VECE(op) = MO_32;
899
- TCGMemOpIdx oi = make_memop_idx(memop, idx);
65
- nb_iargs = 1;
900
+ MemOpIdx oi = make_memop_idx(memop, idx);
66
}
901
#if TARGET_LONG_BITS == 32
67
break;
902
if (TCG_TARGET_REG_BITS == 32) {
68
903
tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
69
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
904
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
70
op->opc = opc = (opc == INDEX_op_movcond_i32
905
tcg_temp_free_i32(t1);
71
? INDEX_op_setcond_i32
906
} else {
72
: INDEX_op_setcond_i64);
907
gen_atomic_cx_i32 gen;
73
- nb_iargs = 2;
908
- TCGMemOpIdx oi;
74
}
909
+ MemOpIdx oi;
75
break;
910
76
911
gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
77
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
912
tcg_debug_assert(gen != NULL);
78
if (def->flags & TCG_OPF_BB_END) {
913
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
79
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
914
} else if ((memop & MO_SIZE) == MO_64) {
80
} else {
915
#ifdef CONFIG_ATOMIC64
81
+ int nb_oargs = def->nb_oargs;
916
gen_atomic_cx_i64 gen;
82
for (i = 0; i < nb_oargs; i++) {
917
- TCGMemOpIdx oi;
83
reset_temp(op->args[i]);
918
+ MemOpIdx oi;
84
/* Save the corresponding known-zero bits mask for the
919
920
gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
921
tcg_debug_assert(gen != NULL);
922
@@ -XXX,XX +XXX,XX @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
923
TCGArg idx, MemOp memop, void * const table[])
924
{
925
gen_atomic_op_i32 gen;
926
- TCGMemOpIdx oi;
927
+ MemOpIdx oi;
928
929
memop = tcg_canonicalize_memop(memop, 0, 0);
930
931
@@ -XXX,XX +XXX,XX @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
932
if ((memop & MO_SIZE) == MO_64) {
933
#ifdef CONFIG_ATOMIC64
934
gen_atomic_op_i64 gen;
935
- TCGMemOpIdx oi;
936
+ MemOpIdx oi;
937
938
gen = table[memop & (MO_SIZE | MO_BSWAP)];
939
tcg_debug_assert(gen != NULL);
940
diff --git a/tcg/tcg.c b/tcg/tcg.c
941
index XXXXXXX..XXXXXXX 100644
942
--- a/tcg/tcg.c
943
+++ b/tcg/tcg.c
944
@@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
945
case INDEX_op_qemu_ld_i64:
946
case INDEX_op_qemu_st_i64:
947
{
948
- TCGMemOpIdx oi = op->args[k++];
949
+ MemOpIdx oi = op->args[k++];
950
MemOp op = get_memop(oi);
951
unsigned ix = get_mmuidx(oi);
952
953
diff --git a/tcg/tci.c b/tcg/tci.c
954
index XXXXXXX..XXXXXXX 100644
955
--- a/tcg/tci.c
956
+++ b/tcg/tci.c
957
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_uint64(uint32_t high, uint32_t low)
958
* i = immediate (uint32_t)
959
* I = immediate (tcg_target_ulong)
960
* l = label or pointer
961
- * m = immediate (TCGMemOpIdx)
962
+ * m = immediate (MemOpIdx)
963
* n = immediate (call return length)
964
* r = register
965
* s = signed ldst offset
966
@@ -XXX,XX +XXX,XX @@ static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
967
}
968
969
static void tci_args_rrm(uint32_t insn, TCGReg *r0,
970
- TCGReg *r1, TCGMemOpIdx *m2)
971
+ TCGReg *r1, MemOpIdx *m2)
972
{
973
*r0 = extract32(insn, 8, 4);
974
*r1 = extract32(insn, 12, 4);
975
@@ -XXX,XX +XXX,XX @@ static void tci_args_rrrc(uint32_t insn,
976
}
977
978
static void tci_args_rrrm(uint32_t insn,
979
- TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3)
980
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
981
{
982
*r0 = extract32(insn, 8, 4);
983
*r1 = extract32(insn, 12, 4);
984
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
985
}
986
987
static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
988
- TCGMemOpIdx oi, const void *tb_ptr)
989
+ MemOpIdx oi, const void *tb_ptr)
990
{
991
MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
992
uintptr_t ra = (uintptr_t)tb_ptr;
993
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
994
}
995
996
static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
997
- TCGMemOpIdx oi, const void *tb_ptr)
998
+ MemOpIdx oi, const void *tb_ptr)
999
{
1000
MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
1001
uintptr_t ra = (uintptr_t)tb_ptr;
1002
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
1003
uint32_t tmp32;
1004
uint64_t tmp64;
1005
uint64_t T1, T2;
1006
- TCGMemOpIdx oi;
1007
+ MemOpIdx oi;
1008
int32_t ofs;
1009
void *ptr;
1010
1011
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
1012
tcg_target_ulong i1;
1013
int32_t s2;
1014
TCGCond c;
1015
- TCGMemOpIdx oi;
1016
+ MemOpIdx oi;
1017
uint8_t pos, len;
1018
void *ptr;
1019
1020
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
1021
index XXXXXXX..XXXXXXX 100644
1022
--- a/accel/tcg/atomic_common.c.inc
1023
+++ b/accel/tcg/atomic_common.c.inc
1024
@@ -XXX,XX +XXX,XX @@
1025
*/
1026
1027
static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
1028
- TCGMemOpIdx oi)
1029
+ MemOpIdx oi)
1030
{
1031
CPUState *cpu = env_cpu(env);
1032
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
1033
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
1034
1035
#if HAVE_ATOMIC128
1036
static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
1037
- TCGMemOpIdx oi)
1038
+ MemOpIdx oi)
1039
{
1040
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
1041
1042
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
1043
}
1044
1045
static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
1046
- TCGMemOpIdx oi)
1047
+ MemOpIdx oi)
1048
{
1049
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
1050
1051
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
1052
index XXXXXXX..XXXXXXX 100644
1053
--- a/tcg/aarch64/tcg-target.c.inc
1054
+++ b/tcg/aarch64/tcg-target.c.inc
1055
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
1056
#include "../tcg-ldst.c.inc"
1057
1058
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1059
- * TCGMemOpIdx oi, uintptr_t ra)
1060
+ * MemOpIdx oi, uintptr_t ra)
1061
*/
1062
static void * const qemu_ld_helpers[MO_SIZE + 1] = {
1063
[MO_8] = helper_ret_ldub_mmu,
1064
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[MO_SIZE + 1] = {
1065
};
1066
1067
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1068
- * uintxx_t val, TCGMemOpIdx oi,
1069
+ * uintxx_t val, MemOpIdx oi,
1070
* uintptr_t ra)
1071
*/
1072
static void * const qemu_st_helpers[MO_SIZE + 1] = {
1073
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
1074
1075
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1076
{
1077
- TCGMemOpIdx oi = lb->oi;
1078
+ MemOpIdx oi = lb->oi;
1079
MemOp opc = get_memop(oi);
1080
MemOp size = opc & MO_SIZE;
1081
1082
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1083
1084
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1085
{
1086
- TCGMemOpIdx oi = lb->oi;
1087
+ MemOpIdx oi = lb->oi;
1088
MemOp opc = get_memop(oi);
1089
MemOp size = opc & MO_SIZE;
1090
1091
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1092
return true;
1093
}
1094
1095
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1096
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
1097
TCGType ext, TCGReg data_reg, TCGReg addr_reg,
1098
tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1099
{
1100
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
1101
}
1102
1103
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1104
- TCGMemOpIdx oi, TCGType ext)
1105
+ MemOpIdx oi, TCGType ext)
1106
{
1107
MemOp memop = get_memop(oi);
1108
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
1109
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1110
}
1111
1112
static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1113
- TCGMemOpIdx oi)
1114
+ MemOpIdx oi)
1115
{
1116
MemOp memop = get_memop(oi);
1117
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
1118
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
1119
index XXXXXXX..XXXXXXX 100644
1120
--- a/tcg/arm/tcg-target.c.inc
1121
+++ b/tcg/arm/tcg-target.c.inc
1122
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
1123
/* Record the context of a call to the out of line helper code for the slow
1124
path for a load or store, so that we can later generate the correct
1125
helper code. */
1126
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1127
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
1128
TCGReg datalo, TCGReg datahi, TCGReg addrlo,
1129
TCGReg addrhi, tcg_insn_unit *raddr,
1130
tcg_insn_unit *label_ptr)
1131
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1132
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1133
{
1134
TCGReg argreg, datalo, datahi;
1135
- TCGMemOpIdx oi = lb->oi;
1136
+ MemOpIdx oi = lb->oi;
1137
MemOp opc = get_memop(oi);
1138
void *func;
1139
1140
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1141
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1142
{
1143
TCGReg argreg, datalo, datahi;
1144
- TCGMemOpIdx oi = lb->oi;
1145
+ MemOpIdx oi = lb->oi;
1146
MemOp opc = get_memop(oi);
1147
1148
if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1149
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1150
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
1151
{
1152
TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1153
- TCGMemOpIdx oi;
1154
+ MemOpIdx oi;
1155
MemOp opc;
1156
#ifdef CONFIG_SOFTMMU
1157
int mem_index;
1158
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1159
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
1160
{
1161
TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1162
- TCGMemOpIdx oi;
1163
+ MemOpIdx oi;
1164
MemOp opc;
1165
#ifdef CONFIG_SOFTMMU
1166
int mem_index;
1167
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
1168
index XXXXXXX..XXXXXXX 100644
1169
--- a/tcg/i386/tcg-target.c.inc
1170
+++ b/tcg/i386/tcg-target.c.inc
1171
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
1172
* for a load or store, so that we can later generate the correct helper code
1173
*/
1174
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
1175
- TCGMemOpIdx oi,
1176
+ MemOpIdx oi,
1177
TCGReg datalo, TCGReg datahi,
1178
TCGReg addrlo, TCGReg addrhi,
1179
tcg_insn_unit *raddr,
1180
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
1181
*/
1182
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1183
{
1184
- TCGMemOpIdx oi = l->oi;
1185
+ MemOpIdx oi = l->oi;
1186
MemOp opc = get_memop(oi);
1187
TCGReg data_reg;
1188
tcg_insn_unit **label_ptr = &l->label_ptr[0];
1189
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1190
*/
1191
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1192
{
1193
- TCGMemOpIdx oi = l->oi;
1194
+ MemOpIdx oi = l->oi;
1195
MemOp opc = get_memop(oi);
1196
MemOp s_bits = opc & MO_SIZE;
1197
tcg_insn_unit **label_ptr = &l->label_ptr[0];
1198
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
1199
{
1200
TCGReg datalo, datahi, addrlo;
1201
TCGReg addrhi __attribute__((unused));
1202
- TCGMemOpIdx oi;
1203
+ MemOpIdx oi;
1204
MemOp opc;
1205
#if defined(CONFIG_SOFTMMU)
1206
int mem_index;
1207
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
1208
{
1209
TCGReg datalo, datahi, addrlo;
1210
TCGReg addrhi __attribute__((unused));
1211
- TCGMemOpIdx oi;
1212
+ MemOpIdx oi;
1213
MemOp opc;
1214
#if defined(CONFIG_SOFTMMU)
1215
int mem_index;
1216
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
1217
index XXXXXXX..XXXXXXX 100644
1218
--- a/tcg/mips/tcg-target.c.inc
1219
+++ b/tcg/mips/tcg-target.c.inc
1220
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
1221
* Clobbers TMP0, TMP1, TMP2, TMP3.
1222
*/
1223
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
1224
- TCGReg addrh, TCGMemOpIdx oi,
1225
+ TCGReg addrh, MemOpIdx oi,
1226
tcg_insn_unit *label_ptr[2], bool is_load)
1227
{
1228
MemOp opc = get_memop(oi);
1229
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
1230
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl);
1231
}
1232
1233
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
1234
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
1235
TCGType ext,
1236
TCGReg datalo, TCGReg datahi,
1237
TCGReg addrlo, TCGReg addrhi,
1238
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
1239
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1240
{
1241
const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
1242
- TCGMemOpIdx oi = l->oi;
1243
+ MemOpIdx oi = l->oi;
1244
MemOp opc = get_memop(oi);
1245
TCGReg v0;
1246
int i;
1247
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1248
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1249
{
1250
const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
1251
- TCGMemOpIdx oi = l->oi;
1252
+ MemOpIdx oi = l->oi;
1253
MemOp opc = get_memop(oi);
1254
MemOp s_bits = opc & MO_SIZE;
1255
int i;
1256
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
1257
{
1258
TCGReg addr_regl, addr_regh __attribute__((unused));
1259
TCGReg data_regl, data_regh;
1260
- TCGMemOpIdx oi;
1261
+ MemOpIdx oi;
1262
MemOp opc;
1263
#if defined(CONFIG_SOFTMMU)
1264
tcg_insn_unit *label_ptr[2];
1265
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
1266
{
1267
TCGReg addr_regl, addr_regh __attribute__((unused));
1268
TCGReg data_regl, data_regh;
1269
- TCGMemOpIdx oi;
1270
+ MemOpIdx oi;
1271
MemOp opc;
1272
#if defined(CONFIG_SOFTMMU)
1273
tcg_insn_unit *label_ptr[2];
1274
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
1275
index XXXXXXX..XXXXXXX 100644
1276
--- a/tcg/ppc/tcg-target.c.inc
1277
+++ b/tcg/ppc/tcg-target.c.inc
1278
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
1279
/* Record the context of a call to the out of line helper code for the slow
1280
path for a load or store, so that we can later generate the correct
1281
helper code. */
1282
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1283
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
1284
TCGReg datalo_reg, TCGReg datahi_reg,
1285
TCGReg addrlo_reg, TCGReg addrhi_reg,
1286
tcg_insn_unit *raddr, tcg_insn_unit *lptr)
1287
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1288
1289
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1290
{
1291
- TCGMemOpIdx oi = lb->oi;
1292
+ MemOpIdx oi = lb->oi;
1293
MemOp opc = get_memop(oi);
1294
TCGReg hi, lo, arg = TCG_REG_R3;
1295
1296
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1297
1298
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1299
{
1300
- TCGMemOpIdx oi = lb->oi;
1301
+ MemOpIdx oi = lb->oi;
1302
MemOp opc = get_memop(oi);
1303
MemOp s_bits = opc & MO_SIZE;
1304
TCGReg hi, lo, arg = TCG_REG_R3;
1305
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
1306
{
1307
TCGReg datalo, datahi, addrlo, rbase;
1308
TCGReg addrhi __attribute__((unused));
1309
- TCGMemOpIdx oi;
1310
+ MemOpIdx oi;
1311
MemOp opc, s_bits;
1312
#ifdef CONFIG_SOFTMMU
1313
int mem_index;
1314
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
1315
{
1316
TCGReg datalo, datahi, addrlo, rbase;
1317
TCGReg addrhi __attribute__((unused));
1318
- TCGMemOpIdx oi;
1319
+ MemOpIdx oi;
1320
MemOp opc, s_bits;
1321
#ifdef CONFIG_SOFTMMU
1322
int mem_index;
1323
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
1324
index XXXXXXX..XXXXXXX 100644
1325
--- a/tcg/riscv/tcg-target.c.inc
1326
+++ b/tcg/riscv/tcg-target.c.inc
1327
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
1328
#include "../tcg-ldst.c.inc"
1329
1330
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1331
- * TCGMemOpIdx oi, uintptr_t ra)
1332
+ * MemOpIdx oi, uintptr_t ra)
1333
*/
1334
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
1335
[MO_UB] = helper_ret_ldub_mmu,
1336
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
1337
};
1338
1339
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1340
- * uintxx_t val, TCGMemOpIdx oi,
1341
+ * uintxx_t val, MemOpIdx oi,
1342
* uintptr_t ra)
1343
*/
1344
static void * const qemu_st_helpers[MO_SIZE + 1] = {
1345
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
1346
}
1347
1348
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
1349
- TCGReg addrh, TCGMemOpIdx oi,
1350
+ TCGReg addrh, MemOpIdx oi,
1351
tcg_insn_unit **label_ptr, bool is_load)
1352
{
1353
MemOp opc = get_memop(oi);
1354
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
1355
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
1356
}
1357
1358
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
1359
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
1360
TCGType ext,
1361
TCGReg datalo, TCGReg datahi,
1362
TCGReg addrlo, TCGReg addrhi,
1363
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
1364
1365
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1366
{
1367
- TCGMemOpIdx oi = l->oi;
1368
+ MemOpIdx oi = l->oi;
1369
MemOp opc = get_memop(oi);
1370
TCGReg a0 = tcg_target_call_iarg_regs[0];
1371
TCGReg a1 = tcg_target_call_iarg_regs[1];
1372
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1373
1374
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1375
{
1376
- TCGMemOpIdx oi = l->oi;
1377
+ MemOpIdx oi = l->oi;
1378
MemOp opc = get_memop(oi);
1379
MemOp s_bits = opc & MO_SIZE;
1380
TCGReg a0 = tcg_target_call_iarg_regs[0];
1381
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
1382
{
1383
TCGReg addr_regl, addr_regh __attribute__((unused));
1384
TCGReg data_regl, data_regh;
1385
- TCGMemOpIdx oi;
1386
+ MemOpIdx oi;
1387
MemOp opc;
1388
#if defined(CONFIG_SOFTMMU)
1389
tcg_insn_unit *label_ptr[1];
1390
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
1391
{
1392
TCGReg addr_regl, addr_regh __attribute__((unused));
1393
TCGReg data_regl, data_regh;
1394
- TCGMemOpIdx oi;
1395
+ MemOpIdx oi;
1396
MemOp opc;
1397
#if defined(CONFIG_SOFTMMU)
1398
tcg_insn_unit *label_ptr[1];
1399
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
1400
index XXXXXXX..XXXXXXX 100644
1401
--- a/tcg/s390/tcg-target.c.inc
1402
+++ b/tcg/s390/tcg-target.c.inc
1403
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
1404
return addr_reg;
1405
}
1406
1407
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1408
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
1409
TCGReg data, TCGReg addr,
1410
tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1411
{
1412
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1413
{
1414
TCGReg addr_reg = lb->addrlo_reg;
1415
TCGReg data_reg = lb->datalo_reg;
1416
- TCGMemOpIdx oi = lb->oi;
1417
+ MemOpIdx oi = lb->oi;
1418
MemOp opc = get_memop(oi);
1419
1420
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1421
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1422
{
1423
TCGReg addr_reg = lb->addrlo_reg;
1424
TCGReg data_reg = lb->datalo_reg;
1425
- TCGMemOpIdx oi = lb->oi;
1426
+ MemOpIdx oi = lb->oi;
1427
MemOp opc = get_memop(oi);
1428
1429
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1430
@@ -XXX,XX +XXX,XX @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1431
#endif /* CONFIG_SOFTMMU */
1432
1433
static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1434
- TCGMemOpIdx oi)
1435
+ MemOpIdx oi)
1436
{
1437
MemOp opc = get_memop(oi);
1438
#ifdef CONFIG_SOFTMMU
1439
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1440
}
1441
1442
static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1443
- TCGMemOpIdx oi)
1444
+ MemOpIdx oi)
1445
{
1446
MemOp opc = get_memop(oi);
1447
#ifdef CONFIG_SOFTMMU
1448
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
1449
index XXXXXXX..XXXXXXX 100644
1450
--- a/tcg/sparc/tcg-target.c.inc
1451
+++ b/tcg/sparc/tcg-target.c.inc
1452
@@ -XXX,XX +XXX,XX @@ static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
1453
};
1454
1455
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1456
- TCGMemOpIdx oi, bool is_64)
1457
+ MemOpIdx oi, bool is_64)
1458
{
1459
MemOp memop = get_memop(oi);
1460
#ifdef CONFIG_SOFTMMU
1461
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1462
}
1463
1464
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
1465
- TCGMemOpIdx oi)
1466
+ MemOpIdx oi)
1467
{
1468
MemOp memop = get_memop(oi);
1469
#ifdef CONFIG_SOFTMMU
1470
diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc
1471
index XXXXXXX..XXXXXXX 100644
1472
--- a/tcg/tcg-ldst.c.inc
1473
+++ b/tcg/tcg-ldst.c.inc
1474
@@ -XXX,XX +XXX,XX @@
1475
1476
typedef struct TCGLabelQemuLdst {
1477
bool is_ld; /* qemu_ld: true, qemu_st: false */
1478
- TCGMemOpIdx oi;
1479
+ MemOpIdx oi;
1480
TCGType type; /* result type of a load */
1481
TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
1482
TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
1483
-- 
2.25.1
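Between these two patches it may help to keep the MemOpIdx encoding in mind: the TCGMemOpIdx -> MemOpIdx rename above is purely mechanical, and the value itself still combines a MemOp with an mmu index. The standalone C sketch below mirrors the accessors in exec/memopidx.h; treat the exact field widths (MemOp shifted left by four, mmu index in the low four bits) and the local typedefs as assumptions of the sketch rather than quotations of the header.

#include <assert.h>
#include <stdint.h>

typedef unsigned MemOp;      /* stand-in for the real enum MemOp */
typedef uint32_t MemOpIdx;   /* combined MemOp + mmu index, as assumed here */

/* Assumed layout: MemOp in the upper bits, mmu index in the low 4 bits. */
static MemOpIdx make_memop_idx(MemOp op, unsigned idx)
{
    assert(idx <= 15);
    return (op << 4) | idx;
}

static MemOp get_memop(MemOpIdx oi)
{
    return oi >> 4;
}

static unsigned get_mmuidx(MemOpIdx oi)
{
    return oi & 15;
}

int main(void)
{
    MemOpIdx oi = make_memop_idx(2 /* e.g. MO_32 */, 3);
    assert(get_memop(oi) == 2);
    assert(get_mmuidx(oi) == 3);
    return 0;
}

Compiled on its own the asserts pass; inside QEMU the real MemOp enum and tcg_debug_assert() are used instead.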
Return -1 instead of 2 for failure, so that we can
use comparisons against 0 for all cases.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 145 +++++++++++++++++++++++++------------------------
 1 file changed, 74 insertions(+), 71 deletions(-)

Use the MemOpIdx directly, rather than the rearrangement
of the same bits currently done by the trace infrastructure.
Pass in enum qemu_plugin_mem_rw so that we are able to treat
read-modify-write operations as a single operation.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/plugin.h         | 26 ++++++++++++++++++++++++--
 accel/tcg/cputlb.c            |  4 ++--
 accel/tcg/plugin-gen.c        |  5 ++---
 accel/tcg/user-exec.c         | 28 ++++++++++++++--------------
 plugins/api.c                 | 19 +++++++++++--------
 plugins/core.c                | 10 +++++-----
 tcg/tcg-op.c                  | 30 +++++++++++++++++++++---------
 accel/tcg/atomic_common.c.inc | 13 +++----------
 8 files changed, 82 insertions(+), 53 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
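For reference before the diffs, a minimal self-contained sketch of the meminfo packing that the plugin patch introduces: the MemOpIdx sits in the low 16 bits and the read/write flags sit above it, exactly as make_plugin_meminfo() and get_plugin_meminfo_rw() below do. The typedefs and enum values are local stand-ins so the sketch compiles by itself.

#include <assert.h>
#include <stdint.h>

typedef uint32_t MemOpIdx;             /* assumed to fit in 16 bits here */
typedef uint32_t qemu_plugin_meminfo_t;

enum qemu_plugin_mem_rw {
    QEMU_PLUGIN_MEM_R  = 1,
    QEMU_PLUGIN_MEM_W  = 2,
    QEMU_PLUGIN_MEM_RW = 3,
};

/* MemOpIdx in the low 16 bits, read/write flags above, as in the patch. */
static qemu_plugin_meminfo_t make_plugin_meminfo(MemOpIdx oi,
                                                 enum qemu_plugin_mem_rw rw)
{
    return oi | (rw << 16);
}

static enum qemu_plugin_mem_rw get_plugin_meminfo_rw(qemu_plugin_meminfo_t i)
{
    return i >> 16;
}

int main(void)
{
    qemu_plugin_meminfo_t info = make_plugin_meminfo(0x123, QEMU_PLUGIN_MEM_RW);
    assert(get_plugin_meminfo_rw(info) == QEMU_PLUGIN_MEM_RW);
    assert((info & 0xffff) == 0x123);
    return 0;
}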
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
20
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
21
--- a/include/qemu/plugin.h
13
--- a/tcg/optimize.c
22
+++ b/include/qemu/plugin.h
14
+++ b/tcg/optimize.c
23
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
24
#include "qemu/error-report.h"
25
#include "qemu/queue.h"
26
#include "qemu/option.h"
27
+#include "exec/memopidx.h"
28
29
/*
30
* Events that plugins can subscribe to.
31
@@ -XXX,XX +XXX,XX @@ enum qemu_plugin_event {
32
struct qemu_plugin_desc;
33
typedef QTAILQ_HEAD(, qemu_plugin_desc) QemuPluginList;
34
35
+/*
36
+ * Construct a qemu_plugin_meminfo_t.
37
+ */
38
+static inline qemu_plugin_meminfo_t
39
+make_plugin_meminfo(MemOpIdx oi, enum qemu_plugin_mem_rw rw)
40
+{
41
+ return oi | (rw << 16);
42
+}
43
+
44
+/*
45
+ * Extract the memory operation direction from a qemu_plugin_meminfo_t.
46
+ * Other portions may be extracted via get_memop and get_mmuidx.
47
+ */
48
+static inline enum qemu_plugin_mem_rw
49
+get_plugin_meminfo_rw(qemu_plugin_meminfo_t i)
50
+{
51
+ return i >> 16;
52
+}
53
+
54
#ifdef CONFIG_PLUGIN
55
extern QemuOptsList qemu_plugin_opts;
56
57
@@ -XXX,XX +XXX,XX @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1,
58
uint64_t a6, uint64_t a7, uint64_t a8);
59
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret);
60
61
-void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t meminfo);
62
+void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
63
+ MemOpIdx oi, enum qemu_plugin_mem_rw rw);
64
65
void qemu_plugin_flush_cb(void);
66
67
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
68
{ }
69
70
static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
71
- uint32_t meminfo)
72
+ MemOpIdx oi,
73
+ enum qemu_plugin_mem_rw rw)
74
{ }
75
76
static inline void qemu_plugin_flush_cb(void)
77
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/accel/tcg/cputlb.c
80
+++ b/accel/tcg/cputlb.c
81
@@ -XXX,XX +XXX,XX @@ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
82
83
ret = full_load(env, addr, oi, retaddr);
84
85
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
86
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
87
88
return ret;
89
}
90
@@ -XXX,XX +XXX,XX @@ cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
91
92
store_helper(env, addr, val, oi, retaddr, op);
93
94
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
95
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
96
}
97
98
void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
99
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
100
index XXXXXXX..XXXXXXX 100644
101
--- a/accel/tcg/plugin-gen.c
102
+++ b/accel/tcg/plugin-gen.c
103
@@ -XXX,XX +XXX,XX @@
104
#include "qemu/osdep.h"
105
#include "tcg/tcg.h"
106
#include "tcg/tcg-op.h"
107
-#include "trace/mem.h"
108
#include "exec/exec-all.h"
109
#include "exec/plugin-gen.h"
110
#include "exec/translator.h"
111
@@ -XXX,XX +XXX,XX @@ static void gen_mem_wrapped(enum plugin_gen_cb type,
112
const union mem_gen_fn *f, TCGv addr,
113
uint32_t info, bool is_mem)
114
{
115
- int wr = !!(info & TRACE_MEM_ST);
116
+ enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
117
118
- gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
119
+ gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
120
if (is_mem) {
121
f->mem_fn(addr, info);
122
} else {
123
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
124
index XXXXXXX..XXXXXXX 100644
125
--- a/accel/tcg/user-exec.c
126
+++ b/accel/tcg/user-exec.c
127
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
128
129
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
130
ret = ldub_p(g2h(env_cpu(env), ptr));
131
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
132
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
133
return ret;
134
}
135
136
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
137
138
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
139
ret = lduw_be_p(g2h(env_cpu(env), ptr));
140
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
141
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
142
return ret;
143
}
144
145
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
146
147
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
148
ret = ldl_be_p(g2h(env_cpu(env), ptr));
149
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
150
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
151
return ret;
152
}
153
154
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
155
156
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
157
ret = ldq_be_p(g2h(env_cpu(env), ptr));
158
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
159
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
160
return ret;
161
}
162
163
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
164
165
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
166
ret = lduw_le_p(g2h(env_cpu(env), ptr));
167
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
168
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
169
return ret;
170
}
171
172
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
173
174
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
175
ret = ldl_le_p(g2h(env_cpu(env), ptr));
176
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
177
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
178
return ret;
179
}
180
181
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
182
183
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
184
ret = ldq_le_p(g2h(env_cpu(env), ptr));
185
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
186
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
187
return ret;
188
}
189
190
@@ -XXX,XX +XXX,XX @@ void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
191
192
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
193
stb_p(g2h(env_cpu(env), ptr), val);
194
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
195
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
196
}
197
198
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
199
@@ -XXX,XX +XXX,XX @@ void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
200
201
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
202
stw_be_p(g2h(env_cpu(env), ptr), val);
203
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
204
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
205
}
206
207
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
208
@@ -XXX,XX +XXX,XX @@ void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
209
210
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
211
stl_be_p(g2h(env_cpu(env), ptr), val);
212
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
213
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
214
}
215
216
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
217
@@ -XXX,XX +XXX,XX @@ void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
218
219
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
220
stq_be_p(g2h(env_cpu(env), ptr), val);
221
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
222
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
223
}
224
225
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
226
@@ -XXX,XX +XXX,XX @@ void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
227
228
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
229
stw_le_p(g2h(env_cpu(env), ptr), val);
230
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
231
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
232
}
233
234
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
235
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
236
237
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
238
stl_le_p(g2h(env_cpu(env), ptr), val);
239
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
240
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
241
}
242
243
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
244
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
245
246
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
247
stq_le_p(g2h(env_cpu(env), ptr), val);
248
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
249
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
250
}
251
252
void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
253
diff --git a/plugins/api.c b/plugins/api.c
254
index XXXXXXX..XXXXXXX 100644
255
--- a/plugins/api.c
256
+++ b/plugins/api.c
257
@@ -XXX,XX +XXX,XX @@
258
#include "qemu/plugin-memory.h"
259
#include "hw/boards.h"
260
#endif
261
-#include "trace/mem.h"
262
263
/* Uninstall and Reset handlers */
264
265
@@ -XXX,XX +XXX,XX @@ const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn)
266
267
unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info)
268
{
269
- return info & TRACE_MEM_SZ_SHIFT_MASK;
270
+ MemOp op = get_memop(info);
271
+ return op & MO_SIZE;
272
}
273
274
bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info)
275
{
276
- return !!(info & TRACE_MEM_SE);
277
+ MemOp op = get_memop(info);
278
+ return op & MO_SIGN;
279
}
280
281
bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info)
282
{
283
- return !!(info & TRACE_MEM_BE);
284
+ MemOp op = get_memop(info);
285
+ return (op & MO_BSWAP) == MO_BE;
286
}
287
288
bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
289
{
290
- return !!(info & TRACE_MEM_ST);
291
+ return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W;
292
}
293
294
/*
295
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
296
{
297
#ifdef CONFIG_SOFTMMU
298
CPUState *cpu = current_cpu;
299
- unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT;
300
- hwaddr_info.is_store = info & TRACE_MEM_ST;
301
+ unsigned int mmu_idx = get_mmuidx(info);
302
+ enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
303
+ hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0;
304
305
if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
306
- info & TRACE_MEM_ST, &hwaddr_info)) {
307
+ hwaddr_info.is_store, &hwaddr_info)) {
308
error_report("invalid use of qemu_plugin_get_hwaddr");
309
return NULL;
310
}
311
diff --git a/plugins/core.c b/plugins/core.c
312
index XXXXXXX..XXXXXXX 100644
313
--- a/plugins/core.c
314
+++ b/plugins/core.c
315
@@ -XXX,XX +XXX,XX @@
316
#include "exec/helper-proto.h"
317
#include "tcg/tcg.h"
318
#include "tcg/tcg-op.h"
319
-#include "trace/mem.h" /* mem_info macros */
320
#include "plugin.h"
321
#include "qemu/compiler.h"
322
323
@@ -XXX,XX +XXX,XX @@ void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
324
}
16
}
325
}
17
}
326
18
327
-void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
19
-/* Return 2 if the condition can't be simplified, and the result
328
+void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
20
- of the condition (0 or 1) if it can */
329
+ MemOpIdx oi, enum qemu_plugin_mem_rw rw)
21
-static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
22
- TCGArg y, TCGCond c)
23
+/*
24
+ * Return -1 if the condition can't be simplified,
25
+ * and the result of the condition (0 or 1) if it can.
26
+ */
27
+static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
28
+ TCGArg y, TCGCond c)
330
{
29
{
331
GArray *arr = cpu->plugin_mem_cbs;
30
uint64_t xv = arg_info(x)->val;
332
size_t i;
31
uint64_t yv = arg_info(y)->val;
333
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
32
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
334
for (i = 0; i < arr->len; i++) {
33
case TCG_COND_GEU:
335
struct qemu_plugin_dyn_cb *cb =
34
return 1;
336
&g_array_index(arr, struct qemu_plugin_dyn_cb, i);
35
default:
337
- int w = !!(info & TRACE_MEM_ST) + 1;
36
- return 2;
338
37
+ return -1;
339
- if (!(w & cb->rw)) {
340
+ if (!(rw & cb->rw)) {
341
break;
342
}
38
}
343
switch (cb->type) {
39
}
344
case PLUGIN_CB_REGULAR:
40
- return 2;
345
- cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp);
41
+ return -1;
346
+ cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
347
+ vaddr, cb->userp);
348
break;
349
case PLUGIN_CB_INLINE:
350
exec_inline_op(cb);
351
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
352
index XXXXXXX..XXXXXXX 100644
353
--- a/tcg/tcg-op.c
354
+++ b/tcg/tcg-op.c
355
@@ -XXX,XX +XXX,XX @@ static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr)
356
return vaddr;
357
}
42
}
358
43
359
-static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
44
-/* Return 2 if the condition can't be simplified, and the result
360
+static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi,
45
- of the condition (0 or 1) if it can */
361
+ enum qemu_plugin_mem_rw rw)
46
-static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
47
+/*
48
+ * Return -1 if the condition can't be simplified,
49
+ * and the result of the condition (0 or 1) if it can.
50
+ */
51
+static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
362
{
52
{
363
#ifdef CONFIG_PLUGIN
53
TCGArg al = p1[0], ah = p1[1];
364
if (tcg_ctx->plugin_insn != NULL) {
54
TCGArg bl = p2[0], bh = p2[1];
365
+ qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);
55
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
366
plugin_gen_empty_mem_callback(vaddr, info);
56
if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
367
tcg_temp_free(vaddr);
57
return do_constant_folding_cond_eq(c);
368
}
58
}
369
@@ -XXX,XX +XXX,XX @@ static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
59
- return 2;
370
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
60
+ return -1;
371
{
372
MemOp orig_memop;
373
- uint16_t info = trace_mem_get_info(make_memop_idx(memop, idx), 0);
374
+ MemOpIdx oi;
375
+ uint16_t info;
376
377
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
378
memop = tcg_canonicalize_memop(memop, 0, 0);
379
+ oi = make_memop_idx(memop, idx);
380
+ info = trace_mem_get_info(oi, 0);
381
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
382
383
orig_memop = memop;
384
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
385
386
addr = plugin_prep_mem_callbacks(addr);
387
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
388
- plugin_gen_mem_callbacks(addr, info);
389
+ plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R);
390
391
if ((orig_memop ^ memop) & MO_BSWAP) {
392
switch (orig_memop & MO_SIZE) {
393
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
394
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
395
{
396
TCGv_i32 swap = NULL;
397
- uint16_t info = trace_mem_get_info(make_memop_idx(memop, idx), 1);
398
+ MemOpIdx oi;
399
+ uint16_t info;
400
401
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
402
memop = tcg_canonicalize_memop(memop, 0, 1);
403
+ oi = make_memop_idx(memop, idx);
404
+ info = trace_mem_get_info(oi, 1);
405
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
406
407
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
408
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
409
} else {
410
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
411
}
412
- plugin_gen_mem_callbacks(addr, info);
413
+ plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W);
414
415
if (swap) {
416
tcg_temp_free_i32(swap);
417
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
418
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
419
{
420
MemOp orig_memop;
421
+ MemOpIdx oi;
422
uint16_t info;
423
424
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
425
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
426
427
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
428
memop = tcg_canonicalize_memop(memop, 1, 0);
429
- info = trace_mem_get_info(make_memop_idx(memop, idx), 0);
430
+ oi = make_memop_idx(memop, idx);
431
+ info = trace_mem_get_info(oi, 0);
432
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
433
434
orig_memop = memop;
435
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
436
437
addr = plugin_prep_mem_callbacks(addr);
438
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
439
- plugin_gen_mem_callbacks(addr, info);
440
+ plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R);
441
442
if ((orig_memop ^ memop) & MO_BSWAP) {
443
int flags = (orig_memop & MO_SIGN
444
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
445
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
446
{
447
TCGv_i64 swap = NULL;
448
+ MemOpIdx oi;
449
uint16_t info;
450
451
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
452
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
453
454
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
455
memop = tcg_canonicalize_memop(memop, 1, 1);
456
- info = trace_mem_get_info(make_memop_idx(memop, idx), 1);
457
+ oi = make_memop_idx(memop, idx);
458
+ info = trace_mem_get_info(oi, 1);
459
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
460
461
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
462
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
463
464
addr = plugin_prep_mem_callbacks(addr);
465
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
466
- plugin_gen_mem_callbacks(addr, info);
467
+ plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W);
468
469
if (swap) {
470
tcg_temp_free_i64(swap);
471
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
472
index XXXXXXX..XXXXXXX 100644
473
--- a/accel/tcg/atomic_common.c.inc
474
+++ b/accel/tcg/atomic_common.c.inc
475
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
476
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
477
MemOpIdx oi)
478
{
479
- uint16_t info = trace_mem_get_info(oi, false);
480
-
481
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
482
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
483
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
484
}
61
}
485
62
486
#if HAVE_ATOMIC128
63
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
487
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
488
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
65
break;
489
MemOpIdx oi)
66
490
{
67
CASE_OP_32_64(setcond):
491
- uint16_t info = trace_mem_get_info(oi, false);
68
- tmp = do_constant_folding_cond(opc, op->args[1],
492
-
69
- op->args[2], op->args[3]);
493
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
70
- if (tmp != 2) {
494
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
71
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
495
}
72
+ i = do_constant_folding_cond(opc, op->args[1],
496
73
+ op->args[2], op->args[3]);
497
static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
74
+ if (i >= 0) {
498
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
75
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
499
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
76
continue;
500
MemOpIdx oi)
77
}
501
{
78
break;
502
- uint16_t info = trace_mem_get_info(oi, false);
79
503
-
80
CASE_OP_32_64(brcond):
504
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
81
- tmp = do_constant_folding_cond(opc, op->args[0],
505
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
82
- op->args[1], op->args[2]);
506
}
83
- switch (tmp) {
507
#endif
84
- case 0:
508
85
+ i = do_constant_folding_cond(opc, op->args[0],
86
+ op->args[1], op->args[2]);
87
+ if (i == 0) {
88
tcg_op_remove(s, op);
89
continue;
90
- case 1:
91
+ } else if (i > 0) {
92
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
93
op->opc = opc = INDEX_op_br;
94
op->args[0] = op->args[3];
95
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
96
break;
97
98
CASE_OP_32_64(movcond):
99
- tmp = do_constant_folding_cond(opc, op->args[1],
100
- op->args[2], op->args[5]);
101
- if (tmp != 2) {
102
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
103
+ i = do_constant_folding_cond(opc, op->args[1],
104
+ op->args[2], op->args[5]);
105
+ if (i >= 0) {
106
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
107
continue;
108
}
109
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
break;
112
113
case INDEX_op_brcond2_i32:
114
- tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
115
- op->args[4]);
116
- if (tmp == 0) {
117
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2],
118
+ op->args[4]);
119
+ if (i == 0) {
120
do_brcond_false:
121
tcg_op_remove(s, op);
122
continue;
123
}
124
- if (tmp == 1) {
125
+ if (i > 0) {
126
do_brcond_true:
127
op->opc = opc = INDEX_op_br;
128
op->args[0] = op->args[5];
129
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
130
if (op->args[4] == TCG_COND_EQ) {
131
/* Simplify EQ comparisons where one of the pairs
132
can be simplified. */
133
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
134
- op->args[0], op->args[2],
135
- TCG_COND_EQ);
136
- if (tmp == 0) {
137
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
138
+ op->args[0], op->args[2],
139
+ TCG_COND_EQ);
140
+ if (i == 0) {
141
goto do_brcond_false;
142
- } else if (tmp == 1) {
143
+ } else if (i > 0) {
144
goto do_brcond_high;
145
}
146
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
147
- op->args[1], op->args[3],
148
- TCG_COND_EQ);
149
- if (tmp == 0) {
150
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
151
+ op->args[1], op->args[3],
152
+ TCG_COND_EQ);
153
+ if (i == 0) {
154
goto do_brcond_false;
155
- } else if (tmp != 1) {
156
+ } else if (i < 0) {
157
break;
158
}
159
do_brcond_low:
160
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
161
if (op->args[4] == TCG_COND_NE) {
162
/* Simplify NE comparisons where one of the pairs
163
can be simplified. */
164
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
165
- op->args[0], op->args[2],
166
- TCG_COND_NE);
167
- if (tmp == 0) {
168
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
169
+ op->args[0], op->args[2],
170
+ TCG_COND_NE);
171
+ if (i == 0) {
172
goto do_brcond_high;
173
- } else if (tmp == 1) {
174
+ } else if (i > 0) {
175
goto do_brcond_true;
176
}
177
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
178
- op->args[1], op->args[3],
179
- TCG_COND_NE);
180
- if (tmp == 0) {
181
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
182
+ op->args[1], op->args[3],
183
+ TCG_COND_NE);
184
+ if (i == 0) {
185
goto do_brcond_low;
186
- } else if (tmp == 1) {
187
+ } else if (i > 0) {
188
goto do_brcond_true;
189
}
190
}
191
break;
192
193
case INDEX_op_setcond2_i32:
194
- tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
195
- op->args[5]);
196
- if (tmp != 2) {
197
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3],
198
+ op->args[5]);
199
+ if (i >= 0) {
200
do_setcond_const:
201
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
202
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
203
continue;
204
}
205
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
206
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
207
if (op->args[5] == TCG_COND_EQ) {
208
/* Simplify EQ comparisons where one of the pairs
209
can be simplified. */
210
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
211
- op->args[1], op->args[3],
212
- TCG_COND_EQ);
213
- if (tmp == 0) {
214
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
215
+ op->args[1], op->args[3],
216
+ TCG_COND_EQ);
217
+ if (i == 0) {
218
goto do_setcond_const;
219
- } else if (tmp == 1) {
220
+ } else if (i > 0) {
221
goto do_setcond_high;
222
}
223
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
224
- op->args[2], op->args[4],
225
- TCG_COND_EQ);
226
- if (tmp == 0) {
227
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
228
+ op->args[2], op->args[4],
229
+ TCG_COND_EQ);
230
+ if (i == 0) {
231
goto do_setcond_high;
232
- } else if (tmp != 1) {
233
+ } else if (i < 0) {
234
break;
235
}
236
do_setcond_low:
237
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
238
if (op->args[5] == TCG_COND_NE) {
239
/* Simplify NE comparisons where one of the pairs
240
can be simplified. */
241
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
242
- op->args[1], op->args[3],
243
- TCG_COND_NE);
244
- if (tmp == 0) {
245
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
246
+ op->args[1], op->args[3],
247
+ TCG_COND_NE);
248
+ if (i == 0) {
249
goto do_setcond_high;
250
- } else if (tmp == 1) {
251
+ } else if (i > 0) {
252
goto do_setcond_const;
253
}
254
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
255
- op->args[2], op->args[4],
256
- TCG_COND_NE);
257
- if (tmp == 0) {
258
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
259
+ op->args[2], op->args[4],
260
+ TCG_COND_NE);
261
+ if (i == 0) {
262
goto do_setcond_low;
263
- } else if (tmp == 1) {
264
+ } else if (i > 0) {
265
goto do_setcond_const;
266
}
267
}
-- 
2.25.1
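As a footnote to the tcg/optimize.c change above, the -1/0/1 return convention can be seen in isolation in this small sketch; fold_eq_cond() is a made-up name and only handles equality, but it shows why callers can now test the result with a single comparison against zero.

#include <assert.h>

/* Return the known result of an equality test, or -1 if it cannot be decided. */
static int fold_eq_cond(int x_known, long x, int y_known, long y)
{
    if (x_known && y_known) {
        return x == y;   /* 0 or 1 */
    }
    return -1;           /* not simplifiable */
}

int main(void)
{
    assert(fold_eq_cond(1, 5, 1, 5) == 1);
    assert(fold_eq_cond(1, 5, 1, 6) == 0);
    assert(fold_eq_cond(0, 0, 1, 6) < 0);
    return 0;
}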
New patch
1
This will allow callers to tail call to these functions
and return true indicating processing complete.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
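A hypothetical caller, sketched against the helpers in tcg/optimize.c, shows the tail-call style this change enables; fold_example() and the x*0 rule are illustrative only and not part of the patch.

/*
 * Sketch only: assumes the OptContext, TCGOp, arg_is_const(), arg_info()
 * and tcg_opt_gen_movi() declarations from tcg/optimize.c are in scope.
 */
static bool fold_example(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == 0) {
        /* x * 0 -> 0: emit the move and report the op as fully handled. */
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    return false;   /* let the generic folding path continue */
}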
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
     return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
 }
 
-static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
+static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 {
     TCGTemp *dst_ts = arg_temp(dst);
     TCGTemp *src_ts = arg_temp(src);
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 
     if (ts_are_copies(dst_ts, src_ts)) {
         tcg_op_remove(ctx->tcg, op);
-        return;
+        return true;
     }
 
     reset_ts(dst_ts);
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
         di->is_const = si->is_const;
         di->val = si->val;
     }
+    return true;
 }
 
-static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
+static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                              TCGArg dst, uint64_t val)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
     /* Convert movi to mov with constant temp. */
     tv = tcg_constant_internal(type, val);
     init_ts_info(ctx, tv);
-    tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
+    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }
 
 static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
-- 
2.25.1
1
Copy z_mask into OptContext, for writeback to the
first output within the new function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 49 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 33 insertions(+), 16 deletions(-)

This is via expansion; don't actually set TCG_TARGET_HAS_cmpsel_vec.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/s390x/tcg-target.c.inc | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)
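The vector expansion in the s390x patch below reduces cmpsel to a compare producing an all-ones mask plus a bitwise select. The same idea in scalar, self-contained C (the function names here are invented for the illustration):

#include <assert.h>
#include <stdint.h>

/* Select via a mask: pick bits of x where mask is set, else bits of y. */
static uint32_t bitsel(uint32_t mask, uint32_t x, uint32_t y)
{
    return (x & mask) | (y & ~mask);
}

/* cmpsel for the EQ condition: x where a == b, otherwise y. */
static uint32_t cmpsel_eq(uint32_t a, uint32_t b, uint32_t x, uint32_t y)
{
    uint32_t mask = -(uint32_t)(a == b);   /* all-ones when equal */
    return bitsel(mask, x, y);
}

int main(void)
{
    assert(cmpsel_eq(1, 1, 0xAA, 0x55) == 0xAA);
    assert(cmpsel_eq(1, 2, 0xAA, 0x55) == 0x55);
    return 0;
}

When only the inverted compare is available, swapping the two select operands gives the same result, which is what expand_vec_cmpsel() does in the patch.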
10
8
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/s390x/tcg-target.c.inc
13
--- a/tcg/optimize.c
11
+++ b/tcg/s390x/tcg-target.c.inc
14
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
15
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
13
case INDEX_op_xor_vec:
16
TCGContext *tcg;
14
return 1;
17
TCGOp *prev_mb;
15
case INDEX_op_cmp_vec:
18
TCGTempSet temps_used;
16
+ case INDEX_op_cmpsel_vec:
19
+
17
case INDEX_op_rotrv_vec:
20
+ /* In flight values from optimization. */
18
return -1;
21
+ uint64_t z_mask;
19
case INDEX_op_mul_vec:
22
} OptContext;
20
@@ -XXX,XX +XXX,XX @@ static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
23
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
25
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
21
}
26
}
22
}
27
}
23
28
24
+static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
29
+static void finish_folding(OptContext *ctx, TCGOp *op)
25
+ TCGv_vec c1, TCGv_vec c2,
26
+ TCGv_vec v3, TCGv_vec v4, TCGCond cond)
27
+{
30
+{
28
+ TCGv_vec t = tcg_temp_new_vec(type);
31
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
32
+ int i, nb_oargs;
29
+
33
+
30
+ if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
34
+ /*
31
+ /* Invert the sense of the compare by swapping arguments. */
35
+ * For an opcode that ends a BB, reset all temp data.
32
+ tcg_gen_bitsel_vec(vece, v0, t, v4, v3);
36
+ * We do no cross-BB optimization.
33
+ } else {
37
+ */
34
+ tcg_gen_bitsel_vec(vece, v0, t, v3, v4);
38
+ if (def->flags & TCG_OPF_BB_END) {
39
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
40
+ ctx->prev_mb = NULL;
41
+ return;
35
+ }
42
+ }
36
+ tcg_temp_free_vec(t);
43
+
44
+ nb_oargs = def->nb_oargs;
45
+ for (i = 0; i < nb_oargs; i++) {
46
+ reset_temp(op->args[i]);
47
+ /*
48
+ * Save the corresponding known-zero bits mask for the
49
+ * first output argument (only one supported so far).
50
+ */
51
+ if (i == 0) {
52
+ arg_info(op->args[i])->z_mask = ctx->z_mask;
53
+ }
54
+ }
37
+}
55
+}
38
+
56
+
39
static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
57
static bool fold_call(OptContext *ctx, TCGOp *op)
40
TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
41
{
58
{
42
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
59
TCGContext *s = ctx->tcg;
43
TCGArg a0, ...)
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
44
{
61
partmask &= 0xffffffffu;
45
va_list va;
62
affected &= 0xffffffffu;
46
- TCGv_vec v0, v1, v2, t0;
63
}
47
+ TCGv_vec v0, v1, v2, v3, v4, t0;
64
+ ctx.z_mask = z_mask;
48
65
49
va_start(va, a0);
66
if (partmask == 0) {
50
v0 = temp_tcgv_vec(arg_temp(a0));
67
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
51
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
52
expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
69
break;
53
break;
70
}
54
71
55
+ case INDEX_op_cmpsel_vec:
72
- /* Some of the folding above can change opc. */
56
+ v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
73
- opc = op->opc;
57
+ v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
74
- def = &tcg_op_defs[opc];
58
+ expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
75
- if (def->flags & TCG_OPF_BB_END) {
59
+ break;
76
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
60
+
77
- } else {
61
case INDEX_op_rotrv_vec:
78
- int nb_oargs = def->nb_oargs;
62
t0 = tcg_temp_new_vec(type);
79
- for (i = 0; i < nb_oargs; i++) {
63
tcg_gen_neg_vec(vece, t0, v2);
80
- reset_temp(op->args[i]);
81
- /* Save the corresponding known-zero bits mask for the
82
- first output argument (only one supported so far). */
83
- if (i == 0) {
84
- arg_info(op->args[i])->z_mask = z_mask;
85
- }
86
- }
87
- }
88
+ finish_folding(&ctx, op);
89
90
/* Eliminate duplicate and redundant fence instructions. */
91
if (ctx.prev_mb) {
64
--
92
--
65
2.25.1
93
2.25.1
66
94
67
95
New patch
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++++++---
7
1 file changed, 6 insertions(+), 3 deletions(-)
1
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
14
uint64_t z_mask, partmask, affected, tmp;
15
TCGOpcode opc = op->opc;
16
const TCGOpDef *def;
17
+ bool done = false;
18
19
/* Calls are special. */
20
if (opc == INDEX_op_call) {
21
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
22
allocator where needed and possible. Also detect copies. */
23
switch (opc) {
24
CASE_OP_32_64_VEC(mov):
25
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
26
- continue;
27
+ done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
28
+ break;
29
30
case INDEX_op_dup_vec:
31
if (arg_is_const(op->args[1])) {
32
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
33
break;
34
}
35
36
- finish_folding(&ctx, op);
37
+ if (!done) {
38
+ finish_folding(&ctx, op);
39
+ }
40
41
/* Eliminate duplicate and redundant fence instructions. */
42
if (ctx.prev_mb) {
43
--
44
2.25.1
45
46
1
Reviewed-by: David Hildenbrand <david@redhat.com>
1
This puts the separate mb optimization into the same framework
2
as the others. While fold_qemu_{ld,st} are currently identical,
3
that won't last as more code gets moved.
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
8
---
4
tcg/s390x/tcg-target.c.inc | 72 +++++++++++++++++++++++++++++++++++---
9
tcg/optimize.c | 89 +++++++++++++++++++++++++++++---------------------
5
1 file changed, 68 insertions(+), 4 deletions(-)
10
1 file changed, 51 insertions(+), 38 deletions(-)
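The barrier half of this patch keeps the existing rule that two adjacent barriers collapse into one whose flags are the union of both. A tiny standalone sketch of that rule (the flag names are invented; TCG's real TCG_MO_*/TCG_BAR_* encoding differs):

#include <assert.h>

/* Illustrative barrier kinds, not TCG's actual flag values. */
enum { BAR_LD_LD = 1, BAR_LD_ST = 2, BAR_ST_LD = 4, BAR_ST_ST = 8 };

/* mb X; mb Y  =>  mb X|Y : keep one barrier with the combined flags. */
static unsigned merge_barriers(unsigned prev, unsigned next)
{
    return prev | next;
}

int main(void)
{
    unsigned m = merge_barriers(BAR_LD_LD | BAR_LD_ST, BAR_ST_ST);
    assert(m == (BAR_LD_LD | BAR_LD_ST | BAR_ST_ST));
    return 0;
}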
6
11
7
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/s390x/tcg-target.c.inc
14
--- a/tcg/optimize.c
10
+++ b/tcg/s390x/tcg-target.c.inc
15
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
16
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
12
RX_STC = 0x42,
17
return true;
13
RX_STH = 0x40,
14
15
+ VRRa_VLR = 0xe756,
16
+
17
+ VRSb_VLVG = 0xe722,
18
+ VRSc_VLGV = 0xe721,
19
+
20
VRX_VL = 0xe706,
21
VRX_VLLEZ = 0xe704,
22
VRX_VST = 0xe70e,
23
@@ -XXX,XX +XXX,XX @@ static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
24
| ((v4 & 0x10) << (4 + 0));
25
}
18
}
26
19
27
+static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
20
+static bool fold_mb(OptContext *ctx, TCGOp *op)
28
+ TCGReg v1, TCGReg v2, int m3)
29
+{
21
+{
30
+ tcg_debug_assert(is_vector_reg(v1));
22
+ /* Eliminate duplicate and redundant fence instructions. */
31
+ tcg_debug_assert(is_vector_reg(v2));
23
+ if (ctx->prev_mb) {
32
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
24
+ /*
33
+ tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
25
+ * Merge two barriers of the same type into one,
26
+ * or a weaker barrier into a stronger one,
27
+ * or two weaker barriers into a stronger one.
28
+ * mb X; mb Y => mb X|Y
29
+ * mb; strl => mb; st
30
+ * ldaq; mb => ld; mb
31
+ * ldaq; strl => ld; mb; st
32
+ * Other combinations are also merged into a strong
33
+ * barrier. This is stricter than specified but for
34
+ * the purposes of TCG is better than not optimizing.
35
+ */
36
+ ctx->prev_mb->args[0] |= op->args[0];
37
+ tcg_op_remove(ctx->tcg, op);
38
+ } else {
39
+ ctx->prev_mb = op;
40
+ }
41
+ return true;
34
+}
42
+}
35
+
43
+
36
+static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
44
+static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
37
+ intptr_t d2, TCGReg b2, TCGReg r3, int m4)
38
+{
45
+{
39
+ tcg_debug_assert(is_vector_reg(v1));
46
+ /* Opcodes that touch guest memory stop the mb optimization. */
40
+ tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
47
+ ctx->prev_mb = NULL;
41
+ tcg_debug_assert(is_general_reg(b2));
48
+ return false;
42
+ tcg_debug_assert(is_general_reg(r3));
43
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3);
44
+ tcg_out16(s, b2 << 12 | d2);
45
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
46
+}
49
+}
47
+
50
+
48
+static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1,
51
+static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
49
+ intptr_t d2, TCGReg b2, TCGReg v3, int m4)
50
+{
52
+{
51
+ tcg_debug_assert(is_general_reg(r1));
53
+ /* Opcodes that touch guest memory stop the mb optimization. */
52
+ tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
54
+ ctx->prev_mb = NULL;
53
+ tcg_debug_assert(is_general_reg(b2));
55
+ return false;
54
+ tcg_debug_assert(is_vector_reg(v3));
55
+ tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf));
56
+ tcg_out16(s, b2 << 12 | d2);
57
+ tcg_out16(s, (op & 0x00ff) | RXB(0, 0, v3, 0) | (m4 << 12));
58
+}
56
+}
59
+
57
+
60
static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
58
/* Propagate constants and copies, fold constant expressions. */
61
TCGReg b2, TCGReg x2, intptr_t d2, int m3)
59
void tcg_optimize(TCGContext *s)
62
{
60
{
63
@@ -XXX,XX +XXX,XX @@ static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
64
62
}
65
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
63
break;
66
{
64
67
- if (src != dst) {
65
+ case INDEX_op_mb:
68
- if (type == TCG_TYPE_I32) {
66
+ done = fold_mb(&ctx, op);
69
+ if (src == dst) {
70
+ return true;
71
+ }
72
+ switch (type) {
73
+ case TCG_TYPE_I32:
74
+ if (likely(is_general_reg(dst) && is_general_reg(src))) {
75
tcg_out_insn(s, RR, LR, dst, src);
76
- } else {
77
- tcg_out_insn(s, RRE, LGR, dst, src);
78
+ break;
67
+ break;
68
+ case INDEX_op_qemu_ld_i32:
69
+ case INDEX_op_qemu_ld_i64:
70
+ done = fold_qemu_ld(&ctx, op);
71
+ break;
72
+ case INDEX_op_qemu_st_i32:
73
+ case INDEX_op_qemu_st8_i32:
74
+ case INDEX_op_qemu_st_i64:
75
+ done = fold_qemu_st(&ctx, op);
76
+ break;
77
+
78
default:
79
break;
79
}
80
}
80
+ /* fallthru */
81
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
81
+
82
if (!done) {
82
+ case TCG_TYPE_I64:
83
finish_folding(&ctx, op);
83
+ if (likely(is_general_reg(dst))) {
84
}
84
+ if (likely(is_general_reg(src))) {
85
-
85
+ tcg_out_insn(s, RRE, LGR, dst, src);
86
- /* Eliminate duplicate and redundant fence instructions. */
86
+ } else {
87
- if (ctx.prev_mb) {
87
+ tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3);
88
- switch (opc) {
88
+ }
89
- case INDEX_op_mb:
89
+ break;
90
- /* Merge two barriers of the same type into one,
90
+ } else if (is_general_reg(src)) {
91
- * or a weaker barrier into a stronger one,
91
+ tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3);
92
- * or two weaker barriers into a stronger one.
92
+ break;
93
- * mb X; mb Y => mb X|Y
93
+ }
94
- * mb; strl => mb; st
94
+ /* fallthru */
95
- * ldaq; mb => ld; mb
95
+
96
- * ldaq; strl => ld; mb; st
96
+ case TCG_TYPE_V64:
97
- * Other combinations are also merged into a strong
97
+ case TCG_TYPE_V128:
98
- * barrier. This is stricter than specified but for
98
+ tcg_out_insn(s, VRRa, VLR, dst, src, 0);
99
- * the purposes of TCG is better than not optimizing.
99
+ break;
100
- */
100
+
101
- ctx.prev_mb->args[0] |= op->args[0];
101
+ default:
102
- tcg_op_remove(s, op);
102
+ g_assert_not_reached();
103
- break;
104
-
105
- default:
106
- /* Opcodes that end the block stop the optimization. */
107
- if ((def->flags & TCG_OPF_BB_END) == 0) {
108
- break;
109
- }
110
- /* fallthru */
111
- case INDEX_op_qemu_ld_i32:
112
- case INDEX_op_qemu_ld_i64:
113
- case INDEX_op_qemu_st_i32:
114
- case INDEX_op_qemu_st8_i32:
115
- case INDEX_op_qemu_st_i64:
116
- /* Opcodes that touch guest memory stop the optimization. */
117
- ctx.prev_mb = NULL;
118
- break;
119
- }
120
- } else if (opc == INDEX_op_mb) {
121
- ctx.prev_mb = op;
122
- }
103
}
123
}
104
return true;
105
}
124
}
106
--
125
--
107
2.25.1
126
2.25.1
108
127
109
128
1
We (will) often have the complete MemOpIdx handy, so use that.
1
Split out a whole bunch of placeholder functions, which are
2
2
currently identical. That won't last as more code gets moved.
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
Use CASE_32_64_VEC for some logical operators that previously
5
missed the addition of vectors.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
10
---
7
trace/mem.h | 32 +++++++++-----------------
11
tcg/optimize.c | 271 +++++++++++++++++++++++++++++++++++++++----------
8
accel/tcg/cputlb.c | 12 ++++------
12
1 file changed, 219 insertions(+), 52 deletions(-)
9
accel/tcg/user-exec.c | 42 +++++++++++++++++++++++------------
13
10
tcg/tcg-op.c | 8 +++----
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
accel/tcg/atomic_common.c.inc | 6 ++---
12
5 files changed, 49 insertions(+), 51 deletions(-)
13
14
diff --git a/trace/mem.h b/trace/mem.h
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/trace/mem.h
16
--- a/tcg/optimize.c
17
+++ b/trace/mem.h
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
19
#ifndef TRACE__MEM_H
19
}
20
#define TRACE__MEM_H
20
}
21
21
22
-#include "tcg/tcg.h"
22
+/*
23
+#include "exec/memopidx.h"
23
+ * The fold_* functions return true when processing is complete,
24
24
+ * usually by folding the operation to a constant or to a copy,
25
#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */
25
+ * and calling tcg_opt_gen_{mov,movi}. They may do other things,
26
#define TRACE_MEM_SE (1ULL << 4) /* sign extended (y/n) */
26
+ * like collect information about the value produced, for use in
27
@@ -XXX,XX +XXX,XX @@
27
+ * optimizing a subsequent operation.
28
#define TRACE_MEM_MMU_SHIFT 8 /* mmu idx */
28
+ *
29
29
+ * These first fold_* functions are all helpers, used by other
30
/**
30
+ * folders for more specific operations.
31
- * trace_mem_build_info:
31
+ */
32
+ * trace_mem_get_info:
32
+
33
*
33
+static bool fold_const1(OptContext *ctx, TCGOp *op)
34
* Return a value for the 'info' argument in guest memory access traces.
34
+{
35
*/
35
+ if (arg_is_const(op->args[1])) {
36
-static inline uint16_t trace_mem_build_info(int size_shift, bool sign_extend,
36
+ uint64_t t;
37
- MemOp endianness, bool store,
37
+
38
- unsigned int mmu_idx)
38
+ t = arg_info(op->args[1])->val;
39
+static inline uint16_t trace_mem_get_info(MemOpIdx oi, bool store)
39
+ t = do_constant_folding(op->opc, t, 0);
40
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
41
+ }
42
+ return false;
43
+}
44
+
45
+static bool fold_const2(OptContext *ctx, TCGOp *op)
46
+{
47
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
48
+ uint64_t t1 = arg_info(op->args[1])->val;
49
+ uint64_t t2 = arg_info(op->args[2])->val;
50
+
51
+ t1 = do_constant_folding(op->opc, t1, t2);
52
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
53
+ }
54
+ return false;
55
+}
56
+
57
+/*
58
+ * These outermost fold_<op> functions are sorted alphabetically.
59
+ */
60
+
61
+static bool fold_add(OptContext *ctx, TCGOp *op)
62
+{
63
+ return fold_const2(ctx, op);
64
+}
65
+
66
+static bool fold_and(OptContext *ctx, TCGOp *op)
67
+{
68
+ return fold_const2(ctx, op);
69
+}
70
+
71
+static bool fold_andc(OptContext *ctx, TCGOp *op)
72
+{
73
+ return fold_const2(ctx, op);
74
+}
75
+
76
static bool fold_call(OptContext *ctx, TCGOp *op)
40
{
77
{
41
+ MemOp op = get_memop(oi);
78
TCGContext *s = ctx->tcg;
42
+ uint32_t size_shift = op & MO_SIZE;
79
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
43
+ bool sign_extend = op & MO_SIGN;
80
return true;
44
+ bool big_endian = (op & MO_BSWAP) == MO_BE;
45
uint16_t res;
46
47
res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
48
if (sign_extend) {
49
res |= TRACE_MEM_SE;
50
}
51
- if (endianness == MO_BE) {
52
+ if (big_endian) {
53
res |= TRACE_MEM_BE;
54
}
55
if (store) {
56
res |= TRACE_MEM_ST;
57
}
58
#ifdef CONFIG_SOFTMMU
59
- res |= mmu_idx << TRACE_MEM_MMU_SHIFT;
60
+ res |= get_mmuidx(oi) << TRACE_MEM_MMU_SHIFT;
61
#endif
62
+
63
return res;
64
}
81
}
65
82
83
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
84
+{
85
+ return fold_const1(ctx, op);
86
+}
87
+
88
+static bool fold_divide(OptContext *ctx, TCGOp *op)
89
+{
90
+ return fold_const2(ctx, op);
91
+}
92
+
93
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
94
+{
95
+ return fold_const2(ctx, op);
96
+}
97
+
98
+static bool fold_exts(OptContext *ctx, TCGOp *op)
99
+{
100
+ return fold_const1(ctx, op);
101
+}
102
+
103
+static bool fold_extu(OptContext *ctx, TCGOp *op)
104
+{
105
+ return fold_const1(ctx, op);
106
+}
107
+
108
static bool fold_mb(OptContext *ctx, TCGOp *op)
109
{
110
/* Eliminate duplicate and redundant fence instructions. */
111
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
112
return true;
113
}
114
115
+static bool fold_mul(OptContext *ctx, TCGOp *op)
116
+{
117
+ return fold_const2(ctx, op);
118
+}
119
+
120
+static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
121
+{
122
+ return fold_const2(ctx, op);
123
+}
124
+
125
+static bool fold_nand(OptContext *ctx, TCGOp *op)
126
+{
127
+ return fold_const2(ctx, op);
128
+}
129
+
130
+static bool fold_neg(OptContext *ctx, TCGOp *op)
131
+{
132
+ return fold_const1(ctx, op);
133
+}
134
+
135
+static bool fold_nor(OptContext *ctx, TCGOp *op)
136
+{
137
+ return fold_const2(ctx, op);
138
+}
139
+
140
+static bool fold_not(OptContext *ctx, TCGOp *op)
141
+{
142
+ return fold_const1(ctx, op);
143
+}
144
+
145
+static bool fold_or(OptContext *ctx, TCGOp *op)
146
+{
147
+ return fold_const2(ctx, op);
148
+}
149
+
150
+static bool fold_orc(OptContext *ctx, TCGOp *op)
151
+{
152
+ return fold_const2(ctx, op);
153
+}
154
+
155
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
156
{
157
/* Opcodes that touch guest memory stop the mb optimization. */
158
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
159
return false;
160
}
161
162
+static bool fold_remainder(OptContext *ctx, TCGOp *op)
163
+{
164
+ return fold_const2(ctx, op);
165
+}
166
+
167
+static bool fold_shift(OptContext *ctx, TCGOp *op)
168
+{
169
+ return fold_const2(ctx, op);
170
+}
171
+
172
+static bool fold_sub(OptContext *ctx, TCGOp *op)
173
+{
174
+ return fold_const2(ctx, op);
175
+}
176
+
177
+static bool fold_xor(OptContext *ctx, TCGOp *op)
178
+{
179
+ return fold_const2(ctx, op);
180
+}
181
+
182
/* Propagate constants and copies, fold constant expressions. */
183
void tcg_optimize(TCGContext *s)
184
{
185
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
186
}
187
break;
188
189
- CASE_OP_32_64(not):
190
- CASE_OP_32_64(neg):
191
- CASE_OP_32_64(ext8s):
192
- CASE_OP_32_64(ext8u):
193
- CASE_OP_32_64(ext16s):
194
- CASE_OP_32_64(ext16u):
195
- CASE_OP_32_64(ctpop):
196
- case INDEX_op_ext32s_i64:
197
- case INDEX_op_ext32u_i64:
198
- case INDEX_op_ext_i32_i64:
199
- case INDEX_op_extu_i32_i64:
200
- case INDEX_op_extrl_i64_i32:
201
- case INDEX_op_extrh_i64_i32:
202
- if (arg_is_const(op->args[1])) {
203
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
204
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
205
- continue;
206
- }
207
- break;
66
-
208
-
67
-/**
209
CASE_OP_32_64(bswap16):
68
- * trace_mem_get_info:
210
CASE_OP_32_64(bswap32):
69
- *
211
case INDEX_op_bswap64_i64:
70
- * Return a value for the 'info' argument in guest memory access traces.
212
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
- */
213
}
72
-static inline uint16_t trace_mem_get_info(MemOp op,
214
break;
73
- unsigned int mmu_idx,
215
74
- bool store)
216
- CASE_OP_32_64(add):
75
-{
217
- CASE_OP_32_64(sub):
76
- return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
218
- CASE_OP_32_64(mul):
77
- op & MO_BSWAP, store,
219
- CASE_OP_32_64(or):
78
- mmu_idx);
220
- CASE_OP_32_64(and):
79
-}
221
- CASE_OP_32_64(xor):
222
- CASE_OP_32_64(shl):
223
- CASE_OP_32_64(shr):
224
- CASE_OP_32_64(sar):
225
- CASE_OP_32_64(rotl):
226
- CASE_OP_32_64(rotr):
227
- CASE_OP_32_64(andc):
228
- CASE_OP_32_64(orc):
229
- CASE_OP_32_64(eqv):
230
- CASE_OP_32_64(nand):
231
- CASE_OP_32_64(nor):
232
- CASE_OP_32_64(muluh):
233
- CASE_OP_32_64(mulsh):
234
- CASE_OP_32_64(div):
235
- CASE_OP_32_64(divu):
236
- CASE_OP_32_64(rem):
237
- CASE_OP_32_64(remu):
238
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
239
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
240
- arg_info(op->args[2])->val);
241
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
242
- continue;
243
- }
244
- break;
80
-
245
-
81
#endif /* TRACE__MEM_H */
246
CASE_OP_32_64(clz):
82
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
247
CASE_OP_32_64(ctz):
83
index XXXXXXX..XXXXXXX 100644
248
if (arg_is_const(op->args[1])) {
84
--- a/accel/tcg/cputlb.c
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
85
+++ b/accel/tcg/cputlb.c
250
}
86
@@ -XXX,XX +XXX,XX @@ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
251
break;
87
int mmu_idx, uintptr_t retaddr,
252
88
MemOp op, FullLoadHelper *full_load)
253
+ default:
89
{
254
+ break;
90
- uint16_t meminfo;
255
+
91
- MemOpIdx oi;
256
+ /* ---------------------------------------------------------- */
92
+ MemOpIdx oi = make_memop_idx(op, mmu_idx);
257
+ /* Sorted alphabetically by opcode as much as possible. */
93
+ uint16_t meminfo = trace_mem_get_info(oi, false);
258
+
94
uint64_t ret;
259
+ CASE_OP_32_64_VEC(add):
95
260
+ done = fold_add(&ctx, op);
96
- meminfo = trace_mem_get_info(op, mmu_idx, false);
261
+ break;
97
trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
262
+ CASE_OP_32_64_VEC(and):
98
263
+ done = fold_and(&ctx, op);
99
- oi = make_memop_idx(op, mmu_idx);
264
+ break;
100
ret = full_load(env, addr, oi, retaddr);
265
+ CASE_OP_32_64_VEC(andc):
101
266
+ done = fold_andc(&ctx, op);
102
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
267
+ break;
103
@@ -XXX,XX +XXX,XX @@ static inline void QEMU_ALWAYS_INLINE
268
+ CASE_OP_32_64(ctpop):
104
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
269
+ done = fold_ctpop(&ctx, op);
105
int mmu_idx, uintptr_t retaddr, MemOp op)
270
+ break;
106
{
271
+ CASE_OP_32_64(div):
107
- MemOpIdx oi;
272
+ CASE_OP_32_64(divu):
108
- uint16_t meminfo;
273
+ done = fold_divide(&ctx, op);
109
+ MemOpIdx oi = make_memop_idx(op, mmu_idx);
274
+ break;
110
+ uint16_t meminfo = trace_mem_get_info(oi, true);
275
+ CASE_OP_32_64(eqv):
111
276
+ done = fold_eqv(&ctx, op);
112
- meminfo = trace_mem_get_info(op, mmu_idx, true);
277
+ break;
113
trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
278
+ CASE_OP_32_64(ext8s):
114
279
+ CASE_OP_32_64(ext16s):
115
- oi = make_memop_idx(op, mmu_idx);
280
+ case INDEX_op_ext32s_i64:
116
store_helper(env, addr, val, oi, retaddr, op);
281
+ case INDEX_op_ext_i32_i64:
117
282
+ done = fold_exts(&ctx, op);
118
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
283
+ break;
119
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
284
+ CASE_OP_32_64(ext8u):
120
index XXXXXXX..XXXXXXX 100644
285
+ CASE_OP_32_64(ext16u):
121
--- a/accel/tcg/user-exec.c
286
+ case INDEX_op_ext32u_i64:
122
+++ b/accel/tcg/user-exec.c
287
+ case INDEX_op_extu_i32_i64:
123
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
288
+ case INDEX_op_extrl_i64_i32:
124
289
+ case INDEX_op_extrh_i64_i32:
125
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
290
+ done = fold_extu(&ctx, op);
126
{
291
+ break;
127
+ MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
292
case INDEX_op_mb:
128
+ uint16_t meminfo = trace_mem_get_info(oi, false);
293
done = fold_mb(&ctx, op);
129
uint32_t ret;
294
break;
130
- uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);
295
+ CASE_OP_32_64(mul):
131
296
+ done = fold_mul(&ctx, op);
132
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
297
+ break;
133
ret = ldub_p(g2h(env_cpu(env), ptr));
298
+ CASE_OP_32_64(mulsh):
134
@@ -XXX,XX +XXX,XX @@ int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
299
+ CASE_OP_32_64(muluh):
135
300
+ done = fold_mul_highpart(&ctx, op);
136
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
301
+ break;
137
{
302
+ CASE_OP_32_64(nand):
138
+ MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
303
+ done = fold_nand(&ctx, op);
139
+ uint16_t meminfo = trace_mem_get_info(oi, false);
304
+ break;
140
uint32_t ret;
305
+ CASE_OP_32_64(neg):
141
- uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false);
306
+ done = fold_neg(&ctx, op);
142
307
+ break;
143
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
308
+ CASE_OP_32_64(nor):
144
ret = lduw_be_p(g2h(env_cpu(env), ptr));
309
+ done = fold_nor(&ctx, op);
145
@@ -XXX,XX +XXX,XX @@ int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
310
+ break;
146
311
+ CASE_OP_32_64_VEC(not):
147
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
312
+ done = fold_not(&ctx, op);
148
{
313
+ break;
149
+ MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
314
+ CASE_OP_32_64_VEC(or):
150
+ uint16_t meminfo = trace_mem_get_info(oi, false);
315
+ done = fold_or(&ctx, op);
151
uint32_t ret;
316
+ break;
152
- uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false);
317
+ CASE_OP_32_64_VEC(orc):
153
318
+ done = fold_orc(&ctx, op);
154
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
319
+ break;
155
ret = ldl_be_p(g2h(env_cpu(env), ptr));
320
case INDEX_op_qemu_ld_i32:
156
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
321
case INDEX_op_qemu_ld_i64:
157
322
done = fold_qemu_ld(&ctx, op);
158
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
323
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
159
{
324
case INDEX_op_qemu_st_i64:
160
+ MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
325
done = fold_qemu_st(&ctx, op);
161
+ uint16_t meminfo = trace_mem_get_info(oi, false);
326
break;
162
uint64_t ret;
327
-
163
- uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false);
328
- default:
164
329
+ CASE_OP_32_64(rem):
165
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
330
+ CASE_OP_32_64(remu):
166
ret = ldq_be_p(g2h(env_cpu(env), ptr));
331
+ done = fold_remainder(&ctx, op);
167
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
332
+ break;
168
333
+ CASE_OP_32_64(rotl):
169
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
334
+ CASE_OP_32_64(rotr):
170
{
335
+ CASE_OP_32_64(sar):
171
+ MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
336
+ CASE_OP_32_64(shl):
172
+ uint16_t meminfo = trace_mem_get_info(oi, false);
337
+ CASE_OP_32_64(shr):
173
uint32_t ret;
338
+ done = fold_shift(&ctx, op);
174
- uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false);
339
+ break;
175
340
+ CASE_OP_32_64_VEC(sub):
176
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
341
+ done = fold_sub(&ctx, op);
177
ret = lduw_le_p(g2h(env_cpu(env), ptr));
342
+ break;
178
@@ -XXX,XX +XXX,XX @@ int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
343
+ CASE_OP_32_64_VEC(xor):
179
344
+ done = fold_xor(&ctx, op);
180
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
345
break;
181
{
346
}
182
+ MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
183
+ uint16_t meminfo = trace_mem_get_info(oi, false);
184
uint32_t ret;
185
- uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false);
186
187
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
188
ret = ldl_le_p(g2h(env_cpu(env), ptr));
189
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
190
191
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
192
{
193
+ MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
194
+ uint16_t meminfo = trace_mem_get_info(oi, false);
195
uint64_t ret;
196
- uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false);
197
198
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
199
ret = ldq_le_p(g2h(env_cpu(env), ptr));
200
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
201
202
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
203
{
204
- uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);
205
+ MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
206
+ uint16_t meminfo = trace_mem_get_info(oi, true);
207
208
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
209
stb_p(g2h(env_cpu(env), ptr), val);
210
@@ -XXX,XX +XXX,XX @@ void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
211
212
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
213
{
214
- uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true);
215
+ MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
216
+ uint16_t meminfo = trace_mem_get_info(oi, true);
217
218
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
219
stw_be_p(g2h(env_cpu(env), ptr), val);
220
@@ -XXX,XX +XXX,XX @@ void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
221
222
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
223
{
224
- uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true);
225
+ MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
226
+ uint16_t meminfo = trace_mem_get_info(oi, true);
227
228
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
229
stl_be_p(g2h(env_cpu(env), ptr), val);
230
@@ -XXX,XX +XXX,XX @@ void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
231
232
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
233
{
234
- uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true);
235
+ MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
236
+ uint16_t meminfo = trace_mem_get_info(oi, true);
237
238
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
239
stq_be_p(g2h(env_cpu(env), ptr), val);
240
@@ -XXX,XX +XXX,XX @@ void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
241
242
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
243
{
244
- uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true);
245
+ MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
246
+ uint16_t meminfo = trace_mem_get_info(oi, true);
247
248
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
249
stw_le_p(g2h(env_cpu(env), ptr), val);
250
@@ -XXX,XX +XXX,XX @@ void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
251
252
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
253
{
254
- uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true);
255
+ MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
256
+ uint16_t meminfo = trace_mem_get_info(oi, true);
257
258
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
259
stl_le_p(g2h(env_cpu(env), ptr), val);
260
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
261
262
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
263
{
264
- uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true);
265
+ MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
266
+ uint16_t meminfo = trace_mem_get_info(oi, true);
267
268
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
269
stq_le_p(g2h(env_cpu(env), ptr), val);
270
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
271
index XXXXXXX..XXXXXXX 100644
272
--- a/tcg/tcg-op.c
273
+++ b/tcg/tcg-op.c
274
@@ -XXX,XX +XXX,XX @@ static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
275
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
276
{
277
MemOp orig_memop;
278
- uint16_t info = trace_mem_get_info(memop, idx, 0);
279
+ uint16_t info = trace_mem_get_info(make_memop_idx(memop, idx), 0);
280
281
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
282
memop = tcg_canonicalize_memop(memop, 0, 0);
283
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
284
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
285
{
286
TCGv_i32 swap = NULL;
287
- uint16_t info = trace_mem_get_info(memop, idx, 1);
288
+ uint16_t info = trace_mem_get_info(make_memop_idx(memop, idx), 1);
289
290
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
291
memop = tcg_canonicalize_memop(memop, 0, 1);
292
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
293
294
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
295
memop = tcg_canonicalize_memop(memop, 1, 0);
296
- info = trace_mem_get_info(memop, idx, 0);
297
+ info = trace_mem_get_info(make_memop_idx(memop, idx), 0);
298
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
299
300
orig_memop = memop;
301
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
302
303
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
304
memop = tcg_canonicalize_memop(memop, 1, 1);
305
- info = trace_mem_get_info(memop, idx, 1);
306
+ info = trace_mem_get_info(make_memop_idx(memop, idx), 1);
307
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
308
309
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
310
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
311
index XXXXXXX..XXXXXXX 100644
312
--- a/accel/tcg/atomic_common.c.inc
313
+++ b/accel/tcg/atomic_common.c.inc
314
@@ -XXX,XX +XXX,XX @@ static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
315
MemOpIdx oi)
316
{
317
CPUState *cpu = env_cpu(env);
318
- uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
319
+ uint16_t info = trace_mem_get_info(oi, false);
320
321
trace_guest_mem_before_exec(cpu, addr, info);
322
trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
323
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
324
static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
325
MemOpIdx oi)
326
{
327
- uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
328
+ uint16_t info = trace_mem_get_info(oi, false);
329
330
trace_guest_mem_before_exec(env_cpu(env), addr, info);
331
332
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
333
static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
334
MemOpIdx oi)
335
{
336
- uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
337
+ uint16_t info = trace_mem_get_info(oi, true);
338
339
trace_guest_mem_before_exec(env_cpu(env), addr, info);
340
347
341
--
348
--
342
2.25.1
349
2.25.1
343
350
344
351
1
Reviewed-by: David Hildenbrand <david@redhat.com>
1
Reduce some code duplication by folding the NE and EQ cases.
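
As a rough standalone illustration (plain C, not QEMU code; the helper name
below is invented): NE over a double-word pair is simply the inverse of EQ
over the same pair, which is why fold_setcond2() can drive both conditions
through one path guarded by an 'inv' flag.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decide a 2x32-bit setcond for EQ/NE from the four halves. */
static bool setcond2(bool is_ne, uint32_t al, uint32_t ah,
                     uint32_t bl, uint32_t bh)
{
    bool eq = (al == bl) && (ah == bh);
    return is_ne ? !eq : eq;       /* one path, optionally inverted */
}

int main(void)
{
    printf("%d %d\n", setcond2(false, 1, 2, 1, 2),  /* EQ -> 1 */
                      setcond2(true, 1, 2, 1, 2));  /* NE -> 0 */
    return 0;
}
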
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/s390x/tcg-target.h | 2 +-
7
tcg/optimize.c | 145 ++++++++++++++++++++++++-------------------------
5
tcg/s390x/tcg-target.c.inc | 25 +++++++++++++++++++++++++
8
1 file changed, 72 insertions(+), 73 deletions(-)
6
2 files changed, 26 insertions(+), 1 deletion(-)
7
9
8
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/s390x/tcg-target.h
12
--- a/tcg/optimize.c
11
+++ b/tcg/s390x/tcg-target.h
13
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
13
#define TCG_TARGET_HAS_shv_vec 1
15
return fold_const2(ctx, op);
14
#define TCG_TARGET_HAS_mul_vec 1
16
}
15
#define TCG_TARGET_HAS_sat_vec 0
17
16
-#define TCG_TARGET_HAS_minmax_vec 0
18
+static bool fold_setcond2(OptContext *ctx, TCGOp *op)
17
+#define TCG_TARGET_HAS_minmax_vec 1
19
+{
18
#define TCG_TARGET_HAS_bitsel_vec 0
20
+ TCGCond cond = op->args[5];
19
#define TCG_TARGET_HAS_cmpsel_vec 0
21
+ int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
20
22
+ int inv = 0;
21
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
23
+
22
index XXXXXXX..XXXXXXX 100644
24
+ if (i >= 0) {
23
--- a/tcg/s390x/tcg-target.c.inc
25
+ goto do_setcond_const;
24
+++ b/tcg/s390x/tcg-target.c.inc
26
+ }
25
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
27
+
26
VRRc_VESRAV = 0xe77a,
28
+ switch (cond) {
27
VRRc_VESRLV = 0xe778,
29
+ case TCG_COND_LT:
28
VRRc_VML = 0xe7a2,
30
+ case TCG_COND_GE:
29
+ VRRc_VMN = 0xe7fe,
31
+ /*
30
+ VRRc_VMNL = 0xe7fc,
32
+ * Simplify LT/GE comparisons vs zero to a single compare
31
+ VRRc_VMX = 0xe7ff,
33
+ * vs the high word of the input.
32
+ VRRc_VMXL = 0xe7fd,
34
+ */
33
VRRc_VN = 0xe768,
35
+ if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
34
VRRc_VNC = 0xe769,
36
+ arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
35
VRRc_VNO = 0xe76b,
37
+ goto do_setcond_high;
36
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
38
+ }
37
tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
38
break;
39
40
+ case INDEX_op_smin_vec:
41
+ tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece);
42
+ break;
43
+ case INDEX_op_smax_vec:
44
+ tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece);
45
+ break;
46
+ case INDEX_op_umin_vec:
47
+ tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece);
48
+ break;
49
+ case INDEX_op_umax_vec:
50
+ tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
51
+ break;
39
+ break;
52
+
40
+
53
case INDEX_op_cmp_vec:
41
+ case TCG_COND_NE:
54
switch ((TCGCond)args[3]) {
42
+ inv = 1;
55
case TCG_COND_EQ:
43
+ QEMU_FALLTHROUGH;
56
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
44
+ case TCG_COND_EQ:
57
case INDEX_op_shri_vec:
45
+ /*
58
case INDEX_op_shrs_vec:
46
+ * Simplify EQ/NE comparisons where one of the pairs
59
case INDEX_op_shrv_vec:
47
+ * can be simplified.
60
+ case INDEX_op_smax_vec:
48
+ */
61
+ case INDEX_op_smin_vec:
49
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
62
case INDEX_op_sub_vec:
50
+ op->args[3], cond);
63
+ case INDEX_op_umax_vec:
51
+ switch (i ^ inv) {
64
+ case INDEX_op_umin_vec:
52
+ case 0:
65
case INDEX_op_xor_vec:
53
+ goto do_setcond_const;
66
return 1;
54
+ case 1:
67
case INDEX_op_cmp_vec:
55
+ goto do_setcond_high;
68
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
56
+ }
69
case INDEX_op_shlv_vec:
57
+
70
case INDEX_op_shrv_vec:
58
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
71
case INDEX_op_sarv_vec:
59
+ op->args[4], cond);
72
+ case INDEX_op_smax_vec:
60
+ switch (i ^ inv) {
73
+ case INDEX_op_smin_vec:
61
+ case 0:
74
+ case INDEX_op_umax_vec:
62
+ goto do_setcond_const;
75
+ case INDEX_op_umin_vec:
63
+ case 1:
76
return C_O1_I2(v, v, v);
64
+ op->args[2] = op->args[3];
77
case INDEX_op_rotls_vec:
65
+ op->args[3] = cond;
78
case INDEX_op_shls_vec:
66
+ op->opc = INDEX_op_setcond_i32;
67
+ break;
68
+ }
69
+ break;
70
+
71
+ default:
72
+ break;
73
+
74
+ do_setcond_high:
75
+ op->args[1] = op->args[2];
76
+ op->args[2] = op->args[4];
77
+ op->args[3] = cond;
78
+ op->opc = INDEX_op_setcond_i32;
79
+ break;
80
+ }
81
+ return false;
82
+
83
+ do_setcond_const:
84
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
85
+}
86
+
87
static bool fold_shift(OptContext *ctx, TCGOp *op)
88
{
89
return fold_const2(ctx, op);
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
}
92
break;
93
94
- case INDEX_op_setcond2_i32:
95
- i = do_constant_folding_cond2(&op->args[1], &op->args[3],
96
- op->args[5]);
97
- if (i >= 0) {
98
- do_setcond_const:
99
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
100
- continue;
101
- }
102
- if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
103
- && arg_is_const(op->args[3])
104
- && arg_info(op->args[3])->val == 0
105
- && arg_is_const(op->args[4])
106
- && arg_info(op->args[4])->val == 0) {
107
- /* Simplify LT/GE comparisons vs zero to a single compare
108
- vs the high word of the input. */
109
- do_setcond_high:
110
- reset_temp(op->args[0]);
111
- arg_info(op->args[0])->z_mask = 1;
112
- op->opc = INDEX_op_setcond_i32;
113
- op->args[1] = op->args[2];
114
- op->args[2] = op->args[4];
115
- op->args[3] = op->args[5];
116
- break;
117
- }
118
- if (op->args[5] == TCG_COND_EQ) {
119
- /* Simplify EQ comparisons where one of the pairs
120
- can be simplified. */
121
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
122
- op->args[1], op->args[3],
123
- TCG_COND_EQ);
124
- if (i == 0) {
125
- goto do_setcond_const;
126
- } else if (i > 0) {
127
- goto do_setcond_high;
128
- }
129
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
130
- op->args[2], op->args[4],
131
- TCG_COND_EQ);
132
- if (i == 0) {
133
- goto do_setcond_high;
134
- } else if (i < 0) {
135
- break;
136
- }
137
- do_setcond_low:
138
- reset_temp(op->args[0]);
139
- arg_info(op->args[0])->z_mask = 1;
140
- op->opc = INDEX_op_setcond_i32;
141
- op->args[2] = op->args[3];
142
- op->args[3] = op->args[5];
143
- break;
144
- }
145
- if (op->args[5] == TCG_COND_NE) {
146
- /* Simplify NE comparisons where one of the pairs
147
- can be simplified. */
148
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
149
- op->args[1], op->args[3],
150
- TCG_COND_NE);
151
- if (i == 0) {
152
- goto do_setcond_high;
153
- } else if (i > 0) {
154
- goto do_setcond_const;
155
- }
156
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
157
- op->args[2], op->args[4],
158
- TCG_COND_NE);
159
- if (i == 0) {
160
- goto do_setcond_low;
161
- } else if (i > 0) {
162
- goto do_setcond_const;
163
- }
164
- }
165
- break;
166
-
167
default:
168
break;
169
170
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
171
CASE_OP_32_64(shr):
172
done = fold_shift(&ctx, op);
173
break;
174
+ case INDEX_op_setcond2_i32:
175
+ done = fold_setcond2(&ctx, op);
176
+ break;
177
CASE_OP_32_64_VEC(sub):
178
done = fold_sub(&ctx, op);
179
break;
79
--
180
--
80
2.25.1
181
2.25.1
81
182
82
183
1
Reviewed-by: David Hildenbrand <david@redhat.com>
1
Reduce some code duplication by folding the NE and EQ cases.
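
For intuition, a standalone sketch (not QEMU code; the helper name is made
up) of why the LT/GE-vs-zero case collapses to a single compare: the sign of
a signed double-word value is carried entirely by its high word, so
fold_brcond2() only needs a 32-bit compare of the high half.

#include <assert.h>
#include <stdint.h>

/* "Is the 64-bit value hi:lo < 0?" decided from the halves. */
static int lt0_from_halves(uint32_t lo, uint32_t hi)
{
    (void)lo;                      /* the low word cannot affect the sign */
    return (int32_t)hi < 0;
}

int main(void)
{
    assert(lt0_from_halves(0xffffffffu, 0x00000000u) == 0);
    assert(lt0_from_halves(0x00000000u, 0x80000000u) == 1);
    return 0;
}
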
2
3
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/s390x/tcg-target.c.inc | 132 +++++++++++++++++++++++++++++++++----
6
tcg/optimize.c | 159 +++++++++++++++++++++++++------------------------
5
1 file changed, 120 insertions(+), 12 deletions(-)
7
1 file changed, 81 insertions(+), 78 deletions(-)
6
8
7
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/s390x/tcg-target.c.inc
11
--- a/tcg/optimize.c
10
+++ b/tcg/s390x/tcg-target.c.inc
12
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
12
RX_STC = 0x42,
14
return fold_const2(ctx, op);
13
RX_STH = 0x40,
15
}
14
16
15
+ VRX_VL = 0xe706,
17
+static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
+ VRX_VLLEZ = 0xe704,
18
+{
17
+ VRX_VST = 0xe70e,
19
+ TCGCond cond = op->args[4];
18
+ VRX_VSTEF = 0xe70b,
20
+ int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
19
+ VRX_VSTEG = 0xe70a,
21
+ TCGArg label = op->args[5];
22
+ int inv = 0;
20
+
23
+
21
NOP = 0x0707,
24
+ if (i >= 0) {
22
} S390Opcode;
25
+ goto do_brcond_const;
23
26
+ }
24
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
25
static const tcg_insn_unit *tb_ret_addr;
26
uint64_t s390_facilities[3];
27
28
+static inline bool is_general_reg(TCGReg r)
29
+{
30
+ return r <= TCG_REG_R15;
31
+}
32
+
27
+
33
+static inline bool is_vector_reg(TCGReg r)
28
+ switch (cond) {
34
+{
29
+ case TCG_COND_LT:
35
+ return r >= TCG_REG_V0 && r <= TCG_REG_V31;
30
+ case TCG_COND_GE:
36
+}
31
+ /*
32
+ * Simplify LT/GE comparisons vs zero to a single compare
33
+ * vs the high word of the input.
34
+ */
35
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
36
+ arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
37
+ goto do_brcond_high;
38
+ }
39
+ break;
37
+
40
+
38
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
41
+ case TCG_COND_NE:
39
intptr_t value, intptr_t addend)
42
+ inv = 1;
40
{
43
+ QEMU_FALLTHROUGH;
41
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
44
+ case TCG_COND_EQ:
42
#define tcg_out_insn_RX tcg_out_insn_RS
45
+ /*
43
#define tcg_out_insn_RXY tcg_out_insn_RSY
46
+ * Simplify EQ/NE comparisons where one of the pairs
44
47
+ * can be simplified.
45
+static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
48
+ */
46
+{
49
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
47
+ /*
50
+ op->args[2], cond);
48
+ * Shift bit 4 of each regno to its corresponding bit of RXB.
51
+ switch (i ^ inv) {
49
+ * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4
52
+ case 0:
50
+ * is the left-shift of the 4th operand.
53
+ goto do_brcond_const;
51
+ */
54
+ case 1:
52
+ return ((v1 & 0x10) << (4 + 3))
55
+ goto do_brcond_high;
53
+ | ((v2 & 0x10) << (4 + 2))
56
+ }
54
+ | ((v3 & 0x10) << (4 + 1))
55
+ | ((v4 & 0x10) << (4 + 0));
56
+}
57
+
57
+
58
+static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
58
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
59
+ TCGReg b2, TCGReg x2, intptr_t d2, int m3)
59
+ op->args[3], cond);
60
+{
60
+ switch (i ^ inv) {
61
+ tcg_debug_assert(is_vector_reg(v1));
61
+ case 0:
62
+ tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
62
+ goto do_brcond_const;
63
+ tcg_debug_assert(is_general_reg(x2));
63
+ case 1:
64
+ tcg_debug_assert(is_general_reg(b2));
64
+ op->opc = INDEX_op_brcond_i32;
65
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
65
+ op->args[1] = op->args[2];
66
+ tcg_out16(s, (b2 << 12) | d2);
66
+ op->args[2] = cond;
67
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
67
+ op->args[3] = label;
68
+}
69
+
70
/* Emit an opcode with "type-checking" of the format. */
71
#define tcg_out_insn(S, FMT, OP, ...) \
72
glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
74
}
75
}
76
77
+static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
78
+ TCGReg data, TCGReg base, TCGReg index,
79
+ tcg_target_long ofs, int m3)
80
+{
81
+ if (ofs < 0 || ofs >= 0x1000) {
82
+ if (ofs >= -0x80000 && ofs < 0x80000) {
83
+ tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
84
+ base = TCG_TMP0;
85
+ index = TCG_REG_NONE;
86
+ ofs = 0;
87
+ } else {
88
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
89
+ if (index != TCG_REG_NONE) {
90
+ tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
91
+ }
92
+ index = TCG_TMP0;
93
+ ofs = 0;
94
+ }
95
+ }
96
+ tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
97
+}
98
99
/* load data without address translation or endianness conversion */
100
-static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
101
- TCGReg base, intptr_t ofs)
102
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
103
+ TCGReg base, intptr_t ofs)
104
{
105
- if (type == TCG_TYPE_I32) {
106
- tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
107
- } else {
108
- tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
109
+ switch (type) {
110
+ case TCG_TYPE_I32:
111
+ if (likely(is_general_reg(data))) {
112
+ tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
113
+ break;
68
+ break;
114
+ }
69
+ }
115
+ tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
116
+ break;
117
+
118
+ case TCG_TYPE_I64:
119
+ if (likely(is_general_reg(data))) {
120
+ tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
121
+ break;
122
+ }
123
+ /* fallthru */
124
+
125
+ case TCG_TYPE_V64:
126
+ tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
127
+ break;
128
+
129
+ case TCG_TYPE_V128:
130
+ /* Hint quadword aligned. */
131
+ tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
132
+ break;
70
+ break;
133
+
71
+
134
+ default:
72
+ default:
135
+ g_assert_not_reached();
136
}
137
}
138
139
-static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
140
- TCGReg base, intptr_t ofs)
141
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
142
+ TCGReg base, intptr_t ofs)
143
{
144
- if (type == TCG_TYPE_I32) {
145
- tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
146
- } else {
147
- tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
148
+ switch (type) {
149
+ case TCG_TYPE_I32:
150
+ if (likely(is_general_reg(data))) {
151
+ tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
152
+ } else {
153
+ tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
154
+ }
155
+ break;
73
+ break;
156
+
74
+
157
+ case TCG_TYPE_I64:
75
+ do_brcond_high:
158
+ if (likely(is_general_reg(data))) {
76
+ op->opc = INDEX_op_brcond_i32;
159
+ tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
77
+ op->args[0] = op->args[1];
160
+ break;
78
+ op->args[1] = op->args[3];
161
+ }
79
+ op->args[2] = cond;
162
+ /* fallthru */
80
+ op->args[3] = label;
163
+
164
+ case TCG_TYPE_V64:
165
+ tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
166
+ break;
81
+ break;
167
+
82
+
168
+ case TCG_TYPE_V128:
83
+ do_brcond_const:
169
+ /* Hint quadword aligned. */
84
+ if (i == 0) {
170
+ tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
85
+ tcg_op_remove(ctx->tcg, op);
86
+ return true;
87
+ }
88
+ op->opc = INDEX_op_br;
89
+ op->args[0] = label;
171
+ break;
90
+ break;
91
+ }
92
+ return false;
93
+}
172
+
94
+
173
+ default:
95
static bool fold_call(OptContext *ctx, TCGOp *op)
174
+ g_assert_not_reached();
96
{
175
}
97
TCGContext *s = ctx->tcg;
176
}
98
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
177
99
}
100
break;
101
102
- case INDEX_op_brcond2_i32:
103
- i = do_constant_folding_cond2(&op->args[0], &op->args[2],
104
- op->args[4]);
105
- if (i == 0) {
106
- do_brcond_false:
107
- tcg_op_remove(s, op);
108
- continue;
109
- }
110
- if (i > 0) {
111
- do_brcond_true:
112
- op->opc = opc = INDEX_op_br;
113
- op->args[0] = op->args[5];
114
- break;
115
- }
116
- if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
117
- && arg_is_const(op->args[2])
118
- && arg_info(op->args[2])->val == 0
119
- && arg_is_const(op->args[3])
120
- && arg_info(op->args[3])->val == 0) {
121
- /* Simplify LT/GE comparisons vs zero to a single compare
122
- vs the high word of the input. */
123
- do_brcond_high:
124
- op->opc = opc = INDEX_op_brcond_i32;
125
- op->args[0] = op->args[1];
126
- op->args[1] = op->args[3];
127
- op->args[2] = op->args[4];
128
- op->args[3] = op->args[5];
129
- break;
130
- }
131
- if (op->args[4] == TCG_COND_EQ) {
132
- /* Simplify EQ comparisons where one of the pairs
133
- can be simplified. */
134
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
135
- op->args[0], op->args[2],
136
- TCG_COND_EQ);
137
- if (i == 0) {
138
- goto do_brcond_false;
139
- } else if (i > 0) {
140
- goto do_brcond_high;
141
- }
142
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
143
- op->args[1], op->args[3],
144
- TCG_COND_EQ);
145
- if (i == 0) {
146
- goto do_brcond_false;
147
- } else if (i < 0) {
148
- break;
149
- }
150
- do_brcond_low:
151
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
152
- op->opc = INDEX_op_brcond_i32;
153
- op->args[1] = op->args[2];
154
- op->args[2] = op->args[4];
155
- op->args[3] = op->args[5];
156
- break;
157
- }
158
- if (op->args[4] == TCG_COND_NE) {
159
- /* Simplify NE comparisons where one of the pairs
160
- can be simplified. */
161
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
162
- op->args[0], op->args[2],
163
- TCG_COND_NE);
164
- if (i == 0) {
165
- goto do_brcond_high;
166
- } else if (i > 0) {
167
- goto do_brcond_true;
168
- }
169
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
170
- op->args[1], op->args[3],
171
- TCG_COND_NE);
172
- if (i == 0) {
173
- goto do_brcond_low;
174
- } else if (i > 0) {
175
- goto do_brcond_true;
176
- }
177
- }
178
- break;
179
-
180
default:
181
break;
182
183
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
184
CASE_OP_32_64_VEC(andc):
185
done = fold_andc(&ctx, op);
186
break;
187
+ case INDEX_op_brcond2_i32:
188
+ done = fold_brcond2(&ctx, op);
189
+ break;
190
CASE_OP_32_64(ctpop):
191
done = fold_ctpop(&ctx, op);
192
break;
178
--
193
--
179
2.25.1
194
2.25.1
180
195
181
196
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 33 +++++++++++++++++++--------------
6
1 file changed, 19 insertions(+), 14 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_brcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGCond cond = op->args[2];
19
+ int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
20
+
21
+ if (i == 0) {
22
+ tcg_op_remove(ctx->tcg, op);
23
+ return true;
24
+ }
25
+ if (i > 0) {
26
+ op->opc = INDEX_op_br;
27
+ op->args[0] = op->args[3];
28
+ }
29
+ return false;
30
+}
31
+
32
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
33
{
34
TCGCond cond = op->args[4];
35
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
36
}
37
break;
38
39
- CASE_OP_32_64(brcond):
40
- i = do_constant_folding_cond(opc, op->args[0],
41
- op->args[1], op->args[2]);
42
- if (i == 0) {
43
- tcg_op_remove(s, op);
44
- continue;
45
- } else if (i > 0) {
46
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
47
- op->opc = opc = INDEX_op_br;
48
- op->args[0] = op->args[3];
49
- break;
50
- }
51
- break;
52
-
53
CASE_OP_32_64(movcond):
54
i = do_constant_folding_cond(opc, op->args[1],
55
op->args[2], op->args[5]);
56
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
57
CASE_OP_32_64_VEC(andc):
58
done = fold_andc(&ctx, op);
59
break;
60
+ CASE_OP_32_64(brcond):
61
+ done = fold_brcond(&ctx, op);
62
+ break;
63
case INDEX_op_brcond2_i32:
64
done = fold_brcond2(&ctx, op);
65
break;
66
--
67
2.25.1
68
69
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 23 ++++++++++++++---------
6
1 file changed, 14 insertions(+), 9 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_setcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGCond cond = op->args[3];
19
+ int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
20
+
21
+ if (i >= 0) {
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
+ }
24
+ return false;
25
+}
26
+
27
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
28
{
29
TCGCond cond = op->args[5];
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
31
}
32
break;
33
34
- CASE_OP_32_64(setcond):
35
- i = do_constant_folding_cond(opc, op->args[1],
36
- op->args[2], op->args[3]);
37
- if (i >= 0) {
38
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
39
- continue;
40
- }
41
- break;
42
-
43
CASE_OP_32_64(movcond):
44
i = do_constant_folding_cond(opc, op->args[1],
45
op->args[2], op->args[5]);
46
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
47
CASE_OP_32_64(shr):
48
done = fold_shift(&ctx, op);
49
break;
50
+ CASE_OP_32_64(setcond):
51
+ done = fold_setcond(&ctx, op);
52
+ break;
53
case INDEX_op_setcond2_i32:
54
done = fold_setcond2(&ctx, op);
55
break;
56
--
57
2.25.1
58
59
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 37 +++++++++++++++++++++----------------
6
1 file changed, 21 insertions(+), 16 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
19
+ uint32_t a = arg_info(op->args[2])->val;
20
+ uint32_t b = arg_info(op->args[3])->val;
21
+ uint64_t r = (uint64_t)a * b;
22
+ TCGArg rl, rh;
23
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
24
+
25
+ rl = op->args[0];
26
+ rh = op->args[1];
27
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
28
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
29
+ return true;
30
+ }
31
+ return false;
32
+}
33
+
34
static bool fold_nand(OptContext *ctx, TCGOp *op)
35
{
36
return fold_const2(ctx, op);
37
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
38
}
39
break;
40
41
- case INDEX_op_mulu2_i32:
42
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
43
- uint32_t a = arg_info(op->args[2])->val;
44
- uint32_t b = arg_info(op->args[3])->val;
45
- uint64_t r = (uint64_t)a * b;
46
- TCGArg rl, rh;
47
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
48
-
49
- rl = op->args[0];
50
- rh = op->args[1];
51
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
52
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
53
- continue;
54
- }
55
- break;
56
-
57
default:
58
break;
59
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
61
CASE_OP_32_64(muluh):
62
done = fold_mul_highpart(&ctx, op);
63
break;
64
+ case INDEX_op_mulu2_i32:
65
+ done = fold_mulu2_i32(&ctx, op);
66
+ break;
67
CASE_OP_32_64(nand):
68
done = fold_nand(&ctx, op);
69
break;
70
--
71
2.25.1
72
73
New patch
1
Add two additional helpers, fold_add2_i32 and fold_sub2_i32
2
which will not be simple wrappers forever.
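
A standalone sketch of the constant arithmetic the new helpers fold (not
QEMU code; the function name is invented): the four 32-bit input halves are
reassembled into 64-bit values, added or subtracted, and split back into a
low/high result pair.

#include <assert.h>
#include <stdint.h>

static void addsub2_i32(uint32_t *rl, uint32_t *rh,
                        uint32_t al, uint32_t ah,
                        uint32_t bl, uint32_t bh, int add)
{
    uint64_t a = ((uint64_t)ah << 32) | al;
    uint64_t b = ((uint64_t)bh << 32) | bl;
    uint64_t r = add ? a + b : a - b;

    *rl = (uint32_t)r;             /* low word of the result */
    *rh = (uint32_t)(r >> 32);     /* high word, including carry/borrow */
}

int main(void)
{
    uint32_t rl, rh;
    addsub2_i32(&rl, &rh, 0xffffffffu, 0, 1, 0, 1);   /* 0xffffffff + 1 */
    assert(rl == 0 && rh == 1);                       /* carry into high */
    return 0;
}
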
1
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 70 +++++++++++++++++++++++++++++++-------------------
9
1 file changed, 44 insertions(+), 26 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
16
return fold_const2(ctx, op);
17
}
18
19
+static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
20
+{
21
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
22
+ arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
23
+ uint32_t al = arg_info(op->args[2])->val;
24
+ uint32_t ah = arg_info(op->args[3])->val;
25
+ uint32_t bl = arg_info(op->args[4])->val;
26
+ uint32_t bh = arg_info(op->args[5])->val;
27
+ uint64_t a = ((uint64_t)ah << 32) | al;
28
+ uint64_t b = ((uint64_t)bh << 32) | bl;
29
+ TCGArg rl, rh;
30
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
31
+
32
+ if (add) {
33
+ a += b;
34
+ } else {
35
+ a -= b;
36
+ }
37
+
38
+ rl = op->args[0];
39
+ rh = op->args[1];
40
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
41
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
42
+ return true;
43
+ }
44
+ return false;
45
+}
46
+
47
+static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
48
+{
49
+ return fold_addsub2_i32(ctx, op, true);
50
+}
51
+
52
static bool fold_and(OptContext *ctx, TCGOp *op)
53
{
54
return fold_const2(ctx, op);
55
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
56
return fold_const2(ctx, op);
57
}
58
59
+static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
60
+{
61
+ return fold_addsub2_i32(ctx, op, false);
62
+}
63
+
64
static bool fold_xor(OptContext *ctx, TCGOp *op)
65
{
66
return fold_const2(ctx, op);
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
68
}
69
break;
70
71
- case INDEX_op_add2_i32:
72
- case INDEX_op_sub2_i32:
73
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
74
- && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
75
- uint32_t al = arg_info(op->args[2])->val;
76
- uint32_t ah = arg_info(op->args[3])->val;
77
- uint32_t bl = arg_info(op->args[4])->val;
78
- uint32_t bh = arg_info(op->args[5])->val;
79
- uint64_t a = ((uint64_t)ah << 32) | al;
80
- uint64_t b = ((uint64_t)bh << 32) | bl;
81
- TCGArg rl, rh;
82
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
83
-
84
- if (opc == INDEX_op_add2_i32) {
85
- a += b;
86
- } else {
87
- a -= b;
88
- }
89
-
90
- rl = op->args[0];
91
- rh = op->args[1];
92
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
93
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
94
- continue;
95
- }
96
- break;
97
98
default:
99
break;
100
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
101
CASE_OP_32_64_VEC(add):
102
done = fold_add(&ctx, op);
103
break;
104
+ case INDEX_op_add2_i32:
105
+ done = fold_add2_i32(&ctx, op);
106
+ break;
107
CASE_OP_32_64_VEC(and):
108
done = fold_and(&ctx, op);
109
break;
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
CASE_OP_32_64_VEC(sub):
112
done = fold_sub(&ctx, op);
113
break;
114
+ case INDEX_op_sub2_i32:
115
+ done = fold_sub2_i32(&ctx, op);
116
+ break;
117
CASE_OP_32_64_VEC(xor):
118
done = fold_xor(&ctx, op);
119
break;
120
--
121
2.25.1
122
123
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 56 ++++++++++++++++++++++++++++----------------------
6
1 file changed, 31 insertions(+), 25 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
13
return true;
14
}
15
16
+static bool fold_movcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGOpcode opc = op->opc;
19
+ TCGCond cond = op->args[5];
20
+ int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
21
+
22
+ if (i >= 0) {
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
+ }
25
+
26
+ if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
27
+ uint64_t tv = arg_info(op->args[3])->val;
28
+ uint64_t fv = arg_info(op->args[4])->val;
29
+
30
+ opc = (opc == INDEX_op_movcond_i32
31
+ ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
32
+
33
+ if (tv == 1 && fv == 0) {
34
+ op->opc = opc;
35
+ op->args[3] = cond;
36
+ } else if (fv == 1 && tv == 0) {
37
+ op->opc = opc;
38
+ op->args[3] = tcg_invert_cond(cond);
39
+ }
40
+ }
41
+ return false;
42
+}
43
+
44
static bool fold_mul(OptContext *ctx, TCGOp *op)
45
{
46
return fold_const2(ctx, op);
47
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
48
}
49
break;
50
51
- CASE_OP_32_64(movcond):
52
- i = do_constant_folding_cond(opc, op->args[1],
53
- op->args[2], op->args[5]);
54
- if (i >= 0) {
55
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
56
- continue;
57
- }
58
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
59
- uint64_t tv = arg_info(op->args[3])->val;
60
- uint64_t fv = arg_info(op->args[4])->val;
61
- TCGCond cond = op->args[5];
62
-
63
- if (fv == 1 && tv == 0) {
64
- cond = tcg_invert_cond(cond);
65
- } else if (!(tv == 1 && fv == 0)) {
66
- break;
67
- }
68
- op->args[3] = cond;
69
- op->opc = opc = (opc == INDEX_op_movcond_i32
70
- ? INDEX_op_setcond_i32
71
- : INDEX_op_setcond_i64);
72
- }
73
- break;
74
-
75
-
76
default:
77
break;
78
79
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
80
case INDEX_op_mb:
81
done = fold_mb(&ctx, op);
82
break;
83
+ CASE_OP_32_64(movcond):
84
+ done = fold_movcond(&ctx, op);
85
+ break;
86
CASE_OP_32_64(mul):
87
done = fold_mul(&ctx, op);
88
break;
89
--
90
2.25.1
91
92
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 39 ++++++++++++++++++++++-----------------
6
1 file changed, 22 insertions(+), 17 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_extract2(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
19
+ uint64_t v1 = arg_info(op->args[1])->val;
20
+ uint64_t v2 = arg_info(op->args[2])->val;
21
+ int shr = op->args[3];
22
+
23
+ if (op->opc == INDEX_op_extract2_i64) {
24
+ v1 >>= shr;
25
+ v2 <<= 64 - shr;
26
+ } else {
27
+ v1 = (uint32_t)v1 >> shr;
28
+ v2 = (int32_t)v2 << (32 - shr);
29
+ }
30
+ return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
31
+ }
32
+ return false;
33
+}
34
+
35
static bool fold_exts(OptContext *ctx, TCGOp *op)
36
{
37
return fold_const1(ctx, op);
38
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
}
40
break;
41
42
- CASE_OP_32_64(extract2):
43
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
44
- uint64_t v1 = arg_info(op->args[1])->val;
45
- uint64_t v2 = arg_info(op->args[2])->val;
46
- int shr = op->args[3];
47
-
48
- if (opc == INDEX_op_extract2_i64) {
49
- tmp = (v1 >> shr) | (v2 << (64 - shr));
50
- } else {
51
- tmp = (int32_t)(((uint32_t)v1 >> shr) |
52
- ((uint32_t)v2 << (32 - shr)));
53
- }
54
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
55
- continue;
56
- }
57
- break;
58
-
59
default:
60
break;
61
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
63
CASE_OP_32_64(eqv):
64
done = fold_eqv(&ctx, op);
65
break;
66
+ CASE_OP_32_64(extract2):
67
+ done = fold_extract2(&ctx, op);
68
+ break;
69
CASE_OP_32_64(ext8s):
70
CASE_OP_32_64(ext16s):
71
case INDEX_op_ext32s_i64:
72
--
73
2.25.1
74
75
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 48 ++++++++++++++++++++++++++++++------------------
6
1 file changed, 30 insertions(+), 18 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_extract(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t;
20
+
21
+ t = arg_info(op->args[1])->val;
22
+ t = extract64(t, op->args[2], op->args[3]);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return false;
26
+}
27
+
28
static bool fold_extract2(OptContext *ctx, TCGOp *op)
29
{
30
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
32
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
33
}
34
35
+static bool fold_sextract(OptContext *ctx, TCGOp *op)
36
+{
37
+ if (arg_is_const(op->args[1])) {
38
+ uint64_t t;
39
+
40
+ t = arg_info(op->args[1])->val;
41
+ t = sextract64(t, op->args[2], op->args[3]);
42
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
43
+ }
44
+ return false;
45
+}
46
+
47
static bool fold_shift(OptContext *ctx, TCGOp *op)
48
{
49
return fold_const2(ctx, op);
50
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
51
}
52
break;
53
54
- CASE_OP_32_64(extract):
55
- if (arg_is_const(op->args[1])) {
56
- tmp = extract64(arg_info(op->args[1])->val,
57
- op->args[2], op->args[3]);
58
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
59
- continue;
60
- }
61
- break;
62
-
63
- CASE_OP_32_64(sextract):
64
- if (arg_is_const(op->args[1])) {
65
- tmp = sextract64(arg_info(op->args[1])->val,
66
- op->args[2], op->args[3]);
67
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
68
- continue;
69
- }
70
- break;
71
-
72
default:
73
break;
74
75
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
76
CASE_OP_32_64(eqv):
77
done = fold_eqv(&ctx, op);
78
break;
79
+ CASE_OP_32_64(extract):
80
+ done = fold_extract(&ctx, op);
81
+ break;
82
CASE_OP_32_64(extract2):
83
done = fold_extract2(&ctx, op);
84
break;
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
case INDEX_op_setcond2_i32:
87
done = fold_setcond2(&ctx, op);
88
break;
89
+ CASE_OP_32_64(sextract):
90
+ done = fold_sextract(&ctx, op);
91
+ break;
92
CASE_OP_32_64_VEC(sub):
93
done = fold_sub(&ctx, op);
94
break;
95
--
96
2.25.1
97
98
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 25 +++++++++++++++----------
6
1 file changed, 15 insertions(+), 10 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
13
return fold_const1(ctx, op);
14
}
15
16
+static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
19
+ uint64_t t1 = arg_info(op->args[1])->val;
20
+ uint64_t t2 = arg_info(op->args[2])->val;
21
+
22
+ t1 = deposit64(t1, op->args[3], op->args[4], t2);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
24
+ }
25
+ return false;
26
+}
27
+
28
static bool fold_divide(OptContext *ctx, TCGOp *op)
29
{
30
return fold_const2(ctx, op);
31
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
32
}
33
break;
34
35
- CASE_OP_32_64(deposit):
36
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
37
- tmp = deposit64(arg_info(op->args[1])->val,
38
- op->args[3], op->args[4],
39
- arg_info(op->args[2])->val);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
44
-
45
default:
46
break;
47
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
CASE_OP_32_64(ctpop):
50
done = fold_ctpop(&ctx, op);
51
break;
52
+ CASE_OP_32_64(deposit):
53
+ done = fold_deposit(&ctx, op);
54
+ break;
55
CASE_OP_32_64(div):
56
CASE_OP_32_64(divu):
57
done = fold_divide(&ctx, op);
58
--
59
2.25.1
60
61
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 32 ++++++++++++++++++--------------
6
1 file changed, 18 insertions(+), 14 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
13
return true;
14
}
15
16
+static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+
21
+ if (t != 0) {
22
+ t = do_constant_folding(op->opc, t, 0);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
26
+ }
27
+ return false;
28
+}
29
+
30
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
31
{
32
return fold_const1(ctx, op);
33
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
}
35
break;
36
37
- CASE_OP_32_64(clz):
38
- CASE_OP_32_64(ctz):
39
- if (arg_is_const(op->args[1])) {
40
- TCGArg v = arg_info(op->args[1])->val;
41
- if (v != 0) {
42
- tmp = do_constant_folding(opc, v, 0);
43
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
44
- } else {
45
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
46
- }
47
- continue;
48
- }
49
- break;
50
-
51
default:
52
break;
53
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
case INDEX_op_brcond2_i32:
56
done = fold_brcond2(&ctx, op);
57
break;
58
+ CASE_OP_32_64(clz):
59
+ CASE_OP_32_64(ctz):
60
+ done = fold_count_zeros(&ctx, op);
61
+ break;
62
CASE_OP_32_64(ctpop):
63
done = fold_ctpop(&ctx, op);
64
break;
65
--
66
2.25.1
67
68
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 27 ++++++++++++++++-----------
6
1 file changed, 16 insertions(+), 11 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
13
return false;
14
}
15
16
+static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+
21
+ t = do_constant_folding(op->opc, t, op->args[2]);
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
23
+ }
24
+ return false;
25
+}
26
+
27
static bool fold_call(OptContext *ctx, TCGOp *op)
28
{
29
TCGContext *s = ctx->tcg;
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
31
}
32
break;
33
34
- CASE_OP_32_64(bswap16):
35
- CASE_OP_32_64(bswap32):
36
- case INDEX_op_bswap64_i64:
37
- if (arg_is_const(op->args[1])) {
38
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
39
- op->args[2]);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
44
-
45
default:
46
break;
47
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
case INDEX_op_brcond2_i32:
50
done = fold_brcond2(&ctx, op);
51
break;
52
+ CASE_OP_32_64(bswap16):
53
+ CASE_OP_32_64(bswap32):
54
+ case INDEX_op_bswap64_i64:
55
+ done = fold_bswap(&ctx, op);
56
+ break;
57
CASE_OP_32_64(clz):
58
CASE_OP_32_64(ctz):
59
done = fold_count_zeros(&ctx, op);
60
--
61
2.25.1
62
63
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 53 +++++++++++++++++++++++++++++---------------------
6
1 file changed, 31 insertions(+), 22 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_dup(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+ t = dup_const(TCGOP_VECE(op), t);
21
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
22
+ }
23
+ return false;
24
+}
25
+
26
+static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
+{
28
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
29
+ uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
30
+ arg_info(op->args[2])->val);
31
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
32
+ }
33
+
34
+ if (args_are_copies(op->args[1], op->args[2])) {
35
+ op->opc = INDEX_op_dup_vec;
36
+ TCGOP_VECE(op) = MO_32;
37
+ }
38
+ return false;
39
+}
40
+
41
static bool fold_eqv(OptContext *ctx, TCGOp *op)
42
{
43
return fold_const2(ctx, op);
44
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
45
done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
46
break;
47
48
- case INDEX_op_dup_vec:
49
- if (arg_is_const(op->args[1])) {
50
- tmp = arg_info(op->args[1])->val;
51
- tmp = dup_const(TCGOP_VECE(op), tmp);
52
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
53
- continue;
54
- }
55
- break;
56
-
57
- case INDEX_op_dup2_vec:
58
- assert(TCG_TARGET_REG_BITS == 32);
59
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
60
- tcg_opt_gen_movi(&ctx, op, op->args[0],
61
- deposit64(arg_info(op->args[1])->val, 32, 32,
62
- arg_info(op->args[2])->val));
63
- continue;
64
- } else if (args_are_copies(op->args[1], op->args[2])) {
65
- op->opc = INDEX_op_dup_vec;
66
- TCGOP_VECE(op) = MO_32;
67
- }
68
- break;
69
-
70
default:
71
break;
72
73
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
74
CASE_OP_32_64(divu):
75
done = fold_divide(&ctx, op);
76
break;
77
+ case INDEX_op_dup_vec:
78
+ done = fold_dup(&ctx, op);
79
+ break;
80
+ case INDEX_op_dup2_vec:
81
+ done = fold_dup2(&ctx, op);
82
+ break;
83
CASE_OP_32_64(eqv):
84
done = fold_eqv(&ctx, op);
85
break;
86
--
87
2.25.1
88
89
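A note on the fold_dup()/fold_dup2() patch above: dup_const() replicates a scalar constant across every element of a 64-bit lane, so dup_vec of a constant folds to a movi of the replicated value. A simplified stand-in (not the QEMU implementation) just to show the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for dup_const(): replicate a constant according
 * to the vector element size (0=8-bit, 1=16-bit, 2=32-bit, 3=64-bit). */
static uint64_t dup_const_sketch(unsigned vece, uint64_t c)
{
    switch (vece) {
    case 0: return 0x0101010101010101ull * (uint8_t)c;
    case 1: return 0x0001000100010001ull * (uint16_t)c;
    case 2: return 0x0000000100000001ull * (uint32_t)c;
    default: return c;
    }
}

int main(void)
{
    /* dup_vec.8 v, $0xab  ==>  movi v, $0xabababababababab */
    printf("0x%016llx\n", (unsigned long long)dup_const_sketch(0, 0xab));
    return 0;
}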
New patch
1
This is the final entry in the main switch that was in a
2
different form. After this, we have the option to convert
3
the switch into a function dispatch table.
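A rough idea of what such a dispatch table could look like, as a purely hypothetical sketch with stand-in types (this patch keeps the switch; fold_table and the OP_* names below are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real OptContext/TCGOp/opcode types. */
typedef struct { int dummy; } OptContext;
typedef struct { int opc; } TCGOp;
enum { OP_ADD, OP_MOV, OP_NB };

static bool fold_add(OptContext *ctx, TCGOp *op) { (void)ctx; (void)op; return false; }
static bool fold_mov(OptContext *ctx, TCGOp *op) { (void)ctx; (void)op; return true; }

/* One fold_*() entry per opcode; NULL means nothing special to do. */
typedef bool (*FoldFn)(OptContext *, TCGOp *);
static const FoldFn fold_table[OP_NB] = {
    [OP_ADD] = fold_add,
    [OP_MOV] = fold_mov,
};

int main(void)
{
    OptContext ctx = { 0 };
    TCGOp op = { .opc = OP_MOV };
    FoldFn fn = fold_table[op.opc];
    bool done = fn ? fn(&ctx, &op) : false;   /* replaces the big switch */
    printf("done=%d\n", done);
    return 0;
}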
1
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 27 ++++++++++++++-------------
10
1 file changed, 14 insertions(+), 13 deletions(-)
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
17
return true;
18
}
19
20
+static bool fold_mov(OptContext *ctx, TCGOp *op)
21
+{
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
23
+}
24
+
25
static bool fold_movcond(OptContext *ctx, TCGOp *op)
26
{
27
TCGOpcode opc = op->opc;
28
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
29
break;
30
}
31
32
- /* Propagate constants through copy operations and do constant
33
- folding. Constants will be substituted to arguments by register
34
- allocator where needed and possible. Also detect copies. */
35
+ /*
36
+ * Process each opcode.
37
+ * Sorted alphabetically by opcode as much as possible.
38
+ */
39
switch (opc) {
40
- CASE_OP_32_64_VEC(mov):
41
- done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
42
- break;
43
-
44
- default:
45
- break;
46
-
47
- /* ---------------------------------------------------------- */
48
- /* Sorted alphabetically by opcode as much as possible. */
49
-
50
CASE_OP_32_64_VEC(add):
51
done = fold_add(&ctx, op);
52
break;
53
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
54
case INDEX_op_mb:
55
done = fold_mb(&ctx, op);
56
break;
57
+ CASE_OP_32_64_VEC(mov):
58
+ done = fold_mov(&ctx, op);
59
+ break;
60
CASE_OP_32_64(movcond):
61
done = fold_movcond(&ctx, op);
62
break;
63
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
64
CASE_OP_32_64_VEC(xor):
65
done = fold_xor(&ctx, op);
66
break;
67
+ default:
68
+ break;
69
}
70
71
if (!done) {
72
--
73
2.25.1
74
75
1
From: Philipp Tomsich <philipp.tomsich@vrull.eu>
1
Pull the "op r, a, a => movi r, 0" optimization into a function,
2
and use it in the outer opcode fold functions.
2
3
3
dup_const always generates a uint64_t, which may exceed the size of a
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
target_long (generating warnings with recent-enough compilers).
5
6
To ensure that we can use dup_const both for 64bit and 32bit targets,
7
this adds dup_const_tl, which either maps back to dup_const (for 64bit
8
targets) or provides a similar implementation using 32bit constants.
9
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Signed-off-by: Philipp Tomsich <philipp.tomsich@vrull.eu>
12
Message-Id: <20211003214243.3813425-1-philipp.tomsich@vrull.eu>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
7
---
15
include/tcg/tcg.h | 12 ++++++++++++
8
tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
16
1 file changed, 12 insertions(+)
9
1 file changed, 24 insertions(+), 17 deletions(-)
17
10
18
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
19
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
20
--- a/include/tcg/tcg.h
13
--- a/tcg/optimize.c
21
+++ b/include/tcg/tcg.h
14
+++ b/tcg/optimize.c
22
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
23
: (qemu_build_not_reached_always(), 0)) \
16
return false;
24
: dup_const(VECE, C))
17
}
25
18
26
+#if TARGET_LONG_BITS == 64
19
+/* If the binary operation has both arguments equal, fold to @i. */
27
+# define dup_const_tl dup_const
20
+static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
28
+#else
21
+{
29
+# define dup_const_tl(VECE, C) \
22
+ if (args_are_copies(op->args[1], op->args[2])) {
30
+ (__builtin_constant_p(VECE) \
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
31
+ ? ( (VECE) == MO_8 ? 0x01010101ul * (uint8_t)(C) \
24
+ }
32
+ : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C) \
25
+ return false;
33
+ : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C) \
26
+}
34
+ : (qemu_build_not_reached_always(), 0)) \
35
+ : (target_long)dup_const(VECE, C))
36
+#endif
37
+
27
+
38
/*
28
/*
39
* Memory helpers that will be used by TCG generated code.
29
* These outermost fold_<op> functions are sorted alphabetically.
40
*/
30
*/
31
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
32
33
static bool fold_andc(OptContext *ctx, TCGOp *op)
34
{
35
- return fold_const2(ctx, op);
36
+ if (fold_const2(ctx, op) ||
37
+ fold_xx_to_i(ctx, op, 0)) {
38
+ return true;
39
+ }
40
+ return false;
41
}
42
43
static bool fold_brcond(OptContext *ctx, TCGOp *op)
44
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
45
46
static bool fold_sub(OptContext *ctx, TCGOp *op)
47
{
48
- return fold_const2(ctx, op);
49
+ if (fold_const2(ctx, op) ||
50
+ fold_xx_to_i(ctx, op, 0)) {
51
+ return true;
52
+ }
53
+ return false;
54
}
55
56
static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
57
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
58
59
static bool fold_xor(OptContext *ctx, TCGOp *op)
60
{
61
- return fold_const2(ctx, op);
62
+ if (fold_const2(ctx, op) ||
63
+ fold_xx_to_i(ctx, op, 0)) {
64
+ return true;
65
+ }
66
+ return false;
67
}
68
69
/* Propagate constants and copies, fold constant expressions. */
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
break;
72
}
73
74
- /* Simplify expression for "op r, a, a => movi r, 0" cases */
75
- switch (opc) {
76
- CASE_OP_32_64_VEC(andc):
77
- CASE_OP_32_64_VEC(sub):
78
- CASE_OP_32_64_VEC(xor):
79
- if (args_are_copies(op->args[1], op->args[2])) {
80
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
81
- continue;
82
- }
83
- break;
84
- default:
85
- break;
86
- }
87
-
88
/*
89
* Process each opcode.
90
* Sorted alphabetically by opcode as much as possible.
41
--
91
--
42
2.25.1
92
2.25.1
43
93
44
94
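Regarding the dup_const_tl addition above: the 32-bit branch of the macro is what keeps the result inside a 32-bit target_long. A hedged, standalone rewrite of just that branch, to make the arithmetic visible (illustration only, not the tcg.h macro itself):

#include <stdint.h>
#include <stdio.h>

/* 32-bit dup_const_tl() fallback, rewritten as a function: the replication
 * constants are 32-bit, so the result fits a 32-bit target_long and no
 * uint64_t is forced into target_long arithmetic. */
static uint32_t dup_const_tl_sketch(unsigned vece, uint32_t c)
{
    switch (vece) {
    case 0: return 0x01010101u * (uint8_t)c;    /* MO_8  */
    case 1: return 0x00010001u * (uint16_t)c;   /* MO_16 */
    case 2: return 0x00000001u * (uint32_t)c;   /* MO_32 */
    default: return 0;
    }
}

int main(void)
{
    printf("0x%08x\n", (unsigned)dup_const_tl_sketch(0, 0xab));  /* 0xabababab */
    return 0;
}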
1
There is no point in encoding load/store within a bit of
1
Pull the "op r, a, a => mov r, a" optimization into a function,
2
the memory trace info operand. Represent atomic operations
2
and use it in the outer opcode fold functions.
3
as a single read-modify-write tracepoint. Use MemOpIdx
4
instead of inventing a form specifically for traces.
5
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
accel/tcg/atomic_template.h | 1 -
8
tcg/optimize.c | 39 ++++++++++++++++++++++++---------------
10
trace/mem.h | 51 -----------------------------------
9
1 file changed, 24 insertions(+), 15 deletions(-)
11
accel/tcg/cputlb.c | 7 ++---
12
accel/tcg/user-exec.c | 44 +++++++++++-------------------
13
tcg/tcg-op.c | 17 +++---------
14
accel/tcg/atomic_common.c.inc | 12 +++------
15
trace-events | 18 +++----------
16
7 files changed, 28 insertions(+), 122 deletions(-)
17
delete mode 100644 trace/mem.h
18
10
19
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
20
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
21
--- a/accel/tcg/atomic_template.h
13
--- a/tcg/optimize.c
22
+++ b/accel/tcg/atomic_template.h
14
+++ b/tcg/optimize.c
23
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
16
return false;
17
}
18
19
+/* If the binary operation has both arguments equal, fold to identity. */
20
+static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
21
+{
22
+ if (args_are_copies(op->args[1], op->args[2])) {
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
24
+ }
25
+ return false;
26
+}
27
+
28
/*
29
* These outermost fold_<op> functions are sorted alphabetically.
30
+ *
31
+ * The ordering of the transformations should be:
32
+ * 1) those that produce a constant
33
+ * 2) those that produce a copy
34
+ * 3) those that produce information about the result value.
24
*/
35
*/
25
36
26
#include "qemu/plugin.h"
37
static bool fold_add(OptContext *ctx, TCGOp *op)
27
-#include "trace/mem.h"
38
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
28
39
29
#if DATA_SIZE == 16
40
static bool fold_and(OptContext *ctx, TCGOp *op)
30
# define SUFFIX o
41
{
31
diff --git a/trace/mem.h b/trace/mem.h
42
- return fold_const2(ctx, op);
32
deleted file mode 100644
43
+ if (fold_const2(ctx, op) ||
33
index XXXXXXX..XXXXXXX
44
+ fold_xx_to_x(ctx, op)) {
34
--- a/trace/mem.h
45
+ return true;
35
+++ /dev/null
46
+ }
36
@@ -XXX,XX +XXX,XX @@
47
+ return false;
37
-/*
48
}
38
- * Helper functions for guest memory tracing
49
39
- *
50
static bool fold_andc(OptContext *ctx, TCGOp *op)
40
- * Copyright (C) 2016 Lluís Vilanova <vilanova@ac.upc.edu>
51
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
41
- *
52
42
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
53
static bool fold_or(OptContext *ctx, TCGOp *op)
43
- * See the COPYING file in the top-level directory.
54
{
44
- */
55
- return fold_const2(ctx, op);
56
+ if (fold_const2(ctx, op) ||
57
+ fold_xx_to_x(ctx, op)) {
58
+ return true;
59
+ }
60
+ return false;
61
}
62
63
static bool fold_orc(OptContext *ctx, TCGOp *op)
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
65
break;
66
}
67
68
- /* Simplify expression for "op r, a, a => mov r, a" cases */
69
- switch (opc) {
70
- CASE_OP_32_64_VEC(or):
71
- CASE_OP_32_64_VEC(and):
72
- if (args_are_copies(op->args[1], op->args[2])) {
73
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
74
- continue;
75
- }
76
- break;
77
- default:
78
- break;
79
- }
45
-
80
-
46
-#ifndef TRACE__MEM_H
81
/*
47
-#define TRACE__MEM_H
82
* Process each opcode.
48
-
83
* Sorted alphabetically by opcode as much as possible.
49
-#include "exec/memopidx.h"
50
-
51
-#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */
52
-#define TRACE_MEM_SE (1ULL << 4) /* sign extended (y/n) */
53
-#define TRACE_MEM_BE (1ULL << 5) /* big endian (y/n) */
54
-#define TRACE_MEM_ST (1ULL << 6) /* store (y/n) */
55
-#define TRACE_MEM_MMU_SHIFT 8 /* mmu idx */
56
-
57
-/**
58
- * trace_mem_get_info:
59
- *
60
- * Return a value for the 'info' argument in guest memory access traces.
61
- */
62
-static inline uint16_t trace_mem_get_info(MemOpIdx oi, bool store)
63
-{
64
- MemOp op = get_memop(oi);
65
- uint32_t size_shift = op & MO_SIZE;
66
- bool sign_extend = op & MO_SIGN;
67
- bool big_endian = (op & MO_BSWAP) == MO_BE;
68
- uint16_t res;
69
-
70
- res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
71
- if (sign_extend) {
72
- res |= TRACE_MEM_SE;
73
- }
74
- if (big_endian) {
75
- res |= TRACE_MEM_BE;
76
- }
77
- if (store) {
78
- res |= TRACE_MEM_ST;
79
- }
80
-#ifdef CONFIG_SOFTMMU
81
- res |= get_mmuidx(oi) << TRACE_MEM_MMU_SHIFT;
82
-#endif
83
-
84
- return res;
85
-}
86
-
87
-#endif /* TRACE__MEM_H */
88
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/accel/tcg/cputlb.c
91
+++ b/accel/tcg/cputlb.c
92
@@ -XXX,XX +XXX,XX @@
93
#include "qemu/atomic128.h"
94
#include "exec/translate-all.h"
95
#include "trace/trace-root.h"
96
-#include "trace/mem.h"
97
#include "tb-hash.h"
98
#include "internal.h"
99
#ifdef CONFIG_PLUGIN
100
@@ -XXX,XX +XXX,XX @@ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
101
MemOp op, FullLoadHelper *full_load)
102
{
103
MemOpIdx oi = make_memop_idx(op, mmu_idx);
104
- uint16_t meminfo = trace_mem_get_info(oi, false);
105
uint64_t ret;
106
107
- trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
108
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
109
110
ret = full_load(env, addr, oi, retaddr);
111
112
@@ -XXX,XX +XXX,XX @@ cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
113
int mmu_idx, uintptr_t retaddr, MemOp op)
114
{
115
MemOpIdx oi = make_memop_idx(op, mmu_idx);
116
- uint16_t meminfo = trace_mem_get_info(oi, true);
117
118
- trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
119
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
120
121
store_helper(env, addr, val, oi, retaddr, op);
122
123
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
124
index XXXXXXX..XXXXXXX 100644
125
--- a/accel/tcg/user-exec.c
126
+++ b/accel/tcg/user-exec.c
127
@@ -XXX,XX +XXX,XX @@
128
#include "exec/helper-proto.h"
129
#include "qemu/atomic128.h"
130
#include "trace/trace-root.h"
131
-#include "trace/mem.h"
132
+#include "internal.h"
133
134
#undef EAX
135
#undef ECX
136
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
137
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
138
{
139
MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
140
- uint16_t meminfo = trace_mem_get_info(oi, false);
141
uint32_t ret;
142
143
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
144
+ trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
145
ret = ldub_p(g2h(env_cpu(env), ptr));
146
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
147
return ret;
148
@@ -XXX,XX +XXX,XX @@ int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
149
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
150
{
151
MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
152
- uint16_t meminfo = trace_mem_get_info(oi, false);
153
uint32_t ret;
154
155
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
156
+ trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
157
ret = lduw_be_p(g2h(env_cpu(env), ptr));
158
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
159
return ret;
160
@@ -XXX,XX +XXX,XX @@ int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
161
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
162
{
163
MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
164
- uint16_t meminfo = trace_mem_get_info(oi, false);
165
uint32_t ret;
166
167
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
168
+ trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
169
ret = ldl_be_p(g2h(env_cpu(env), ptr));
170
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
171
return ret;
172
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
173
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
174
{
175
MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
176
- uint16_t meminfo = trace_mem_get_info(oi, false);
177
uint64_t ret;
178
179
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
180
+ trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
181
ret = ldq_be_p(g2h(env_cpu(env), ptr));
182
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
183
return ret;
184
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
185
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
186
{
187
MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
188
- uint16_t meminfo = trace_mem_get_info(oi, false);
189
uint32_t ret;
190
191
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
192
+ trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
193
ret = lduw_le_p(g2h(env_cpu(env), ptr));
194
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
195
return ret;
196
@@ -XXX,XX +XXX,XX @@ int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
197
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
198
{
199
MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
200
- uint16_t meminfo = trace_mem_get_info(oi, false);
201
uint32_t ret;
202
203
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
204
+ trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
205
ret = ldl_le_p(g2h(env_cpu(env), ptr));
206
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
207
return ret;
208
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
209
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
210
{
211
MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
212
- uint16_t meminfo = trace_mem_get_info(oi, false);
213
uint64_t ret;
214
215
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
216
+ trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
217
ret = ldq_le_p(g2h(env_cpu(env), ptr));
218
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
219
return ret;
220
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
221
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
222
{
223
MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
224
- uint16_t meminfo = trace_mem_get_info(oi, true);
225
226
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
227
+ trace_guest_st_before_exec(env_cpu(env), ptr, oi);
228
stb_p(g2h(env_cpu(env), ptr), val);
229
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
230
}
231
@@ -XXX,XX +XXX,XX @@ void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
232
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
233
{
234
MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
235
- uint16_t meminfo = trace_mem_get_info(oi, true);
236
237
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
238
+ trace_guest_st_before_exec(env_cpu(env), ptr, oi);
239
stw_be_p(g2h(env_cpu(env), ptr), val);
240
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
241
}
242
@@ -XXX,XX +XXX,XX @@ void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
243
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
244
{
245
MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
246
- uint16_t meminfo = trace_mem_get_info(oi, true);
247
248
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
249
+ trace_guest_st_before_exec(env_cpu(env), ptr, oi);
250
stl_be_p(g2h(env_cpu(env), ptr), val);
251
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
252
}
253
@@ -XXX,XX +XXX,XX @@ void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
254
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
255
{
256
MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
257
- uint16_t meminfo = trace_mem_get_info(oi, true);
258
259
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
260
+ trace_guest_st_before_exec(env_cpu(env), ptr, oi);
261
stq_be_p(g2h(env_cpu(env), ptr), val);
262
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
263
}
264
@@ -XXX,XX +XXX,XX @@ void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
265
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
266
{
267
MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
268
- uint16_t meminfo = trace_mem_get_info(oi, true);
269
270
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
271
+ trace_guest_st_before_exec(env_cpu(env), ptr, oi);
272
stw_le_p(g2h(env_cpu(env), ptr), val);
273
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
274
}
275
@@ -XXX,XX +XXX,XX @@ void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
276
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
277
{
278
MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
279
- uint16_t meminfo = trace_mem_get_info(oi, true);
280
281
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
282
+ trace_guest_st_before_exec(env_cpu(env), ptr, oi);
283
stl_le_p(g2h(env_cpu(env), ptr), val);
284
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
285
}
286
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
287
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
288
{
289
MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
290
- uint16_t meminfo = trace_mem_get_info(oi, true);
291
292
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
293
+ trace_guest_st_before_exec(env_cpu(env), ptr, oi);
294
stq_le_p(g2h(env_cpu(env), ptr), val);
295
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
296
}
297
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
298
index XXXXXXX..XXXXXXX 100644
299
--- a/tcg/tcg-op.c
300
+++ b/tcg/tcg-op.c
301
@@ -XXX,XX +XXX,XX @@
302
#include "tcg/tcg-op.h"
303
#include "tcg/tcg-mo.h"
304
#include "trace-tcg.h"
305
-#include "trace/mem.h"
306
#include "exec/plugin-gen.h"
307
308
/* Reduce the number of ifdefs below. This assumes that all uses of
309
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
310
{
311
MemOp orig_memop;
312
MemOpIdx oi;
313
- uint16_t info;
314
315
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
316
memop = tcg_canonicalize_memop(memop, 0, 0);
317
oi = make_memop_idx(memop, idx);
318
- info = trace_mem_get_info(oi, 0);
319
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
320
+ trace_guest_ld_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi);
321
322
orig_memop = memop;
323
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
324
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
325
{
326
TCGv_i32 swap = NULL;
327
MemOpIdx oi;
328
- uint16_t info;
329
330
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
331
memop = tcg_canonicalize_memop(memop, 0, 1);
332
oi = make_memop_idx(memop, idx);
333
- info = trace_mem_get_info(oi, 1);
334
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
335
+ trace_guest_st_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi);
336
337
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
338
swap = tcg_temp_new_i32();
339
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
340
{
341
MemOp orig_memop;
342
MemOpIdx oi;
343
- uint16_t info;
344
345
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
346
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
347
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
348
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
349
memop = tcg_canonicalize_memop(memop, 1, 0);
350
oi = make_memop_idx(memop, idx);
351
- info = trace_mem_get_info(oi, 0);
352
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
353
+ trace_guest_ld_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi);
354
355
orig_memop = memop;
356
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
357
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
358
{
359
TCGv_i64 swap = NULL;
360
MemOpIdx oi;
361
- uint16_t info;
362
363
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
364
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
365
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
366
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
367
memop = tcg_canonicalize_memop(memop, 1, 1);
368
oi = make_memop_idx(memop, idx);
369
- info = trace_mem_get_info(oi, 1);
370
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
371
+ trace_guest_st_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi);
372
373
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
374
swap = tcg_temp_new_i64();
375
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
376
index XXXXXXX..XXXXXXX 100644
377
--- a/accel/tcg/atomic_common.c.inc
378
+++ b/accel/tcg/atomic_common.c.inc
379
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
380
MemOpIdx oi)
381
{
382
CPUState *cpu = env_cpu(env);
383
- uint16_t info = trace_mem_get_info(oi, false);
384
385
- trace_guest_mem_before_exec(cpu, addr, info);
386
- trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
387
+ trace_guest_rmw_before_exec(cpu, addr, oi);
388
}
389
390
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
391
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
392
static void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
393
MemOpIdx oi)
394
{
395
- uint16_t info = trace_mem_get_info(oi, false);
396
-
397
- trace_guest_mem_before_exec(env_cpu(env), addr, info);
398
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
399
}
400
401
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
402
@@ -XXX,XX +XXX,XX @@ static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
403
static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
404
MemOpIdx oi)
405
{
406
- uint16_t info = trace_mem_get_info(oi, true);
407
-
408
- trace_guest_mem_before_exec(env_cpu(env), addr, info);
409
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
410
}
411
412
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
413
diff --git a/trace-events b/trace-events
414
index XXXXXXX..XXXXXXX 100644
415
--- a/trace-events
416
+++ b/trace-events
417
@@ -XXX,XX +XXX,XX @@ vcpu guest_cpu_reset(void)
418
# tcg/tcg-op.c
419
420
# @vaddr: Access' virtual address.
421
-# @info : Access' information (see below).
422
+# @memopidx: Access' information (see below).
423
#
424
# Start virtual memory access (before any potential access violation).
425
-#
426
# Does not include memory accesses performed by devices.
427
#
428
-# Access information can be parsed as:
429
-#
430
-# struct mem_info {
431
-# uint8_t size_shift : 4; /* interpreted as "1 << size_shift" bytes */
432
-# bool sign_extend: 1; /* sign-extended */
433
-# uint8_t endianness : 1; /* 0: little, 1: big */
434
-# bool store : 1; /* whether it is a store operation */
435
-# pad : 1;
436
-# uint8_t mmuidx : 4; /* mmuidx (softmmu only) */
437
-# };
438
-#
439
# Mode: user, softmmu
440
# Targets: TCG(all)
441
-vcpu tcg guest_mem_before(TCGv vaddr, uint16_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d"
442
+vcpu tcg guest_ld_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x"
443
+vcpu tcg guest_st_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x"
444
+vcpu tcg guest_rmw_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x"
445
446
# include/user/syscall-trace.h
447
448
--
84
--
449
2.25.1
85
2.25.1
450
86
451
87
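On the tracing change above: guest_ld/st/rmw_before now carry the MemOpIdx itself instead of the hand-rolled info word, so a trace consumer decodes it with the MemOpIdx accessors (get_memop()/get_mmuidx()). A standalone sketch of that decoding; the 4-bit mmu-index packing written out below mirrors those accessors but is stated here as an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t oi = 0x52;             /* example memopidx value from a trace */
    unsigned memop  = oi >> 4;      /* the MemOp: size, sign-extend, byte swap */
    unsigned mmuidx = oi & 0xf;     /* softmmu mmu index */

    printf("memop=%#x mmuidx=%u\n", memop, mmuidx);
    return 0;
}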
1
We have lacked expressive support for memory sizes larger
1
Pull the "op r, a, 0 => movi r, 0" optimization into a function,
2
than 64 bits for a while. Fixing that requires adjustment
2
and use it in the outer opcode fold functions.
3
to several points where we used this for array indexing,
4
and two places that develop -Wswitch warnings after the change.
5
3
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
include/exec/memop.h | 14 +++++++++-----
8
tcg/optimize.c | 38 ++++++++++++++++++++------------------
11
target/arm/translate-a64.c | 2 +-
9
1 file changed, 20 insertions(+), 18 deletions(-)
12
tcg/tcg-op.c | 13 ++++++++-----
13
target/s390x/tcg/translate_vx.c.inc | 2 +-
14
tcg/aarch64/tcg-target.c.inc | 4 ++--
15
tcg/arm/tcg-target.c.inc | 4 ++--
16
tcg/i386/tcg-target.c.inc | 4 ++--
17
tcg/mips/tcg-target.c.inc | 4 ++--
18
tcg/ppc/tcg-target.c.inc | 8 ++++----
19
tcg/riscv/tcg-target.c.inc | 4 ++--
20
tcg/s390/tcg-target.c.inc | 4 ++--
21
tcg/sparc/tcg-target.c.inc | 16 ++++++++--------
22
12 files changed, 43 insertions(+), 36 deletions(-)
23
10
24
diff --git a/include/exec/memop.h b/include/exec/memop.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
25
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
26
--- a/include/exec/memop.h
13
--- a/tcg/optimize.c
27
+++ b/include/exec/memop.h
14
+++ b/tcg/optimize.c
28
@@ -XXX,XX +XXX,XX @@ typedef enum MemOp {
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
29
MO_16 = 1,
16
return false;
30
MO_32 = 2,
17
}
31
MO_64 = 3,
18
32
- MO_SIZE = 3, /* Mask for the above. */
19
+/* If the binary operation has second argument @i, fold to @i. */
33
+ MO_128 = 4,
20
+static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
34
+ MO_256 = 5,
21
+{
35
+ MO_512 = 6,
22
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
36
+ MO_1024 = 7,
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
37
+ MO_SIZE = 0x07, /* Mask for the above. */
24
+ }
38
25
+ return false;
39
- MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
26
+}
40
+ MO_SIGN = 0x08, /* Sign-extended, otherwise zero-extended. */
27
+
41
28
/* If the binary operation has both arguments equal, fold to @i. */
42
- MO_BSWAP = 8, /* Host reverse endian. */
29
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
43
+ MO_BSWAP = 0x10, /* Host reverse endian. */
44
#ifdef HOST_WORDS_BIGENDIAN
45
MO_LE = MO_BSWAP,
46
MO_BE = 0,
47
@@ -XXX,XX +XXX,XX @@ typedef enum MemOp {
48
* - an alignment to a specified size, which may be more or less than
49
* the access size (MO_ALIGN_x where 'x' is a size in bytes);
50
*/
51
- MO_ASHIFT = 4,
52
- MO_AMASK = 7 << MO_ASHIFT,
53
+ MO_ASHIFT = 5,
54
+ MO_AMASK = 0x7 << MO_ASHIFT,
55
#ifdef NEED_CPU_H
56
#ifdef TARGET_ALIGNED_ONLY
57
MO_ALIGN = 0,
58
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/translate-a64.c
61
+++ b/target/arm/translate-a64.c
62
@@ -XXX,XX +XXX,XX @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
63
int element, MemOp memop)
64
{
30
{
65
int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
66
- switch (memop) {
32
static bool fold_and(OptContext *ctx, TCGOp *op)
67
+ switch ((unsigned)memop) {
33
{
68
case MO_8:
34
if (fold_const2(ctx, op) ||
69
tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
35
+ fold_xi_to_i(ctx, op, 0) ||
70
break;
36
fold_xx_to_x(ctx, op)) {
71
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
37
return true;
72
index XXXXXXX..XXXXXXX 100644
73
--- a/tcg/tcg-op.c
74
+++ b/tcg/tcg-op.c
75
@@ -XXX,XX +XXX,XX @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
76
}
77
break;
78
case MO_64:
79
- if (!is64) {
80
- tcg_abort();
81
+ if (is64) {
82
+ op &= ~MO_SIGN;
83
+ break;
84
}
85
- break;
86
+ /* fall through */
87
+ default:
88
+ g_assert_not_reached();
89
}
38
}
90
if (st) {
39
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
91
op &= ~MO_SIGN;
40
92
@@ -XXX,XX +XXX,XX @@ typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
41
static bool fold_mul(OptContext *ctx, TCGOp *op)
93
# define WITH_ATOMIC64(X)
42
{
94
#endif
43
- return fold_const2(ctx, op);
95
44
+ if (fold_const2(ctx, op) ||
96
-static void * const table_cmpxchg[16] = {
45
+ fold_xi_to_i(ctx, op, 0)) {
97
+static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
46
+ return true;
98
[MO_8] = gen_helper_atomic_cmpxchgb,
47
+ }
99
[MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
48
+ return false;
100
[MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
101
@@ -XXX,XX +XXX,XX @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
102
}
49
}
103
50
104
#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \
51
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
105
-static void * const table_##NAME[16] = { \
106
+static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \
107
[MO_8] = gen_helper_atomic_##NAME##b, \
108
[MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \
109
[MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \
110
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
111
index XXXXXXX..XXXXXXX 100644
112
--- a/target/s390x/tcg/translate_vx.c.inc
113
+++ b/target/s390x/tcg/translate_vx.c.inc
114
@@ -XXX,XX +XXX,XX @@ static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
115
{
52
{
116
const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
53
- return fold_const2(ctx, op);
117
54
+ if (fold_const2(ctx, op) ||
118
- switch (memop) {
55
+ fold_xi_to_i(ctx, op, 0)) {
119
+ switch ((unsigned)memop) {
56
+ return true;
120
case ES_8:
57
+ }
121
tcg_gen_ld8u_i64(dst, cpu_env, offs);
58
+ return false;
122
break;
123
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
124
index XXXXXXX..XXXXXXX 100644
125
--- a/tcg/aarch64/tcg-target.c.inc
126
+++ b/tcg/aarch64/tcg-target.c.inc
127
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
128
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
129
* TCGMemOpIdx oi, uintptr_t ra)
130
*/
131
-static void * const qemu_ld_helpers[4] = {
132
+static void * const qemu_ld_helpers[MO_SIZE + 1] = {
133
[MO_8] = helper_ret_ldub_mmu,
134
#ifdef HOST_WORDS_BIGENDIAN
135
[MO_16] = helper_be_lduw_mmu,
136
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[4] = {
137
* uintxx_t val, TCGMemOpIdx oi,
138
* uintptr_t ra)
139
*/
140
-static void * const qemu_st_helpers[4] = {
141
+static void * const qemu_st_helpers[MO_SIZE + 1] = {
142
[MO_8] = helper_ret_stb_mmu,
143
#ifdef HOST_WORDS_BIGENDIAN
144
[MO_16] = helper_be_stw_mmu,
145
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
146
index XXXXXXX..XXXXXXX 100644
147
--- a/tcg/arm/tcg-target.c.inc
148
+++ b/tcg/arm/tcg-target.c.inc
149
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
150
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
151
* int mmu_idx, uintptr_t ra)
152
*/
153
-static void * const qemu_ld_helpers[8] = {
154
+static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
155
[MO_UB] = helper_ret_ldub_mmu,
156
[MO_SB] = helper_ret_ldsb_mmu,
157
#ifdef HOST_WORDS_BIGENDIAN
158
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[8] = {
159
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
160
* uintxx_t val, int mmu_idx, uintptr_t ra)
161
*/
162
-static void * const qemu_st_helpers[4] = {
163
+static void * const qemu_st_helpers[MO_SIZE + 1] = {
164
[MO_8] = helper_ret_stb_mmu,
165
#ifdef HOST_WORDS_BIGENDIAN
166
[MO_16] = helper_be_stw_mmu,
167
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
168
index XXXXXXX..XXXXXXX 100644
169
--- a/tcg/i386/tcg-target.c.inc
170
+++ b/tcg/i386/tcg-target.c.inc
171
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
172
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
173
* int mmu_idx, uintptr_t ra)
174
*/
175
-static void * const qemu_ld_helpers[16] = {
176
+static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
177
[MO_UB] = helper_ret_ldub_mmu,
178
[MO_LEUW] = helper_le_lduw_mmu,
179
[MO_LEUL] = helper_le_ldul_mmu,
180
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[16] = {
181
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
182
* uintxx_t val, int mmu_idx, uintptr_t ra)
183
*/
184
-static void * const qemu_st_helpers[16] = {
185
+static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
186
[MO_UB] = helper_ret_stb_mmu,
187
[MO_LEUW] = helper_le_stw_mmu,
188
[MO_LEUL] = helper_le_stl_mmu,
189
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
190
index XXXXXXX..XXXXXXX 100644
191
--- a/tcg/mips/tcg-target.c.inc
192
+++ b/tcg/mips/tcg-target.c.inc
193
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
194
#if defined(CONFIG_SOFTMMU)
195
#include "../tcg-ldst.c.inc"
196
197
-static void * const qemu_ld_helpers[16] = {
198
+static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
199
[MO_UB] = helper_ret_ldub_mmu,
200
[MO_SB] = helper_ret_ldsb_mmu,
201
[MO_LEUW] = helper_le_lduw_mmu,
202
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[16] = {
203
#endif
204
};
205
206
-static void * const qemu_st_helpers[16] = {
207
+static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
208
[MO_UB] = helper_ret_stb_mmu,
209
[MO_LEUW] = helper_le_stw_mmu,
210
[MO_LEUL] = helper_le_stl_mmu,
211
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
212
index XXXXXXX..XXXXXXX 100644
213
--- a/tcg/ppc/tcg-target.c.inc
214
+++ b/tcg/ppc/tcg-target.c.inc
215
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
216
#endif
217
}
59
}
218
60
219
-static const uint32_t qemu_ldx_opc[16] = {
61
static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
220
+static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
221
[MO_UB] = LBZX,
222
[MO_UW] = LHZX,
223
[MO_UL] = LWZX,
224
@@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_ldx_opc[16] = {
225
[MO_BSWAP | MO_Q] = LDBRX,
226
};
227
228
-static const uint32_t qemu_stx_opc[16] = {
229
+static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
230
[MO_UB] = STBX,
231
[MO_UW] = STHX,
232
[MO_UL] = STWX,
233
@@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_exts_opc[4] = {
234
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
235
* int mmu_idx, uintptr_t ra)
236
*/
237
-static void * const qemu_ld_helpers[16] = {
238
+static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
239
[MO_UB] = helper_ret_ldub_mmu,
240
[MO_LEUW] = helper_le_lduw_mmu,
241
[MO_LEUL] = helper_le_ldul_mmu,
242
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[16] = {
243
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
244
* uintxx_t val, int mmu_idx, uintptr_t ra)
245
*/
246
-static void * const qemu_st_helpers[16] = {
247
+static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
248
[MO_UB] = helper_ret_stb_mmu,
249
[MO_LEUW] = helper_le_stw_mmu,
250
[MO_LEUL] = helper_le_stl_mmu,
251
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
252
index XXXXXXX..XXXXXXX 100644
253
--- a/tcg/riscv/tcg-target.c.inc
254
+++ b/tcg/riscv/tcg-target.c.inc
255
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
256
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
257
* TCGMemOpIdx oi, uintptr_t ra)
258
*/
259
-static void * const qemu_ld_helpers[8] = {
260
+static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
261
[MO_UB] = helper_ret_ldub_mmu,
262
[MO_SB] = helper_ret_ldsb_mmu,
263
#ifdef HOST_WORDS_BIGENDIAN
264
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[8] = {
265
* uintxx_t val, TCGMemOpIdx oi,
266
* uintptr_t ra)
267
*/
268
-static void * const qemu_st_helpers[4] = {
269
+static void * const qemu_st_helpers[MO_SIZE + 1] = {
270
[MO_8] = helper_ret_stb_mmu,
271
#ifdef HOST_WORDS_BIGENDIAN
272
[MO_16] = helper_be_stw_mmu,
273
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
274
index XXXXXXX..XXXXXXX 100644
275
--- a/tcg/s390/tcg-target.c.inc
276
+++ b/tcg/s390/tcg-target.c.inc
277
@@ -XXX,XX +XXX,XX @@ static const uint8_t tcg_cond_to_ltr_cond[] = {
278
};
279
280
#ifdef CONFIG_SOFTMMU
281
-static void * const qemu_ld_helpers[16] = {
282
+static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
283
[MO_UB] = helper_ret_ldub_mmu,
284
[MO_SB] = helper_ret_ldsb_mmu,
285
[MO_LEUW] = helper_le_lduw_mmu,
286
@@ -XXX,XX +XXX,XX @@ static void * const qemu_ld_helpers[16] = {
287
[MO_BEQ] = helper_be_ldq_mmu,
288
};
289
290
-static void * const qemu_st_helpers[16] = {
291
+static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
292
[MO_UB] = helper_ret_stb_mmu,
293
[MO_LEUW] = helper_le_stw_mmu,
294
[MO_LEUL] = helper_le_stl_mmu,
295
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
296
index XXXXXXX..XXXXXXX 100644
297
--- a/tcg/sparc/tcg-target.c.inc
298
+++ b/tcg/sparc/tcg-target.c.inc
299
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
300
}
301
302
#ifdef CONFIG_SOFTMMU
303
-static const tcg_insn_unit *qemu_ld_trampoline[16];
304
-static const tcg_insn_unit *qemu_st_trampoline[16];
305
+static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
306
+static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];
307
308
static void emit_extend(TCGContext *s, TCGReg r, int op)
309
{
310
@@ -XXX,XX +XXX,XX @@ static void emit_extend(TCGContext *s, TCGReg r, int op)
311
312
static void build_trampolines(TCGContext *s)
313
{
314
- static void * const qemu_ld_helpers[16] = {
315
+ static void * const qemu_ld_helpers[] = {
316
[MO_UB] = helper_ret_ldub_mmu,
317
[MO_SB] = helper_ret_ldsb_mmu,
318
[MO_LEUW] = helper_le_lduw_mmu,
319
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
320
[MO_BEUL] = helper_be_ldul_mmu,
321
[MO_BEQ] = helper_be_ldq_mmu,
322
};
323
- static void * const qemu_st_helpers[16] = {
324
+ static void * const qemu_st_helpers[] = {
325
[MO_UB] = helper_ret_stb_mmu,
326
[MO_LEUW] = helper_le_stw_mmu,
327
[MO_LEUL] = helper_le_stl_mmu,
328
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
329
int i;
330
TCGReg ra;
331
332
- for (i = 0; i < 16; ++i) {
333
+ for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
334
if (qemu_ld_helpers[i] == NULL) {
335
continue;
63
continue;
336
}
64
}
337
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
65
338
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
66
- /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
339
}
67
- switch (opc) {
340
68
- CASE_OP_32_64_VEC(and):
341
- for (i = 0; i < 16; ++i) {
69
- CASE_OP_32_64_VEC(mul):
342
+ for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
70
- CASE_OP_32_64(muluh):
343
if (qemu_st_helpers[i] == NULL) {
71
- CASE_OP_32_64(mulsh):
344
continue;
72
- if (arg_is_const(op->args[2])
345
}
73
- && arg_info(op->args[2])->val == 0) {
346
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
74
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
347
}
75
- continue;
348
#endif /* CONFIG_SOFTMMU */
76
- }
349
77
- break;
350
-static const int qemu_ld_opc[16] = {
78
- default:
351
+static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
79
- break;
352
[MO_UB] = LDUB,
80
- }
353
[MO_SB] = LDSB,
81
-
354
82
/*
355
@@ -XXX,XX +XXX,XX @@ static const int qemu_ld_opc[16] = {
83
* Process each opcode.
356
[MO_LEQ] = LDX_LE,
84
* Sorted alphabetically by opcode as much as possible.
357
};
358
359
-static const int qemu_st_opc[16] = {
360
+static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
361
[MO_UB] = STB,
362
363
[MO_BEUW] = STH,
364
--
85
--
365
2.25.1
86
2.25.1
366
87
367
88
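A quick check of the MO_SIZE widening above: the field is a log2 byte count, so growing it from 2 bits to 3 (mask 0x07) makes room for 128-bit and larger accesses. A standalone illustration using the values from the hunk:

#include <stdio.h>

enum {
    MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
    MO_128 = 4, MO_256 = 5, MO_512 = 6, MO_1024 = 7,
    MO_SIZE = 0x07,
};

int main(void)
{
    /* Access size in bytes is 1 << (op & MO_SIZE). */
    for (int op = MO_8; op <= MO_1024; op++) {
        printf("MO_SIZE=%d -> %4d bytes (%4d bits)\n",
               op, 1 << (op & MO_SIZE), 8 << (op & MO_SIZE));
    }
    return 0;
}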
New patch
1
1
Compute the type of the operation early.
2
3
There are at least 4 places that used a def->flags ladder
4
to determine the type of the operation being optimized.
5
6
There were two places that assumed !TCG_OPF_64BIT means
7
TCG_TYPE_I32, and so could potentially compute incorrect
8
results for vector operations.
9
10
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
tcg/optimize.c | 149 +++++++++++++++++++++++++++++--------------------
14
1 file changed, 89 insertions(+), 60 deletions(-)
15
16
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/optimize.c
19
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
21
22
/* In flight values from optimization. */
23
uint64_t z_mask;
24
+ TCGType type;
25
} OptContext;
26
27
static inline TempOptInfo *ts_info(TCGTemp *ts)
28
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
{
30
TCGTemp *dst_ts = arg_temp(dst);
31
TCGTemp *src_ts = arg_temp(src);
32
- const TCGOpDef *def;
33
TempOptInfo *di;
34
TempOptInfo *si;
35
uint64_t z_mask;
36
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
37
reset_ts(dst_ts);
38
di = ts_info(dst_ts);
39
si = ts_info(src_ts);
40
- def = &tcg_op_defs[op->opc];
41
- if (def->flags & TCG_OPF_VECTOR) {
42
- new_op = INDEX_op_mov_vec;
43
- } else if (def->flags & TCG_OPF_64BIT) {
44
- new_op = INDEX_op_mov_i64;
45
- } else {
46
+
47
+ switch (ctx->type) {
48
+ case TCG_TYPE_I32:
49
new_op = INDEX_op_mov_i32;
50
+ break;
51
+ case TCG_TYPE_I64:
52
+ new_op = INDEX_op_mov_i64;
53
+ break;
54
+ case TCG_TYPE_V64:
55
+ case TCG_TYPE_V128:
56
+ case TCG_TYPE_V256:
57
+ /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
58
+ new_op = INDEX_op_mov_vec;
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
}
63
op->opc = new_op;
64
- /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
65
op->args[0] = dst;
66
op->args[1] = src;
67
68
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
69
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
70
TCGArg dst, uint64_t val)
71
{
72
- const TCGOpDef *def = &tcg_op_defs[op->opc];
73
- TCGType type;
74
- TCGTemp *tv;
75
-
76
- if (def->flags & TCG_OPF_VECTOR) {
77
- type = TCGOP_VECL(op) + TCG_TYPE_V64;
78
- } else if (def->flags & TCG_OPF_64BIT) {
79
- type = TCG_TYPE_I64;
80
- } else {
81
- type = TCG_TYPE_I32;
82
- }
83
-
84
/* Convert movi to mov with constant temp. */
85
- tv = tcg_constant_internal(type, val);
86
+ TCGTemp *tv = tcg_constant_internal(ctx->type, val);
87
+
88
init_ts_info(ctx, tv);
89
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
90
}
91
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
92
}
93
}
94
95
-static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
96
+static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
97
+ uint64_t x, uint64_t y)
98
{
99
- const TCGOpDef *def = &tcg_op_defs[op];
100
uint64_t res = do_constant_folding_2(op, x, y);
101
- if (!(def->flags & TCG_OPF_64BIT)) {
102
+ if (type == TCG_TYPE_I32) {
103
res = (int32_t)res;
104
}
105
return res;
106
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
107
* Return -1 if the condition can't be simplified,
108
* and the result of the condition (0 or 1) if it can.
109
*/
110
-static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
111
+static int do_constant_folding_cond(TCGType type, TCGArg x,
112
TCGArg y, TCGCond c)
113
{
114
uint64_t xv = arg_info(x)->val;
115
uint64_t yv = arg_info(y)->val;
116
117
if (arg_is_const(x) && arg_is_const(y)) {
118
- const TCGOpDef *def = &tcg_op_defs[op];
119
- tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
120
- if (def->flags & TCG_OPF_64BIT) {
121
- return do_constant_folding_cond_64(xv, yv, c);
122
- } else {
123
+ switch (type) {
124
+ case TCG_TYPE_I32:
125
return do_constant_folding_cond_32(xv, yv, c);
126
+ case TCG_TYPE_I64:
127
+ return do_constant_folding_cond_64(xv, yv, c);
128
+ default:
129
+ /* Only scalar comparisons are optimizable */
130
+ return -1;
131
}
132
} else if (args_are_copies(x, y)) {
133
return do_constant_folding_cond_eq(c);
134
@@ -XXX,XX +XXX,XX @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
135
uint64_t t;
136
137
t = arg_info(op->args[1])->val;
138
- t = do_constant_folding(op->opc, t, 0);
139
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
140
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
141
}
142
return false;
143
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
144
uint64_t t1 = arg_info(op->args[1])->val;
145
uint64_t t2 = arg_info(op->args[2])->val;
146
147
- t1 = do_constant_folding(op->opc, t1, t2);
148
+ t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
149
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
150
}
151
return false;
152
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
153
static bool fold_brcond(OptContext *ctx, TCGOp *op)
154
{
155
TCGCond cond = op->args[2];
156
- int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
157
+ int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
158
159
if (i == 0) {
160
tcg_op_remove(ctx->tcg, op);
161
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
162
* Simplify EQ/NE comparisons where one of the pairs
163
* can be simplified.
164
*/
165
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
166
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
167
op->args[2], cond);
168
switch (i ^ inv) {
169
case 0:
170
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
171
goto do_brcond_high;
172
}
173
174
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
175
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
176
op->args[3], cond);
177
switch (i ^ inv) {
178
case 0:
179
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
180
if (arg_is_const(op->args[1])) {
181
uint64_t t = arg_info(op->args[1])->val;
182
183
- t = do_constant_folding(op->opc, t, op->args[2]);
184
+ t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
185
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
186
}
187
return false;
188
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
189
uint64_t t = arg_info(op->args[1])->val;
190
191
if (t != 0) {
192
- t = do_constant_folding(op->opc, t, 0);
193
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
194
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
195
}
196
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
197
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
198
199
static bool fold_movcond(OptContext *ctx, TCGOp *op)
200
{
201
- TCGOpcode opc = op->opc;
202
TCGCond cond = op->args[5];
203
- int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
204
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
205
206
if (i >= 0) {
207
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
208
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
209
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
210
uint64_t tv = arg_info(op->args[3])->val;
211
uint64_t fv = arg_info(op->args[4])->val;
212
+ TCGOpcode opc;
213
214
- opc = (opc == INDEX_op_movcond_i32
215
- ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
216
+ switch (ctx->type) {
217
+ case TCG_TYPE_I32:
218
+ opc = INDEX_op_setcond_i32;
219
+ break;
220
+ case TCG_TYPE_I64:
221
+ opc = INDEX_op_setcond_i64;
222
+ break;
223
+ default:
224
+ g_assert_not_reached();
225
+ }
226
227
if (tv == 1 && fv == 0) {
228
op->opc = opc;
229
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
230
static bool fold_setcond(OptContext *ctx, TCGOp *op)
231
{
232
TCGCond cond = op->args[3];
233
- int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
234
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
235
236
if (i >= 0) {
237
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
238
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
239
* Simplify EQ/NE comparisons where one of the pairs
240
* can be simplified.
241
*/
242
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
243
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
244
op->args[3], cond);
245
switch (i ^ inv) {
246
case 0:
247
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
248
goto do_setcond_high;
249
}
250
251
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
252
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
253
op->args[4], cond);
254
switch (i ^ inv) {
255
case 0:
256
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
257
init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
258
copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
259
260
+ /* Pre-compute the type of the operation. */
261
+ if (def->flags & TCG_OPF_VECTOR) {
262
+ ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
263
+ } else if (def->flags & TCG_OPF_64BIT) {
264
+ ctx.type = TCG_TYPE_I64;
265
+ } else {
266
+ ctx.type = TCG_TYPE_I32;
267
+ }
268
+
269
/* For commutative operations make constant second argument */
270
switch (opc) {
271
CASE_OP_32_64_VEC(add):
272
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
273
/* Proceed with possible constant folding. */
274
break;
275
}
276
- if (opc == INDEX_op_sub_i32) {
277
+ switch (ctx.type) {
278
+ case TCG_TYPE_I32:
279
neg_op = INDEX_op_neg_i32;
280
have_neg = TCG_TARGET_HAS_neg_i32;
281
- } else if (opc == INDEX_op_sub_i64) {
282
+ break;
283
+ case TCG_TYPE_I64:
284
neg_op = INDEX_op_neg_i64;
285
have_neg = TCG_TARGET_HAS_neg_i64;
286
- } else if (TCG_TARGET_HAS_neg_vec) {
287
- TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
288
- unsigned vece = TCGOP_VECE(op);
289
- neg_op = INDEX_op_neg_vec;
290
- have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
291
- } else {
292
break;
293
+ case TCG_TYPE_V64:
294
+ case TCG_TYPE_V128:
295
+ case TCG_TYPE_V256:
296
+ neg_op = INDEX_op_neg_vec;
297
+ have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
298
+ TCGOP_VECE(op)) > 0;
299
+ break;
300
+ default:
301
+ g_assert_not_reached();
302
}
303
if (!have_neg) {
304
break;
305
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
306
TCGOpcode not_op;
307
bool have_not;
308
309
- if (def->flags & TCG_OPF_VECTOR) {
310
- not_op = INDEX_op_not_vec;
311
- have_not = TCG_TARGET_HAS_not_vec;
312
- } else if (def->flags & TCG_OPF_64BIT) {
313
- not_op = INDEX_op_not_i64;
314
- have_not = TCG_TARGET_HAS_not_i64;
315
- } else {
316
+ switch (ctx.type) {
317
+ case TCG_TYPE_I32:
318
not_op = INDEX_op_not_i32;
319
have_not = TCG_TARGET_HAS_not_i32;
320
+ break;
321
+ case TCG_TYPE_I64:
322
+ not_op = INDEX_op_not_i64;
323
+ have_not = TCG_TARGET_HAS_not_i64;
324
+ break;
325
+ case TCG_TYPE_V64:
326
+ case TCG_TYPE_V128:
327
+ case TCG_TYPE_V256:
328
+ not_op = INDEX_op_not_vec;
329
+ have_not = TCG_TARGET_HAS_not_vec;
330
+ break;
331
+ default:
332
+ g_assert_not_reached();
333
}
334
if (!have_not) {
335
break;
336
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
337
below, we can ignore high bits, but for further optimizations we
338
need to record that the high bits contain garbage. */
339
partmask = z_mask;
340
- if (!(def->flags & TCG_OPF_64BIT)) {
341
+ if (ctx.type == TCG_TYPE_I32) {
342
z_mask |= ~(tcg_target_ulong)0xffffffffu;
343
partmask &= 0xffffffffu;
344
affected &= 0xffffffffu;
345
--
346
2.25.1
347
348
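One consequence of carrying ctx->type explicitly, as in the patch above: constant folding for a 32-bit opcode must truncate the host-width result, which is exactly what do_constant_folding() now keys off the type for. A standalone illustration of that truncation (not QEMU code):

#include <stdint.h>
#include <stdio.h>

static uint64_t fold_add_sketch(int is_i32, uint64_t x, uint64_t y)
{
    uint64_t res = x + y;       /* arithmetic done at host width */
    if (is_i32) {
        res = (int32_t)res;     /* TCG_TYPE_I32: keep only 32 bits */
    }
    return res;
}

int main(void)
{
    /* add_i32 of 0xffffffff and 1 folds to 0, not 0x100000000. */
    printf("0x%llx\n", (unsigned long long)fold_add_sketch(1, 0xffffffffull, 1));
    return 0;
}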
1
Split out the conditional conversion from a more complex logical
2
operation to a simple NOT. Create a couple more helpers to make
3
this easy for the outer-most logical operations.
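Concretely, the helpers added below are meant to let folds such as "xor r, a, -1" or "andc r, -1, a" degenerate into "not r, a" when the host has a NOT. The bit algebra behind that, as a standalone check (plain C, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t a = 0x12345678;

    /* xor r, a, -1   ==>  not r, a */
    printf("0x%08x == 0x%08x\n", a ^ 0xffffffffu, ~a);
    /* andc r, -1, a  ==>  not r, a   (andc computes x & ~y) */
    printf("0x%08x == 0x%08x\n", 0xffffffffu & ~a, ~a);
    return 0;
}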
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
7
---
3
tcg/s390x/tcg-target.c.inc | 122 ++++++++++++++++++++++++++++++++++++-
8
tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
4
1 file changed, 119 insertions(+), 3 deletions(-)
9
1 file changed, 86 insertions(+), 72 deletions(-)
5
10
6
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/s390x/tcg-target.c.inc
13
--- a/tcg/optimize.c
9
+++ b/tcg/s390x/tcg-target.c.inc
14
+++ b/tcg/optimize.c
10
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
11
RX_STC = 0x42,
16
return false;
12
RX_STH = 0x40,
17
}
13
18
14
+ VRIa_VGBM = 0xe744,
19
+/*
15
+ VRIa_VREPI = 0xe745,
20
+ * Convert @op to NOT, if NOT is supported by the host.
16
+ VRIb_VGM = 0xe746,
21
+ * Return true if the conversion is successful, which will still
17
+ VRIc_VREP = 0xe74d,
22
+ * indicate that the processing is complete.
18
+
23
+ */
19
VRRa_VLR = 0xe756,
24
+static bool fold_not(OptContext *ctx, TCGOp *op);
20
+ VRRf_VLVGP = 0xe762,
25
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
21
22
VRSb_VLVG = 0xe722,
23
VRSc_VLGV = 0xe721,
24
25
VRX_VL = 0xe706,
26
VRX_VLLEZ = 0xe704,
27
+ VRX_VLREP = 0xe705,
28
VRX_VST = 0xe70e,
29
VRX_VSTEF = 0xe70b,
30
VRX_VSTEG = 0xe70a,
31
@@ -XXX,XX +XXX,XX @@ static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
32
| ((v4 & 0x10) << (4 + 0));
33
}
34
35
+static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
36
+ TCGReg v1, uint16_t i2, int m3)
37
+{
26
+{
38
+ tcg_debug_assert(is_vector_reg(v1));
27
+ TCGOpcode not_op;
39
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
28
+ bool have_not;
40
+ tcg_out16(s, i2);
29
+
41
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
30
+ switch (ctx->type) {
31
+ case TCG_TYPE_I32:
32
+ not_op = INDEX_op_not_i32;
33
+ have_not = TCG_TARGET_HAS_not_i32;
34
+ break;
35
+ case TCG_TYPE_I64:
36
+ not_op = INDEX_op_not_i64;
37
+ have_not = TCG_TARGET_HAS_not_i64;
38
+ break;
39
+ case TCG_TYPE_V64:
40
+ case TCG_TYPE_V128:
41
+ case TCG_TYPE_V256:
42
+ not_op = INDEX_op_not_vec;
43
+ have_not = TCG_TARGET_HAS_not_vec;
44
+ break;
45
+ default:
46
+ g_assert_not_reached();
47
+ }
48
+ if (have_not) {
49
+ op->opc = not_op;
50
+ op->args[1] = op->args[idx];
51
+ return fold_not(ctx, op);
52
+ }
53
+ return false;
42
+}
54
+}
43
+
55
+
44
+static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
56
+/* If the binary operation has first argument @i, fold to NOT. */
45
+ TCGReg v1, uint8_t i2, uint8_t i3, int m4)
57
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
46
+{
58
+{
47
+ tcg_debug_assert(is_vector_reg(v1));
59
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
48
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
60
+ return fold_to_not(ctx, op, 2);
49
+ tcg_out16(s, (i2 << 8) | (i3 & 0xff));
61
+ }
50
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
62
+ return false;
51
+}
63
+}
52
+
64
+
53
+static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
65
/* If the binary operation has second argument @i, fold to @i. */
54
+ TCGReg v1, uint16_t i2, TCGReg v3, int m4)
66
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
67
{
68
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
69
return false;
70
}
71
72
+/* If the binary operation has second argument @i, fold to NOT. */
73
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
55
+{
74
+{
56
+ tcg_debug_assert(is_vector_reg(v1));
75
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
57
+ tcg_debug_assert(is_vector_reg(v3));
76
+ return fold_to_not(ctx, op, 1);
58
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
77
+ }
59
+ tcg_out16(s, i2);
78
+ return false;
60
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
61
+}
79
+}
62
+
80
+
63
static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
81
/* If the binary operation has both arguments equal, fold to @i. */
64
TCGReg v1, TCGReg v2, int m3)
82
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
65
{
83
{
66
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
84
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
67
tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
85
static bool fold_andc(OptContext *ctx, TCGOp *op)
68
}
86
{
69
87
if (fold_const2(ctx, op) ||
70
+static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
88
- fold_xx_to_i(ctx, op, 0)) {
71
+ TCGReg v1, TCGReg r2, TCGReg r3)
89
+ fold_xx_to_i(ctx, op, 0) ||
72
+{
90
+ fold_ix_to_not(ctx, op, -1)) {
73
+ tcg_debug_assert(is_vector_reg(v1));
91
return true;
74
+ tcg_debug_assert(is_general_reg(r2));
92
}
75
+ tcg_debug_assert(is_general_reg(r3));
93
return false;
76
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
94
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
77
+ tcg_out16(s, r3 << 12);
95
78
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
96
static bool fold_eqv(OptContext *ctx, TCGOp *op)
79
+}
97
{
80
+
98
- return fold_const2(ctx, op);
81
static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
99
+ if (fold_const2(ctx, op) ||
82
intptr_t d2, TCGReg b2, TCGReg r3, int m4)
100
+ fold_xi_to_not(ctx, op, 0)) {
83
{
101
+ return true;
84
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
102
+ }
85
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
103
+ return false;
86
TCGReg dst, TCGReg src)
104
}
87
{
105
88
- g_assert_not_reached();
106
static bool fold_extract(OptContext *ctx, TCGOp *op)
89
+ if (is_general_reg(src)) {
107
@@ -XXX,XX +XXX,XX @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
90
+ /* Replicate general register into two MO_64. */
108
91
+ tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
109
static bool fold_nand(OptContext *ctx, TCGOp *op)
92
+ if (vece == MO_64) {
110
{
93
+ return true;
111
- return fold_const2(ctx, op);
94
+ }
112
+ if (fold_const2(ctx, op) ||
95
+ }
113
+ fold_xi_to_not(ctx, op, -1)) {
96
+
114
+ return true;
97
+ /*
115
+ }
98
+ * Recall that the "standard" integer, within a vector, is the
116
+ return false;
99
+ * rightmost element of the leftmost doubleword, a-la VLLEZ.
117
}
100
+ */
118
101
+ tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
119
static bool fold_neg(OptContext *ctx, TCGOp *op)
120
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
121
122
static bool fold_nor(OptContext *ctx, TCGOp *op)
123
{
124
- return fold_const2(ctx, op);
125
+ if (fold_const2(ctx, op) ||
126
+ fold_xi_to_not(ctx, op, 0)) {
127
+ return true;
128
+ }
129
+ return false;
130
}
131
132
static bool fold_not(OptContext *ctx, TCGOp *op)
133
{
134
- return fold_const1(ctx, op);
135
+ if (fold_const1(ctx, op)) {
136
+ return true;
137
+ }
138
+
139
+ /* Because of fold_to_not, we want to always return true, via finish. */
140
+ finish_folding(ctx, op);
102
+ return true;
141
+ return true;
103
}
142
}
104
143
105
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
144
static bool fold_or(OptContext *ctx, TCGOp *op)
106
TCGReg dst, TCGReg base, intptr_t offset)
145
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
107
{
146
108
- g_assert_not_reached();
147
static bool fold_orc(OptContext *ctx, TCGOp *op)
109
+ tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
148
{
110
+ return true;
149
- return fold_const2(ctx, op);
111
}
150
+ if (fold_const2(ctx, op) ||
112
151
+ fold_ix_to_not(ctx, op, 0)) {
113
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
152
+ return true;
114
TCGReg dst, int64_t val)
153
+ }
115
{
154
+ return false;
116
- g_assert_not_reached();
155
}
117
+ int i, mask, msb, lsb;
156
118
+
157
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
119
+ /* Look for int16_t elements. */
158
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
120
+ if (vece <= MO_16 ||
159
static bool fold_xor(OptContext *ctx, TCGOp *op)
121
+ (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
160
{
122
+ tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
161
if (fold_const2(ctx, op) ||
123
+ return;
162
- fold_xx_to_i(ctx, op, 0)) {
124
+ }
163
+ fold_xx_to_i(ctx, op, 0) ||
125
+
164
+ fold_xi_to_not(ctx, op, -1)) {
126
+ /* Look for bit masks. */
165
return true;
127
+ if (vece == MO_32) {
166
}
128
+ if (risbg_mask((int32_t)val)) {
167
return false;
129
+ /* Handle wraparound by swapping msb and lsb. */
168
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
130
+ if ((val & 0x80000001u) == 0x80000001u) {
169
}
131
+ msb = 32 - ctz32(~val);
170
}
132
+ lsb = clz32(~val) - 1;
171
break;
133
+ } else {
172
- CASE_OP_32_64_VEC(xor):
134
+ msb = clz32(val);
173
- CASE_OP_32_64(nand):
135
+ lsb = 31 - ctz32(val);
174
- if (!arg_is_const(op->args[1])
136
+ }
175
- && arg_is_const(op->args[2])
137
+ tcg_out_insn(s, VRIb, VGM, dst, lsb, msb, MO_32);
176
- && arg_info(op->args[2])->val == -1) {
138
+ return;
177
- i = 1;
139
+ }
178
- goto try_not;
140
+ } else {
179
- }
141
+ if (risbg_mask(val)) {
180
- break;
142
+ /* Handle wraparound by swapping msb and lsb. */
181
- CASE_OP_32_64(nor):
143
+ if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
182
- if (!arg_is_const(op->args[1])
144
+ /* Handle wraparound by swapping msb and lsb. */
183
- && arg_is_const(op->args[2])
145
+ msb = 64 - ctz64(~val);
184
- && arg_info(op->args[2])->val == 0) {
146
+ lsb = clz64(~val) - 1;
185
- i = 1;
147
+ } else {
186
- goto try_not;
148
+ msb = clz64(val);
187
- }
149
+ lsb = 63 - ctz64(val);
188
- break;
150
+ }
189
- CASE_OP_32_64_VEC(andc):
151
+ tcg_out_insn(s, VRIb, VGM, dst, lsb, msb, MO_64);
190
- if (!arg_is_const(op->args[2])
152
+ return;
191
- && arg_is_const(op->args[1])
153
+ }
192
- && arg_info(op->args[1])->val == -1) {
154
+ }
193
- i = 2;
155
+
194
- goto try_not;
156
+ /* Look for all bytes 0x00 or 0xff. */
195
- }
157
+ for (i = mask = 0; i < 8; i++) {
196
- break;
158
+ uint8_t byte = val >> (i * 8);
197
- CASE_OP_32_64_VEC(orc):
159
+ if (byte == 0xff) {
198
- CASE_OP_32_64(eqv):
160
+ mask |= 1 << i;
199
- if (!arg_is_const(op->args[2])
161
+ } else if (byte != 0) {
200
- && arg_is_const(op->args[1])
162
+ break;
201
- && arg_info(op->args[1])->val == 0) {
163
+ }
202
- i = 2;
164
+ }
203
- goto try_not;
165
+ if (i == 8) {
204
- }
166
+ tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
205
- break;
167
+ return;
206
- try_not:
168
+ }
207
- {
169
+
208
- TCGOpcode not_op;
170
+ /* Otherwise, stuff it in the constant pool. */
209
- bool have_not;
171
+ tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
210
-
172
+ new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
211
- switch (ctx.type) {
173
+ tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
212
- case TCG_TYPE_I32:
174
}
213
- not_op = INDEX_op_not_i32;
175
214
- have_not = TCG_TARGET_HAS_not_i32;
176
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
215
- break;
216
- case TCG_TYPE_I64:
217
- not_op = INDEX_op_not_i64;
218
- have_not = TCG_TARGET_HAS_not_i64;
219
- break;
220
- case TCG_TYPE_V64:
221
- case TCG_TYPE_V128:
222
- case TCG_TYPE_V256:
223
- not_op = INDEX_op_not_vec;
224
- have_not = TCG_TARGET_HAS_not_vec;
225
- break;
226
- default:
227
- g_assert_not_reached();
228
- }
229
- if (!have_not) {
230
- break;
231
- }
232
- op->opc = not_op;
233
- reset_temp(op->args[0]);
234
- op->args[1] = op->args[i];
235
- continue;
236
- }
237
default:
238
break;
239
}
177
--
240
--
178
2.25.1
241
2.25.1
179
242
180
243
New patch
1
Even though there is only one user, place this more complex
2
conversion into its own helper.
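A minimal illustration (not part of the patch) of why the rewrite is safe: in two's complement, subtracting from zero and negating are the same operation.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t b = 0xdeadbeefu;

        /* sub r, 0, b  may become  neg r, b */
        assert((uint32_t)(0u - b) == (uint32_t)(~b + 1u));
        return 0;
    }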
1
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 89 ++++++++++++++++++++++++++------------------------
8
1 file changed, 47 insertions(+), 42 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
15
16
static bool fold_neg(OptContext *ctx, TCGOp *op)
17
{
18
- return fold_const1(ctx, op);
19
+ if (fold_const1(ctx, op)) {
20
+ return true;
21
+ }
22
+ /*
23
+ * Because of fold_sub_to_neg, we want to always return true,
24
+ * via finish_folding.
25
+ */
26
+ finish_folding(ctx, op);
27
+ return true;
28
}
29
30
static bool fold_nor(OptContext *ctx, TCGOp *op)
31
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
32
return fold_const2(ctx, op);
33
}
34
35
+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
36
+{
37
+ TCGOpcode neg_op;
38
+ bool have_neg;
39
+
40
+ if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
41
+ return false;
42
+ }
43
+
44
+ switch (ctx->type) {
45
+ case TCG_TYPE_I32:
46
+ neg_op = INDEX_op_neg_i32;
47
+ have_neg = TCG_TARGET_HAS_neg_i32;
48
+ break;
49
+ case TCG_TYPE_I64:
50
+ neg_op = INDEX_op_neg_i64;
51
+ have_neg = TCG_TARGET_HAS_neg_i64;
52
+ break;
53
+ case TCG_TYPE_V64:
54
+ case TCG_TYPE_V128:
55
+ case TCG_TYPE_V256:
56
+ neg_op = INDEX_op_neg_vec;
57
+ have_neg = (TCG_TARGET_HAS_neg_vec &&
58
+ tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
+ }
63
+ if (have_neg) {
64
+ op->opc = neg_op;
65
+ op->args[1] = op->args[2];
66
+ return fold_neg(ctx, op);
67
+ }
68
+ return false;
69
+}
70
+
71
static bool fold_sub(OptContext *ctx, TCGOp *op)
72
{
73
if (fold_const2(ctx, op) ||
74
- fold_xx_to_i(ctx, op, 0)) {
75
+ fold_xx_to_i(ctx, op, 0) ||
76
+ fold_sub_to_neg(ctx, op)) {
77
return true;
78
}
79
return false;
80
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
81
continue;
82
}
83
break;
84
- CASE_OP_32_64_VEC(sub):
85
- {
86
- TCGOpcode neg_op;
87
- bool have_neg;
88
-
89
- if (arg_is_const(op->args[2])) {
90
- /* Proceed with possible constant folding. */
91
- break;
92
- }
93
- switch (ctx.type) {
94
- case TCG_TYPE_I32:
95
- neg_op = INDEX_op_neg_i32;
96
- have_neg = TCG_TARGET_HAS_neg_i32;
97
- break;
98
- case TCG_TYPE_I64:
99
- neg_op = INDEX_op_neg_i64;
100
- have_neg = TCG_TARGET_HAS_neg_i64;
101
- break;
102
- case TCG_TYPE_V64:
103
- case TCG_TYPE_V128:
104
- case TCG_TYPE_V256:
105
- neg_op = INDEX_op_neg_vec;
106
- have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
107
- TCGOP_VECE(op)) > 0;
108
- break;
109
- default:
110
- g_assert_not_reached();
111
- }
112
- if (!have_neg) {
113
- break;
114
- }
115
- if (arg_is_const(op->args[1])
116
- && arg_info(op->args[1])->val == 0) {
117
- op->opc = neg_op;
118
- reset_temp(op->args[0]);
119
- op->args[1] = op->args[2];
120
- continue;
121
- }
122
- }
123
- break;
124
default:
125
break;
126
}
127
--
128
2.25.1
129
130
New patch
1
Pull the "op r, a, i => mov r, a" optimization into a function,
2
and use it in the outer-most logical operations.
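For reference (illustrative only, not part of the patch), these are the "second operand is the identity element" cases that fold_xi_to_x captures:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0x0123456789abcdefull;

        assert((x + 0) == x && (x - 0) == x);     /* add/sub x, 0  -> x */
        assert((x | 0) == x && (x ^ 0) == x);     /* or/xor  x, 0  -> x */
        assert((x << 0) == x && (x >> 0) == x);   /* shifts  x, 0  -> x */
        assert((x & -1ull) == x);                 /* and     x, -1 -> x */
        assert((x | ~(-1ull)) == x);              /* orc     x, -1 -> x */
        assert(~(x ^ -1ull) == x);                /* eqv     x, -1 -> x */
        assert((x & ~0ull) == x);                 /* andc    x, 0  -> x */
        return 0;
    }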
1
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 61 +++++++++++++++++++++-----------------------------
8
1 file changed, 26 insertions(+), 35 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
15
return false;
16
}
17
18
+/* If the binary operation has second argument @i, fold to identity. */
19
+static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
20
+{
21
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
23
+ }
24
+ return false;
25
+}
26
+
27
/* If the binary operation has second argument @i, fold to NOT. */
28
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
29
{
30
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
31
32
static bool fold_add(OptContext *ctx, TCGOp *op)
33
{
34
- return fold_const2(ctx, op);
35
+ if (fold_const2(ctx, op) ||
36
+ fold_xi_to_x(ctx, op, 0)) {
37
+ return true;
38
+ }
39
+ return false;
40
}
41
42
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
43
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
44
{
45
if (fold_const2(ctx, op) ||
46
fold_xi_to_i(ctx, op, 0) ||
47
+ fold_xi_to_x(ctx, op, -1) ||
48
fold_xx_to_x(ctx, op)) {
49
return true;
50
}
51
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
52
{
53
if (fold_const2(ctx, op) ||
54
fold_xx_to_i(ctx, op, 0) ||
55
+ fold_xi_to_x(ctx, op, 0) ||
56
fold_ix_to_not(ctx, op, -1)) {
57
return true;
58
}
59
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
60
static bool fold_eqv(OptContext *ctx, TCGOp *op)
61
{
62
if (fold_const2(ctx, op) ||
63
+ fold_xi_to_x(ctx, op, -1) ||
64
fold_xi_to_not(ctx, op, 0)) {
65
return true;
66
}
67
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
68
static bool fold_or(OptContext *ctx, TCGOp *op)
69
{
70
if (fold_const2(ctx, op) ||
71
+ fold_xi_to_x(ctx, op, 0) ||
72
fold_xx_to_x(ctx, op)) {
73
return true;
74
}
75
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
76
static bool fold_orc(OptContext *ctx, TCGOp *op)
77
{
78
if (fold_const2(ctx, op) ||
79
+ fold_xi_to_x(ctx, op, -1) ||
80
fold_ix_to_not(ctx, op, 0)) {
81
return true;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
84
85
static bool fold_shift(OptContext *ctx, TCGOp *op)
86
{
87
- return fold_const2(ctx, op);
88
+ if (fold_const2(ctx, op) ||
89
+ fold_xi_to_x(ctx, op, 0)) {
90
+ return true;
91
+ }
92
+ return false;
93
}
94
95
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
96
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
97
{
98
if (fold_const2(ctx, op) ||
99
fold_xx_to_i(ctx, op, 0) ||
100
+ fold_xi_to_x(ctx, op, 0) ||
101
fold_sub_to_neg(ctx, op)) {
102
return true;
103
}
104
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
105
{
106
if (fold_const2(ctx, op) ||
107
fold_xx_to_i(ctx, op, 0) ||
108
+ fold_xi_to_x(ctx, op, 0) ||
109
fold_xi_to_not(ctx, op, -1)) {
110
return true;
111
}
112
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
113
break;
114
}
115
116
- /* Simplify expression for "op r, a, const => mov r, a" cases */
117
- switch (opc) {
118
- CASE_OP_32_64_VEC(add):
119
- CASE_OP_32_64_VEC(sub):
120
- CASE_OP_32_64_VEC(or):
121
- CASE_OP_32_64_VEC(xor):
122
- CASE_OP_32_64_VEC(andc):
123
- CASE_OP_32_64(shl):
124
- CASE_OP_32_64(shr):
125
- CASE_OP_32_64(sar):
126
- CASE_OP_32_64(rotl):
127
- CASE_OP_32_64(rotr):
128
- if (!arg_is_const(op->args[1])
129
- && arg_is_const(op->args[2])
130
- && arg_info(op->args[2])->val == 0) {
131
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
132
- continue;
133
- }
134
- break;
135
- CASE_OP_32_64_VEC(and):
136
- CASE_OP_32_64_VEC(orc):
137
- CASE_OP_32_64(eqv):
138
- if (!arg_is_const(op->args[1])
139
- && arg_is_const(op->args[2])
140
- && arg_info(op->args[2])->val == -1) {
141
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
142
- continue;
143
- }
144
- break;
145
- default:
146
- break;
147
- }
148
-
149
/* Simplify using known-zero bits. Currently only ops with a single
150
output argument is supported. */
151
z_mask = -1;
152
--
153
2.25.1
154
155
1
Pull the "op r, 0, b => movi r, 0" optimization into a function,
2
and use it in fold_shift.
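Again as an illustration only (not part of the patch): a zero first operand makes the shift or rotate count irrelevant, which is what fold_ix_to_i(ctx, op, 0) encodes for fold_shift.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (unsigned c = 0; c < 64; c++) {
            assert((0ull << c) == 0 && (0ull >> c) == 0);
        }
        return 0;
    }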
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
7
---
3
tcg/s390x/tcg-target-con-set.h | 1 +
8
tcg/optimize.c | 28 ++++++++++------------------
4
tcg/s390x/tcg-target.h | 2 +-
9
1 file changed, 10 insertions(+), 18 deletions(-)
5
tcg/s390x/tcg-target.c.inc | 20 ++++++++++++++++++++
6
3 files changed, 22 insertions(+), 1 deletion(-)
7
10
8
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/s390x/tcg-target-con-set.h
13
--- a/tcg/optimize.c
11
+++ b/tcg/s390x/tcg-target-con-set.h
14
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, ri)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
13
C_O1_I2(r, rZ, r)
16
return false;
14
C_O1_I2(v, v, r)
15
C_O1_I2(v, v, v)
16
+C_O1_I3(v, v, v, v)
17
C_O1_I4(r, r, ri, r, 0)
18
C_O1_I4(r, r, ri, rI, 0)
19
C_O2_I2(b, a, 0, r)
20
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/s390x/tcg-target.h
23
+++ b/tcg/s390x/tcg-target.h
24
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
25
#define TCG_TARGET_HAS_mul_vec 1
26
#define TCG_TARGET_HAS_sat_vec 0
27
#define TCG_TARGET_HAS_minmax_vec 1
28
-#define TCG_TARGET_HAS_bitsel_vec 0
29
+#define TCG_TARGET_HAS_bitsel_vec 1
30
#define TCG_TARGET_HAS_cmpsel_vec 0
31
32
/* used for function call generation */
33
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
34
index XXXXXXX..XXXXXXX 100644
35
--- a/tcg/s390x/tcg-target.c.inc
36
+++ b/tcg/s390x/tcg-target.c.inc
37
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
38
VRRa_VUPH = 0xe7d7,
39
VRRa_VUPL = 0xe7d6,
40
VRRc_VX = 0xe76d,
41
+ VRRe_VSEL = 0xe78d,
42
VRRf_VLVGP = 0xe762,
43
44
VRSa_VERLL = 0xe733,
45
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
46
tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
47
}
17
}
48
18
49
+static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op,
19
+/* If the binary operation has first argument @i, fold to @i. */
50
+ TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
20
+static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
51
+{
21
+{
52
+ tcg_debug_assert(is_vector_reg(v1));
22
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
53
+ tcg_debug_assert(is_vector_reg(v2));
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
54
+ tcg_debug_assert(is_vector_reg(v3));
24
+ }
55
+ tcg_debug_assert(is_vector_reg(v4));
25
+ return false;
56
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
57
+ tcg_out16(s, v3 << 12);
58
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12));
59
+}
26
+}
60
+
27
+
61
static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
28
/* If the binary operation has first argument @i, fold to NOT. */
62
TCGReg v1, TCGReg r2, TCGReg r3)
29
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
63
{
30
{
64
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
31
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
65
tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
32
static bool fold_shift(OptContext *ctx, TCGOp *op)
66
break;
33
{
67
34
if (fold_const2(ctx, op) ||
68
+ case INDEX_op_bitsel_vec:
35
+ fold_ix_to_i(ctx, op, 0) ||
69
+ tcg_out_insn(s, VRRe, VSEL, a0, a1, a2, args[3]);
36
fold_xi_to_x(ctx, op, 0)) {
70
+ break;
37
return true;
71
+
38
}
72
case INDEX_op_cmp_vec:
39
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
73
switch ((TCGCond)args[3]) {
40
break;
74
case TCG_COND_EQ:
41
}
75
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
42
76
case INDEX_op_add_vec:
43
- /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
77
case INDEX_op_and_vec:
44
- and "sub r, 0, a => neg r, a" case. */
78
case INDEX_op_andc_vec:
45
- switch (opc) {
79
+ case INDEX_op_bitsel_vec:
46
- CASE_OP_32_64(shl):
80
case INDEX_op_neg_vec:
47
- CASE_OP_32_64(shr):
81
case INDEX_op_not_vec:
48
- CASE_OP_32_64(sar):
82
case INDEX_op_or_vec:
49
- CASE_OP_32_64(rotl):
83
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
50
- CASE_OP_32_64(rotr):
84
case INDEX_op_shrs_vec:
51
- if (arg_is_const(op->args[1])
85
case INDEX_op_sars_vec:
52
- && arg_info(op->args[1])->val == 0) {
86
return C_O1_I2(v, v, r);
53
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
87
+ case INDEX_op_bitsel_vec:
54
- continue;
88
+ return C_O1_I3(v, v, v, v);
55
- }
89
56
- break;
90
default:
57
- default:
91
g_assert_not_reached();
58
- break;
59
- }
60
-
61
/* Simplify using known-zero bits. Currently only ops with a single
62
output argument is supported. */
63
z_mask = -1;
92
--
64
--
93
2.25.1
65
2.25.1
94
66
95
67
1
Implementing add, sub, and, or, xor as the minimal set.
1
Move all of the known-zero optimizations into the per-opcode
2
This allows us to actually enable vectors in query_s390_facilities.
2
functions. Use fold_masks when there is a possibility of the
3
result being determined, and simply set ctx->z_mask otherwise.
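A toy model (illustrative only; the real bookkeeping is in fold_masks below) of how the two masks are meant to be read: z_mask has a 0 wherever the result bit is known to be zero, and a_mask has a 0 wherever the result is known to equal the first operand.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t z1 = 0x00ff;        /* operand 1: only low 8 bits may be set */
        uint64_t z2 = 0xff00;        /* operand 2 is the constant 0xff00      */

        uint64_t z_mask = z1 & z2;   /* known-zero bits combine under AND     */
        uint64_t a_mask = z1 & ~z2;  /* operand-1 bits that the AND may clear */

        assert(z_mask == 0);         /* result provably zero -> movi r, 0     */
        assert(a_mask != 0);         /* a_mask == 0 would mean mov r, op1     */
        return 0;
    }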
3
4
4
Reviewed-by: David Hildenbrand <david@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
8
---
7
tcg/s390x/tcg-target.c.inc | 154 ++++++++++++++++++++++++++++++++++++-
9
tcg/optimize.c | 545 ++++++++++++++++++++++++++-----------------------
8
1 file changed, 150 insertions(+), 4 deletions(-)
10
1 file changed, 294 insertions(+), 251 deletions(-)
9
11
10
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/s390x/tcg-target.c.inc
14
--- a/tcg/optimize.c
13
+++ b/tcg/s390x/tcg-target.c.inc
15
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
16
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
15
VRIc_VREP = 0xe74d,
17
TCGTempSet temps_used;
16
18
17
VRRa_VLR = 0xe756,
19
/* In flight values from optimization. */
18
+ VRRc_VA = 0xe7f3,
20
- uint64_t z_mask;
19
+ VRRc_VCEQ = 0xe7f8, /* we leave the m5 cs field 0 */
21
+ uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
20
+ VRRc_VCH = 0xe7fb, /* " */
22
+ uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
21
+ VRRc_VCHL = 0xe7f9, /* " */
23
TCGType type;
22
+ VRRc_VN = 0xe768,
24
} OptContext;
23
+ VRRc_VO = 0xe76a,
25
24
+ VRRc_VS = 0xe7f7,
26
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
25
+ VRRc_VX = 0xe76d,
27
return false;
26
VRRf_VLVGP = 0xe762,
28
}
27
29
28
VRSb_VLVG = 0xe722,
30
+static bool fold_masks(OptContext *ctx, TCGOp *op)
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
30
tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
31
}
32
33
+static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
34
+ TCGReg v1, TCGReg v2, TCGReg v3, int m4)
35
+{
31
+{
36
+ tcg_debug_assert(is_vector_reg(v1));
32
+ uint64_t a_mask = ctx->a_mask;
37
+ tcg_debug_assert(is_vector_reg(v2));
33
+ uint64_t z_mask = ctx->z_mask;
38
+ tcg_debug_assert(is_vector_reg(v3));
34
+
39
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
35
+ /*
40
+ tcg_out16(s, v3 << 12);
36
+ * 32-bit ops generate 32-bit results. For the result is zero test
41
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
37
+ * below, we can ignore high bits, but for further optimizations we
38
+ * need to record that the high bits contain garbage.
39
+ */
40
+ if (ctx->type == TCG_TYPE_I32) {
41
+ ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
42
+ a_mask &= MAKE_64BIT_MASK(0, 32);
43
+ z_mask &= MAKE_64BIT_MASK(0, 32);
44
+ }
45
+
46
+ if (z_mask == 0) {
47
+ return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
48
+ }
49
+ if (a_mask == 0) {
50
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
51
+ }
52
+ return false;
42
+}
53
+}
43
+
54
+
44
static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
55
/*
45
TCGReg v1, TCGReg r2, TCGReg r3)
56
* Convert @op to NOT, if NOT is supported by the host.
46
{
57
* Return true f the conversion is successful, which will still
47
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
58
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
48
unsigned vecl, unsigned vece,
59
49
const TCGArg *args, const int *const_args)
60
static bool fold_and(OptContext *ctx, TCGOp *op)
50
{
61
{
51
- g_assert_not_reached();
62
+ uint64_t z1, z2;
52
+ TCGType type = vecl + TCG_TYPE_V64;
63
+
53
+ TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
64
if (fold_const2(ctx, op) ||
54
+
65
fold_xi_to_i(ctx, op, 0) ||
55
+ switch (opc) {
66
fold_xi_to_x(ctx, op, -1) ||
56
+ case INDEX_op_ld_vec:
67
fold_xx_to_x(ctx, op)) {
57
+ tcg_out_ld(s, type, a0, a1, a2);
68
return true;
58
+ break;
69
}
59
+ case INDEX_op_st_vec:
70
- return false;
60
+ tcg_out_st(s, type, a0, a1, a2);
71
+
61
+ break;
72
+ z1 = arg_info(op->args[1])->z_mask;
62
+ case INDEX_op_dupm_vec:
73
+ z2 = arg_info(op->args[2])->z_mask;
63
+ tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
74
+ ctx->z_mask = z1 & z2;
64
+ break;
75
+
65
+
76
+ /*
66
+ case INDEX_op_add_vec:
77
+ * Known-zeros does not imply known-ones. Therefore unless
67
+ tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
78
+ * arg2 is constant, we can't infer affected bits from it.
68
+ break;
79
+ */
69
+ case INDEX_op_sub_vec:
80
+ if (arg_is_const(op->args[2])) {
70
+ tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
81
+ ctx->a_mask = z1 & ~z2;
71
+ break;
82
+ }
72
+ case INDEX_op_and_vec:
83
+
73
+ tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
84
+ return fold_masks(ctx, op);
74
+ break;
85
}
75
+ case INDEX_op_or_vec:
86
76
+ tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
87
static bool fold_andc(OptContext *ctx, TCGOp *op)
77
+ break;
88
{
78
+ case INDEX_op_xor_vec:
89
+ uint64_t z1;
79
+ tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
90
+
80
+ break;
91
if (fold_const2(ctx, op) ||
81
+
92
fold_xx_to_i(ctx, op, 0) ||
82
+ case INDEX_op_cmp_vec:
93
fold_xi_to_x(ctx, op, 0) ||
83
+ switch ((TCGCond)args[3]) {
94
fold_ix_to_not(ctx, op, -1)) {
84
+ case TCG_COND_EQ:
95
return true;
85
+ tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
96
}
86
+ break;
97
- return false;
87
+ case TCG_COND_GT:
98
+
88
+ tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
99
+ z1 = arg_info(op->args[1])->z_mask;
89
+ break;
100
+
90
+ case TCG_COND_GTU:
101
+ /*
91
+ tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
102
+ * Known-zeros does not imply known-ones. Therefore unless
92
+ break;
103
+ * arg2 is constant, we can't infer anything from it.
93
+ default:
104
+ */
94
+ g_assert_not_reached();
105
+ if (arg_is_const(op->args[2])) {
95
+ }
106
+ uint64_t z2 = ~arg_info(op->args[2])->z_mask;
96
+ break;
107
+ ctx->a_mask = z1 & ~z2;
97
+
108
+ z1 &= z2;
98
+ case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
109
+ }
99
+ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
110
+ ctx->z_mask = z1;
111
+
112
+ return fold_masks(ctx, op);
113
}
114
115
static bool fold_brcond(OptContext *ctx, TCGOp *op)
116
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
117
118
static bool fold_bswap(OptContext *ctx, TCGOp *op)
119
{
120
+ uint64_t z_mask, sign;
121
+
122
if (arg_is_const(op->args[1])) {
123
uint64_t t = arg_info(op->args[1])->val;
124
125
t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
126
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
127
}
128
- return false;
129
+
130
+ z_mask = arg_info(op->args[1])->z_mask;
131
+ switch (op->opc) {
132
+ case INDEX_op_bswap16_i32:
133
+ case INDEX_op_bswap16_i64:
134
+ z_mask = bswap16(z_mask);
135
+ sign = INT16_MIN;
136
+ break;
137
+ case INDEX_op_bswap32_i32:
138
+ case INDEX_op_bswap32_i64:
139
+ z_mask = bswap32(z_mask);
140
+ sign = INT32_MIN;
141
+ break;
142
+ case INDEX_op_bswap64_i64:
143
+ z_mask = bswap64(z_mask);
144
+ sign = INT64_MIN;
145
+ break;
100
+ default:
146
+ default:
101
+ g_assert_not_reached();
147
+ g_assert_not_reached();
102
+ }
148
+ }
103
}
149
+
104
150
+ switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
105
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
151
+ case TCG_BSWAP_OZ:
106
{
152
+ break;
107
- return 0;
153
+ case TCG_BSWAP_OS:
108
+ switch (opc) {
154
+ /* If the sign bit may be 1, force all the bits above to 1. */
109
+ case INDEX_op_add_vec:
155
+ if (z_mask & sign) {
110
+ case INDEX_op_and_vec:
156
+ z_mask |= sign;
111
+ case INDEX_op_or_vec:
157
+ }
112
+ case INDEX_op_sub_vec:
158
+ break;
113
+ case INDEX_op_xor_vec:
114
+ return 1;
115
+ case INDEX_op_cmp_vec:
116
+ return -1;
117
+ default:
159
+ default:
118
+ return 0;
160
+ /* The high bits are undefined: force all bits above the sign to 1. */
119
+ }
161
+ z_mask |= sign << 1;
120
+}
162
+ break;
121
+
163
+ }
122
+static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
164
+ ctx->z_mask = z_mask;
123
+ TCGv_vec v1, TCGv_vec v2, TCGCond cond)
165
+
124
+{
166
+ return fold_masks(ctx, op);
125
+ bool need_swap = false, need_inv = false;
167
}
126
+
168
127
+ switch (cond) {
169
static bool fold_call(OptContext *ctx, TCGOp *op)
128
+ case TCG_COND_EQ:
170
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
129
+ case TCG_COND_GT:
171
130
+ case TCG_COND_GTU:
172
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
131
+ break;
173
{
132
+ case TCG_COND_NE:
174
+ uint64_t z_mask;
133
+ case TCG_COND_LE:
175
+
134
+ case TCG_COND_LEU:
176
if (arg_is_const(op->args[1])) {
135
+ need_inv = true;
177
uint64_t t = arg_info(op->args[1])->val;
136
+ break;
178
137
+ case TCG_COND_LT:
179
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
138
+ case TCG_COND_LTU:
180
}
139
+ need_swap = true;
181
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
140
+ break;
182
}
141
+ case TCG_COND_GE:
183
+
142
+ case TCG_COND_GEU:
184
+ switch (ctx->type) {
143
+ need_swap = need_inv = true;
185
+ case TCG_TYPE_I32:
186
+ z_mask = 31;
187
+ break;
188
+ case TCG_TYPE_I64:
189
+ z_mask = 63;
144
+ break;
190
+ break;
145
+ default:
191
+ default:
146
+ g_assert_not_reached();
192
+ g_assert_not_reached();
147
+ }
193
+ }
148
+
194
+ ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
149
+ if (need_inv) {
195
+
150
+ cond = tcg_invert_cond(cond);
196
return false;
151
+ }
197
}
152
+ if (need_swap) {
198
153
+ TCGv_vec t1;
199
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
154
+ t1 = v1, v1 = v2, v2 = t1;
200
{
155
+ cond = tcg_swap_cond(cond);
201
- return fold_const1(ctx, op);
156
+ }
202
+ if (fold_const1(ctx, op)) {
157
+
203
+ return true;
158
+ vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
204
+ }
159
+ tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
205
+
160
+
206
+ switch (ctx->type) {
161
+ return need_inv;
207
+ case TCG_TYPE_I32:
162
+}
208
+ ctx->z_mask = 32 | 31;
163
+
209
+ break;
164
+static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
210
+ case TCG_TYPE_I64:
165
+ TCGv_vec v1, TCGv_vec v2, TCGCond cond)
211
+ ctx->z_mask = 64 | 63;
166
+{
212
+ break;
167
+ if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
168
+ tcg_gen_not_vec(vece, v0, v0);
169
+ }
170
}
171
172
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
173
TCGArg a0, ...)
174
{
175
- g_assert_not_reached();
176
+ va_list va;
177
+ TCGv_vec v0, v1, v2;
178
+
179
+ va_start(va, a0);
180
+ v0 = temp_tcgv_vec(arg_temp(a0));
181
+ v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
182
+ v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
183
+
184
+ switch (opc) {
185
+ case INDEX_op_cmp_vec:
186
+ expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
187
+ break;
188
+
189
+ default:
213
+ default:
190
+ g_assert_not_reached();
214
+ g_assert_not_reached();
191
+ }
215
+ }
192
+ va_end(va);
216
+ return false;
193
}
217
}
194
218
195
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
219
static bool fold_deposit(OptContext *ctx, TCGOp *op)
196
@@ -XXX,XX +XXX,XX @@ static void query_s390_facilities(void)
220
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
197
* There is nothing else we currently care about in the 3rd word, so
221
t1 = deposit64(t1, op->args[3], op->args[4], t2);
198
* disable VECTOR with one store.
222
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
199
*/
223
}
200
- if (1 || !(hwcap & HWCAP_S390_VXRS)) {
224
+
201
+ if (!(hwcap & HWCAP_S390_VXRS)) {
225
+ ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
202
s390_facilities[2] = 0;
226
+ op->args[3], op->args[4],
203
}
227
+ arg_info(op->args[2])->z_mask);
204
}
228
return false;
229
}
230
231
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
232
233
static bool fold_extract(OptContext *ctx, TCGOp *op)
234
{
235
+ uint64_t z_mask_old, z_mask;
236
+
237
if (arg_is_const(op->args[1])) {
238
uint64_t t;
239
240
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
241
t = extract64(t, op->args[2], op->args[3]);
242
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
243
}
244
- return false;
245
+
246
+ z_mask_old = arg_info(op->args[1])->z_mask;
247
+ z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
248
+ if (op->args[2] == 0) {
249
+ ctx->a_mask = z_mask_old ^ z_mask;
250
+ }
251
+ ctx->z_mask = z_mask;
252
+
253
+ return fold_masks(ctx, op);
254
}
255
256
static bool fold_extract2(OptContext *ctx, TCGOp *op)
257
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
258
259
static bool fold_exts(OptContext *ctx, TCGOp *op)
260
{
261
- return fold_const1(ctx, op);
262
+ uint64_t z_mask_old, z_mask, sign;
263
+ bool type_change = false;
264
+
265
+ if (fold_const1(ctx, op)) {
266
+ return true;
267
+ }
268
+
269
+ z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
270
+
271
+ switch (op->opc) {
272
+ CASE_OP_32_64(ext8s):
273
+ sign = INT8_MIN;
274
+ z_mask = (uint8_t)z_mask;
275
+ break;
276
+ CASE_OP_32_64(ext16s):
277
+ sign = INT16_MIN;
278
+ z_mask = (uint16_t)z_mask;
279
+ break;
280
+ case INDEX_op_ext_i32_i64:
281
+ type_change = true;
282
+ QEMU_FALLTHROUGH;
283
+ case INDEX_op_ext32s_i64:
284
+ sign = INT32_MIN;
285
+ z_mask = (uint32_t)z_mask;
286
+ break;
287
+ default:
288
+ g_assert_not_reached();
289
+ }
290
+
291
+ if (z_mask & sign) {
292
+ z_mask |= sign;
293
+ } else if (!type_change) {
294
+ ctx->a_mask = z_mask_old ^ z_mask;
295
+ }
296
+ ctx->z_mask = z_mask;
297
+
298
+ return fold_masks(ctx, op);
299
}
300
301
static bool fold_extu(OptContext *ctx, TCGOp *op)
302
{
303
- return fold_const1(ctx, op);
304
+ uint64_t z_mask_old, z_mask;
305
+ bool type_change = false;
306
+
307
+ if (fold_const1(ctx, op)) {
308
+ return true;
309
+ }
310
+
311
+ z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
312
+
313
+ switch (op->opc) {
314
+ CASE_OP_32_64(ext8u):
315
+ z_mask = (uint8_t)z_mask;
316
+ break;
317
+ CASE_OP_32_64(ext16u):
318
+ z_mask = (uint16_t)z_mask;
319
+ break;
320
+ case INDEX_op_extrl_i64_i32:
321
+ case INDEX_op_extu_i32_i64:
322
+ type_change = true;
323
+ QEMU_FALLTHROUGH;
324
+ case INDEX_op_ext32u_i64:
325
+ z_mask = (uint32_t)z_mask;
326
+ break;
327
+ case INDEX_op_extrh_i64_i32:
328
+ type_change = true;
329
+ z_mask >>= 32;
330
+ break;
331
+ default:
332
+ g_assert_not_reached();
333
+ }
334
+
335
+ ctx->z_mask = z_mask;
336
+ if (!type_change) {
337
+ ctx->a_mask = z_mask_old ^ z_mask;
338
+ }
339
+ return fold_masks(ctx, op);
340
}
341
342
static bool fold_mb(OptContext *ctx, TCGOp *op)
343
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
344
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
345
}
346
347
+ ctx->z_mask = arg_info(op->args[3])->z_mask
348
+ | arg_info(op->args[4])->z_mask;
349
+
350
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
351
uint64_t tv = arg_info(op->args[3])->val;
352
uint64_t fv = arg_info(op->args[4])->val;
353
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
354
355
static bool fold_neg(OptContext *ctx, TCGOp *op)
356
{
357
+ uint64_t z_mask;
358
+
359
if (fold_const1(ctx, op)) {
360
return true;
361
}
362
+
363
+ /* Set to 1 all bits to the left of the rightmost. */
364
+ z_mask = arg_info(op->args[1])->z_mask;
365
+ ctx->z_mask = -(z_mask & -z_mask);
366
+
367
/*
368
* Because of fold_sub_to_neg, we want to always return true,
369
* via finish_folding.
370
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
371
fold_xx_to_x(ctx, op)) {
372
return true;
373
}
374
- return false;
375
+
376
+ ctx->z_mask = arg_info(op->args[1])->z_mask
377
+ | arg_info(op->args[2])->z_mask;
378
+ return fold_masks(ctx, op);
379
}
380
381
static bool fold_orc(OptContext *ctx, TCGOp *op)
382
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
383
384
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
385
{
386
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
387
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
388
+ MemOp mop = get_memop(oi);
389
+ int width = 8 * memop_size(mop);
390
+
391
+ if (!(mop & MO_SIGN) && width < 64) {
392
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
393
+ }
394
+
395
/* Opcodes that touch guest memory stop the mb optimization. */
396
ctx->prev_mb = NULL;
397
return false;
398
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
399
if (i >= 0) {
400
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
401
}
402
+
403
+ ctx->z_mask = 1;
404
return false;
405
}
406
407
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
408
op->opc = INDEX_op_setcond_i32;
409
break;
410
}
411
+
412
+ ctx->z_mask = 1;
413
return false;
414
415
do_setcond_const:
416
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
417
418
static bool fold_sextract(OptContext *ctx, TCGOp *op)
419
{
420
+ int64_t z_mask_old, z_mask;
421
+
422
if (arg_is_const(op->args[1])) {
423
uint64_t t;
424
425
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
426
t = sextract64(t, op->args[2], op->args[3]);
427
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
428
}
429
- return false;
430
+
431
+ z_mask_old = arg_info(op->args[1])->z_mask;
432
+ z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
433
+ if (op->args[2] == 0 && z_mask >= 0) {
434
+ ctx->a_mask = z_mask_old ^ z_mask;
435
+ }
436
+ ctx->z_mask = z_mask;
437
+
438
+ return fold_masks(ctx, op);
439
}
440
441
static bool fold_shift(OptContext *ctx, TCGOp *op)
442
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
443
fold_xi_to_x(ctx, op, 0)) {
444
return true;
445
}
446
+
447
+ if (arg_is_const(op->args[2])) {
448
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type,
449
+ arg_info(op->args[1])->z_mask,
450
+ arg_info(op->args[2])->val);
451
+ return fold_masks(ctx, op);
452
+ }
453
return false;
454
}
455
456
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
457
return fold_addsub2_i32(ctx, op, false);
458
}
459
460
+static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
461
+{
462
+ /* We can't do any folding with a load, but we can record bits. */
463
+ switch (op->opc) {
464
+ CASE_OP_32_64(ld8u):
465
+ ctx->z_mask = MAKE_64BIT_MASK(0, 8);
466
+ break;
467
+ CASE_OP_32_64(ld16u):
468
+ ctx->z_mask = MAKE_64BIT_MASK(0, 16);
469
+ break;
470
+ case INDEX_op_ld32u_i64:
471
+ ctx->z_mask = MAKE_64BIT_MASK(0, 32);
472
+ break;
473
+ default:
474
+ g_assert_not_reached();
475
+ }
476
+ return false;
477
+}
478
+
479
static bool fold_xor(OptContext *ctx, TCGOp *op)
480
{
481
if (fold_const2(ctx, op) ||
482
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
483
fold_xi_to_not(ctx, op, -1)) {
484
return true;
485
}
486
- return false;
487
+
488
+ ctx->z_mask = arg_info(op->args[1])->z_mask
489
+ | arg_info(op->args[2])->z_mask;
490
+ return fold_masks(ctx, op);
491
}
492
493
/* Propagate constants and copies, fold constant expressions. */
494
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
495
}
496
497
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
498
- uint64_t z_mask, partmask, affected, tmp;
499
TCGOpcode opc = op->opc;
500
const TCGOpDef *def;
501
bool done = false;
502
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
503
break;
504
}
505
506
- /* Simplify using known-zero bits. Currently only ops with a single
507
- output argument is supported. */
508
- z_mask = -1;
509
- affected = -1;
510
- switch (opc) {
511
- CASE_OP_32_64(ext8s):
512
- if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
513
- break;
514
- }
515
- QEMU_FALLTHROUGH;
516
- CASE_OP_32_64(ext8u):
517
- z_mask = 0xff;
518
- goto and_const;
519
- CASE_OP_32_64(ext16s):
520
- if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
521
- break;
522
- }
523
- QEMU_FALLTHROUGH;
524
- CASE_OP_32_64(ext16u):
525
- z_mask = 0xffff;
526
- goto and_const;
527
- case INDEX_op_ext32s_i64:
528
- if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
529
- break;
530
- }
531
- QEMU_FALLTHROUGH;
532
- case INDEX_op_ext32u_i64:
533
- z_mask = 0xffffffffU;
534
- goto and_const;
535
-
536
- CASE_OP_32_64(and):
537
- z_mask = arg_info(op->args[2])->z_mask;
538
- if (arg_is_const(op->args[2])) {
539
- and_const:
540
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
541
- }
542
- z_mask = arg_info(op->args[1])->z_mask & z_mask;
543
- break;
544
-
545
- case INDEX_op_ext_i32_i64:
546
- if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
547
- break;
548
- }
549
- QEMU_FALLTHROUGH;
550
- case INDEX_op_extu_i32_i64:
551
- /* We do not compute affected as it is a size changing op. */
552
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
553
- break;
554
-
555
- CASE_OP_32_64(andc):
556
- /* Known-zeros does not imply known-ones. Therefore unless
557
- op->args[2] is constant, we can't infer anything from it. */
558
- if (arg_is_const(op->args[2])) {
559
- z_mask = ~arg_info(op->args[2])->z_mask;
560
- goto and_const;
561
- }
562
- /* But we certainly know nothing outside args[1] may be set. */
563
- z_mask = arg_info(op->args[1])->z_mask;
564
- break;
565
-
566
- case INDEX_op_sar_i32:
567
- if (arg_is_const(op->args[2])) {
568
- tmp = arg_info(op->args[2])->val & 31;
569
- z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
570
- }
571
- break;
572
- case INDEX_op_sar_i64:
573
- if (arg_is_const(op->args[2])) {
574
- tmp = arg_info(op->args[2])->val & 63;
575
- z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
576
- }
577
- break;
578
-
579
- case INDEX_op_shr_i32:
580
- if (arg_is_const(op->args[2])) {
581
- tmp = arg_info(op->args[2])->val & 31;
582
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
583
- }
584
- break;
585
- case INDEX_op_shr_i64:
586
- if (arg_is_const(op->args[2])) {
587
- tmp = arg_info(op->args[2])->val & 63;
588
- z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
589
- }
590
- break;
591
-
592
- case INDEX_op_extrl_i64_i32:
593
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
594
- break;
595
- case INDEX_op_extrh_i64_i32:
596
- z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
597
- break;
598
-
599
- CASE_OP_32_64(shl):
600
- if (arg_is_const(op->args[2])) {
601
- tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
602
- z_mask = arg_info(op->args[1])->z_mask << tmp;
603
- }
604
- break;
605
-
606
- CASE_OP_32_64(neg):
607
- /* Set to 1 all bits to the left of the rightmost. */
608
- z_mask = -(arg_info(op->args[1])->z_mask
609
- & -arg_info(op->args[1])->z_mask);
610
- break;
611
-
612
- CASE_OP_32_64(deposit):
613
- z_mask = deposit64(arg_info(op->args[1])->z_mask,
614
- op->args[3], op->args[4],
615
- arg_info(op->args[2])->z_mask);
616
- break;
617
-
618
- CASE_OP_32_64(extract):
619
- z_mask = extract64(arg_info(op->args[1])->z_mask,
620
- op->args[2], op->args[3]);
621
- if (op->args[2] == 0) {
622
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
623
- }
624
- break;
625
- CASE_OP_32_64(sextract):
626
- z_mask = sextract64(arg_info(op->args[1])->z_mask,
627
- op->args[2], op->args[3]);
628
- if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
629
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
630
- }
631
- break;
632
-
633
- CASE_OP_32_64(or):
634
- CASE_OP_32_64(xor):
635
- z_mask = arg_info(op->args[1])->z_mask
636
- | arg_info(op->args[2])->z_mask;
637
- break;
638
-
639
- case INDEX_op_clz_i32:
640
- case INDEX_op_ctz_i32:
641
- z_mask = arg_info(op->args[2])->z_mask | 31;
642
- break;
643
-
644
- case INDEX_op_clz_i64:
645
- case INDEX_op_ctz_i64:
646
- z_mask = arg_info(op->args[2])->z_mask | 63;
647
- break;
648
-
649
- case INDEX_op_ctpop_i32:
650
- z_mask = 32 | 31;
651
- break;
652
- case INDEX_op_ctpop_i64:
653
- z_mask = 64 | 63;
654
- break;
655
-
656
- CASE_OP_32_64(setcond):
657
- case INDEX_op_setcond2_i32:
658
- z_mask = 1;
659
- break;
660
-
661
- CASE_OP_32_64(movcond):
662
- z_mask = arg_info(op->args[3])->z_mask
663
- | arg_info(op->args[4])->z_mask;
664
- break;
665
-
666
- CASE_OP_32_64(ld8u):
667
- z_mask = 0xff;
668
- break;
669
- CASE_OP_32_64(ld16u):
670
- z_mask = 0xffff;
671
- break;
672
- case INDEX_op_ld32u_i64:
673
- z_mask = 0xffffffffu;
674
- break;
675
-
676
- CASE_OP_32_64(qemu_ld):
677
- {
678
- MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
679
- MemOp mop = get_memop(oi);
680
- if (!(mop & MO_SIGN)) {
681
- z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
682
- }
683
- }
684
- break;
685
-
686
- CASE_OP_32_64(bswap16):
687
- z_mask = arg_info(op->args[1])->z_mask;
688
- if (z_mask <= 0xffff) {
689
- op->args[2] |= TCG_BSWAP_IZ;
690
- }
691
- z_mask = bswap16(z_mask);
692
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
693
- case TCG_BSWAP_OZ:
694
- break;
695
- case TCG_BSWAP_OS:
696
- z_mask = (int16_t)z_mask;
697
- break;
698
- default: /* undefined high bits */
699
- z_mask |= MAKE_64BIT_MASK(16, 48);
700
- break;
701
- }
702
- break;
703
-
704
- case INDEX_op_bswap32_i64:
705
- z_mask = arg_info(op->args[1])->z_mask;
706
- if (z_mask <= 0xffffffffu) {
707
- op->args[2] |= TCG_BSWAP_IZ;
708
- }
709
- z_mask = bswap32(z_mask);
710
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
711
- case TCG_BSWAP_OZ:
712
- break;
713
- case TCG_BSWAP_OS:
714
- z_mask = (int32_t)z_mask;
715
- break;
716
- default: /* undefined high bits */
717
- z_mask |= MAKE_64BIT_MASK(32, 32);
718
- break;
719
- }
720
- break;
721
-
722
- default:
723
- break;
724
- }
725
-
726
- /* 32-bit ops generate 32-bit results. For the result is zero test
727
- below, we can ignore high bits, but for further optimizations we
728
- need to record that the high bits contain garbage. */
729
- partmask = z_mask;
730
- if (ctx.type == TCG_TYPE_I32) {
731
- z_mask |= ~(tcg_target_ulong)0xffffffffu;
732
- partmask &= 0xffffffffu;
733
- affected &= 0xffffffffu;
734
- }
735
- ctx.z_mask = z_mask;
736
-
737
- if (partmask == 0) {
738
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
739
- continue;
740
- }
741
- if (affected == 0) {
742
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
743
- continue;
744
- }
745
+ /* Assume all bits affected, and no bits known zero. */
746
+ ctx.a_mask = -1;
747
+ ctx.z_mask = -1;
748
749
/*
750
* Process each opcode.
751
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
752
case INDEX_op_extrh_i64_i32:
753
done = fold_extu(&ctx, op);
754
break;
755
+ CASE_OP_32_64(ld8u):
756
+ CASE_OP_32_64(ld16u):
757
+ case INDEX_op_ld32u_i64:
758
+ done = fold_tcg_ld(&ctx, op);
759
+ break;
760
case INDEX_op_mb:
761
done = fold_mb(&ctx, op);
762
break;
205
--
763
--
206
2.25.1
764
2.25.1
207
765
208
766
New patch
1
Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
2
and muls2_i64.
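A small illustration (not part of the patch) of the double-width result that mulu2/muls2 produce, using the 32-bit unsigned case:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t a = 0xdeadbeefu, b = 0x12345678u;
        uint64_t p = (uint64_t)a * b;        /* full 64-bit product   */

        uint32_t lo = (uint32_t)p;           /* low half of the pair  */
        uint32_t hi = (uint32_t)(p >> 32);   /* high half of the pair */

        assert((((uint64_t)hi << 32) | lo) == p);
        return 0;
    }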
1
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
9
1 file changed, 35 insertions(+), 9 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
16
return false;
17
}
18
19
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
20
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
21
{
22
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
23
- uint32_t a = arg_info(op->args[2])->val;
24
- uint32_t b = arg_info(op->args[3])->val;
25
- uint64_t r = (uint64_t)a * b;
26
+ uint64_t a = arg_info(op->args[2])->val;
27
+ uint64_t b = arg_info(op->args[3])->val;
28
+ uint64_t h, l;
29
TCGArg rl, rh;
30
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
31
+ TCGOp *op2;
32
+
33
+ switch (op->opc) {
34
+ case INDEX_op_mulu2_i32:
35
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
36
+ h = (int32_t)(l >> 32);
37
+ l = (int32_t)l;
38
+ break;
39
+ case INDEX_op_muls2_i32:
40
+ l = (int64_t)(int32_t)a * (int32_t)b;
41
+ h = l >> 32;
42
+ l = (int32_t)l;
43
+ break;
44
+ case INDEX_op_mulu2_i64:
45
+ mulu64(&l, &h, a, b);
46
+ break;
47
+ case INDEX_op_muls2_i64:
48
+ muls64(&l, &h, a, b);
49
+ break;
50
+ default:
51
+ g_assert_not_reached();
52
+ }
53
54
rl = op->args[0];
55
rh = op->args[1];
56
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
57
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
58
+
59
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
60
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
61
+
62
+ tcg_opt_gen_movi(ctx, op, rl, l);
63
+ tcg_opt_gen_movi(ctx, op2, rh, h);
64
return true;
65
}
66
return false;
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
68
CASE_OP_32_64(muluh):
69
done = fold_mul_highpart(&ctx, op);
70
break;
71
- case INDEX_op_mulu2_i32:
72
- done = fold_mulu2_i32(&ctx, op);
73
+ CASE_OP_32_64(muls2):
74
+ CASE_OP_32_64(mulu2):
75
+ done = fold_multiply2(&ctx, op);
76
break;
77
CASE_OP_32_64(nand):
78
done = fold_nand(&ctx, op);
79
--
80
2.25.1
81
82
1
We will shortly use the MemOpIdx directly, but in the meantime
1
Rename to fold_addsub2.
2
re-compute the trace meminfo.
2
Use Int128 to implement the wider operation.
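For orientation only (not part of the patch): the 64+64-with-carry arithmetic that the Int128 helpers now express can be sketched directly:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t al = ~0ull, ah = 1;         /* 128-bit value (ah:al) */
        uint64_t bl = 1,     bh = 2;         /* 128-bit value (bh:bl) */

        uint64_t rl = al + bl;
        uint64_t rh = ah + bh + (rl < al);   /* carry out of the low half */

        assert(rl == 0 && rh == 4);
        return 0;
    }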
3
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
8
---
7
accel/tcg/atomic_template.h | 48 +++++++++++++++++------------------
9
tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++----------------
8
accel/tcg/atomic_common.c.inc | 30 +++++++++++-----------
10
1 file changed, 44 insertions(+), 21 deletions(-)
9
2 files changed, 39 insertions(+), 39 deletions(-)
10
11
11
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/atomic_template.h
14
--- a/tcg/optimize.c
14
+++ b/accel/tcg/atomic_template.h
15
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
16
@@ -XXX,XX +XXX,XX @@
16
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
17
*/
17
PAGE_READ | PAGE_WRITE, retaddr);
18
18
DATA_TYPE ret;
19
#include "qemu/osdep.h"
19
- uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
20
+#include "qemu/int128.h"
20
21
#include "tcg/tcg-op.h"
21
+ atomic_trace_rmw_pre(env, addr, oi);
22
#include "tcg-internal.h"
22
#if DATA_SIZE == 16
23
23
ret = atomic16_cmpxchg(haddr, cmpv, newv);
24
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
24
#else
25
return false;
25
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
26
#endif
27
ATOMIC_MMU_CLEANUP;
28
- atomic_trace_rmw_post(env, addr, info);
29
+ atomic_trace_rmw_post(env, addr, oi);
30
return ret;
31
}
26
}
32
27
33
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
28
-static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
34
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
29
+static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
35
PAGE_READ, retaddr);
30
{
36
DATA_TYPE val;
31
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
37
- uint16_t info = atomic_trace_ld_pre(env, addr, oi);
32
arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
38
33
- uint32_t al = arg_info(op->args[2])->val;
39
+ atomic_trace_ld_pre(env, addr, oi);
34
- uint32_t ah = arg_info(op->args[3])->val;
40
val = atomic16_read(haddr);
35
- uint32_t bl = arg_info(op->args[4])->val;
41
ATOMIC_MMU_CLEANUP;
36
- uint32_t bh = arg_info(op->args[5])->val;
42
- atomic_trace_ld_post(env, addr, info);
37
- uint64_t a = ((uint64_t)ah << 32) | al;
43
+ atomic_trace_ld_post(env, addr, oi);
38
- uint64_t b = ((uint64_t)bh << 32) | bl;
44
return val;
39
+ uint64_t al = arg_info(op->args[2])->val;
40
+ uint64_t ah = arg_info(op->args[3])->val;
41
+ uint64_t bl = arg_info(op->args[4])->val;
42
+ uint64_t bh = arg_info(op->args[5])->val;
43
TCGArg rl, rh;
44
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
45
+ TCGOp *op2;
46
47
- if (add) {
48
- a += b;
49
+ if (ctx->type == TCG_TYPE_I32) {
50
+ uint64_t a = deposit64(al, 32, 32, ah);
51
+ uint64_t b = deposit64(bl, 32, 32, bh);
52
+
53
+ if (add) {
54
+ a += b;
55
+ } else {
56
+ a -= b;
57
+ }
58
+
59
+ al = sextract64(a, 0, 32);
60
+ ah = sextract64(a, 32, 32);
61
} else {
62
- a -= b;
63
+ Int128 a = int128_make128(al, ah);
64
+ Int128 b = int128_make128(bl, bh);
65
+
66
+ if (add) {
67
+ a = int128_add(a, b);
68
+ } else {
69
+ a = int128_sub(a, b);
70
+ }
71
+
72
+ al = int128_getlo(a);
73
+ ah = int128_gethi(a);
74
}
75
76
rl = op->args[0];
77
rh = op->args[1];
78
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
79
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
80
+
81
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
82
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
83
+
84
+ tcg_opt_gen_movi(ctx, op, rl, al);
85
+ tcg_opt_gen_movi(ctx, op2, rh, ah);
86
return true;
87
}
88
return false;
45
}
89
}
46
90
47
@@ -XXX,XX +XXX,XX @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
91
-static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
92
+static bool fold_add2(OptContext *ctx, TCGOp *op)
48
{
93
{
49
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
94
- return fold_addsub2_i32(ctx, op, true);
50
PAGE_WRITE, retaddr);
95
+ return fold_addsub2(ctx, op, true);
51
- uint16_t info = atomic_trace_st_pre(env, addr, oi);
52
53
+ atomic_trace_st_pre(env, addr, oi);
54
atomic16_set(haddr, val);
55
ATOMIC_MMU_CLEANUP;
56
- atomic_trace_st_post(env, addr, info);
57
+ atomic_trace_st_post(env, addr, oi);
58
}
96
}
59
#endif
97
60
#else
98
static bool fold_and(OptContext *ctx, TCGOp *op)
61
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
99
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
62
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
100
return false;
63
PAGE_READ | PAGE_WRITE, retaddr);
64
DATA_TYPE ret;
65
- uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
66
67
+ atomic_trace_rmw_pre(env, addr, oi);
68
ret = qatomic_xchg__nocheck(haddr, val);
69
ATOMIC_MMU_CLEANUP;
70
- atomic_trace_rmw_post(env, addr, info);
71
+ atomic_trace_rmw_post(env, addr, oi);
72
return ret;
73
}
101
}
74
102
75
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
103
-static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
76
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
104
+static bool fold_sub2(OptContext *ctx, TCGOp *op)
77
PAGE_READ | PAGE_WRITE, retaddr); \
105
{
78
DATA_TYPE ret; \
106
- return fold_addsub2_i32(ctx, op, false);
79
- uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
107
+ return fold_addsub2(ctx, op, false);
80
+ atomic_trace_rmw_pre(env, addr, oi); \
81
ret = qatomic_##X(haddr, val); \
82
ATOMIC_MMU_CLEANUP; \
83
- atomic_trace_rmw_post(env, addr, info); \
84
+ atomic_trace_rmw_post(env, addr, oi); \
85
return ret; \
86
}
108
}
87
109
88
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
110
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
89
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
111
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
90
PAGE_READ | PAGE_WRITE, retaddr); \
112
CASE_OP_32_64_VEC(add):
91
XDATA_TYPE cmp, old, new, val = xval; \
113
done = fold_add(&ctx, op);
92
- uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
114
break;
93
+ atomic_trace_rmw_pre(env, addr, oi); \
115
- case INDEX_op_add2_i32:
94
smp_mb(); \
116
- done = fold_add2_i32(&ctx, op);
95
cmp = qatomic_read__nocheck(haddr); \
117
+ CASE_OP_32_64(add2):
96
do { \
118
+ done = fold_add2(&ctx, op);
97
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
119
break;
98
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
120
CASE_OP_32_64_VEC(and):
99
} while (cmp != old); \
121
done = fold_and(&ctx, op);
100
ATOMIC_MMU_CLEANUP; \
122
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
101
- atomic_trace_rmw_post(env, addr, info); \
123
CASE_OP_32_64_VEC(sub):
102
+ atomic_trace_rmw_post(env, addr, oi); \
124
done = fold_sub(&ctx, op);
103
return RET; \
125
break;
104
}
126
- case INDEX_op_sub2_i32:
105
127
- done = fold_sub2_i32(&ctx, op);
106
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
128
+ CASE_OP_32_64(sub2):
107
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
129
+ done = fold_sub2(&ctx, op);
108
PAGE_READ | PAGE_WRITE, retaddr);
130
break;
109
DATA_TYPE ret;
131
CASE_OP_32_64_VEC(xor):
110
- uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
132
done = fold_xor(&ctx, op);
111
112
+ atomic_trace_rmw_pre(env, addr, oi);
113
#if DATA_SIZE == 16
114
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
115
#else
116
ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
117
#endif
118
ATOMIC_MMU_CLEANUP;
119
- atomic_trace_rmw_post(env, addr, info);
120
+ atomic_trace_rmw_post(env, addr, oi);
121
return BSWAP(ret);
122
}
123
124
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
125
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
126
PAGE_READ, retaddr);
127
DATA_TYPE val;
128
- uint16_t info = atomic_trace_ld_pre(env, addr, oi);
129
130
+ atomic_trace_ld_pre(env, addr, oi);
131
val = atomic16_read(haddr);
132
ATOMIC_MMU_CLEANUP;
133
- atomic_trace_ld_post(env, addr, info);
134
+ atomic_trace_ld_post(env, addr, oi);
135
return BSWAP(val);
136
}
137
138
@@ -XXX,XX +XXX,XX @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
139
{
140
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
141
PAGE_WRITE, retaddr);
142
- uint16_t info = atomic_trace_st_pre(env, addr, oi);
143
144
+ atomic_trace_st_pre(env, addr, oi);
145
val = BSWAP(val);
146
atomic16_set(haddr, val);
147
ATOMIC_MMU_CLEANUP;
148
- atomic_trace_st_post(env, addr, info);
149
+ atomic_trace_st_post(env, addr, oi);
150
}
151
#endif
152
#else
153
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
154
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
155
PAGE_READ | PAGE_WRITE, retaddr);
156
ABI_TYPE ret;
157
- uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
158
159
+ atomic_trace_rmw_pre(env, addr, oi);
160
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
161
ATOMIC_MMU_CLEANUP;
162
- atomic_trace_rmw_post(env, addr, info);
163
+ atomic_trace_rmw_post(env, addr, oi);
164
return BSWAP(ret);
165
}
166
167
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
168
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
169
PAGE_READ | PAGE_WRITE, retaddr); \
170
DATA_TYPE ret; \
171
- uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
172
+ atomic_trace_rmw_pre(env, addr, oi); \
173
ret = qatomic_##X(haddr, BSWAP(val)); \
174
ATOMIC_MMU_CLEANUP; \
175
- atomic_trace_rmw_post(env, addr, info); \
176
+ atomic_trace_rmw_post(env, addr, oi); \
177
return BSWAP(ret); \
178
}
179
180
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
181
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
182
PAGE_READ | PAGE_WRITE, retaddr); \
183
XDATA_TYPE ldo, ldn, old, new, val = xval; \
184
- uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
185
+ atomic_trace_rmw_pre(env, addr, oi); \
186
smp_mb(); \
187
ldn = qatomic_read__nocheck(haddr); \
188
do { \
189
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
190
ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
191
} while (ldo != ldn); \
192
ATOMIC_MMU_CLEANUP; \
193
- atomic_trace_rmw_post(env, addr, info); \
194
+ atomic_trace_rmw_post(env, addr, oi); \
195
return RET; \
196
}
197
198
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
199
index XXXXXXX..XXXXXXX 100644
200
--- a/accel/tcg/atomic_common.c.inc
201
+++ b/accel/tcg/atomic_common.c.inc
202
@@ -XXX,XX +XXX,XX @@
203
* See the COPYING file in the top-level directory.
204
*/
205
206
-static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
207
- MemOpIdx oi)
208
+static void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
209
+ MemOpIdx oi)
210
{
211
CPUState *cpu = env_cpu(env);
212
uint16_t info = trace_mem_get_info(oi, false);
213
214
trace_guest_mem_before_exec(cpu, addr, info);
215
trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
216
-
217
- return info;
218
}
219
220
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
221
- uint16_t info)
222
+ MemOpIdx oi)
223
{
224
+ uint16_t info = trace_mem_get_info(oi, false);
225
+
226
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
227
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
228
}
229
230
#if HAVE_ATOMIC128
231
-static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
232
- MemOpIdx oi)
233
+static void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
234
+ MemOpIdx oi)
235
{
236
uint16_t info = trace_mem_get_info(oi, false);
237
238
trace_guest_mem_before_exec(env_cpu(env), addr, info);
239
-
240
- return info;
241
}
242
243
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
244
- uint16_t info)
245
+ MemOpIdx oi)
246
{
247
+ uint16_t info = trace_mem_get_info(oi, false);
248
+
249
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
250
}
251
252
-static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
253
- MemOpIdx oi)
254
+static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
255
+ MemOpIdx oi)
256
{
257
uint16_t info = trace_mem_get_info(oi, true);
258
259
trace_guest_mem_before_exec(env_cpu(env), addr, info);
260
-
261
- return info;
262
}
263
264
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
265
- uint16_t info)
266
+ MemOpIdx oi)
267
{
268
+ uint16_t info = trace_mem_get_info(oi, false);
269
+
270
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
271
}
272
#endif
273
--
133
--
274
2.25.1
134
2.25.1
275
135
276
136
1
For usadd, we only have to consider overflow. Since ~B + B == -1,
1
Most of these are handled by creating a fold_const2_commutative
2
the maximum value for A that saturates is ~B.
2
to handle all of the binary operators. The rest were already
3
3
handled on a case-by-case basis in the switch, and have their
4
For ussub, we only have to consider underflow. The minimum value
4
own fold function in which to place the call.
5
that saturates to 0 from A - B is B.
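
As an aside, the two expansions can be checked exhaustively on 8-bit lanes with
plain C; the helpers below are illustrative only, not QEMU code:

#include <assert.h>
#include <stdint.h>

static uint8_t usadd8(uint8_t a, uint8_t b)
{
    uint8_t t = (uint8_t)~b;            /* usadd(a, b) = min(a, ~b) + b */
    return (uint8_t)((a < t ? a : t) + b);
}

static uint8_t ussub8(uint8_t a, uint8_t b)
{
    uint8_t t = a > b ? a : b;          /* ussub(a, b) = max(a, b) - b */
    return (uint8_t)(t - b);
}

int main(void)
{
    for (int a = 0; a < 256; a++) {
        for (int b = 0; b < 256; b++) {
            assert(usadd8(a, b) == (a + b > 255 ? 255 : a + b));
            assert(ussub8(a, b) == (a - b < 0 ? 0 : a - b));
        }
    }
    return 0;
}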
5
6
6
We now have only one major switch on TCGOpcode.
7
8
Introduce NO_DEST and a block comment for swap_commutative in
9
order to make the handling of brcond and movcond opcodes cleaner.
10
11
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
13
---
9
tcg/tcg-op-vec.c | 37 +++++++++++++++++++++++++++++++++++--
14
tcg/optimize.c | 142 ++++++++++++++++++++++++-------------------------
10
1 file changed, 35 insertions(+), 2 deletions(-)
15
1 file changed, 70 insertions(+), 72 deletions(-)
11
16
12
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
17
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/tcg-op-vec.c
19
--- a/tcg/optimize.c
15
+++ b/tcg/tcg-op-vec.c
20
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list,
21
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
17
continue;
22
return -1;
18
}
19
break;
20
+ case INDEX_op_usadd_vec:
21
+ if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece) ||
22
+ tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) {
23
+ continue;
24
+ }
25
+ break;
26
+ case INDEX_op_ussub_vec:
27
+ if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece) ||
28
+ tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) {
29
+ continue;
30
+ }
31
+ break;
32
case INDEX_op_cmpsel_vec:
33
case INDEX_op_smin_vec:
34
case INDEX_op_smax_vec:
35
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
36
37
void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
38
{
39
- do_op3_nofail(vece, r, a, b, INDEX_op_usadd_vec);
40
+ if (!do_op3(vece, r, a, b, INDEX_op_usadd_vec)) {
41
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
42
+ TCGv_vec t = tcg_temp_new_vec_matching(r);
43
+
44
+ /* usadd(a, b) = min(a, ~b) + b */
45
+ tcg_gen_not_vec(vece, t, b);
46
+ tcg_gen_umin_vec(vece, t, t, a);
47
+ tcg_gen_add_vec(vece, r, t, b);
48
+
49
+ tcg_temp_free_vec(t);
50
+ tcg_swap_vecop_list(hold_list);
51
+ }
52
}
23
}
53
24
54
void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
25
+/**
55
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
26
+ * swap_commutative:
56
27
+ * @dest: TCGArg of the destination argument, or NO_DEST.
57
void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
28
+ * @p1: first paired argument
58
{
29
+ * @p2: second paired argument
59
- do_op3_nofail(vece, r, a, b, INDEX_op_ussub_vec);
30
+ *
60
+ if (!do_op3(vece, r, a, b, INDEX_op_ussub_vec)) {
31
+ * If *@p1 is a constant and *@p2 is not, swap.
61
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
32
+ * If *@p2 matches @dest, swap.
62
+ TCGv_vec t = tcg_temp_new_vec_matching(r);
33
+ * Return true if a swap was performed.
63
+
34
+ */
64
+ /* ussub(a, b) = max(a, b) - b */
35
+
65
+ tcg_gen_umax_vec(vece, t, a, b);
36
+#define NO_DEST temp_arg(NULL)
66
+ tcg_gen_sub_vec(vece, r, t, b);
37
+
67
+
38
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
68
+ tcg_temp_free_vec(t);
39
{
69
+ tcg_swap_vecop_list(hold_list);
40
TCGArg a1 = *p1, a2 = *p2;
70
+ }
41
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
42
return false;
71
}
43
}
72
44
73
static void do_minmax(unsigned vece, TCGv_vec r, TCGv_vec a,
45
+static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
46
+{
47
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
48
+ return fold_const2(ctx, op);
49
+}
50
+
51
static bool fold_masks(OptContext *ctx, TCGOp *op)
52
{
53
uint64_t a_mask = ctx->a_mask;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
55
56
static bool fold_add(OptContext *ctx, TCGOp *op)
57
{
58
- if (fold_const2(ctx, op) ||
59
+ if (fold_const2_commutative(ctx, op) ||
60
fold_xi_to_x(ctx, op, 0)) {
61
return true;
62
}
63
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
64
65
static bool fold_add2(OptContext *ctx, TCGOp *op)
66
{
67
+ /* Note that the high and low parts may be independently swapped. */
68
+ swap_commutative(op->args[0], &op->args[2], &op->args[4]);
69
+ swap_commutative(op->args[1], &op->args[3], &op->args[5]);
70
+
71
return fold_addsub2(ctx, op, true);
72
}
73
74
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
75
{
76
uint64_t z1, z2;
77
78
- if (fold_const2(ctx, op) ||
79
+ if (fold_const2_commutative(ctx, op) ||
80
fold_xi_to_i(ctx, op, 0) ||
81
fold_xi_to_x(ctx, op, -1) ||
82
fold_xx_to_x(ctx, op)) {
83
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
TCGCond cond = op->args[2];
87
- int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
88
+ int i;
89
90
+ if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
91
+ op->args[2] = cond = tcg_swap_cond(cond);
92
+ }
93
+
94
+ i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
95
if (i == 0) {
96
tcg_op_remove(ctx->tcg, op);
97
return true;
98
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
99
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
100
{
101
TCGCond cond = op->args[4];
102
- int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
103
TCGArg label = op->args[5];
104
- int inv = 0;
105
+ int i, inv = 0;
106
107
+ if (swap_commutative2(&op->args[0], &op->args[2])) {
108
+ op->args[4] = cond = tcg_swap_cond(cond);
109
+ }
110
+
111
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
112
if (i >= 0) {
113
goto do_brcond_const;
114
}
115
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
116
117
static bool fold_eqv(OptContext *ctx, TCGOp *op)
118
{
119
- if (fold_const2(ctx, op) ||
120
+ if (fold_const2_commutative(ctx, op) ||
121
fold_xi_to_x(ctx, op, -1) ||
122
fold_xi_to_not(ctx, op, 0)) {
123
return true;
124
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
125
static bool fold_movcond(OptContext *ctx, TCGOp *op)
126
{
127
TCGCond cond = op->args[5];
128
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
129
+ int i;
130
131
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
132
+ op->args[5] = cond = tcg_swap_cond(cond);
133
+ }
134
+ /*
135
+ * Canonicalize the "false" input reg to match the destination reg so
136
+ * that the tcg backend can implement a "move if true" operation.
137
+ */
138
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
139
+ op->args[5] = cond = tcg_invert_cond(cond);
140
+ }
141
+
142
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
143
if (i >= 0) {
144
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
145
}
146
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
147
148
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
149
{
150
- if (fold_const2(ctx, op) ||
151
+ if (fold_const2_commutative(ctx, op) ||
152
fold_xi_to_i(ctx, op, 0)) {
153
return true;
154
}
155
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
156
157
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
158
{
159
+ swap_commutative(op->args[0], &op->args[2], &op->args[3]);
160
+
161
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
162
uint64_t a = arg_info(op->args[2])->val;
163
uint64_t b = arg_info(op->args[3])->val;
164
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
165
166
static bool fold_nand(OptContext *ctx, TCGOp *op)
167
{
168
- if (fold_const2(ctx, op) ||
169
+ if (fold_const2_commutative(ctx, op) ||
170
fold_xi_to_not(ctx, op, -1)) {
171
return true;
172
}
173
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
174
175
static bool fold_nor(OptContext *ctx, TCGOp *op)
176
{
177
- if (fold_const2(ctx, op) ||
178
+ if (fold_const2_commutative(ctx, op) ||
179
fold_xi_to_not(ctx, op, 0)) {
180
return true;
181
}
182
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
183
184
static bool fold_or(OptContext *ctx, TCGOp *op)
185
{
186
- if (fold_const2(ctx, op) ||
187
+ if (fold_const2_commutative(ctx, op) ||
188
fold_xi_to_x(ctx, op, 0) ||
189
fold_xx_to_x(ctx, op)) {
190
return true;
191
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
192
static bool fold_setcond(OptContext *ctx, TCGOp *op)
193
{
194
TCGCond cond = op->args[3];
195
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
196
+ int i;
197
198
+ if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
199
+ op->args[3] = cond = tcg_swap_cond(cond);
200
+ }
201
+
202
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
203
if (i >= 0) {
204
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
205
}
206
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
207
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
208
{
209
TCGCond cond = op->args[5];
210
- int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
211
- int inv = 0;
212
+ int i, inv = 0;
213
214
+ if (swap_commutative2(&op->args[1], &op->args[3])) {
215
+ op->args[5] = cond = tcg_swap_cond(cond);
216
+ }
217
+
218
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
219
if (i >= 0) {
220
goto do_setcond_const;
221
}
222
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
223
224
static bool fold_xor(OptContext *ctx, TCGOp *op)
225
{
226
- if (fold_const2(ctx, op) ||
227
+ if (fold_const2_commutative(ctx, op) ||
228
fold_xx_to_i(ctx, op, 0) ||
229
fold_xi_to_x(ctx, op, 0) ||
230
fold_xi_to_not(ctx, op, -1)) {
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
ctx.type = TCG_TYPE_I32;
233
}
234
235
- /* For commutative operations make constant second argument */
236
- switch (opc) {
237
- CASE_OP_32_64_VEC(add):
238
- CASE_OP_32_64_VEC(mul):
239
- CASE_OP_32_64_VEC(and):
240
- CASE_OP_32_64_VEC(or):
241
- CASE_OP_32_64_VEC(xor):
242
- CASE_OP_32_64(eqv):
243
- CASE_OP_32_64(nand):
244
- CASE_OP_32_64(nor):
245
- CASE_OP_32_64(muluh):
246
- CASE_OP_32_64(mulsh):
247
- swap_commutative(op->args[0], &op->args[1], &op->args[2]);
248
- break;
249
- CASE_OP_32_64(brcond):
250
- if (swap_commutative(-1, &op->args[0], &op->args[1])) {
251
- op->args[2] = tcg_swap_cond(op->args[2]);
252
- }
253
- break;
254
- CASE_OP_32_64(setcond):
255
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
256
- op->args[3] = tcg_swap_cond(op->args[3]);
257
- }
258
- break;
259
- CASE_OP_32_64(movcond):
260
- if (swap_commutative(-1, &op->args[1], &op->args[2])) {
261
- op->args[5] = tcg_swap_cond(op->args[5]);
262
- }
263
- /* For movcond, we canonicalize the "false" input reg to match
264
- the destination reg so that the tcg backend can implement
265
- a "move if true" operation. */
266
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
267
- op->args[5] = tcg_invert_cond(op->args[5]);
268
- }
269
- break;
270
- CASE_OP_32_64(add2):
271
- swap_commutative(op->args[0], &op->args[2], &op->args[4]);
272
- swap_commutative(op->args[1], &op->args[3], &op->args[5]);
273
- break;
274
- CASE_OP_32_64(mulu2):
275
- CASE_OP_32_64(muls2):
276
- swap_commutative(op->args[0], &op->args[2], &op->args[3]);
277
- break;
278
- case INDEX_op_brcond2_i32:
279
- if (swap_commutative2(&op->args[0], &op->args[2])) {
280
- op->args[4] = tcg_swap_cond(op->args[4]);
281
- }
282
- break;
283
- case INDEX_op_setcond2_i32:
284
- if (swap_commutative2(&op->args[1], &op->args[3])) {
285
- op->args[5] = tcg_swap_cond(op->args[5]);
286
- }
287
- break;
288
- default:
289
- break;
290
- }
291
-
292
/* Assume all bits affected, and no bits known zero. */
293
ctx.a_mask = -1;
294
ctx.z_mask = -1;
74
--
295
--
75
2.25.1
296
2.25.1
76
297
77
298
1
These logical and arithmetic operations are optional but trivial.
1
This "garbage" setting pre-dates the addition of the type
2
changing opcodes INDEX_op_ext_i32_i64, INDEX_op_extu_i32_i64,
3
and INDEX_op_extr{l,h}_i64_i32.
2
4
3
Reviewed-by: David Hildenbrand <david@redhat.com>
5
So now we have definitive points at which to adjust z_mask
6
to eliminate such bits from the 32-bit operands.
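
As a toy illustration (assumptions mine, not code from the patch): a 32-bit
result keeps its mask sign-extended from bit 31, and only an explicit widening
such as extu_i32_i64 clears the high bits:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t z_mask = 0x00000000ffff00ffull;    /* known-zeros of a 32-bit op */

    /* Canonicalize as a sign-extended 32-bit quantity: bit 31 is set,
     * so the high 32 mask bits become all ones rather than "garbage". */
    z_mask = (uint64_t)(int32_t)z_mask;
    printf("as i32 result:      %016" PRIx64 "\n", z_mask);

    /* The type-changing opcode (extu_i32_i64) is the definitive point
     * at which those high bits are known to become zero. */
    z_mask &= 0xffffffffull;
    printf("after extu_i32_i64: %016" PRIx64 "\n", z_mask);
    return 0;
}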
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
11
---
6
tcg/s390x/tcg-target-con-set.h | 1 +
12
tcg/optimize.c | 35 ++++++++++++++++-------------------
7
tcg/s390x/tcg-target.h | 11 ++++++-----
13
1 file changed, 16 insertions(+), 19 deletions(-)
8
tcg/s390x/tcg-target.c.inc | 32 ++++++++++++++++++++++++++++++++
9
3 files changed, 39 insertions(+), 5 deletions(-)
10
14
11
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/s390x/tcg-target-con-set.h
17
--- a/tcg/optimize.c
14
+++ b/tcg/s390x/tcg-target-con-set.h
18
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ C_O0_I2(v, r)
19
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
16
C_O1_I1(r, L)
20
ti->is_const = true;
17
C_O1_I1(r, r)
21
ti->val = ts->val;
18
C_O1_I1(v, r)
22
ti->z_mask = ts->val;
19
+C_O1_I1(v, v)
23
- if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
20
C_O1_I1(v, vr)
24
- /* High bits of a 32-bit quantity are garbage. */
21
C_O1_I2(r, 0, ri)
25
- ti->z_mask |= ~0xffffffffull;
22
C_O1_I2(r, 0, rI)
26
- }
23
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
27
} else {
24
index XXXXXXX..XXXXXXX 100644
28
ti->is_const = false;
25
--- a/tcg/s390x/tcg-target.h
29
ti->z_mask = -1;
26
+++ b/tcg/s390x/tcg-target.h
30
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
27
@@ -XXX,XX +XXX,XX @@ typedef enum TCGReg {
31
TCGTemp *src_ts = arg_temp(src);
28
#define FACILITY_DISTINCT_OPS FACILITY_LOAD_ON_COND
32
TempOptInfo *di;
29
#define FACILITY_LOAD_ON_COND2 53
33
TempOptInfo *si;
30
#define FACILITY_VECTOR 129
34
- uint64_t z_mask;
31
+#define FACILITY_VECTOR_ENH1 135
35
TCGOpcode new_op;
32
36
33
extern uint64_t s390_facilities[3];
37
if (ts_are_copies(dst_ts, src_ts)) {
34
38
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
35
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
39
op->args[0] = dst;
36
#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
40
op->args[1] = src;
37
#define TCG_TARGET_HAS_v256 0
41
38
42
- z_mask = si->z_mask;
39
-#define TCG_TARGET_HAS_andc_vec 0
43
- if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
40
-#define TCG_TARGET_HAS_orc_vec 0
44
- /* High bits of the destination are now garbage. */
41
-#define TCG_TARGET_HAS_not_vec 0
45
- z_mask |= ~0xffffffffull;
42
-#define TCG_TARGET_HAS_neg_vec 0
46
- }
43
-#define TCG_TARGET_HAS_abs_vec 0
47
- di->z_mask = z_mask;
44
+#define TCG_TARGET_HAS_andc_vec 1
48
+ di->z_mask = si->z_mask;
45
+#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1)
49
46
+#define TCG_TARGET_HAS_not_vec 1
50
if (src_ts->type == dst_ts->type) {
47
+#define TCG_TARGET_HAS_neg_vec 1
51
TempOptInfo *ni = ts_info(si->next_copy);
48
+#define TCG_TARGET_HAS_abs_vec 1
52
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
49
#define TCG_TARGET_HAS_roti_vec 0
53
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
50
#define TCG_TARGET_HAS_rots_vec 0
54
TCGArg dst, uint64_t val)
51
#define TCG_TARGET_HAS_rotv_vec 0
55
{
52
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
56
- /* Convert movi to mov with constant temp. */
53
index XXXXXXX..XXXXXXX 100644
57
- TCGTemp *tv = tcg_constant_internal(ctx->type, val);
54
--- a/tcg/s390x/tcg-target.c.inc
58
+ TCGTemp *tv;
55
+++ b/tcg/s390x/tcg-target.c.inc
59
56
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
60
+ if (ctx->type == TCG_TYPE_I32) {
57
VRIb_VGM = 0xe746,
61
+ val = (int32_t)val;
58
VRIc_VREP = 0xe74d,
62
+ }
59
60
+ VRRa_VLC = 0xe7de,
61
+ VRRa_VLP = 0xe7df,
62
VRRa_VLR = 0xe756,
63
VRRc_VA = 0xe7f3,
64
VRRc_VCEQ = 0xe7f8, /* we leave the m5 cs field 0 */
65
VRRc_VCH = 0xe7fb, /* " */
66
VRRc_VCHL = 0xe7f9, /* " */
67
VRRc_VN = 0xe768,
68
+ VRRc_VNC = 0xe769,
69
+ VRRc_VNO = 0xe76b,
70
VRRc_VO = 0xe76a,
71
+ VRRc_VOC = 0xe76f,
72
VRRc_VS = 0xe7f7,
73
VRRc_VX = 0xe76d,
74
VRRf_VLVGP = 0xe762,
75
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
76
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
77
break;
78
79
+ case INDEX_op_abs_vec:
80
+ tcg_out_insn(s, VRRa, VLP, a0, a1, vece);
81
+ break;
82
+ case INDEX_op_neg_vec:
83
+ tcg_out_insn(s, VRRa, VLC, a0, a1, vece);
84
+ break;
85
+ case INDEX_op_not_vec:
86
+ tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0);
87
+ break;
88
+
63
+
89
case INDEX_op_add_vec:
64
+ /* Convert movi to mov with constant temp. */
90
tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
65
+ tv = tcg_constant_internal(ctx->type, val);
91
break;
66
init_ts_info(ctx, tv);
92
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
67
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
93
case INDEX_op_and_vec:
68
}
94
tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
69
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
95
break;
70
uint64_t z_mask = ctx->z_mask;
96
+ case INDEX_op_andc_vec:
71
97
+ tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0);
72
/*
98
+ break;
73
- * 32-bit ops generate 32-bit results. For the result is zero test
99
case INDEX_op_or_vec:
74
- * below, we can ignore high bits, but for further optimizations we
100
tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
75
- * need to record that the high bits contain garbage.
101
break;
76
+ * 32-bit ops generate 32-bit results, which for the purpose of
102
+ case INDEX_op_orc_vec:
77
+ * simplifying tcg are sign-extended. Certainly that's how we
103
+ tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0);
78
+ * represent our constants elsewhere. Note that the bits will
104
+ break;
79
+ * be reset properly for a 64-bit value when encountering the
105
case INDEX_op_xor_vec:
80
+ * type changing opcodes.
106
tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
81
*/
107
break;
82
if (ctx->type == TCG_TYPE_I32) {
108
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
83
- ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
109
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
84
- a_mask &= MAKE_64BIT_MASK(0, 32);
110
{
85
- z_mask &= MAKE_64BIT_MASK(0, 32);
111
switch (opc) {
86
+ a_mask = (int32_t)a_mask;
112
+ case INDEX_op_abs_vec:
87
+ z_mask = (int32_t)z_mask;
113
case INDEX_op_add_vec:
88
+ ctx->z_mask = z_mask;
114
case INDEX_op_and_vec:
89
}
115
+ case INDEX_op_andc_vec:
90
116
+ case INDEX_op_neg_vec:
91
if (z_mask == 0) {
117
+ case INDEX_op_not_vec:
118
case INDEX_op_or_vec:
119
+ case INDEX_op_orc_vec:
120
case INDEX_op_sub_vec:
121
case INDEX_op_xor_vec:
122
return 1;
123
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
124
return C_O1_I1(v, r);
125
case INDEX_op_dup_vec:
126
return C_O1_I1(v, vr);
127
+ case INDEX_op_abs_vec:
128
+ case INDEX_op_neg_vec:
129
+ case INDEX_op_not_vec:
130
+ return C_O1_I1(v, v);
131
case INDEX_op_add_vec:
132
case INDEX_op_sub_vec:
133
case INDEX_op_and_vec:
134
+ case INDEX_op_andc_vec:
135
case INDEX_op_or_vec:
136
+ case INDEX_op_orc_vec:
137
case INDEX_op_xor_vec:
138
case INDEX_op_cmp_vec:
139
return C_O1_I2(v, v, v);
140
--
92
--
141
2.25.1
93
2.25.1
142
94
143
95
1
They are rightly values in the same enumeration.
1
Recognize the constant function for or-complement.
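
The identity behind this fold is x | ~x == -1 for any x; a trivial standalone
check (illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t x = 0x1234abcd5678ef01ull;     /* arbitrary */
    assert((x | ~x) == UINT64_MAX);         /* orc(x, x) == -1 */
    return 0;
}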
2
2
3
Reviewed-by: David Hildenbrand <david@redhat.com>
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
7
---
6
tcg/s390x/tcg-target.h | 28 +++++++---------------------
8
tcg/optimize.c | 1 +
7
1 file changed, 7 insertions(+), 21 deletions(-)
9
1 file changed, 1 insertion(+)
8
10
9
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/s390x/tcg-target.h
13
--- a/tcg/optimize.c
12
+++ b/tcg/s390x/tcg-target.h
14
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
#define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
16
static bool fold_orc(OptContext *ctx, TCGOp *op)
15
16
typedef enum TCGReg {
17
- TCG_REG_R0 = 0,
18
- TCG_REG_R1,
19
- TCG_REG_R2,
20
- TCG_REG_R3,
21
- TCG_REG_R4,
22
- TCG_REG_R5,
23
- TCG_REG_R6,
24
- TCG_REG_R7,
25
- TCG_REG_R8,
26
- TCG_REG_R9,
27
- TCG_REG_R10,
28
- TCG_REG_R11,
29
- TCG_REG_R12,
30
- TCG_REG_R13,
31
- TCG_REG_R14,
32
- TCG_REG_R15
33
+ TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3,
34
+ TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7,
35
+ TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11,
36
+ TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15,
37
+
38
+ TCG_AREG0 = TCG_REG_R10,
39
+ TCG_REG_CALL_STACK = TCG_REG_R15
40
} TCGReg;
41
42
#define TCG_TARGET_NB_REGS 16
43
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[1];
44
#define TCG_TARGET_HAS_mulsh_i64 0
45
46
/* used for function call generation */
47
-#define TCG_REG_CALL_STACK        TCG_REG_R15
48
#define TCG_TARGET_STACK_ALIGN        8
49
#define TCG_TARGET_CALL_STACK_OFFSET    160
50
51
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[1];
52
53
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
54
55
-enum {
56
- TCG_AREG0 = TCG_REG_R10,
57
-};
58
-
59
static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
60
uintptr_t jmp_rw, uintptr_t addr)
61
{
17
{
18
if (fold_const2(ctx, op) ||
19
+ fold_xx_to_i(ctx, op, -1) ||
20
fold_xi_to_x(ctx, op, -1) ||
21
fold_ix_to_not(ctx, op, 0)) {
22
return true;
62
--
23
--
63
2.25.1
24
2.25.1
64
25
65
26
1
This emphasizes that we don't support s390, only 64-bit s390x hosts.
1
Recognize the identity function for low-part multiply.
2
2
3
Reviewed-by: Thomas Huth <thuth@redhat.com>
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: David Hildenbrand <david@redhat.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
meson.build | 2 --
8
tcg/optimize.c | 3 ++-
9
tcg/{s390 => s390x}/tcg-target-con-set.h | 0
9
1 file changed, 2 insertions(+), 1 deletion(-)
10
tcg/{s390 => s390x}/tcg-target-con-str.h | 0
11
tcg/{s390 => s390x}/tcg-target.h | 0
12
tcg/{s390 => s390x}/tcg-target.c.inc | 0
13
5 files changed, 2 deletions(-)
14
rename tcg/{s390 => s390x}/tcg-target-con-set.h (100%)
15
rename tcg/{s390 => s390x}/tcg-target-con-str.h (100%)
16
rename tcg/{s390 => s390x}/tcg-target.h (100%)
17
rename tcg/{s390 => s390x}/tcg-target.c.inc (100%)
18
10
19
diff --git a/meson.build b/meson.build
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
20
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
21
--- a/meson.build
13
--- a/tcg/optimize.c
22
+++ b/meson.build
14
+++ b/tcg/optimize.c
23
@@ -XXX,XX +XXX,XX @@ if not get_option('tcg').disabled()
15
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
24
tcg_arch = 'tci'
16
static bool fold_mul(OptContext *ctx, TCGOp *op)
25
elif config_host['ARCH'] == 'sparc64'
17
{
26
tcg_arch = 'sparc'
18
if (fold_const2(ctx, op) ||
27
- elif config_host['ARCH'] == 's390x'
19
- fold_xi_to_i(ctx, op, 0)) {
28
- tcg_arch = 's390'
20
+ fold_xi_to_i(ctx, op, 0) ||
29
elif config_host['ARCH'] in ['x86_64', 'x32']
21
+ fold_xi_to_x(ctx, op, 1)) {
30
tcg_arch = 'i386'
22
return true;
31
elif config_host['ARCH'] == 'ppc64'
23
}
32
diff --git a/tcg/s390/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
24
return false;
33
similarity index 100%
34
rename from tcg/s390/tcg-target-con-set.h
35
rename to tcg/s390x/tcg-target-con-set.h
36
diff --git a/tcg/s390/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h
37
similarity index 100%
38
rename from tcg/s390/tcg-target-con-str.h
39
rename to tcg/s390x/tcg-target-con-str.h
40
diff --git a/tcg/s390/tcg-target.h b/tcg/s390x/tcg-target.h
41
similarity index 100%
42
rename from tcg/s390/tcg-target.h
43
rename to tcg/s390x/tcg-target.h
44
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
45
similarity index 100%
46
rename from tcg/s390/tcg-target.c.inc
47
rename to tcg/s390x/tcg-target.c.inc
48
--
25
--
49
2.25.1
26
2.25.1
50
27
51
28
1
Despite the comment, the members were not kept at the end.
1
Recognize the identity function for division.
2
2
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
7
---
6
include/hw/core/cpu.h | 11 +++++++----
8
tcg/optimize.c | 6 +++++-
7
1 file changed, 7 insertions(+), 4 deletions(-)
9
1 file changed, 5 insertions(+), 1 deletion(-)
8
10
9
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
11
--- a/include/hw/core/cpu.h
13
--- a/tcg/optimize.c
12
+++ b/include/hw/core/cpu.h
14
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
15
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
14
ObjectClass *(*class_by_name)(const char *cpu_model);
16
15
void (*parse_features)(const char *typename, char *str, Error **errp);
17
static bool fold_divide(OptContext *ctx, TCGOp *op)
16
18
{
17
- int reset_dump_flags;
19
- return fold_const2(ctx, op);
18
bool (*has_work)(CPUState *cpu);
20
+ if (fold_const2(ctx, op) ||
19
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
21
+ fold_xi_to_x(ctx, op, 1)) {
20
uint8_t *buf, int len, bool is_write);
22
+ return true;
21
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
23
+ }
22
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
24
+ return false;
23
25
}
24
const char *deprecation_note;
26
25
- /* Keep non-pointer data at the end to minimize holes. */
27
static bool fold_dup(OptContext *ctx, TCGOp *op)
26
- int gdb_num_core_regs;
27
- bool gdb_stop_before_watchpoint;
28
struct AccelCPUClass *accel_cpu;
29
30
/* when system emulation is not available, this pointer is NULL */
31
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
32
* class data that depends on the accelerator, see accel/accel-common.c.
33
*/
34
void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);
35
+
36
+ /*
37
+ * Keep non-pointer data at the end to minimize holes.
38
+ */
39
+ int reset_dump_flags;
40
+ int gdb_num_core_regs;
41
+ bool gdb_stop_before_watchpoint;
42
};
43
44
/*
45
--
28
--
46
2.25.1
29
2.25.1
47
30
48
31
1
By using PKG_CONFIG_PATH instead of PKG_CONFIG_LIBDIR,
1
Recognize the constant function for remainder.
2
we were still including the 64-bit packages. Install
3
pcre-devel.i686 to fill a missing glib2 dependency.
4
2
5
By using --extra-cflags instead of --cpu, we
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
6
use the wrong probing during meson.
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 +++++-
8
1 file changed, 5 insertions(+), 1 deletion(-)
7
9
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
Reviewed-by: Richard W.M. Jones <rjones@redhat.com>
10
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
11
Message-Id: <20210930163636.721311-3-richard.henderson@linaro.org>
12
---
13
tests/docker/dockerfiles/fedora-i386-cross.docker | 5 +++--
14
1 file changed, 3 insertions(+), 2 deletions(-)
15
16
diff --git a/tests/docker/dockerfiles/fedora-i386-cross.docker b/tests/docker/dockerfiles/fedora-i386-cross.docker
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/tests/docker/dockerfiles/fedora-i386-cross.docker
12
--- a/tcg/optimize.c
19
+++ b/tests/docker/dockerfiles/fedora-i386-cross.docker
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ ENV PACKAGES \
14
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
21
glibc-static.i686 \
15
22
gnutls-devel.i686 \
16
static bool fold_remainder(OptContext *ctx, TCGOp *op)
23
nettle-devel.i686 \
17
{
24
+ pcre-devel.i686 \
18
- return fold_const2(ctx, op);
25
perl-Test-Harness \
19
+ if (fold_const2(ctx, op) ||
26
pixman-devel.i686 \
20
+ fold_xx_to_i(ctx, op, 0)) {
27
sysprof-capture-devel.i686 \
21
+ return true;
28
zlib-devel.i686
22
+ }
29
23
+ return false;
30
-ENV QEMU_CONFIGURE_OPTS --extra-cflags=-m32 --disable-vhost-user
24
}
31
-ENV PKG_CONFIG_PATH /usr/lib/pkgconfig
25
32
+ENV QEMU_CONFIGURE_OPTS --cpu=i386 --disable-vhost-user
26
static bool fold_setcond(OptContext *ctx, TCGOp *op)
33
+ENV PKG_CONFIG_LIBDIR /usr/lib/pkgconfig
34
35
RUN dnf update -y && dnf install -y $PACKAGES
36
RUN rpm -q $PACKAGES | sort > /packages.txt
37
--
27
--
38
2.25.1
28
2.25.1
39
29
40
30
1
Reviewed-by: David Hildenbrand <david@redhat.com>
1
Certain targets, like riscv, produce signed 32-bit results.
2
This can lead to lots of redundant extensions as values are
3
manipulated.
4
5
Begin by tracking only the obvious sign-extensions, and
6
converting them to simple copies when possible.
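
As a standalone sketch (using the compiler's clrsb builtin where QEMU uses
clrsb64), the s_mask of a known value is the left-aligned mask of its redundant
sign bits, mirroring the smask_from_value helper added below:

#include <inttypes.h>
#include <stdio.h>

/* Left-aligned mask covering the redundant sign-bit repetitions. */
static uint64_t smask_from_value(uint64_t value)
{
    int rep = __builtin_clrsbll((long long)value);
    return ~(~0ull >> rep);
}

int main(void)
{
    /* A sign-extended 32-bit quantity has at least 32 repetitions... */
    printf("%016" PRIx64 "\n", smask_from_value(0xffffffff80000001ull));
    /* ...while a small positive value has nearly 63 of them. */
    printf("%016" PRIx64 "\n", smask_from_value(0x0000000000000001ull));
    return 0;
}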
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
11
---
4
tcg/s390x/tcg-target-con-set.h | 1 +
12
tcg/optimize.c | 123 ++++++++++++++++++++++++++++++++++++++++---------
5
tcg/s390x/tcg-target.h | 12 ++---
13
1 file changed, 102 insertions(+), 21 deletions(-)
6
tcg/s390x/tcg-target.c.inc | 93 +++++++++++++++++++++++++++++++++-
14
7
3 files changed, 99 insertions(+), 7 deletions(-)
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
9
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
10
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/s390x/tcg-target-con-set.h
17
--- a/tcg/optimize.c
12
+++ b/tcg/s390x/tcg-target-con-set.h
18
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rI)
19
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
14
C_O1_I2(r, 0, rJ)
20
TCGTemp *next_copy;
15
C_O1_I2(r, r, ri)
21
uint64_t val;
16
C_O1_I2(r, rZ, r)
22
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
17
+C_O1_I2(v, v, r)
23
+ uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
18
C_O1_I2(v, v, v)
24
} TempOptInfo;
19
C_O1_I4(r, r, ri, r, 0)
25
20
C_O1_I4(r, r, ri, rI, 0)
26
typedef struct OptContext {
21
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
27
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
22
index XXXXXXX..XXXXXXX 100644
28
/* In flight values from optimization. */
23
--- a/tcg/s390x/tcg-target.h
29
uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
24
+++ b/tcg/s390x/tcg-target.h
30
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
25
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
31
+ uint64_t s_mask; /* mask of clrsb(value) bits */
26
#define TCG_TARGET_HAS_not_vec 1
32
TCGType type;
27
#define TCG_TARGET_HAS_neg_vec 1
33
} OptContext;
28
#define TCG_TARGET_HAS_abs_vec 1
34
29
-#define TCG_TARGET_HAS_roti_vec 0
35
+/* Calculate the smask for a specific value. */
30
-#define TCG_TARGET_HAS_rots_vec 0
36
+static uint64_t smask_from_value(uint64_t value)
31
-#define TCG_TARGET_HAS_rotv_vec 0
32
-#define TCG_TARGET_HAS_shi_vec 0
33
-#define TCG_TARGET_HAS_shs_vec 0
34
-#define TCG_TARGET_HAS_shv_vec 0
35
+#define TCG_TARGET_HAS_roti_vec 1
36
+#define TCG_TARGET_HAS_rots_vec 1
37
+#define TCG_TARGET_HAS_rotv_vec 1
38
+#define TCG_TARGET_HAS_shi_vec 1
39
+#define TCG_TARGET_HAS_shs_vec 1
40
+#define TCG_TARGET_HAS_shv_vec 1
41
#define TCG_TARGET_HAS_mul_vec 1
42
#define TCG_TARGET_HAS_sat_vec 0
43
#define TCG_TARGET_HAS_minmax_vec 0
44
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/s390x/tcg-target.c.inc
47
+++ b/tcg/s390x/tcg-target.c.inc
48
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
49
VRRc_VCEQ = 0xe7f8, /* we leave the m5 cs field 0 */
50
VRRc_VCH = 0xe7fb, /* " */
51
VRRc_VCHL = 0xe7f9, /* " */
52
+ VRRc_VERLLV = 0xe773,
53
+ VRRc_VESLV = 0xe770,
54
+ VRRc_VESRAV = 0xe77a,
55
+ VRRc_VESRLV = 0xe778,
56
VRRc_VML = 0xe7a2,
57
VRRc_VN = 0xe768,
58
VRRc_VNC = 0xe769,
59
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
60
VRRc_VX = 0xe76d,
61
VRRf_VLVGP = 0xe762,
62
63
+ VRSa_VERLL = 0xe733,
64
+ VRSa_VESL = 0xe730,
65
+ VRSa_VESRA = 0xe73a,
66
+ VRSa_VESRL = 0xe738,
67
VRSb_VLVG = 0xe722,
68
VRSc_VLGV = 0xe721,
69
70
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
71
tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
72
}
73
74
+static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
75
+ intptr_t d2, TCGReg b2, TCGReg v3, int m4)
76
+{
37
+{
77
+ tcg_debug_assert(is_vector_reg(v1));
38
+ int rep = clrsb64(value);
78
+ tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
39
+ return ~(~0ull >> rep);
79
+ tcg_debug_assert(is_general_reg(b2));
80
+ tcg_debug_assert(is_vector_reg(v3));
81
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
82
+ tcg_out16(s, b2 << 12 | d2);
83
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
84
+}
40
+}
85
+
41
+
86
static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
42
+/*
87
intptr_t d2, TCGReg b2, TCGReg r3, int m4)
43
+ * Calculate the smask for a given set of known-zeros.
88
{
44
+ * If there are lots of zeros on the left, we can consider the remainder
89
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
45
+ * an unsigned field, and thus the corresponding signed field is one bit
90
tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
46
+ * larger.
91
break;
47
+ */
92
48
+static uint64_t smask_from_zmask(uint64_t zmask)
93
+ case INDEX_op_shli_vec:
49
+{
94
+ tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
50
+ /*
95
+ break;
51
+ * Only the 0 bits are significant for zmask, thus the msb itself
96
+ case INDEX_op_shri_vec:
52
+ * must be zero, else we have no sign information.
97
+ tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
53
+ */
98
+ break;
54
+ int rep = clz64(zmask);
99
+ case INDEX_op_sari_vec:
55
+ if (rep == 0) {
100
+ tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
56
+ return 0;
101
+ break;
57
+ }
102
+ case INDEX_op_rotli_vec:
58
+ rep -= 1;
103
+ tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
59
+ return ~(~0ull >> rep);
104
+ break;
60
+}
105
+ case INDEX_op_shls_vec:
61
+
106
+ tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
62
static inline TempOptInfo *ts_info(TCGTemp *ts)
107
+ break;
63
{
108
+ case INDEX_op_shrs_vec:
64
return ts->state_ptr;
109
+ tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
65
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
110
+ break;
66
ti->prev_copy = ts;
111
+ case INDEX_op_sars_vec:
67
ti->is_const = false;
112
+ tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
68
ti->z_mask = -1;
113
+ break;
69
+ ti->s_mask = 0;
114
+ case INDEX_op_rotls_vec:
70
}
115
+ tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
71
116
+ break;
72
static void reset_temp(TCGArg arg)
117
+ case INDEX_op_shlv_vec:
73
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
118
+ tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
74
ti->is_const = true;
119
+ break;
75
ti->val = ts->val;
120
+ case INDEX_op_shrv_vec:
76
ti->z_mask = ts->val;
121
+ tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
77
+ ti->s_mask = smask_from_value(ts->val);
122
+ break;
78
} else {
123
+ case INDEX_op_sarv_vec:
79
ti->is_const = false;
124
+ tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
80
ti->z_mask = -1;
125
+ break;
81
+ ti->s_mask = 0;
126
+ case INDEX_op_rotlv_vec:
82
}
127
+ tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
83
}
128
+ break;
84
129
+
85
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
130
case INDEX_op_cmp_vec:
86
op->args[1] = src;
131
switch ((TCGCond)args[3]) {
87
132
case TCG_COND_EQ:
88
di->z_mask = si->z_mask;
133
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
89
+ di->s_mask = si->s_mask;
134
case INDEX_op_not_vec:
90
135
case INDEX_op_or_vec:
91
if (src_ts->type == dst_ts->type) {
136
case INDEX_op_orc_vec:
92
TempOptInfo *ni = ts_info(si->next_copy);
137
+ case INDEX_op_rotli_vec:
93
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
138
+ case INDEX_op_rotls_vec:
94
139
+ case INDEX_op_rotlv_vec:
95
nb_oargs = def->nb_oargs;
140
+ case INDEX_op_sari_vec:
96
for (i = 0; i < nb_oargs; i++) {
141
+ case INDEX_op_sars_vec:
97
- reset_temp(op->args[i]);
142
+ case INDEX_op_sarv_vec:
98
+ TCGTemp *ts = arg_temp(op->args[i]);
143
+ case INDEX_op_shli_vec:
99
+ reset_ts(ts);
144
+ case INDEX_op_shls_vec:
100
/*
145
+ case INDEX_op_shlv_vec:
101
- * Save the corresponding known-zero bits mask for the
146
+ case INDEX_op_shri_vec:
102
+ * Save the corresponding known-zero/sign bits mask for the
147
+ case INDEX_op_shrs_vec:
103
* first output argument (only one supported so far).
148
+ case INDEX_op_shrv_vec:
104
*/
149
case INDEX_op_sub_vec:
105
if (i == 0) {
150
case INDEX_op_xor_vec:
106
- arg_info(op->args[i])->z_mask = ctx->z_mask;
151
return 1;
107
+ ts_info(ts)->z_mask = ctx->z_mask;
152
case INDEX_op_cmp_vec:
108
+ ts_info(ts)->s_mask = ctx->s_mask;
153
+ case INDEX_op_rotrv_vec:
109
}
154
return -1;
110
}
155
case INDEX_op_mul_vec:
111
}
156
return vece < MO_64;
112
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
157
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
113
{
158
TCGArg a0, ...)
114
uint64_t a_mask = ctx->a_mask;
159
{
115
uint64_t z_mask = ctx->z_mask;
160
va_list va;
116
+ uint64_t s_mask = ctx->s_mask;
161
- TCGv_vec v0, v1, v2;
117
162
+ TCGv_vec v0, v1, v2, t0;
118
/*
163
119
* 32-bit ops generate 32-bit results, which for the purpose of
164
va_start(va, a0);
120
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
165
v0 = temp_tcgv_vec(arg_temp(a0));
121
if (ctx->type == TCG_TYPE_I32) {
166
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
122
a_mask = (int32_t)a_mask;
167
expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
123
z_mask = (int32_t)z_mask;
168
break;
124
+ s_mask |= MAKE_64BIT_MASK(32, 32);
169
125
ctx->z_mask = z_mask;
170
+ case INDEX_op_rotrv_vec:
126
+ ctx->s_mask = s_mask;
171
+ t0 = tcg_temp_new_vec(type);
127
}
172
+ tcg_gen_neg_vec(vece, t0, v2);
128
173
+ tcg_gen_rotlv_vec(vece, v0, v1, t0);
129
if (z_mask == 0) {
174
+ tcg_temp_free_vec(t0);
130
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
175
+ break;
131
176
+
132
static bool fold_bswap(OptContext *ctx, TCGOp *op)
133
{
134
- uint64_t z_mask, sign;
135
+ uint64_t z_mask, s_mask, sign;
136
137
if (arg_is_const(op->args[1])) {
138
uint64_t t = arg_info(op->args[1])->val;
139
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
140
}
141
142
z_mask = arg_info(op->args[1])->z_mask;
143
+
144
switch (op->opc) {
145
case INDEX_op_bswap16_i32:
146
case INDEX_op_bswap16_i64:
147
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
177
default:
148
default:
178
g_assert_not_reached();
149
g_assert_not_reached();
179
}
150
}
180
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
151
+ s_mask = smask_from_zmask(z_mask);
181
case INDEX_op_abs_vec:
152
182
case INDEX_op_neg_vec:
153
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
183
case INDEX_op_not_vec:
154
case TCG_BSWAP_OZ:
184
+ case INDEX_op_rotli_vec:
155
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
185
+ case INDEX_op_sari_vec:
156
/* If the sign bit may be 1, force all the bits above to 1. */
186
+ case INDEX_op_shli_vec:
157
if (z_mask & sign) {
187
+ case INDEX_op_shri_vec:
158
z_mask |= sign;
188
return C_O1_I1(v, v);
159
+ s_mask = sign << 1;
189
case INDEX_op_add_vec:
160
}
190
case INDEX_op_sub_vec:
161
break;
191
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
162
default:
192
case INDEX_op_xor_vec:
163
/* The high bits are undefined: force all bits above the sign to 1. */
193
case INDEX_op_cmp_vec:
164
z_mask |= sign << 1;
194
case INDEX_op_mul_vec:
165
+ s_mask = 0;
195
+ case INDEX_op_rotlv_vec:
166
break;
196
+ case INDEX_op_rotrv_vec:
167
}
197
+ case INDEX_op_shlv_vec:
168
ctx->z_mask = z_mask;
198
+ case INDEX_op_shrv_vec:
169
+ ctx->s_mask = s_mask;
199
+ case INDEX_op_sarv_vec:
170
200
return C_O1_I2(v, v, v);
171
return fold_masks(ctx, op);
201
+ case INDEX_op_rotls_vec:
172
}
202
+ case INDEX_op_shls_vec:
173
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
203
+ case INDEX_op_shrs_vec:
174
static bool fold_extract(OptContext *ctx, TCGOp *op)
204
+ case INDEX_op_sars_vec:
175
{
205
+ return C_O1_I2(v, v, r);
176
uint64_t z_mask_old, z_mask;
206
177
+ int pos = op->args[2];
178
+ int len = op->args[3];
179
180
if (arg_is_const(op->args[1])) {
181
uint64_t t;
182
183
t = arg_info(op->args[1])->val;
184
- t = extract64(t, op->args[2], op->args[3]);
185
+ t = extract64(t, pos, len);
186
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
187
}
188
189
z_mask_old = arg_info(op->args[1])->z_mask;
190
- z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
191
- if (op->args[2] == 0) {
192
+ z_mask = extract64(z_mask_old, pos, len);
193
+ if (pos == 0) {
194
ctx->a_mask = z_mask_old ^ z_mask;
195
}
196
ctx->z_mask = z_mask;
197
+ ctx->s_mask = smask_from_zmask(z_mask);
198
199
return fold_masks(ctx, op);
200
}
201
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
202
203
static bool fold_exts(OptContext *ctx, TCGOp *op)
204
{
205
- uint64_t z_mask_old, z_mask, sign;
206
+ uint64_t s_mask_old, s_mask, z_mask, sign;
207
bool type_change = false;
208
209
if (fold_const1(ctx, op)) {
210
return true;
211
}
212
213
- z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
214
+ z_mask = arg_info(op->args[1])->z_mask;
215
+ s_mask = arg_info(op->args[1])->s_mask;
216
+ s_mask_old = s_mask;
217
218
switch (op->opc) {
219
CASE_OP_32_64(ext8s):
220
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
221
222
if (z_mask & sign) {
223
z_mask |= sign;
224
- } else if (!type_change) {
225
- ctx->a_mask = z_mask_old ^ z_mask;
226
}
227
+ s_mask |= sign << 1;
228
+
229
ctx->z_mask = z_mask;
230
+ ctx->s_mask = s_mask;
231
+ if (!type_change) {
232
+ ctx->a_mask = s_mask & ~s_mask_old;
233
+ }
234
235
return fold_masks(ctx, op);
236
}
237
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
238
}
239
240
ctx->z_mask = z_mask;
241
+ ctx->s_mask = smask_from_zmask(z_mask);
242
if (!type_change) {
243
ctx->a_mask = z_mask_old ^ z_mask;
244
}
245
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
246
MemOp mop = get_memop(oi);
247
int width = 8 * memop_size(mop);
248
249
- if (!(mop & MO_SIGN) && width < 64) {
250
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
251
+ if (width < 64) {
252
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
253
+ if (!(mop & MO_SIGN)) {
254
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
255
+ ctx->s_mask <<= 1;
256
+ }
257
}
258
259
/* Opcodes that touch guest memory stop the mb optimization. */
260
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
261
262
static bool fold_sextract(OptContext *ctx, TCGOp *op)
263
{
264
- int64_t z_mask_old, z_mask;
265
+ uint64_t z_mask, s_mask, s_mask_old;
266
+ int pos = op->args[2];
267
+ int len = op->args[3];
268
269
if (arg_is_const(op->args[1])) {
270
uint64_t t;
271
272
t = arg_info(op->args[1])->val;
273
- t = sextract64(t, op->args[2], op->args[3]);
274
+ t = sextract64(t, pos, len);
275
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
276
}
277
278
- z_mask_old = arg_info(op->args[1])->z_mask;
279
- z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
280
- if (op->args[2] == 0 && z_mask >= 0) {
281
- ctx->a_mask = z_mask_old ^ z_mask;
282
- }
283
+ z_mask = arg_info(op->args[1])->z_mask;
284
+ z_mask = sextract64(z_mask, pos, len);
285
ctx->z_mask = z_mask;
286
287
+ s_mask_old = arg_info(op->args[1])->s_mask;
288
+ s_mask = sextract64(s_mask_old, pos, len);
289
+ s_mask |= MAKE_64BIT_MASK(len, 64 - len);
290
+ ctx->s_mask = s_mask;
291
+
292
+ if (pos == 0) {
293
+ ctx->a_mask = s_mask & ~s_mask_old;
294
+ }
295
+
296
return fold_masks(ctx, op);
297
}
298
299
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
300
{
301
/* We can't do any folding with a load, but we can record bits. */
302
switch (op->opc) {
303
+ CASE_OP_32_64(ld8s):
304
+ ctx->s_mask = MAKE_64BIT_MASK(8, 56);
305
+ break;
306
CASE_OP_32_64(ld8u):
307
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
308
+ ctx->s_mask = MAKE_64BIT_MASK(9, 55);
309
+ break;
310
+ CASE_OP_32_64(ld16s):
311
+ ctx->s_mask = MAKE_64BIT_MASK(16, 48);
312
break;
313
CASE_OP_32_64(ld16u):
314
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
315
+ ctx->s_mask = MAKE_64BIT_MASK(17, 47);
316
+ break;
317
+ case INDEX_op_ld32s_i64:
318
+ ctx->s_mask = MAKE_64BIT_MASK(32, 32);
319
break;
320
case INDEX_op_ld32u_i64:
321
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
322
+ ctx->s_mask = MAKE_64BIT_MASK(33, 31);
323
break;
207
default:
324
default:
208
g_assert_not_reached();
325
g_assert_not_reached();
326
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
327
ctx.type = TCG_TYPE_I32;
328
}
329
330
- /* Assume all bits affected, and no bits known zero. */
331
+ /* Assume all bits affected, no bits known zero, no sign reps. */
332
ctx.a_mask = -1;
333
ctx.z_mask = -1;
334
+ ctx.s_mask = 0;
335
336
/*
337
* Process each opcode.
338
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
339
case INDEX_op_extrh_i64_i32:
340
done = fold_extu(&ctx, op);
341
break;
342
+ CASE_OP_32_64(ld8s):
343
CASE_OP_32_64(ld8u):
344
+ CASE_OP_32_64(ld16s):
345
CASE_OP_32_64(ld16u):
346
+ case INDEX_op_ld32s_i64:
347
case INDEX_op_ld32u_i64:
348
done = fold_tcg_ld(&ctx, op);
349
break;
209
--
350
--
210
2.25.1
351
2.25.1
211
352
212
353
diff view generated by jsdifflib
1
We are already inconsistent about whether or not
1
Sign repetitions are perforce all identical, whether they are 1 or 0.
2
MO_SIGN is set in trace_mem_get_info. Dropping it
2
Bitwise operations preserve the relative quantity of the repetitions.
3
entirely allows some simplification.
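As a rough standalone sketch (plain C, not part of either patch), the following program illustrates the sign-repetition claim above: model an s_mask as a left-aligned run of bits known to equal the sign bit, and observe that AND/OR/XOR of two values never have fewer sign repetitions than the intersection of the two operands' masks. The helpers sign_reps() and left_mask() are illustrative names only.

    #include <stdint.h>
    #include <stdio.h>

    /* Leading bits equal to the sign bit, not counting bit 63 (GCC-style builtin). */
    static int sign_reps(int64_t v)
    {
        return __builtin_clrsbll(v);
    }

    /* Illustrative helper: left-aligned mask of n one bits. */
    static uint64_t left_mask(int n)
    {
        return n ? ~(~0ull >> n) : 0;
    }

    int main(void)
    {
        int64_t a = (int64_t)0xffffffffffff1234ull;  /* 47 sign repetitions */
        int64_t b = 0x7fff;                          /* 48 sign repetitions */
        uint64_t s_and = left_mask(sign_reps(a)) & left_mask(sign_reps(b));

        printf("guaranteed %d, and %d, or %d, xor %d\n",
               __builtin_popcountll(s_and),
               sign_reps(a & b), sign_reps(a | b), sign_reps(a ^ b));
        /* prints: guaranteed 47, and 50, or 47, xor 47 */
        return 0;
    }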
4
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
accel/tcg/cputlb.c | 10 +++-------
9
tcg/optimize.c | 29 +++++++++++++++++++++++++++++
9
accel/tcg/user-exec.c | 45 ++++++-------------------------------------
10
1 file changed, 29 insertions(+)
10
2 files changed, 9 insertions(+), 46 deletions(-)
11
11
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
14
--- a/tcg/optimize.c
15
+++ b/accel/tcg/cputlb.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
16
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
17
meminfo = trace_mem_get_info(op, mmu_idx, false);
17
z2 = arg_info(op->args[2])->z_mask;
18
trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
18
ctx->z_mask = z1 & z2;
19
19
20
- op &= ~MO_SIGN;
20
+ /*
21
oi = make_memop_idx(op, mmu_idx);
21
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
22
ret = full_load(env, addr, oi, retaddr);
22
+ * Bitwise operations preserve the relative quantity of the repetitions.
23
23
+ */
24
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
24
+ ctx->s_mask = arg_info(op->args[1])->s_mask
25
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
25
+ & arg_info(op->args[2])->s_mask;
26
int mmu_idx, uintptr_t ra)
26
+
27
{
27
/*
28
- return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
28
* Known-zeros does not imply known-ones. Therefore unless
29
- full_ldub_mmu);
29
* arg2 is constant, we can't infer affected bits from it.
30
+ return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
30
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
31
}
32
ctx->z_mask = z1;
33
34
+ ctx->s_mask = arg_info(op->args[1])->s_mask
35
+ & arg_info(op->args[2])->s_mask;
36
return fold_masks(ctx, op);
31
}
37
}
32
38
33
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
39
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
34
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
40
fold_xi_to_not(ctx, op, 0)) {
35
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
41
return true;
36
int mmu_idx, uintptr_t ra)
42
}
37
{
43
+
38
- return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
44
+ ctx->s_mask = arg_info(op->args[1])->s_mask
39
- full_be_lduw_mmu);
45
+ & arg_info(op->args[2])->s_mask;
40
+ return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
46
return false;
41
}
47
}
42
48
43
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
49
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
44
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
50
45
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
51
ctx->z_mask = arg_info(op->args[3])->z_mask
46
int mmu_idx, uintptr_t ra)
52
| arg_info(op->args[4])->z_mask;
47
{
53
+ ctx->s_mask = arg_info(op->args[3])->s_mask
48
- return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
54
+ & arg_info(op->args[4])->s_mask;
49
- full_le_lduw_mmu);
55
50
+ return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
56
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
57
uint64_t tv = arg_info(op->args[3])->val;
58
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
59
fold_xi_to_not(ctx, op, -1)) {
60
return true;
61
}
62
+
63
+ ctx->s_mask = arg_info(op->args[1])->s_mask
64
+ & arg_info(op->args[2])->s_mask;
65
return false;
51
}
66
}
52
67
53
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
68
@@ -XXX,XX +XXX,XX @@ static bool fold_nor(OptContext *ctx, TCGOp *op)
54
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
69
fold_xi_to_not(ctx, op, 0)) {
55
index XXXXXXX..XXXXXXX 100644
70
return true;
56
--- a/accel/tcg/user-exec.c
71
}
57
+++ b/accel/tcg/user-exec.c
72
+
58
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
73
+ ctx->s_mask = arg_info(op->args[1])->s_mask
59
74
+ & arg_info(op->args[2])->s_mask;
60
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
75
return false;
61
{
62
- int ret;
63
- uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);
64
-
65
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
66
- ret = ldsb_p(g2h(env_cpu(env), ptr));
67
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
68
- return ret;
69
+ return (int8_t)cpu_ldub_data(env, ptr);
70
}
76
}
71
77
72
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
78
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
73
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
79
return true;
74
80
}
75
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
81
76
{
82
+ ctx->s_mask = arg_info(op->args[1])->s_mask;
77
- int ret;
83
+
78
- uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false);
84
/* Because of fold_to_not, we want to always return true, via finish. */
79
-
85
finish_folding(ctx, op);
80
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
86
return true;
81
- ret = ldsw_be_p(g2h(env_cpu(env), ptr));
87
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
82
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
88
83
- return ret;
89
ctx->z_mask = arg_info(op->args[1])->z_mask
84
+ return (int16_t)cpu_lduw_be_data(env, ptr);
90
| arg_info(op->args[2])->z_mask;
91
+ ctx->s_mask = arg_info(op->args[1])->s_mask
92
+ & arg_info(op->args[2])->s_mask;
93
return fold_masks(ctx, op);
85
}
94
}
86
95
87
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
96
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
88
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
97
fold_ix_to_not(ctx, op, 0)) {
89
98
return true;
90
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
99
}
91
{
100
+
92
- int ret;
101
+ ctx->s_mask = arg_info(op->args[1])->s_mask
93
- uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false);
102
+ & arg_info(op->args[2])->s_mask;
94
-
103
return false;
95
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
96
- ret = ldsw_le_p(g2h(env_cpu(env), ptr));
97
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
98
- return ret;
99
+ return (int16_t)cpu_lduw_le_data(env, ptr);
100
}
104
}
101
105
102
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
106
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
103
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
107
104
108
ctx->z_mask = arg_info(op->args[1])->z_mask
105
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
109
| arg_info(op->args[2])->z_mask;
106
{
110
+ ctx->s_mask = arg_info(op->args[1])->s_mask
107
- int ret;
111
+ & arg_info(op->args[2])->s_mask;
108
-
112
return fold_masks(ctx, op);
109
- set_helper_retaddr(retaddr);
110
- ret = cpu_ldsb_data(env, ptr);
111
- clear_helper_retaddr();
112
- return ret;
113
+ return (int8_t)cpu_ldub_data_ra(env, ptr, retaddr);
114
}
113
}
115
114
116
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
117
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
118
119
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
120
{
121
- int ret;
122
-
123
- set_helper_retaddr(retaddr);
124
- ret = cpu_ldsw_be_data(env, ptr);
125
- clear_helper_retaddr();
126
- return ret;
127
+ return (int16_t)cpu_lduw_be_data_ra(env, ptr, retaddr);
128
}
129
130
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
131
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
132
133
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
134
{
135
- int ret;
136
-
137
- set_helper_retaddr(retaddr);
138
- ret = cpu_ldsw_le_data(env, ptr);
139
- clear_helper_retaddr();
140
- return ret;
141
+ return (int16_t)cpu_lduw_le_data_ra(env, ptr, retaddr);
142
}
143
144
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
145
--
115
--
146
2.25.1
116
2.25.1
147
117
148
118
diff view generated by jsdifflib
1
The image was upgraded to a full image in ee381b7fe146.
1
The result is either 0 or 1, which means that we have
2
This makes it possible to use docker-test@image syntax
2
a 2 bit signed result, and thus 62 bits of sign.
3
with this container.
3
For clarity, use the smask_from_zmask function.
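A quick standalone check of the arithmetic (a sketch, not the series code; the real smask_from_zmask lives in tcg/optimize.c): a setcond result is 0 or 1, so z_mask == 1, and a left-aligned sign-repetition mask derived from that covers bits 63..2, i.e. 62 bits of sign.

    #include <assert.h>
    #include <stdint.h>

    /* Sketch of the zmask -> smask conversion: the leading zeros of zmask,
       minus one for the sign bit itself, give the known repetitions. */
    static uint64_t smask_from_zmask_sketch(uint64_t zmask)
    {
        int rep = zmask ? __builtin_clzll(zmask) : 64;
        rep = rep ? rep - 1 : 0;
        return rep ? ~(~0ull >> rep) : 0;
    }

    int main(void)
    {
        /* setcond: z_mask == 1 -> bits 63..2 are sign repetitions. */
        assert(smask_from_zmask_sketch(1) == ~(~0ull >> 62));
        return 0;
    }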
4
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
7
Message-Id: <20210930163636.721311-2-richard.henderson@linaro.org>
8
---
8
---
9
tests/docker/Makefile.include | 2 +-
9
tcg/optimize.c | 2 ++
10
1 file changed, 1 insertion(+), 1 deletion(-)
10
1 file changed, 2 insertions(+)
11
11
12
diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tests/docker/Makefile.include
14
--- a/tcg/optimize.c
15
+++ b/tests/docker/Makefile.include
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ DOCKER_PARTIAL_IMAGES += debian-riscv64-cross
16
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
17
DOCKER_PARTIAL_IMAGES += debian-sh4-cross debian-sparc64-cross
17
}
18
DOCKER_PARTIAL_IMAGES += debian-tricore-cross
18
19
DOCKER_PARTIAL_IMAGES += debian-xtensa-cross
19
ctx->z_mask = 1;
20
-DOCKER_PARTIAL_IMAGES += fedora-i386-cross fedora-cris-cross
20
+ ctx->s_mask = smask_from_zmask(1);
21
+DOCKER_PARTIAL_IMAGES += fedora-cris-cross
21
return false;
22
22
}
23
# Rules for building linux-user powered images
23
24
#
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
25
}
26
27
ctx->z_mask = 1;
28
+ ctx->s_mask = smask_from_zmask(1);
29
return false;
30
31
do_setcond_const:
25
--
32
--
26
2.25.1
33
2.25.1
27
34
28
35
diff view generated by jsdifflib
1
The unsigned saturations are handled via generic code
1
The results are generally 6 bit unsigned values, though
2
using min/max. The signed saturations are expanded using
2
the count-leading and count-trailing operations may produce any value
3
double-sized arithmetic and a saturating pack.
3
for a zero input.
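As a standalone sanity check (not part of the patch): for any non-zero 64-bit input the clz/ctz result fits in 6 bits, while TCG's clz/ctz return their second operand for a zero input, which is why the fold below ORs that operand's z_mask into the 6-bit mask.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t samples[] = { 1, 0x80, 0xdeadbeef, ~0ull, 1ull << 63 };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            /* For any non-zero 64-bit input, clz/ctz lie in 0..63. */
            assert((__builtin_clzll(samples[i]) & ~63) == 0);
            assert((__builtin_ctzll(samples[i]) & ~63) == 0);
        }
        return 0;
    }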
4
4
5
Since all operations are done via expansion, do not
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
actually set TCG_TARGET_HAS_sat_vec.
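The following scalar sketch (plain C, not the vector backend code) shows the same widen / operate / saturating-pack shape used below, where the unpack is VUPH/VUPL and the pack is VPKS; ssadd8_sketch is an illustrative name, shown here for 8-bit signed saturating add.

    #include <stdint.h>
    #include <stdio.h>

    static int8_t ssadd8_sketch(int8_t a, int8_t b)
    {
        int16_t wide = (int16_t)a + (int16_t)b;  /* cannot overflow in 16 bits */
        if (wide > INT8_MAX) {
            wide = INT8_MAX;                     /* saturating pack, high clamp */
        } else if (wide < INT8_MIN) {
            wide = INT8_MIN;                     /* saturating pack, low clamp */
        }
        return (int8_t)wide;
    }

    int main(void)
    {
        printf("%d %d\n", ssadd8_sketch(100, 100), ssadd8_sketch(-100, -100));
        /* prints: 127 -128 */
        return 0;
    }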
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
8
---
10
tcg/s390x/tcg-target.opc.h | 3 ++
9
tcg/optimize.c | 3 ++-
11
tcg/s390x/tcg-target.c.inc | 63 ++++++++++++++++++++++++++++++++++++++
10
1 file changed, 2 insertions(+), 1 deletion(-)
12
2 files changed, 66 insertions(+)
13
11
14
diff --git a/tcg/s390x/tcg-target.opc.h b/tcg/s390x/tcg-target.opc.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/s390x/tcg-target.opc.h
14
--- a/tcg/optimize.c
17
+++ b/tcg/s390x/tcg-target.opc.h
15
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
19
* emitted by tcg_expand_vec_op. For those familiar with GCC internals,
17
g_assert_not_reached();
20
* consider these to be UNSPEC with names.
21
*/
22
+DEF(s390_vuph_vec, 1, 1, 0, IMPLVEC)
23
+DEF(s390_vupl_vec, 1, 1, 0, IMPLVEC)
24
+DEF(s390_vpks_vec, 1, 2, 0, IMPLVEC)
25
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
26
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/s390x/tcg-target.c.inc
28
+++ b/tcg/s390x/tcg-target.c.inc
29
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
30
VRRc_VNO = 0xe76b,
31
VRRc_VO = 0xe76a,
32
VRRc_VOC = 0xe76f,
33
+ VRRc_VPKS = 0xe797, /* we leave the m5 cs field 0 */
34
VRRc_VS = 0xe7f7,
35
+ VRRa_VUPH = 0xe7d7,
36
+ VRRa_VUPL = 0xe7d6,
37
VRRc_VX = 0xe76d,
38
VRRf_VLVGP = 0xe762,
39
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
41
}
42
break;
43
44
+ case INDEX_op_s390_vuph_vec:
45
+ tcg_out_insn(s, VRRa, VUPH, a0, a1, vece);
46
+ break;
47
+ case INDEX_op_s390_vupl_vec:
48
+ tcg_out_insn(s, VRRa, VUPL, a0, a1, vece);
49
+ break;
50
+ case INDEX_op_s390_vpks_vec:
51
+ tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece);
52
+ break;
53
+
54
case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
55
case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
56
default:
57
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
58
return -1;
59
case INDEX_op_mul_vec:
60
return vece < MO_64;
61
+ case INDEX_op_ssadd_vec:
62
+ case INDEX_op_sssub_vec:
63
+ return vece < MO_64 ? -1 : 0;
64
default:
65
return 0;
66
}
18
}
67
@@ -XXX,XX +XXX,XX @@ static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
19
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
68
}
20
-
21
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
22
return false;
69
}
23
}
70
24
71
+static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
25
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
72
+ TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
73
+{
74
+ TCGv_vec h1 = tcg_temp_new_vec(type);
75
+ TCGv_vec h2 = tcg_temp_new_vec(type);
76
+ TCGv_vec l1 = tcg_temp_new_vec(type);
77
+ TCGv_vec l2 = tcg_temp_new_vec(type);
78
+
79
+ tcg_debug_assert (vece < MO_64);
80
+
81
+ /* Unpack with sign-extension. */
82
+ vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
83
+ tcgv_vec_arg(h1), tcgv_vec_arg(v1));
84
+ vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
85
+ tcgv_vec_arg(h2), tcgv_vec_arg(v2));
86
+
87
+ vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
88
+ tcgv_vec_arg(l1), tcgv_vec_arg(v1));
89
+ vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
90
+ tcgv_vec_arg(l2), tcgv_vec_arg(v2));
91
+
92
+ /* Arithmetic on a wider element size. */
93
+ vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1),
94
+ tcgv_vec_arg(h1), tcgv_vec_arg(h2));
95
+ vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1),
96
+ tcgv_vec_arg(l1), tcgv_vec_arg(l2));
97
+
98
+ /* Pack with saturation. */
99
+ vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1,
100
+ tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1));
101
+
102
+ tcg_temp_free_vec(h1);
103
+ tcg_temp_free_vec(h2);
104
+ tcg_temp_free_vec(l1);
105
+ tcg_temp_free_vec(l2);
106
+}
107
+
108
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
109
TCGArg a0, ...)
110
{
111
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
112
tcg_temp_free_vec(t0);
113
break;
114
115
+ case INDEX_op_ssadd_vec:
116
+ expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec);
117
+ break;
118
+ case INDEX_op_sssub_vec:
119
+ expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec);
120
+ break;
121
+
122
default:
26
default:
123
g_assert_not_reached();
27
g_assert_not_reached();
124
}
28
}
125
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
29
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
126
case INDEX_op_sari_vec:
30
return false;
127
case INDEX_op_shli_vec:
31
}
128
case INDEX_op_shri_vec:
32
129
+ case INDEX_op_s390_vuph_vec:
130
+ case INDEX_op_s390_vupl_vec:
131
return C_O1_I1(v, v);
132
case INDEX_op_add_vec:
133
case INDEX_op_sub_vec:
134
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
135
case INDEX_op_smin_vec:
136
case INDEX_op_umax_vec:
137
case INDEX_op_umin_vec:
138
+ case INDEX_op_s390_vpks_vec:
139
return C_O1_I2(v, v, v);
140
case INDEX_op_rotls_vec:
141
case INDEX_op_shls_vec:
142
--
33
--
143
2.25.1
34
2.25.1
144
35
145
36
diff view generated by jsdifflib
1
We will shortly need to be able to check facilities beyond the
1
For constant shifts, we can simply shift the s_mask.
2
first 64. Instead of explicitly masking against s390_facilities,
3
create a HAVE_FACILITY macro that indexes an array.
4
2
5
Reviewed-by: David Hildenbrand <david@redhat.com>
3
For variable shifts, we know that sar does not reduce
4
the s_mask, which helps for sequences like
5
6
ext32s_i64 t, in
7
sar_i64 t, t, v
8
ext32s_i64 out, t
9
10
allowing the final extend to be eliminated.
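A standalone illustration (not QEMU code) of why that final extend is redundant, assuming the usual two's-complement conversion and arithmetic behaviour of >> on negative signed values; ext32s here is an illustrative helper, not the TCG op.

    #include <assert.h>
    #include <stdint.h>

    static int64_t ext32s(int64_t x)
    {
        return (int32_t)x;   /* sign-extend the low 32 bits */
    }

    int main(void)
    {
        int64_t in = 0x123456789abcdef0;
        for (unsigned sh = 0; sh < 64; sh++) {
            int64_t t = ext32s(in) >> sh;   /* sar on a sign-extended value */
            assert(t == ext32s(t));         /* still sign-extended: ext32s is a no-op */
        }
        return 0;
    }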
11
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
15
---
8
v2: Change name to HAVE_FACILITY (david)
16
tcg/optimize.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++---
9
---
17
1 file changed, 47 insertions(+), 3 deletions(-)
10
tcg/s390x/tcg-target.h | 29 ++++++++-------
11
tcg/s390x/tcg-target.c.inc | 74 +++++++++++++++++++-------------------
12
2 files changed, 52 insertions(+), 51 deletions(-)
13
18
14
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
19
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/s390x/tcg-target.h
21
--- a/tcg/optimize.c
17
+++ b/tcg/s390x/tcg-target.h
22
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ typedef enum TCGReg {
23
@@ -XXX,XX +XXX,XX @@ static uint64_t smask_from_zmask(uint64_t zmask)
19
/* A list of relevant facilities used by this translator. Some of these
24
return ~(~0ull >> rep);
20
are required for proper operation, and these are checked at startup. */
25
}
21
26
22
-#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
27
+/*
23
-#define FACILITY_LONG_DISP (1ULL << (63 - 18))
28
+ * Recreate a properly left-aligned smask after manipulation.
24
-#define FACILITY_EXT_IMM (1ULL << (63 - 21))
29
+ * Some bit-shuffling, particularly shifts and rotates, may
25
-#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
30
+ * retain sign bits on the left, but may scatter disconnected
26
-#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
31
+ * sign bits on the right. Retain only what remains to the left.
27
+#define FACILITY_ZARCH_ACTIVE 2
32
+ */
28
+#define FACILITY_LONG_DISP 18
33
+static uint64_t smask_from_smask(int64_t smask)
29
+#define FACILITY_EXT_IMM 21
34
+{
30
+#define FACILITY_GEN_INST_EXT 34
35
+ /* Only the 1 bits are significant for smask */
31
+#define FACILITY_LOAD_ON_COND 45
36
+ return smask_from_zmask(~smask);
32
#define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND
37
+}
33
#define FACILITY_DISTINCT_OPS FACILITY_LOAD_ON_COND
34
-#define FACILITY_LOAD_ON_COND2 (1ULL << (63 - 53))
35
+#define FACILITY_LOAD_ON_COND2 53
36
37
-extern uint64_t s390_facilities;
38
+extern uint64_t s390_facilities[1];
39
+
38
+
40
+#define HAVE_FACILITY(X) \
39
static inline TempOptInfo *ts_info(TCGTemp *ts)
41
+ ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)
40
{
42
41
return ts->state_ptr;
43
/* optional instructions */
42
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
44
#define TCG_TARGET_HAS_div2_i32 1
43
45
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities;
44
static bool fold_shift(OptContext *ctx, TCGOp *op)
46
#define TCG_TARGET_HAS_clz_i32 0
45
{
47
#define TCG_TARGET_HAS_ctz_i32 0
46
+ uint64_t s_mask, z_mask, sign;
48
#define TCG_TARGET_HAS_ctpop_i32 0
47
+
49
-#define TCG_TARGET_HAS_deposit_i32 (s390_facilities & FACILITY_GEN_INST_EXT)
48
if (fold_const2(ctx, op) ||
50
-#define TCG_TARGET_HAS_extract_i32 (s390_facilities & FACILITY_GEN_INST_EXT)
49
fold_ix_to_i(ctx, op, 0) ||
51
+#define TCG_TARGET_HAS_deposit_i32 HAVE_FACILITY(GEN_INST_EXT)
50
fold_xi_to_x(ctx, op, 0)) {
52
+#define TCG_TARGET_HAS_extract_i32 HAVE_FACILITY(GEN_INST_EXT)
51
return true;
53
#define TCG_TARGET_HAS_sextract_i32 0
54
#define TCG_TARGET_HAS_extract2_i32 0
55
#define TCG_TARGET_HAS_movcond_i32 1
56
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities;
57
#define TCG_TARGET_HAS_mulsh_i32 0
58
#define TCG_TARGET_HAS_extrl_i64_i32 0
59
#define TCG_TARGET_HAS_extrh_i64_i32 0
60
-#define TCG_TARGET_HAS_direct_jump (s390_facilities & FACILITY_GEN_INST_EXT)
61
+#define TCG_TARGET_HAS_direct_jump HAVE_FACILITY(GEN_INST_EXT)
62
#define TCG_TARGET_HAS_qemu_st8_i32 0
63
64
#define TCG_TARGET_HAS_div2_i64 1
65
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities;
66
#define TCG_TARGET_HAS_eqv_i64 0
67
#define TCG_TARGET_HAS_nand_i64 0
68
#define TCG_TARGET_HAS_nor_i64 0
69
-#define TCG_TARGET_HAS_clz_i64 (s390_facilities & FACILITY_EXT_IMM)
70
+#define TCG_TARGET_HAS_clz_i64 HAVE_FACILITY(EXT_IMM)
71
#define TCG_TARGET_HAS_ctz_i64 0
72
#define TCG_TARGET_HAS_ctpop_i64 0
73
-#define TCG_TARGET_HAS_deposit_i64 (s390_facilities & FACILITY_GEN_INST_EXT)
74
-#define TCG_TARGET_HAS_extract_i64 (s390_facilities & FACILITY_GEN_INST_EXT)
75
+#define TCG_TARGET_HAS_deposit_i64 HAVE_FACILITY(GEN_INST_EXT)
76
+#define TCG_TARGET_HAS_extract_i64 HAVE_FACILITY(GEN_INST_EXT)
77
#define TCG_TARGET_HAS_sextract_i64 0
78
#define TCG_TARGET_HAS_extract2_i64 0
79
#define TCG_TARGET_HAS_movcond_i64 1
80
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
81
index XXXXXXX..XXXXXXX 100644
82
--- a/tcg/s390x/tcg-target.c.inc
83
+++ b/tcg/s390x/tcg-target.c.inc
84
@@ -XXX,XX +XXX,XX @@
85
We don't need this when we have pc-relative loads with the general
86
instructions extension facility. */
87
#define TCG_REG_TB TCG_REG_R12
88
-#define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT))
89
+#define USE_REG_TB (!HAVE_FACILITY(GEN_INST_EXT))
90
91
#ifndef CONFIG_SOFTMMU
92
#define TCG_GUEST_BASE_REG TCG_REG_R13
93
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
94
#endif
95
96
static const tcg_insn_unit *tb_ret_addr;
97
-uint64_t s390_facilities;
98
+uint64_t s390_facilities[1];
99
100
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
101
intptr_t value, intptr_t addend)
102
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
103
}
52
}
104
53
105
/* Try all 48-bit insns that can load it in one go. */
54
+ s_mask = arg_info(op->args[1])->s_mask;
106
- if (s390_facilities & FACILITY_EXT_IMM) {
55
+ z_mask = arg_info(op->args[1])->z_mask;
107
+ if (HAVE_FACILITY(EXT_IMM)) {
56
+
108
if (sval == (int32_t)sval) {
57
if (arg_is_const(op->args[2])) {
109
tcg_out_insn(s, RIL, LGFI, ret, sval);
58
- ctx->z_mask = do_constant_folding(op->opc, ctx->type,
110
return;
59
- arg_info(op->args[1])->z_mask,
111
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
60
- arg_info(op->args[2])->val);
61
+ int sh = arg_info(op->args[2])->val;
62
+
63
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
64
+
65
+ s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
66
+ ctx->s_mask = smask_from_smask(s_mask);
67
+
68
return fold_masks(ctx, op);
112
}
69
}
113
70
+
114
/* Otherwise, stuff it in the constant pool. */
71
+ switch (op->opc) {
115
- if (s390_facilities & FACILITY_GEN_INST_EXT) {
72
+ CASE_OP_32_64(sar):
116
+ if (HAVE_FACILITY(GEN_INST_EXT)) {
73
+ /*
117
tcg_out_insn(s, RIL, LGRL, ret, 0);
74
+ * Arithmetic right shift will not reduce the number of
118
new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
75
+ * input sign repetitions.
119
} else if (USE_REG_TB && !in_prologue) {
76
+ */
120
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld_abs(TCGContext *s, TCGType type,
77
+ ctx->s_mask = s_mask;
121
{
78
+ break;
122
intptr_t addr = (intptr_t)abs;
79
+ CASE_OP_32_64(shr):
123
80
+ /*
124
- if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
81
+ * If the sign bit is known zero, then logical right shift
125
+ if (HAVE_FACILITY(GEN_INST_EXT) && !(addr & 1)) {
82
+ * will not reduce the number of input sign repetitions.
126
ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
83
+ */
127
if (disp == (int32_t)disp) {
84
+ sign = (s_mask & -s_mask) >> 1;
128
if (type == TCG_TYPE_I32) {
85
+ if (!(z_mask & sign)) {
129
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
86
+ ctx->s_mask = s_mask;
130
87
+ }
131
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
88
+ break;
132
{
89
+ default:
133
- if (s390_facilities & FACILITY_EXT_IMM) {
90
+ break;
134
+ if (HAVE_FACILITY(EXT_IMM)) {
91
+ }
135
tcg_out_insn(s, RRE, LGBR, dest, src);
92
+
136
return;
93
return false;
137
}
138
@@ -XXX,XX +XXX,XX @@ static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
139
140
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
141
{
142
- if (s390_facilities & FACILITY_EXT_IMM) {
143
+ if (HAVE_FACILITY(EXT_IMM)) {
144
tcg_out_insn(s, RRE, LLGCR, dest, src);
145
return;
146
}
147
@@ -XXX,XX +XXX,XX @@ static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
148
149
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
150
{
151
- if (s390_facilities & FACILITY_EXT_IMM) {
152
+ if (HAVE_FACILITY(EXT_IMM)) {
153
tcg_out_insn(s, RRE, LGHR, dest, src);
154
return;
155
}
156
@@ -XXX,XX +XXX,XX @@ static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
157
158
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
159
{
160
- if (s390_facilities & FACILITY_EXT_IMM) {
161
+ if (HAVE_FACILITY(EXT_IMM)) {
162
tcg_out_insn(s, RRE, LLGHR, dest, src);
163
return;
164
}
165
@@ -XXX,XX +XXX,XX @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
166
tgen_ext32u(s, dest, dest);
167
return;
168
}
169
- if (s390_facilities & FACILITY_EXT_IMM) {
170
+ if (HAVE_FACILITY(EXT_IMM)) {
171
if ((val & valid) == 0xff) {
172
tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
173
return;
174
@@ -XXX,XX +XXX,XX @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
175
}
176
177
/* Try all 48-bit insns that can perform it in one go. */
178
- if (s390_facilities & FACILITY_EXT_IMM) {
179
+ if (HAVE_FACILITY(EXT_IMM)) {
180
for (i = 0; i < 2; i++) {
181
tcg_target_ulong mask = ~(0xffffffffull << i*32);
182
if (((val | ~valid) & mask) == mask) {
183
@@ -XXX,XX +XXX,XX @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
184
}
185
}
186
}
187
- if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
188
+ if (HAVE_FACILITY(GEN_INST_EXT) && risbg_mask(val)) {
189
tgen_andi_risbg(s, dest, dest, val);
190
return;
191
}
192
@@ -XXX,XX +XXX,XX @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
193
}
194
195
/* Try all 48-bit insns that can perform it in one go. */
196
- if (s390_facilities & FACILITY_EXT_IMM) {
197
+ if (HAVE_FACILITY(EXT_IMM)) {
198
for (i = 0; i < 2; i++) {
199
tcg_target_ulong mask = (0xffffffffull << i*32);
200
if ((val & mask) != 0 && (val & ~mask) == 0) {
201
@@ -XXX,XX +XXX,XX @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
202
/* Perform the OR via sequential modifications to the high and
203
low parts. Do this via recursion to handle 16-bit vs 32-bit
204
masks in each half. */
205
- tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
206
+ tcg_debug_assert(HAVE_FACILITY(EXT_IMM));
207
tgen_ori(s, type, dest, val & 0x00000000ffffffffull);
208
tgen_ori(s, type, dest, val & 0xffffffff00000000ull);
209
}
210
@@ -XXX,XX +XXX,XX @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
211
static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
212
{
213
/* Try all 48-bit insns that can perform it in one go. */
214
- if (s390_facilities & FACILITY_EXT_IMM) {
215
+ if (HAVE_FACILITY(EXT_IMM)) {
216
if ((val & 0xffffffff00000000ull) == 0) {
217
tcg_out_insn(s, RIL, XILF, dest, val);
218
return;
219
@@ -XXX,XX +XXX,XX @@ static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
220
tcg_tbrel_diff(s, NULL));
221
} else {
222
/* Perform the xor by parts. */
223
- tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
224
+ tcg_debug_assert(HAVE_FACILITY(EXT_IMM));
225
if (val & 0xffffffff) {
226
tcg_out_insn(s, RIL, XILF, dest, val);
227
}
228
@@ -XXX,XX +XXX,XX @@ static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
229
goto exit;
230
}
231
232
- if (s390_facilities & FACILITY_EXT_IMM) {
233
+ if (HAVE_FACILITY(EXT_IMM)) {
234
if (type == TCG_TYPE_I32) {
235
op = (is_unsigned ? RIL_CLFI : RIL_CFI);
236
tcg_out_insn_RIL(s, op, r1, c2);
237
@@ -XXX,XX +XXX,XX @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
238
bool have_loc;
239
240
/* With LOC2, we can always emit the minimum 3 insns. */
241
- if (s390_facilities & FACILITY_LOAD_ON_COND2) {
242
+ if (HAVE_FACILITY(LOAD_ON_COND2)) {
243
/* Emit: d = 0, d = (cc ? 1 : d). */
244
cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
245
tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
246
@@ -XXX,XX +XXX,XX @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
247
return;
248
}
249
250
- have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0;
251
+ have_loc = HAVE_FACILITY(LOAD_ON_COND);
252
253
/* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */
254
restart:
255
@@ -XXX,XX +XXX,XX @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
256
TCGArg v3, int v3const)
257
{
258
int cc;
259
- if (s390_facilities & FACILITY_LOAD_ON_COND) {
260
+ if (HAVE_FACILITY(LOAD_ON_COND)) {
261
cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
262
if (v3const) {
263
tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
264
@@ -XXX,XX +XXX,XX @@ static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
265
} else {
266
tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
267
}
268
- if (s390_facilities & FACILITY_LOAD_ON_COND) {
269
+ if (HAVE_FACILITY(LOAD_ON_COND)) {
270
/* Emit: if (one bit found) dest = r0. */
271
tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
272
} else {
273
@@ -XXX,XX +XXX,XX @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
274
{
275
int cc;
276
277
- if (s390_facilities & FACILITY_GEN_INST_EXT) {
278
+ if (HAVE_FACILITY(GEN_INST_EXT)) {
279
bool is_unsigned = is_unsigned_cond(c);
280
bool in_range;
281
S390Opcode opc;
282
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
283
cross pages using the address of the last byte of the access. */
284
a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
285
tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
286
- if ((s390_facilities & FACILITY_GEN_INST_EXT) && a_off == 0) {
287
+ if (HAVE_FACILITY(GEN_INST_EXT) && a_off == 0) {
288
tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
289
} else {
290
tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
291
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
292
tcg_out_insn(s, RI, AHI, a0, a2);
293
break;
294
}
295
- if (s390_facilities & FACILITY_EXT_IMM) {
296
+ if (HAVE_FACILITY(EXT_IMM)) {
297
tcg_out_insn(s, RIL, AFI, a0, a2);
298
break;
299
}
300
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
301
tcg_out_insn(s, RI, AGHI, a0, a2);
302
break;
303
}
304
- if (s390_facilities & FACILITY_EXT_IMM) {
305
+ if (HAVE_FACILITY(EXT_IMM)) {
306
if (a2 == (int32_t)a2) {
307
tcg_out_insn(s, RIL, AGFI, a0, a2);
308
break;
309
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
310
/* The host memory model is quite strong, we simply need to
311
serialize the instruction stream. */
312
if (args[0] & TCG_MO_ST_LD) {
313
- tcg_out_insn(s, RR, BCR,
314
- s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
315
+ tcg_out_insn(s, RR, BCR, HAVE_FACILITY(FAST_BCR_SER) ? 14 : 15, 0);
316
}
317
break;
318
319
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
320
case INDEX_op_or_i64:
321
case INDEX_op_xor_i32:
322
case INDEX_op_xor_i64:
323
- return (s390_facilities & FACILITY_DISTINCT_OPS
324
+ return (HAVE_FACILITY(DISTINCT_OPS)
325
? C_O1_I2(r, r, ri)
326
: C_O1_I2(r, 0, ri));
327
328
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
329
/* If we have the general-instruction-extensions, then we have
330
MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
331
have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
332
- return (s390_facilities & FACILITY_GEN_INST_EXT
333
+ return (HAVE_FACILITY(GEN_INST_EXT)
334
? C_O1_I2(r, 0, ri)
335
: C_O1_I2(r, 0, rI));
336
337
case INDEX_op_mul_i64:
338
- return (s390_facilities & FACILITY_GEN_INST_EXT
339
+ return (HAVE_FACILITY(GEN_INST_EXT)
340
? C_O1_I2(r, 0, rJ)
341
: C_O1_I2(r, 0, rI));
342
343
case INDEX_op_shl_i32:
344
case INDEX_op_shr_i32:
345
case INDEX_op_sar_i32:
346
- return (s390_facilities & FACILITY_DISTINCT_OPS
347
+ return (HAVE_FACILITY(DISTINCT_OPS)
348
? C_O1_I2(r, r, ri)
349
: C_O1_I2(r, 0, ri));
350
351
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
352
353
case INDEX_op_movcond_i32:
354
case INDEX_op_movcond_i64:
355
- return (s390_facilities & FACILITY_LOAD_ON_COND2
356
+ return (HAVE_FACILITY(LOAD_ON_COND2)
357
? C_O1_I4(r, r, ri, rI, 0)
358
: C_O1_I4(r, r, ri, r, 0));
359
360
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
361
362
case INDEX_op_add2_i32:
363
case INDEX_op_sub2_i32:
364
- return (s390_facilities & FACILITY_EXT_IMM
365
+ return (HAVE_FACILITY(EXT_IMM)
366
? C_O2_I4(r, r, 0, 1, ri, r)
367
: C_O2_I4(r, r, 0, 1, r, r));
368
369
case INDEX_op_add2_i64:
370
case INDEX_op_sub2_i64:
371
- return (s390_facilities & FACILITY_EXT_IMM
372
+ return (HAVE_FACILITY(EXT_IMM)
373
? C_O2_I4(r, r, 0, 1, rA, r)
374
: C_O2_I4(r, r, 0, 1, r, r));
375
376
@@ -XXX,XX +XXX,XX @@ static void query_s390_facilities(void)
377
/* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
378
is present on all 64-bit systems, but let's check for it anyway. */
379
if (hwcap & HWCAP_S390_STFLE) {
380
- register int r0 __asm__("0");
381
- register void *r1 __asm__("1");
382
+ register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1;
383
+ register void *r1 __asm__("1") = s390_facilities;
384
385
/* stfle 0(%r1) */
386
- r1 = &s390_facilities;
387
asm volatile(".word 0xb2b0,0x1000"
388
- : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
389
+ : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
390
}
391
}
94
}
392
95
393
--
96
--
394
2.25.1
97
2.25.1
395
98
396
99
diff view generated by jsdifflib