The following changes since commit c52d69e7dbaaed0ffdef8125e79218672c30161d:

  Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20211027' into staging (2021-10-27 11:45:18 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211027

for you to fetch changes up to 820c025f0dcacf2f3c12735b1f162893fbfa7bc6:

  tcg/optimize: Propagate sign info for shifting (2021-10-27 17:11:23 -0700)

----------------------------------------------------------------
Improvements to qemu/int128
Fixes for 128/64 division.
Cleanup tcg/optimize.c
Optimize redundant sign extensions

----------------------------------------------------------------
Frédéric Pétrot (1):
      qemu/int128: Add int128_{not,xor}

Luis Pires (4):
      host-utils: move checks out of divu128/divs128
      host-utils: move udiv_qrnnd() to host-utils
      host-utils: add 128-bit quotient support to divu128/divs128
      host-utils: add unit tests for divu128/divs128

Richard Henderson (51):
      tcg/optimize: Rename "mask" to "z_mask"
      tcg/optimize: Split out OptContext
      tcg/optimize: Remove do_default label
      tcg/optimize: Change tcg_opt_gen_{mov,movi} interface
      tcg/optimize: Move prev_mb into OptContext
      tcg/optimize: Split out init_arguments
      tcg/optimize: Split out copy_propagate
      tcg/optimize: Split out fold_call
      tcg/optimize: Drop nb_oargs, nb_iargs locals
      tcg/optimize: Change fail return for do_constant_folding_cond*
      tcg/optimize: Return true from tcg_opt_gen_{mov,movi}
      tcg/optimize: Split out finish_folding
      tcg/optimize: Use a boolean to avoid a mass of continues
      tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}
      tcg/optimize: Split out fold_const{1,2}
      tcg/optimize: Split out fold_setcond2
      tcg/optimize: Split out fold_brcond2
      tcg/optimize: Split out fold_brcond
      tcg/optimize: Split out fold_setcond
      tcg/optimize: Split out fold_mulu2_i32
      tcg/optimize: Split out fold_addsub2_i32
      tcg/optimize: Split out fold_movcond
      tcg/optimize: Split out fold_extract2
      tcg/optimize: Split out fold_extract, fold_sextract
      tcg/optimize: Split out fold_deposit
      tcg/optimize: Split out fold_count_zeros
      tcg/optimize: Split out fold_bswap
      tcg/optimize: Split out fold_dup, fold_dup2
      tcg/optimize: Split out fold_mov
      tcg/optimize: Split out fold_xx_to_i
      tcg/optimize: Split out fold_xx_to_x
      tcg/optimize: Split out fold_xi_to_i
      tcg/optimize: Add type to OptContext
      tcg/optimize: Split out fold_to_not
      tcg/optimize: Split out fold_sub_to_neg
      tcg/optimize: Split out fold_xi_to_x
      tcg/optimize: Split out fold_ix_to_i
      tcg/optimize: Split out fold_masks
      tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
      tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops
      tcg/optimize: Sink commutative operand swapping into fold functions
      tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values
      tcg/optimize: Use fold_xx_to_i for orc
      tcg/optimize: Use fold_xi_to_x for mul
      tcg/optimize: Use fold_xi_to_x for div
      tcg/optimize: Use fold_xx_to_i for rem
      tcg/optimize: Optimize sign extensions
      tcg/optimize: Propagate sign info for logical operations
      tcg/optimize: Propagate sign info for setcond
      tcg/optimize: Propagate sign info for bit counting
      tcg/optimize: Propagate sign info for shifting

 include/fpu/softfloat-macros.h |   82 --
 include/hw/clock.h             |    5 +-
 include/qemu/host-utils.h      |  121 +-
 include/qemu/int128.h          |   20 +
 target/ppc/int_helper.c        |   23 +-
 tcg/optimize.c                 | 2644 ++++++++++++++++++++++++----------------
 tests/unit/test-div128.c       |  197 +++
 util/host-utils.c              |  147 ++-
 tests/unit/meson.build         |    1 +
 9 files changed, 2053 insertions(+), 1187 deletions(-)
 create mode 100644 tests/unit/test-div128.c
From: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>

Addition of not and xor on 128-bit integers.

Signed-off-by: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
Co-authored-by: Fabien Portas <fabien.portas@grenoble-inp.org>
Message-Id: <20211025122818.168890-3-frederic.petrot@univ-grenoble-alpes.fr>
[rth: Split out logical operations.]
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/int128.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
     return a;
 }
 
+static inline Int128 int128_not(Int128 a)
+{
+    return ~a;
+}
+
 static inline Int128 int128_and(Int128 a, Int128 b)
 {
     return a & b;
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
     return a | b;
 }
 
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+    return a ^ b;
+}
+
 static inline Int128 int128_rshift(Int128 a, int n)
 {
     return a >> n;
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
     return int128_make128(a, (a < 0) ? -1 : 0);
 }
 
+static inline Int128 int128_not(Int128 a)
+{
+    return int128_make128(~a.lo, ~a.hi);
+}
+
 static inline Int128 int128_and(Int128 a, Int128 b)
 {
     return int128_make128(a.lo & b.lo, a.hi & b.hi);
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
     return int128_make128(a.lo | b.lo, a.hi | b.hi);
 }
 
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+    return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
+}
+
 static inline Int128 int128_rshift(Int128 a, int n)
 {
     int64_t h;
-- 
2.25.1
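For readers without the QEMU headers at hand, here is a minimal stand-alone sketch of the semantics these two helpers provide. It mirrors the non-CONFIG_INT128 fallback above, but uses a stand-in struct rather than QEMU's real Int128 type, so the names and layout here are illustrative only.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for QEMU's Int128 fallback (lo/hi pair), not the real header. */
typedef struct { uint64_t lo; int64_t hi; } Int128;

static Int128 int128_make128(uint64_t lo, int64_t hi)
{
    return (Int128){ .lo = lo, .hi = hi };
}

/* Bitwise complement, as added by this patch. */
static Int128 int128_not(Int128 a)
{
    return int128_make128(~a.lo, ~a.hi);
}

/* Bitwise exclusive-or, as added by this patch. */
static Int128 int128_xor(Int128 a, Int128 b)
{
    return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
}

int main(void)
{
    Int128 a = int128_make128(0x00000000ffffffffULL, 0);
    Int128 b = int128_make128(0x0f0f0f0f0f0f0f0fULL, 0x0f0f0f0f0f0f0f0fLL);

    Int128 n = int128_not(a);
    Int128 x = int128_xor(a, b);

    printf("not: hi=%016llx lo=%016llx\n",
           (unsigned long long)n.hi, (unsigned long long)n.lo);
    printf("xor: hi=%016llx lo=%016llx\n",
           (unsigned long long)x.hi, (unsigned long long)x.lo);
    return 0;
}
```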
From: Luis Pires <luis.pires@eldorado.org.br>

In preparation for changing the divu128/divs128 implementations
to allow for quotients larger than 64 bits, move the div-by-zero
and overflow checks to the callers.

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-2-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/clock.h        |  5 +++--
 include/qemu/host-utils.h | 34 ++++++++++++---------------------
 target/ppc/int_helper.c   | 14 +++++++++-----
 util/host-utils.c         | 40 ++++++++++++++++++---------------------
 4 files changed, 42 insertions(+), 51 deletions(-)

diff --git a/include/hw/clock.h b/include/hw/clock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
         return 0;
     }
     /*
-     * Ignore divu128() return value as we've caught div-by-zero and don't
-     * need different behaviour for overflow.
+     * BUG: when CONFIG_INT128 is not defined, the current implementation of
+     * divu128 does not return a valid truncated quotient, so the result will
+     * be wrong.
      */
     divu128(&lo, &hi, clk->period);
     return lo;
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     return (__int128_t)a * b / c;
 }
 
-static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
-    if (divisor == 0) {
-        return 1;
-    } else {
-        __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
-        __uint128_t result = dividend / divisor;
-        *plow = result;
-        *phigh = dividend % divisor;
-        return result > UINT64_MAX;
-    }
+    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
+    __uint128_t result = dividend / divisor;
+    *plow = result;
+    *phigh = dividend % divisor;
 }
 
-static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
 {
-    if (divisor == 0) {
-        return 1;
-    } else {
-        __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
-        __int128_t result = dividend / divisor;
-        *plow = result;
-        *phigh = dividend % divisor;
-        return result != *plow;
-    }
+    __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+    __int128_t result = dividend / divisor;
+    *plow = result;
+    *phigh = dividend % divisor;
 }
 #else
 void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
 void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
 
 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
     uint64_t rt = 0;
     int overflow = 0;
 
-    overflow = divu128(&rt, &ra, rb);
-
-    if (unlikely(overflow)) {
+    if (unlikely(rb == 0 || ra >= rb)) {
+        overflow = 1;
         rt = 0; /* Undefined */
+    } else {
+        divu128(&rt, &ra, rb);
     }
 
     if (oe) {
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
     int64_t rt = 0;
     int64_t ra = (int64_t)rau;
     int64_t rb = (int64_t)rbu;
-    int overflow = divs128(&rt, &ra, rb);
+    int overflow = 0;
 
-    if (unlikely(overflow)) {
+    if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
+        overflow = 1;
         rt = 0; /* Undefined */
+    } else {
+        divs128(&rt, &ra, rb);
     }
 
     if (oe) {
diff --git a/util/host-utils.c b/util/host-utils.c
index XXXXXXX..XXXXXXX 100644
--- a/util/host-utils.c
+++ b/util/host-utils.c
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
     *phigh = rh;
 }
 
-/* Unsigned 128x64 division.  Returns 1 if overflow (divide by zero or */
-/* quotient exceeds 64 bits).  Otherwise returns quotient via plow and */
-/* remainder via phigh. */
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+/*
+ * Unsigned 128-by-64 division. Returns quotient via plow and
+ * remainder via phigh.
+ * The result must fit in 64 bits (plow) - otherwise, the result
+ * is undefined.
+ * This function will cause a division by zero if passed a zero divisor.
+ */
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
     uint64_t dhi = *phigh;
     uint64_t dlo = *plow;
     unsigned i;
     uint64_t carry = 0;
 
-    if (divisor == 0) {
-        return 1;
-    } else if (dhi == 0) {
+    if (divisor == 0 || dhi == 0) {
         *plow  = dlo / divisor;
         *phigh = dlo % divisor;
-        return 0;
-    } else if (dhi >= divisor) {
-        return 1;
     } else {
 
         for (i = 0; i < 64; i++) {
@@ -XXX,XX +XXX,XX @@ int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 
         *plow = dlo;
         *phigh = dhi;
-        return 0;
     }
 }
 
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+/*
+ * Signed 128-by-64 division. Returns quotient via plow and
+ * remainder via phigh.
+ * The result must fit in 64 bits (plow) - otherwise, the result
+ * is undefined.
+ * This function will cause a division by zero if passed a zero divisor.
+ */
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
 {
     int sgn_dvdnd = *phigh < 0;
     int sgn_divsr = divisor < 0;
-    int overflow = 0;
 
     if (sgn_dvdnd) {
         *plow = ~(*plow);
@@ -XXX,XX +XXX,XX @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
         divisor = 0 - divisor;
     }
 
-    overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
+    divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
 
     if (sgn_dvdnd  ^ sgn_divsr) {
         *plow = 0 - *plow;
     }
-
-    if (!overflow) {
-        if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) {
-            overflow = 1;
-        }
-    }
-
-    return overflow;
 }
 #endif
 
-- 
2.25.1
From: Luis Pires <luis.pires@eldorado.org.br>

Move udiv_qrnnd() from include/fpu/softfloat-macros.h to host-utils,
so it can be reused by divu128().

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-3-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat-macros.h | 82 ----------------------------------
 include/qemu/host-utils.h      | 81 +++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+), 82 deletions(-)

diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-macros.h
+++ b/include/fpu/softfloat-macros.h
@@ -XXX,XX +XXX,XX @@
  * so some portions are provided under:
  *  the SoftFloat-2a license
  *  the BSD license
- *  GPL-v2-or-later
  *
  * Any future contributions to this file after December 1st 2014 will be
  * taken to be licensed under the Softfloat-2a license unless specifically
@@ -XXX,XX +XXX,XX @@ this code that are retained.
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/* Portions of this work are licensed under the terms of the GNU GPL,
- * version 2 or later. See the COPYING file in the top-level directory.
- */
-
 #ifndef FPU_SOFTFLOAT_MACROS_H
 #define FPU_SOFTFLOAT_MACROS_H
 
@@ -XXX,XX +XXX,XX @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
 
-/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
- * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
- *
- * Licensed under the GPLv2/LGPLv3
- */
-static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
-                                  uint64_t n0, uint64_t d)
-{
-#if defined(__x86_64__)
-    uint64_t q;
-    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
-    return q;
-#elif defined(__s390x__) && !defined(__clang__)
-    /* Need to use a TImode type to get an even register pair for DLGR.  */
-    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
-    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
-    *r = n >> 64;
-    return n;
-#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
-    /* From Power ISA 2.06, programming note for divdeu.  */
-    uint64_t q1, q2, Q, r1, r2, R;
-    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
-        : "=&r"(q1), "=r"(q2)
-        : "r"(n1), "r"(n0), "r"(d));
-    r1 = -(q1 * d);       /* low part of (n1<<64) - (q1 * d) */
-    r2 = n0 - (q2 * d);
-    Q = q1 + q2;
-    R = r1 + r2;
-    if (R >= d || R < r2) { /* overflow implies R > d */
-        Q += 1;
-        R -= d;
-    }
-    *r = R;
-    return Q;
-#else
-    uint64_t d0, d1, q0, q1, r1, r0, m;
-
-    d0 = (uint32_t)d;
-    d1 = d >> 32;
-
-    r1 = n1 % d1;
-    q1 = n1 / d1;
-    m = q1 * d0;
-    r1 = (r1 << 32) | (n0 >> 32);
-    if (r1 < m) {
-        q1 -= 1;
-        r1 += d;
-        if (r1 >= d) {
-            if (r1 < m) {
-                q1 -= 1;
-                r1 += d;
-            }
-        }
-    }
-    r1 -= m;
-
-    r0 = r1 % d1;
-    q0 = r1 / d1;
-    m = q0 * d0;
-    r0 = (r0 << 32) | (uint32_t)n0;
-    if (r0 < m) {
-        q0 -= 1;
-        r0 += d;
-        if (r0 >= d) {
-            if (r0 < m) {
-                q0 -= 1;
-                r0 += d;
-            }
-        }
-    }
-    r0 -= m;
-
-    *r = r0;
-    return (q1 << 32) | q0;
-#endif
-}
-
 /*----------------------------------------------------------------------------
 | Returns an approximation to the square root of the 32-bit significand given
 | by `a'.  Considered as an integer, `a' must be at least 2^31.  If bit 0 of
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@
  * THE SOFTWARE.
  */
 
+/* Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
 #ifndef HOST_UTILS_H
 #define HOST_UTILS_H
 
@@ -XXX,XX +XXX,XX @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
  */
 void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
 
+/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
+ * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
+ *
+ * Licensed under the GPLv2/LGPLv3
+ */
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
+                                  uint64_t n0, uint64_t d)
+{
+#if defined(__x86_64__)
+    uint64_t q;
+    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
+    return q;
+#elif defined(__s390x__) && !defined(__clang__)
+    /* Need to use a TImode type to get an even register pair for DLGR.  */
+    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
+    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
+    *r = n >> 64;
+    return n;
+#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
+    /* From Power ISA 2.06, programming note for divdeu.  */
+    uint64_t q1, q2, Q, r1, r2, R;
+    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
+        : "=&r"(q1), "=r"(q2)
+        : "r"(n1), "r"(n0), "r"(d));
+    r1 = -(q1 * d);       /* low part of (n1<<64) - (q1 * d) */
+    r2 = n0 - (q2 * d);
+    Q = q1 + q2;
+    R = r1 + r2;
+    if (R >= d || R < r2) { /* overflow implies R > d */
+        Q += 1;
+        R -= d;
+    }
+    *r = R;
+    return Q;
+#else
+    uint64_t d0, d1, q0, q1, r1, r0, m;
+
+    d0 = (uint32_t)d;
+    d1 = d >> 32;
+
+    r1 = n1 % d1;
+    q1 = n1 / d1;
+    m = q1 * d0;
+    r1 = (r1 << 32) | (n0 >> 32);
+    if (r1 < m) {
+        q1 -= 1;
+        r1 += d;
+        if (r1 >= d) {
+            if (r1 < m) {
+                q1 -= 1;
+                r1 += d;
+            }
+        }
+    }
+    r1 -= m;
+
+    r0 = r1 % d1;
+    q0 = r1 / d1;
+    m = q0 * d0;
+    r0 = (r0 << 32) | (uint32_t)n0;
+    if (r0 < m) {
+        q0 -= 1;
+        r0 += d;
+        if (r0 >= d) {
+            if (r0 < m) {
+                q0 -= 1;
+                r0 += d;
+            }
+        }
+    }
+    r0 -= m;
+
+    *r = r0;
+    return (q1 << 32) | q0;
+#endif
+}
+
 #endif
-- 
2.25.1
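For context, udiv_qrnnd() divides the 128-bit value (n1:n0) by d, stores the remainder through *r and returns the 64-bit quotient; like the GMP original it assumes n1 < d so the quotient fits in 64 bits. A rough self-check of that contract, written against the compiler's native 128-bit type rather than any QEMU code, might look like this sketch:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reference model of the udiv_qrnnd() contract using unsigned __int128:
 * divide the 128-bit value (n1:n0) by d, return the quotient and store
 * the remainder through *r.  Assumes n1 < d so the quotient fits 64 bits. */
static uint64_t udiv_qrnnd_ref(uint64_t *r, uint64_t n1, uint64_t n0, uint64_t d)
{
    unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;

    *r = (uint64_t)(n % d);
    return (uint64_t)(n / d);
}

int main(void)
{
    uint64_t rem;
    uint64_t quot = udiv_qrnnd_ref(&rem, 1, 2, 7);   /* (2^64 + 2) / 7 */

    /* Recombine quotient and remainder and check against the dividend. */
    assert(rem < 7);
    assert(((unsigned __int128)quot * 7 + rem) ==
           (((unsigned __int128)1 << 64) | 2));
    printf("q=%llu r=%llu\n", (unsigned long long)quot, (unsigned long long)rem);
    return 0;
}
```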
From: Luis Pires <luis.pires@eldorado.org.br>

These will be used to implement new decimal floating point
instructions from Power ISA 3.1.

The remainder is now returned directly by divu128/divs128,
freeing up phigh to receive the high 64 bits of the quotient.

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-4-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/clock.h        |   6 +-
 include/qemu/host-utils.h |  20 ++++--
 target/ppc/int_helper.c   |   9 +--
 util/host-utils.c         | 133 +++++++++++++++++++++++++-------------
 4 files changed, 108 insertions(+), 60 deletions(-)

diff --git a/include/hw/clock.h b/include/hw/clock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
     if (clk->period == 0) {
         return 0;
     }
-    /*
-     * BUG: when CONFIG_INT128 is not defined, the current implementation of
-     * divu128 does not return a valid truncated quotient, so the result will
-     * be wrong.
-     */
+
     divu128(&lo, &hi, clk->period);
     return lo;
 }
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     return (__int128_t)a * b / c;
 }
 
-static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
+                               uint64_t divisor)
 {
     __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
     __uint128_t result = dividend / divisor;
+
     *plow = result;
-    *phigh = dividend % divisor;
+    *phigh = result >> 64;
+    return dividend % divisor;
 }
 
-static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
+                              int64_t divisor)
 {
-    __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
     __int128_t result = dividend / divisor;
+
     *plow = result;
-    *phigh = dividend % divisor;
+    *phigh = result >> 64;
+    return dividend % divisor;
 }
 #else
 void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
 void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
 
 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
 
 uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
 {
-    int64_t rt = 0;
+    uint64_t rt = 0;
     int64_t ra = (int64_t)rau;
     int64_t rb = (int64_t)rbu;
     int overflow = 0;
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
     int cr;
     uint64_t lo_value;
     uint64_t hi_value;
+    uint64_t rem;
     ppc_avr_t ret = { .u64 = { 0, 0 } };
 
     if (b->VsrSD(0) < 0) {
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
      * In that case, we leave r unchanged.
      */
105
} else {
99
+pointer composed of the last executed TB’s address and the jump slot
106
- divu128(&lo_value, &hi_value, 1000000000000000ULL);
100
+index.
107
+ rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);
101
+
108
102
+The first time this whole sequence is executed, step 1 simply jumps
109
- for (i = 1; i < 16; hi_value /= 10, i++) {
103
+to step 2. Then the CPU state information gets updated and we exit from
110
- bcd_put_digit(&ret, hi_value % 10, i);
104
+the current TB. As a result, the behavior is very similar to the less
111
+ for (i = 1; i < 16; rem /= 10, i++) {
105
+optimized form described earlier in this section.
112
+ bcd_put_digit(&ret, rem % 10, i);
106
+
113
}
107
+Next, the main loop looks for the next TB to execute using the
114
108
+current CPU state information (creating the TB if it wasn’t already
115
for (; i < 32; lo_value /= 10, i++) {
109
+available) and, before starting to execute the new TB’s instructions,
116
diff --git a/util/host-utils.c b/util/host-utils.c
110
+patches the previously executed TB by associating one of its jump
117
index XXXXXXX..XXXXXXX 100644
111
+slots (the one specified in the call to ``tcg_gen_exit_tb()``) with the
118
--- a/util/host-utils.c
112
+address of the new TB.
119
+++ b/util/host-utils.c
113
+
120
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
114
+The next time this previous TB is executed and we get to that same
121
}
115
+``goto_tb`` step, it will already be patched (assuming the destination TB
122
116
+is still in memory) and will jump directly to the first instruction of
123
/*
117
+the destination TB, without going back to the main loop.
124
- * Unsigned 128-by-64 division. Returns quotient via plow and
118
+
125
- * remainder via phigh.
119
+For the ``goto_tb + exit_tb`` mechanism to be used, the following
126
- * The result must fit in 64 bits (plow) - otherwise, the result
120
+conditions need to be satisfied:
127
- * is undefined.
121
+
128
- * This function will cause a division by zero if passed a zero divisor.
122
+* The change in CPU state must be constant, e.g., a direct branch and
129
+ * Unsigned 128-by-64 division.
123
+ not an indirect branch.
130
+ * Returns the remainder.
124
+
131
+ * Returns quotient via plow and phigh.
125
+* The direct branch cannot cross a page boundary. Memory mappings
132
+ * Also returns the remainder via the function return value.
126
+ may change, causing the code at the destination address to change.
133
*/
127
+
134
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
128
+Note that, on step 3 (``tcg_gen_exit_tb()``), in addition to the
135
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
129
+jump slot index, the address of the TB just executed is also returned.
136
{
130
+This address corresponds to the TB that will be patched; it may be
137
uint64_t dhi = *phigh;
131
+different than the one that was directly executed from the main loop
138
uint64_t dlo = *plow;
132
+if the latter had already been chained to other TBs.
139
- unsigned i;
133
140
- uint64_t carry = 0;
134
Self-modifying code and translated code invalidation
141
+ uint64_t rem, dhighest;
135
----------------------------------------------------
142
+ int sh;
143
144
if (divisor == 0 || dhi == 0) {
145
*plow = dlo / divisor;
146
- *phigh = dlo % divisor;
147
+ *phigh = 0;
148
+ return dlo % divisor;
149
} else {
150
+ sh = clz64(divisor);
151
152
- for (i = 0; i < 64; i++) {
153
- carry = dhi >> 63;
154
- dhi = (dhi << 1) | (dlo >> 63);
155
- if (carry || (dhi >= divisor)) {
156
- dhi -= divisor;
157
- carry = 1;
158
- } else {
159
- carry = 0;
160
+ if (dhi < divisor) {
161
+ if (sh != 0) {
162
+ /* normalize the divisor, shifting the dividend accordingly */
163
+ divisor <<= sh;
164
+ dhi = (dhi << sh) | (dlo >> (64 - sh));
165
+ dlo <<= sh;
166
}
167
- dlo = (dlo << 1) | carry;
168
+
169
+ *phigh = 0;
170
+ *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
171
+ } else {
172
+ if (sh != 0) {
173
+ /* normalize the divisor, shifting the dividend accordingly */
174
+ divisor <<= sh;
175
+ dhighest = dhi >> (64 - sh);
176
+ dhi = (dhi << sh) | (dlo >> (64 - sh));
177
+ dlo <<= sh;
178
+
179
+ *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
180
+ } else {
181
+ /**
182
+ * dhi >= divisor
183
+ * Since the MSB of divisor is set (sh == 0),
184
+ * (dhi - divisor) < divisor
185
+ *
186
+ * Thus, the high part of the quotient is 1, and we can
187
+ * calculate the low part with a single call to udiv_qrnnd
188
+ * after subtracting divisor from dhi
189
+ */
190
+ dhi -= divisor;
191
+ *phigh = 1;
192
+ }
193
+
194
+ *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
195
}
196
197
- *plow = dlo;
198
- *phigh = dhi;
199
+ /*
200
+ * since the dividend/divisor might have been normalized,
201
+ * the remainder might also have to be shifted back
202
+ */
203
+ return rem >> sh;
204
}
205
}
206
207
/*
208
- * Signed 128-by-64 division. Returns quotient via plow and
209
- * remainder via phigh.
210
- * The result must fit in 64 bits (plow) - otherwise, the result
211
- * is undefined.
212
- * This function will cause a division by zero if passed a zero divisor.
213
+ * Signed 128-by-64 division.
214
+ * Returns quotient via plow and phigh.
215
+ * Also returns the remainder via the function return value.
216
*/
217
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
218
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
219
{
220
- int sgn_dvdnd = *phigh < 0;
221
- int sgn_divsr = divisor < 0;
222
+ bool neg_quotient = false, neg_remainder = false;
223
+ uint64_t unsig_hi = *phigh, unsig_lo = *plow;
224
+ uint64_t rem;
225
226
- if (sgn_dvdnd) {
227
- *plow = ~(*plow);
228
- *phigh = ~(*phigh);
229
- if (*plow == (int64_t)-1) {
230
+ if (*phigh < 0) {
231
+ neg_quotient = !neg_quotient;
232
+ neg_remainder = !neg_remainder;
233
+
234
+ if (unsig_lo == 0) {
235
+ unsig_hi = -unsig_hi;
236
+ } else {
237
+ unsig_hi = ~unsig_hi;
238
+ unsig_lo = -unsig_lo;
239
+ }
240
+ }
241
+
242
+ if (divisor < 0) {
243
+ neg_quotient = !neg_quotient;
244
+
245
+ divisor = -divisor;
246
+ }
247
+
248
+ rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);
249
+
250
+ if (neg_quotient) {
251
+ if (unsig_lo == 0) {
252
+ *phigh = -unsig_hi;
253
*plow = 0;
254
- (*phigh)++;
255
- } else {
256
- (*plow)++;
257
- }
258
+ } else {
259
+ *phigh = ~unsig_hi;
260
+ *plow = -unsig_lo;
261
+ }
262
+ } else {
263
+ *phigh = unsig_hi;
264
+ *plow = unsig_lo;
265
}
266
267
- if (sgn_divsr) {
268
- divisor = 0 - divisor;
269
- }
270
-
271
- divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
272
-
273
- if (sgn_dvdnd ^ sgn_divsr) {
274
- *plow = 0 - *plow;
275
+ if (neg_remainder) {
276
+ return -rem;
277
+ } else {
278
+ return rem;
279
}
280
}
281
#endif
136
--
282
--
137
2.25.1
283
2.25.1
138
284
139
285
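As a minimal standalone sketch (not QEMU code) of the updated divu128() contract shown in the patch above: the full 128-bit quotient now comes back through *plow/*phigh and the remainder becomes the return value, which is what the helper_bcdcfsq() hunk relies on. This assumes a compiler that provides __uint128_t, mirroring the CONFIG_INT128 path; the main() harness and the sample inputs are illustrative only.

#include <inttypes.h>
#include <stdio.h>

/* Same contract as the patched divu128(): 128-bit quotient via *plow/*phigh,
 * remainder via the return value. */
static uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
    __uint128_t result = dividend / divisor;

    *plow = (uint64_t)result;            /* low 64 bits of the quotient */
    *phigh = (uint64_t)(result >> 64);   /* high 64 bits of the quotient */
    return (uint64_t)(dividend % divisor);
}

int main(void)
{
    /* Call pattern as in helper_bcdcfsq(): divide by 10^15 and keep the
     * remainder for further digit extraction. */
    uint64_t lo = 0xefedcba987654321ULL;
    uint64_t hi = 0x0000000000000001ULL;
    uint64_t rem = divu128(&lo, &hi, 1000000000000000ULL);

    printf("quotient = 0x%016" PRIx64 "%016" PRIx64 ", remainder = %" PRIu64 "\n",
           hi, lo, rem);
    return 0;
}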
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
1
From: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
3
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20211025191154.350831-5-luis.pires@eldorado.org.br>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
7
---
5
tcg/tcg-internal.h | 37 +++
8
tests/unit/test-div128.c | 197 +++++++++++++++++++++++++++++++++++++++
6
tcg/region.c | 572 +++++++++++++++++++++++++++++++++++++++++++++
9
tests/unit/meson.build | 1 +
7
tcg/tcg.c | 547 +------------------------------------------
10
2 files changed, 198 insertions(+)
8
tcg/meson.build | 1 +
11
create mode 100644 tests/unit/test-div128.c
9
4 files changed, 613 insertions(+), 544 deletions(-)
12
10
create mode 100644 tcg/tcg-internal.h
13
diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c
11
create mode 100644 tcg/region.c
12
13
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
14
new file mode 100644
14
new file mode 100644
15
index XXXXXXX..XXXXXXX
15
index XXXXXXX..XXXXXXX
16
--- /dev/null
16
--- /dev/null
17
+++ b/tcg/tcg-internal.h
17
+++ b/tests/unit/test-div128.c
18
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@
19
+/*
19
+/*
20
+ * Internal declarations for Tiny Code Generator for QEMU
20
+ * Test 128-bit division functions
21
+ *
21
+ *
22
+ * Copyright (c) 2008 Fabrice Bellard
22
+ * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
23
+ *
23
+ *
24
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
24
+ * This library is free software; you can redistribute it and/or
25
+ * of this software and associated documentation files (the "Software"), to deal
25
+ * modify it under the terms of the GNU Lesser General Public
26
+ * in the Software without restriction, including without limitation the rights
26
+ * License as published by the Free Software Foundation; either
27
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
27
+ * version 2.1 of the License, or (at your option) any later version.
28
+ * copies of the Software, and to permit persons to whom the Software is
28
+ *
29
+ * furnished to do so, subject to the following conditions:
29
+ * This library is distributed in the hope that it will be useful,
30
+ *
30
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
31
+ * The above copyright notice and this permission notice shall be included in
31
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
32
+ * all copies or substantial portions of the Software.
32
+ * Lesser General Public License for more details.
33
+ *
33
+ *
34
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34
+ * You should have received a copy of the GNU Lesser General Public
35
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
36
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
37
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
38
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
39
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
40
+ * THE SOFTWARE.
41
+ */
36
+ */
42
+
37
+
43
+#ifndef TCG_INTERNAL_H
44
+#define TCG_INTERNAL_H 1
45
+
46
+#define TCG_HIGHWATER 1024
47
+
48
+extern TCGContext **tcg_ctxs;
49
+extern unsigned int n_tcg_ctxs;
50
+
51
+bool tcg_region_alloc(TCGContext *s);
52
+void tcg_region_initial_alloc(TCGContext *s);
53
+void tcg_region_prologue_set(TCGContext *s);
54
+
55
+#endif /* TCG_INTERNAL_H */
56
diff --git a/tcg/region.c b/tcg/region.c
57
new file mode 100644
58
index XXXXXXX..XXXXXXX
59
--- /dev/null
60
+++ b/tcg/region.c
61
@@ -XXX,XX +XXX,XX @@
62
+/*
63
+ * Memory region management for Tiny Code Generator for QEMU
64
+ *
65
+ * Copyright (c) 2008 Fabrice Bellard
66
+ *
67
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
68
+ * of this software and associated documentation files (the "Software"), to deal
69
+ * in the Software without restriction, including without limitation the rights
70
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
71
+ * copies of the Software, and to permit persons to whom the Software is
72
+ * furnished to do so, subject to the following conditions:
73
+ *
74
+ * The above copyright notice and this permission notice shall be included in
75
+ * all copies or substantial portions of the Software.
76
+ *
77
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
78
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
79
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
80
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
81
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
82
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
83
+ * THE SOFTWARE.
84
+ */
85
+
86
+#include "qemu/osdep.h"
38
+#include "qemu/osdep.h"
87
+#include "exec/exec-all.h"
39
+#include "qemu/host-utils.h"
88
+#include "tcg/tcg.h"
40
+
89
+#if !defined(CONFIG_USER_ONLY)
41
+typedef struct {
90
+#include "hw/boards.h"
42
+ uint64_t high;
91
+#endif
43
+ uint64_t low;
92
+#include "tcg-internal.h"
44
+ uint64_t rhigh;
93
+
45
+ uint64_t rlow;
94
+
46
+ uint64_t divisor;
95
+struct tcg_region_tree {
47
+ uint64_t remainder;
96
+ QemuMutex lock;
48
+} test_data_unsigned;
97
+ GTree *tree;
49
+
98
+ /* padding to avoid false sharing is computed at run-time */
50
+typedef struct {
51
+ int64_t high;
52
+ uint64_t low;
53
+ int64_t rhigh;
54
+ uint64_t rlow;
55
+ int64_t divisor;
56
+ int64_t remainder;
57
+} test_data_signed;
58
+
59
+static const test_data_unsigned test_table_unsigned[] = {
60
+ /* Dividend fits in 64 bits */
61
+ { 0x0000000000000000ULL, 0x0000000000000000ULL,
62
+ 0x0000000000000000ULL, 0x0000000000000000ULL,
63
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
64
+ { 0x0000000000000000ULL, 0x0000000000000001ULL,
65
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
66
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
67
+ { 0x0000000000000000ULL, 0x0000000000000003ULL,
68
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
69
+ 0x0000000000000002ULL, 0x0000000000000001ULL},
70
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
71
+ 0x0000000000000000ULL, 0x8000000000000000ULL,
72
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
73
+ { 0x0000000000000000ULL, 0xa000000000000000ULL,
74
+ 0x0000000000000000ULL, 0x0000000000000002ULL,
75
+ 0x4000000000000000ULL, 0x2000000000000000ULL},
76
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
77
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
78
+ 0x8000000000000000ULL, 0x0000000000000000ULL},
79
+
80
+ /* Dividend > 64 bits, with MSB 0 */
81
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
82
+ 0x123456789abcdefeULL, 0xefedcba987654321ULL,
83
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
84
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
85
+ 0x0000000000000001ULL, 0x000000000000000dULL,
86
+ 0x123456789abcdefeULL, 0x03456789abcdf03bULL},
87
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
88
+ 0x0123456789abcdefULL, 0xeefedcba98765432ULL,
89
+ 0x0000000000000010ULL, 0x0000000000000001ULL},
90
+
91
+ /* Dividend > 64 bits, with MSB 1 */
92
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
93
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
94
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
95
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
96
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
97
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
98
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
99
+ 0x0feeddccbbaa9988ULL, 0x7766554433221100ULL,
100
+ 0x0000000000000010ULL, 0x000000000000000fULL},
101
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
102
+ 0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL,
103
+ 0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL},
104
+
105
+ /**
106
+ * Divisor == 64 bits, with MSB 1
107
+ * and high 64 bits of dividend >= divisor
108
+ * (for testing normalization)
109
+ */
110
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
111
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
112
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
113
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
114
+ 0x0000000000000001ULL, 0xfddbb9977553310aULL,
115
+ 0x8000000000000001ULL, 0x78899aabbccddf05ULL},
116
+
117
+ /* Dividend > 64 bits, divisor almost as big */
118
+ { 0x0000000000000001ULL, 0x23456789abcdef01ULL,
119
+ 0x0000000000000000ULL, 0x000000000000000fULL,
120
+ 0x123456789abcdefeULL, 0x123456789abcde1fULL},
99
+};
121
+};
100
+
122
+
101
+/*
123
+static const test_data_signed test_table_signed[] = {
102
+ * We divide code_gen_buffer into equally-sized "regions" that TCG threads
124
+ /* Positive dividend, positive/negative divisors */
103
+ * dynamically allocate from as demand dictates. Given appropriate region
125
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
104
+ * sizing, this minimizes flushes even when some TCG threads generate a lot
126
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
105
+ * more code than others.
127
+ 0x0000000000000001LL, 0x0000000000000000LL},
106
+ */
128
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
107
+struct tcg_region_state {
129
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
108
+ QemuMutex lock;
130
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
109
+
131
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
110
+ /* fields set at init time */
132
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
111
+ void *start;
133
+ 0x0000000000000002LL, 0x0000000000000000LL},
112
+ void *start_aligned;
134
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
113
+ void *end;
135
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
114
+ size_t n;
136
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
115
+ size_t size; /* size of one region */
137
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
116
+ size_t stride; /* .size + guard size */
138
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
117
+
139
+ 0x0000000000000008LL, 0x0000000000000006LL},
118
+ /* fields protected by the lock */
140
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
119
+ size_t current; /* current region index */
141
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
120
+ size_t agg_size_full; /* aggregate size of full regions */
142
+ 0xfffffffffffffff8LL, 0x0000000000000006LL},
143
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
144
+ 0x0000000000000000LL, 0x000000000000550dULL,
145
+ 0x0000000000000237LL, 0x0000000000000183LL},
146
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
147
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
148
+ 0xfffffffffffffdc9LL, 0x0000000000000183LL},
149
+
150
+ /* Negative dividend, positive/negative divisors */
151
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
152
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
153
+ 0x0000000000000001LL, 0x0000000000000000LL},
154
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
155
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
156
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
157
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
158
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
159
+ 0x0000000000000002LL, 0x0000000000000000LL},
160
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
161
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
162
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
163
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
164
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
165
+ 0x0000000000000008LL, 0xfffffffffffffffaLL},
166
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
167
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
168
+ 0xfffffffffffffff8LL, 0xfffffffffffffffaLL},
169
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
170
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
171
+ 0x0000000000000237LL, 0xfffffffffffffe7dLL},
172
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
173
+ 0x0000000000000000LL, 0x000000000000550dULL,
174
+ 0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL},
121
+};
175
+};
122
+
176
+
123
+static struct tcg_region_state region;
177
+static void test_divu128(void)
124
+
125
+/*
126
+ * This is an array of struct tcg_region_tree's, with padding.
127
+ * We use void * to simplify the computation of region_trees[i]; each
128
+ * struct is found every tree_size bytes.
129
+ */
130
+static void *region_trees;
131
+static size_t tree_size;
132
+
133
+/* compare a pointer @ptr and a tb_tc @s */
134
+static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
135
+{
178
+{
136
+ if (ptr >= s->ptr + s->size) {
179
+ int i;
137
+ return 1;
180
+ uint64_t rem;
138
+ } else if (ptr < s->ptr) {
181
+ test_data_unsigned tmp;
139
+ return -1;
182
+
140
+ }
183
+ for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) {
141
+ return 0;
184
+ tmp = test_table_unsigned[i];
142
+}
185
+
143
+
186
+ rem = divu128(&tmp.low, &tmp.high, tmp.divisor);
144
+static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
187
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
145
+{
188
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
146
+ const struct tb_tc *a = ap;
189
+ g_assert_cmpuint(rem, ==, tmp.remainder);
147
+ const struct tb_tc *b = bp;
148
+
149
+ /*
150
+ * When both sizes are set, we know this isn't a lookup.
151
+ * This is the most likely case: every TB must be inserted; lookups
152
+ * are a lot less frequent.
153
+ */
154
+ if (likely(a->size && b->size)) {
155
+ if (a->ptr > b->ptr) {
156
+ return 1;
157
+ } else if (a->ptr < b->ptr) {
158
+ return -1;
159
+ }
160
+ /* a->ptr == b->ptr should happen only on deletions */
161
+ g_assert(a->size == b->size);
162
+ return 0;
163
+ }
164
+ /*
165
+ * All lookups have either .size field set to 0.
166
+ * From the glib sources we see that @ap is always the lookup key. However
167
+ * the docs provide no guarantee, so we just mark this case as likely.
168
+ */
169
+ if (likely(a->size == 0)) {
170
+ return ptr_cmp_tb_tc(a->ptr, b);
171
+ }
172
+ return ptr_cmp_tb_tc(b->ptr, a);
173
+}
174
+
175
+static void tcg_region_trees_init(void)
176
+{
177
+ size_t i;
178
+
179
+ tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
180
+ region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
181
+ for (i = 0; i < region.n; i++) {
182
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
183
+
184
+ qemu_mutex_init(&rt->lock);
185
+ rt->tree = g_tree_new(tb_tc_cmp);
186
+ }
190
+ }
187
+}
191
+}
188
+
192
+
189
+static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
193
+static void test_divs128(void)
190
+{
194
+{
191
+ size_t region_idx;
195
+ int i;
192
+
196
+ int64_t rem;
193
+ /*
197
+ test_data_signed tmp;
194
+ * Like tcg_splitwx_to_rw, with no assert. The pc may come from
198
+
195
+ * a signal handler over which the caller has no control.
199
+ for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) {
196
+ */
200
+ tmp = test_table_signed[i];
197
+ if (!in_code_gen_buffer(p)) {
201
+
198
+ p -= tcg_splitwx_diff;
202
+ rem = divs128(&tmp.low, &tmp.high, tmp.divisor);
199
+ if (!in_code_gen_buffer(p)) {
203
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
200
+ return NULL;
204
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
201
+ }
205
+ g_assert_cmpuint(rem, ==, tmp.remainder);
202
+ }
203
+
204
+ if (p < region.start_aligned) {
205
+ region_idx = 0;
206
+ } else {
207
+ ptrdiff_t offset = p - region.start_aligned;
208
+
209
+ if (offset > region.stride * (region.n - 1)) {
210
+ region_idx = region.n - 1;
211
+ } else {
212
+ region_idx = offset / region.stride;
213
+ }
214
+ }
215
+ return region_trees + region_idx * tree_size;
216
+}
217
+
218
+void tcg_tb_insert(TranslationBlock *tb)
219
+{
220
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
221
+
222
+ g_assert(rt != NULL);
223
+ qemu_mutex_lock(&rt->lock);
224
+ g_tree_insert(rt->tree, &tb->tc, tb);
225
+ qemu_mutex_unlock(&rt->lock);
226
+}
227
+
228
+void tcg_tb_remove(TranslationBlock *tb)
229
+{
230
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
231
+
232
+ g_assert(rt != NULL);
233
+ qemu_mutex_lock(&rt->lock);
234
+ g_tree_remove(rt->tree, &tb->tc);
235
+ qemu_mutex_unlock(&rt->lock);
236
+}
237
+
238
+/*
239
+ * Find the TB 'tb' such that
240
+ * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
241
+ * Return NULL if not found.
242
+ */
243
+TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
244
+{
245
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
246
+ TranslationBlock *tb;
247
+ struct tb_tc s = { .ptr = (void *)tc_ptr };
248
+
249
+ if (rt == NULL) {
250
+ return NULL;
251
+ }
252
+
253
+ qemu_mutex_lock(&rt->lock);
254
+ tb = g_tree_lookup(rt->tree, &s);
255
+ qemu_mutex_unlock(&rt->lock);
256
+ return tb;
257
+}
258
+
259
+static void tcg_region_tree_lock_all(void)
260
+{
261
+ size_t i;
262
+
263
+ for (i = 0; i < region.n; i++) {
264
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
265
+
266
+ qemu_mutex_lock(&rt->lock);
267
+ }
206
+ }
268
+}
207
+}
269
+
208
+
270
+static void tcg_region_tree_unlock_all(void)
209
+int main(int argc, char **argv)
271
+{
210
+{
272
+ size_t i;
211
+ g_test_init(&argc, &argv, NULL);
273
+
212
+ g_test_add_func("/host-utils/test_divu128", test_divu128);
274
+ for (i = 0; i < region.n; i++) {
213
+ g_test_add_func("/host-utils/test_divs128", test_divs128);
275
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
214
+ return g_test_run();
276
+
277
+ qemu_mutex_unlock(&rt->lock);
278
+ }
279
+}
215
+}
280
+
216
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
281
+void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
282
+{
283
+ size_t i;
284
+
285
+ tcg_region_tree_lock_all();
286
+ for (i = 0; i < region.n; i++) {
287
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
288
+
289
+ g_tree_foreach(rt->tree, func, user_data);
290
+ }
291
+ tcg_region_tree_unlock_all();
292
+}
293
+
294
+size_t tcg_nb_tbs(void)
295
+{
296
+ size_t nb_tbs = 0;
297
+ size_t i;
298
+
299
+ tcg_region_tree_lock_all();
300
+ for (i = 0; i < region.n; i++) {
301
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
302
+
303
+ nb_tbs += g_tree_nnodes(rt->tree);
304
+ }
305
+ tcg_region_tree_unlock_all();
306
+ return nb_tbs;
307
+}
308
+
309
+static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
310
+{
311
+ TranslationBlock *tb = v;
312
+
313
+ tb_destroy(tb);
314
+ return FALSE;
315
+}
316
+
317
+static void tcg_region_tree_reset_all(void)
318
+{
319
+ size_t i;
320
+
321
+ tcg_region_tree_lock_all();
322
+ for (i = 0; i < region.n; i++) {
323
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
324
+
325
+ g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
326
+ /* Increment the refcount first so that destroy acts as a reset */
327
+ g_tree_ref(rt->tree);
328
+ g_tree_destroy(rt->tree);
329
+ }
330
+ tcg_region_tree_unlock_all();
331
+}
332
+
333
+static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
334
+{
335
+ void *start, *end;
336
+
337
+ start = region.start_aligned + curr_region * region.stride;
338
+ end = start + region.size;
339
+
340
+ if (curr_region == 0) {
341
+ start = region.start;
342
+ }
343
+ if (curr_region == region.n - 1) {
344
+ end = region.end;
345
+ }
346
+
347
+ *pstart = start;
348
+ *pend = end;
349
+}
350
+
351
+static void tcg_region_assign(TCGContext *s, size_t curr_region)
352
+{
353
+ void *start, *end;
354
+
355
+ tcg_region_bounds(curr_region, &start, &end);
356
+
357
+ s->code_gen_buffer = start;
358
+ s->code_gen_ptr = start;
359
+ s->code_gen_buffer_size = end - start;
360
+ s->code_gen_highwater = end - TCG_HIGHWATER;
361
+}
362
+
363
+static bool tcg_region_alloc__locked(TCGContext *s)
364
+{
365
+ if (region.current == region.n) {
366
+ return true;
367
+ }
368
+ tcg_region_assign(s, region.current);
369
+ region.current++;
370
+ return false;
371
+}
372
+
373
+/*
374
+ * Request a new region once the one in use has filled up.
375
+ * Returns true on error.
376
+ */
377
+bool tcg_region_alloc(TCGContext *s)
378
+{
379
+ bool err;
380
+ /* read the region size now; alloc__locked will overwrite it on success */
381
+ size_t size_full = s->code_gen_buffer_size;
382
+
383
+ qemu_mutex_lock(&region.lock);
384
+ err = tcg_region_alloc__locked(s);
385
+ if (!err) {
386
+ region.agg_size_full += size_full - TCG_HIGHWATER;
387
+ }
388
+ qemu_mutex_unlock(&region.lock);
389
+ return err;
390
+}
391
+
392
+/*
393
+ * Perform a context's first region allocation.
394
+ * This function does _not_ increment region.agg_size_full.
395
+ */
396
+static void tcg_region_initial_alloc__locked(TCGContext *s)
397
+{
398
+ bool err = tcg_region_alloc__locked(s);
399
+ g_assert(!err);
400
+}
401
+
402
+void tcg_region_initial_alloc(TCGContext *s)
403
+{
404
+ qemu_mutex_lock(&region.lock);
405
+ tcg_region_initial_alloc__locked(s);
406
+ qemu_mutex_unlock(&region.lock);
407
+}
408
+
409
+/* Call from a safe-work context */
410
+void tcg_region_reset_all(void)
411
+{
412
+ unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
413
+ unsigned int i;
414
+
415
+ qemu_mutex_lock(&region.lock);
416
+ region.current = 0;
417
+ region.agg_size_full = 0;
418
+
419
+ for (i = 0; i < n_ctxs; i++) {
420
+ TCGContext *s = qatomic_read(&tcg_ctxs[i]);
421
+ tcg_region_initial_alloc__locked(s);
422
+ }
423
+ qemu_mutex_unlock(&region.lock);
424
+
425
+ tcg_region_tree_reset_all();
426
+}
427
+
428
+#ifdef CONFIG_USER_ONLY
429
+static size_t tcg_n_regions(void)
430
+{
431
+ return 1;
432
+}
433
+#else
434
+/*
435
+ * It is likely that some vCPUs will translate more code than others, so we
436
+ * first try to set more regions than max_cpus, with those regions being of
437
+ * reasonable size. If that's not possible we make do by evenly dividing
438
+ * the code_gen_buffer among the vCPUs.
439
+ */
440
+static size_t tcg_n_regions(void)
441
+{
442
+ size_t i;
443
+
444
+ /* Use a single region if all we have is one vCPU thread */
445
+#if !defined(CONFIG_USER_ONLY)
446
+ MachineState *ms = MACHINE(qdev_get_machine());
447
+ unsigned int max_cpus = ms->smp.max_cpus;
448
+#endif
449
+ if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
450
+ return 1;
451
+ }
452
+
453
+ /* Try to have more regions than max_cpus, with each region being >= 2 MB */
454
+ for (i = 8; i > 0; i--) {
455
+ size_t regions_per_thread = i;
456
+ size_t region_size;
457
+
458
+ region_size = tcg_init_ctx.code_gen_buffer_size;
459
+ region_size /= max_cpus * regions_per_thread;
460
+
461
+ if (region_size >= 2 * 1024u * 1024) {
462
+ return max_cpus * regions_per_thread;
463
+ }
464
+ }
465
+ /* If we can't, then just allocate one region per vCPU thread */
466
+ return max_cpus;
467
+}
468
+#endif
469
+
470
+/*
471
+ * Initializes region partitioning.
472
+ *
473
+ * Called at init time from the parent thread (i.e. the one calling
474
+ * tcg_context_init), after the target's TCG globals have been set.
475
+ *
476
+ * Region partitioning works by splitting code_gen_buffer into separate regions,
477
+ * and then assigning regions to TCG threads so that the threads can translate
478
+ * code in parallel without synchronization.
479
+ *
480
+ * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
481
+ * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
482
+ * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
483
+ * must have been parsed before calling this function, since it calls
484
+ * qemu_tcg_mttcg_enabled().
485
+ *
486
+ * In user-mode we use a single region. Having multiple regions in user-mode
487
+ * is not supported, because the number of vCPU threads (recall that each thread
488
+ * spawned by the guest corresponds to a vCPU thread) is only bounded by the
489
+ * OS, and usually this number is huge (tens of thousands is not uncommon).
490
+ * Thus, given this large bound on the number of vCPU threads and the fact
491
+ * that code_gen_buffer is allocated at compile-time, we cannot guarantee
492
+ * that the availability of at least one region per vCPU thread.
493
+ *
494
+ * However, this user-mode limitation is unlikely to be a significant problem
495
+ * in practice. Multi-threaded guests share most if not all of their translated
496
+ * code, which makes parallel code generation less appealing than in softmmu.
497
+ */
498
+void tcg_region_init(void)
499
+{
500
+ void *buf = tcg_init_ctx.code_gen_buffer;
501
+ void *aligned;
502
+ size_t size = tcg_init_ctx.code_gen_buffer_size;
503
+ size_t page_size = qemu_real_host_page_size;
504
+ size_t region_size;
505
+ size_t n_regions;
506
+ size_t i;
507
+
508
+ n_regions = tcg_n_regions();
509
+
510
+ /* The first region will be 'aligned - buf' bytes larger than the others */
511
+ aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
512
+ g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
513
+ /*
514
+ * Make region_size a multiple of page_size, using aligned as the start.
515
+ * As a result of this we might end up with a few extra pages at the end of
516
+ * the buffer; we will assign those to the last region.
517
+ */
518
+ region_size = (size - (aligned - buf)) / n_regions;
519
+ region_size = QEMU_ALIGN_DOWN(region_size, page_size);
520
+
521
+ /* A region must have at least 2 pages; one code, one guard */
522
+ g_assert(region_size >= 2 * page_size);
523
+
524
+ /* init the region struct */
525
+ qemu_mutex_init(&region.lock);
526
+ region.n = n_regions;
527
+ region.size = region_size - page_size;
528
+ region.stride = region_size;
529
+ region.start = buf;
530
+ region.start_aligned = aligned;
531
+ /* page-align the end, since its last page will be a guard page */
532
+ region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
533
+ /* account for that last guard page */
534
+ region.end -= page_size;
535
+
536
+ /*
537
+ * Set guard pages in the rw buffer, as that's the one into which
538
+ * buffer overruns could occur. Do not set guard pages in the rx
539
+ * buffer -- let that one use hugepages throughout.
540
+ */
541
+ for (i = 0; i < region.n; i++) {
542
+ void *start, *end;
543
+
544
+ tcg_region_bounds(i, &start, &end);
545
+
546
+ /*
547
+ * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
548
+ * rejects a permission change from RWX -> NONE. Guard pages are
549
+ * nice for bug detection but are not essential; ignore any failure.
550
+ */
551
+ (void)qemu_mprotect_none(end, page_size);
552
+ }
553
+
554
+ tcg_region_trees_init();
555
+
556
+ /*
557
+ * Leave the initial context initialized to the first region.
558
+ * This will be the context into which we generate the prologue.
559
+ * It is also the only context for CONFIG_USER_ONLY.
560
+ */
561
+ tcg_region_initial_alloc__locked(&tcg_init_ctx);
562
+}
563
+
564
+void tcg_region_prologue_set(TCGContext *s)
565
+{
566
+ /* Deduct the prologue from the first region. */
567
+ g_assert(region.start == s->code_gen_buffer);
568
+ region.start = s->code_ptr;
569
+
570
+ /* Recompute boundaries of the first region. */
571
+ tcg_region_assign(s, 0);
572
+
573
+ /* Register the balance of the buffer with gdb. */
574
+ tcg_register_jit(tcg_splitwx_to_rx(region.start),
575
+ region.end - region.start);
576
+}
577
+
578
+/*
579
+ * Returns the size (in bytes) of all translated code (i.e. from all regions)
580
+ * currently in the cache.
581
+ * See also: tcg_code_capacity()
582
+ * Do not confuse with tcg_current_code_size(); that one applies to a single
583
+ * TCG context.
584
+ */
585
+size_t tcg_code_size(void)
586
+{
587
+ unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
588
+ unsigned int i;
589
+ size_t total;
590
+
591
+ qemu_mutex_lock(&region.lock);
592
+ total = region.agg_size_full;
593
+ for (i = 0; i < n_ctxs; i++) {
594
+ const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
595
+ size_t size;
596
+
597
+ size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
598
+ g_assert(size <= s->code_gen_buffer_size);
599
+ total += size;
600
+ }
601
+ qemu_mutex_unlock(&region.lock);
602
+ return total;
603
+}
604
+
605
+/*
606
+ * Returns the code capacity (in bytes) of the entire cache, i.e. including all
607
+ * regions.
608
+ * See also: tcg_code_size()
609
+ */
610
+size_t tcg_code_capacity(void)
611
+{
612
+ size_t guard_size, capacity;
613
+
614
+ /* no need for synchronization; these variables are set at init time */
615
+ guard_size = region.stride - region.size;
616
+ capacity = region.end + guard_size - region.start;
617
+ capacity -= region.n * (guard_size + TCG_HIGHWATER);
618
+ return capacity;
619
+}
620
+
621
+size_t tcg_tb_phys_invalidate_count(void)
622
+{
623
+ unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
624
+ unsigned int i;
625
+ size_t total = 0;
626
+
627
+ for (i = 0; i < n_ctxs; i++) {
628
+ const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
629
+
630
+ total += qatomic_read(&s->tb_phys_invalidate_count);
631
+ }
632
+ return total;
633
+}
634
diff --git a/tcg/tcg.c b/tcg/tcg.c
635
index XXXXXXX..XXXXXXX 100644
217
index XXXXXXX..XXXXXXX 100644
636
--- a/tcg/tcg.c
218
--- a/tests/unit/meson.build
637
+++ b/tcg/tcg.c
219
+++ b/tests/unit/meson.build
638
@@ -XXX,XX +XXX,XX @@
220
@@ -XXX,XX +XXX,XX @@ tests = {
639
221
# all code tested by test-x86-cpuid is inside topology.h
640
#include "elf.h"
222
'test-x86-cpuid': [],
641
#include "exec/log.h"
223
'test-cutils': [],
642
+#include "tcg-internal.h"
224
+ 'test-div128': [],
643
225
'test-shift128': [],
644
/* Forward declarations for functions declared in tcg-target.c.inc and
226
'test-mul64': [],
645
used here. */
227
# all code tested by test-int128 is inside int128.h
646
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
647
static int tcg_out_ldst_finalize(TCGContext *s);
648
#endif
649
650
-#define TCG_HIGHWATER 1024
651
-
652
-static TCGContext **tcg_ctxs;
653
-static unsigned int n_tcg_ctxs;
654
+TCGContext **tcg_ctxs;
655
+unsigned int n_tcg_ctxs;
656
TCGv_env cpu_env = 0;
657
const void *tcg_code_gen_epilogue;
658
uintptr_t tcg_splitwx_diff;
659
@@ -XXX,XX +XXX,XX @@ uintptr_t tcg_splitwx_diff;
660
tcg_prologue_fn *tcg_qemu_tb_exec;
661
#endif
662
663
-struct tcg_region_tree {
664
- QemuMutex lock;
665
- GTree *tree;
666
- /* padding to avoid false sharing is computed at run-time */
667
-};
668
-
669
-/*
670
- * We divide code_gen_buffer into equally-sized "regions" that TCG threads
671
- * dynamically allocate from as demand dictates. Given appropriate region
672
- * sizing, this minimizes flushes even when some TCG threads generate a lot
673
- * more code than others.
674
- */
675
-struct tcg_region_state {
676
- QemuMutex lock;
677
-
678
- /* fields set at init time */
679
- void *start;
680
- void *start_aligned;
681
- void *end;
682
- size_t n;
683
- size_t size; /* size of one region */
684
- size_t stride; /* .size + guard size */
685
-
686
- /* fields protected by the lock */
687
- size_t current; /* current region index */
688
- size_t agg_size_full; /* aggregate size of full regions */
689
-};
690
-
691
-static struct tcg_region_state region;
692
-/*
693
- * This is an array of struct tcg_region_tree's, with padding.
694
- * We use void * to simplify the computation of region_trees[i]; each
695
- * struct is found every tree_size bytes.
696
- */
697
-static void *region_trees;
698
-static size_t tree_size;
699
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
700
static TCGRegSet tcg_target_call_clobber_regs;
701
702
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef constraint_sets[] = {
703
704
#include "tcg-target.c.inc"
705
706
-/* compare a pointer @ptr and a tb_tc @s */
707
-static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
708
-{
709
- if (ptr >= s->ptr + s->size) {
710
- return 1;
711
- } else if (ptr < s->ptr) {
712
- return -1;
713
- }
714
- return 0;
715
-}
716
-
717
-static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
718
-{
719
- const struct tb_tc *a = ap;
720
- const struct tb_tc *b = bp;
721
-
722
- /*
723
- * When both sizes are set, we know this isn't a lookup.
724
- * This is the most likely case: every TB must be inserted; lookups
725
- * are a lot less frequent.
726
- */
727
- if (likely(a->size && b->size)) {
728
- if (a->ptr > b->ptr) {
729
- return 1;
730
- } else if (a->ptr < b->ptr) {
731
- return -1;
732
- }
733
- /* a->ptr == b->ptr should happen only on deletions */
734
- g_assert(a->size == b->size);
735
- return 0;
736
- }
737
- /*
738
- * All lookups have either .size field set to 0.
739
- * From the glib sources we see that @ap is always the lookup key. However
740
- * the docs provide no guarantee, so we just mark this case as likely.
741
- */
742
- if (likely(a->size == 0)) {
743
- return ptr_cmp_tb_tc(a->ptr, b);
744
- }
745
- return ptr_cmp_tb_tc(b->ptr, a);
746
-}
747
-
748
-static void tcg_region_trees_init(void)
749
-{
750
- size_t i;
751
-
752
- tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
753
- region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
754
- for (i = 0; i < region.n; i++) {
755
- struct tcg_region_tree *rt = region_trees + i * tree_size;
756
-
757
- qemu_mutex_init(&rt->lock);
758
- rt->tree = g_tree_new(tb_tc_cmp);
759
- }
760
-}
761
-
762
-static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
763
-{
764
- size_t region_idx;
765
-
766
- /*
767
- * Like tcg_splitwx_to_rw, with no assert. The pc may come from
768
- * a signal handler over which the caller has no control.
769
- */
770
- if (!in_code_gen_buffer(p)) {
771
- p -= tcg_splitwx_diff;
772
- if (!in_code_gen_buffer(p)) {
773
- return NULL;
774
- }
775
- }
776
-
777
- if (p < region.start_aligned) {
778
- region_idx = 0;
779
- } else {
780
- ptrdiff_t offset = p - region.start_aligned;
781
-
782
- if (offset > region.stride * (region.n - 1)) {
783
- region_idx = region.n - 1;
784
- } else {
785
- region_idx = offset / region.stride;
786
- }
787
- }
788
- return region_trees + region_idx * tree_size;
789
-}
790
-
791
-void tcg_tb_insert(TranslationBlock *tb)
792
-{
793
- struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
794
-
795
- g_assert(rt != NULL);
796
- qemu_mutex_lock(&rt->lock);
797
- g_tree_insert(rt->tree, &tb->tc, tb);
798
- qemu_mutex_unlock(&rt->lock);
799
-}
800
-
801
-void tcg_tb_remove(TranslationBlock *tb)
802
-{
803
- struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
804
-
805
- g_assert(rt != NULL);
806
- qemu_mutex_lock(&rt->lock);
807
- g_tree_remove(rt->tree, &tb->tc);
808
- qemu_mutex_unlock(&rt->lock);
809
-}
810
-
811
-/*
812
- * Find the TB 'tb' such that
813
- * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
814
- * Return NULL if not found.
815
- */
816
-TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
817
-{
818
- struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
819
- TranslationBlock *tb;
820
- struct tb_tc s = { .ptr = (void *)tc_ptr };
821
-
822
- if (rt == NULL) {
823
- return NULL;
824
- }
825
-
826
- qemu_mutex_lock(&rt->lock);
827
- tb = g_tree_lookup(rt->tree, &s);
828
- qemu_mutex_unlock(&rt->lock);
829
- return tb;
830
-}
831
-
832
-static void tcg_region_tree_lock_all(void)
833
-{
834
- size_t i;
835
-
836
- for (i = 0; i < region.n; i++) {
837
- struct tcg_region_tree *rt = region_trees + i * tree_size;
838
-
839
- qemu_mutex_lock(&rt->lock);
840
- }
841
-}
842
-
843
-static void tcg_region_tree_unlock_all(void)
844
-{
845
- size_t i;
846
-
847
- for (i = 0; i < region.n; i++) {
848
- struct tcg_region_tree *rt = region_trees + i * tree_size;
849
-
850
- qemu_mutex_unlock(&rt->lock);
851
- }
852
-}
853
-
854
-void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
855
-{
856
- size_t i;
857
-
858
- tcg_region_tree_lock_all();
859
- for (i = 0; i < region.n; i++) {
860
- struct tcg_region_tree *rt = region_trees + i * tree_size;
861
-
862
- g_tree_foreach(rt->tree, func, user_data);
863
- }
864
- tcg_region_tree_unlock_all();
865
-}
866
-
867
-size_t tcg_nb_tbs(void)
868
-{
869
- size_t nb_tbs = 0;
870
- size_t i;
871
-
872
- tcg_region_tree_lock_all();
873
- for (i = 0; i < region.n; i++) {
874
- struct tcg_region_tree *rt = region_trees + i * tree_size;
875
-
876
- nb_tbs += g_tree_nnodes(rt->tree);
877
- }
878
- tcg_region_tree_unlock_all();
879
- return nb_tbs;
880
-}
881
-
882
-static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
883
-{
884
- TranslationBlock *tb = v;
885
-
886
- tb_destroy(tb);
887
- return FALSE;
888
-}
889
-
890
-static void tcg_region_tree_reset_all(void)
891
-{
892
- size_t i;
893
-
894
- tcg_region_tree_lock_all();
895
- for (i = 0; i < region.n; i++) {
896
- struct tcg_region_tree *rt = region_trees + i * tree_size;
897
-
898
- g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
899
- /* Increment the refcount first so that destroy acts as a reset */
900
- g_tree_ref(rt->tree);
901
- g_tree_destroy(rt->tree);
902
- }
903
- tcg_region_tree_unlock_all();
904
-}
905
-
906
-static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
907
-{
908
- void *start, *end;
909
-
910
- start = region.start_aligned + curr_region * region.stride;
911
- end = start + region.size;
912
-
913
- if (curr_region == 0) {
914
- start = region.start;
915
- }
916
- if (curr_region == region.n - 1) {
917
- end = region.end;
918
- }
919
-
920
- *pstart = start;
921
- *pend = end;
922
-}
923
-
924
-static void tcg_region_assign(TCGContext *s, size_t curr_region)
925
-{
926
- void *start, *end;
927
-
928
- tcg_region_bounds(curr_region, &start, &end);
929
-
930
- s->code_gen_buffer = start;
931
- s->code_gen_ptr = start;
932
- s->code_gen_buffer_size = end - start;
933
- s->code_gen_highwater = end - TCG_HIGHWATER;
934
-}
935
-
936
-static bool tcg_region_alloc__locked(TCGContext *s)
937
-{
938
- if (region.current == region.n) {
939
- return true;
940
- }
941
- tcg_region_assign(s, region.current);
942
- region.current++;
943
- return false;
944
-}
945
-
946
-/*
947
- * Request a new region once the one in use has filled up.
948
- * Returns true on error.
949
- */
950
-static bool tcg_region_alloc(TCGContext *s)
951
-{
952
- bool err;
953
- /* read the region size now; alloc__locked will overwrite it on success */
954
- size_t size_full = s->code_gen_buffer_size;
955
-
956
- qemu_mutex_lock(&region.lock);
957
- err = tcg_region_alloc__locked(s);
958
- if (!err) {
959
- region.agg_size_full += size_full - TCG_HIGHWATER;
960
- }
961
- qemu_mutex_unlock(&region.lock);
962
- return err;
963
-}
964
-
965
-/*
966
- * Perform a context's first region allocation.
967
- * This function does _not_ increment region.agg_size_full.
968
- */
969
-static void tcg_region_initial_alloc__locked(TCGContext *s)
970
-{
971
- bool err = tcg_region_alloc__locked(s);
972
- g_assert(!err);
973
-}
974
-
975
-#ifndef CONFIG_USER_ONLY
976
-static void tcg_region_initial_alloc(TCGContext *s)
977
-{
978
- qemu_mutex_lock(&region.lock);
979
- tcg_region_initial_alloc__locked(s);
980
- qemu_mutex_unlock(&region.lock);
981
-}
982
-#endif
983
-
984
-/* Call from a safe-work context */
985
-void tcg_region_reset_all(void)
986
-{
987
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
988
- unsigned int i;
989
-
990
- qemu_mutex_lock(&region.lock);
991
- region.current = 0;
992
- region.agg_size_full = 0;
993
-
994
- for (i = 0; i < n_ctxs; i++) {
995
- TCGContext *s = qatomic_read(&tcg_ctxs[i]);
996
- tcg_region_initial_alloc__locked(s);
997
- }
998
- qemu_mutex_unlock(&region.lock);
999
-
1000
- tcg_region_tree_reset_all();
1001
-}
1002
-
1003
-#ifdef CONFIG_USER_ONLY
1004
-static size_t tcg_n_regions(void)
1005
-{
1006
- return 1;
1007
-}
1008
-#else
1009
-/*
1010
- * It is likely that some vCPUs will translate more code than others, so we
1011
- * first try to set more regions than max_cpus, with those regions being of
1012
- * reasonable size. If that's not possible we make do by evenly dividing
1013
- * the code_gen_buffer among the vCPUs.
1014
- */
1015
-static size_t tcg_n_regions(void)
1016
-{
1017
- size_t i;
1018
-
1019
- /* Use a single region if all we have is one vCPU thread */
1020
-#if !defined(CONFIG_USER_ONLY)
1021
- MachineState *ms = MACHINE(qdev_get_machine());
1022
- unsigned int max_cpus = ms->smp.max_cpus;
1023
-#endif
1024
- if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
1025
- return 1;
1026
- }
1027
-
1028
- /* Try to have more regions than max_cpus, with each region being >= 2 MB */
1029
- for (i = 8; i > 0; i--) {
1030
- size_t regions_per_thread = i;
1031
- size_t region_size;
1032
-
1033
- region_size = tcg_init_ctx.code_gen_buffer_size;
1034
- region_size /= max_cpus * regions_per_thread;
1035
-
1036
- if (region_size >= 2 * 1024u * 1024) {
1037
- return max_cpus * regions_per_thread;
1038
- }
1039
- }
1040
- /* If we can't, then just allocate one region per vCPU thread */
1041
- return max_cpus;
1042
-}
1043
-#endif
1044
-
1045
-/*
1046
- * Initializes region partitioning.
1047
- *
1048
- * Called at init time from the parent thread (i.e. the one calling
1049
- * tcg_context_init), after the target's TCG globals have been set.
1050
- *
1051
- * Region partitioning works by splitting code_gen_buffer into separate regions,
1052
- * and then assigning regions to TCG threads so that the threads can translate
1053
- * code in parallel without synchronization.
1054
- *
1055
- * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
1056
- * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
1057
- * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
1058
- * must have been parsed before calling this function, since it calls
1059
- * qemu_tcg_mttcg_enabled().
1060
- *
1061
- * In user-mode we use a single region. Having multiple regions in user-mode
1062
- * is not supported, because the number of vCPU threads (recall that each thread
1063
- * spawned by the guest corresponds to a vCPU thread) is only bounded by the
1064
- * OS, and usually this number is huge (tens of thousands is not uncommon).
1065
- * Thus, given this large bound on the number of vCPU threads and the fact
1066
- * that code_gen_buffer is allocated at compile-time, we cannot guarantee
1067
- * that the availability of at least one region per vCPU thread.
1068
- *
1069
- * However, this user-mode limitation is unlikely to be a significant problem
1070
- * in practice. Multi-threaded guests share most if not all of their translated
1071
- * code, which makes parallel code generation less appealing than in softmmu.
1072
- */
1073
-void tcg_region_init(void)
1074
-{
1075
- void *buf = tcg_init_ctx.code_gen_buffer;
1076
- void *aligned;
1077
- size_t size = tcg_init_ctx.code_gen_buffer_size;
1078
- size_t page_size = qemu_real_host_page_size;
1079
- size_t region_size;
1080
- size_t n_regions;
1081
- size_t i;
1082
-
1083
- n_regions = tcg_n_regions();
1084
-
1085
- /* The first region will be 'aligned - buf' bytes larger than the others */
1086
- aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
1087
- g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
1088
- /*
1089
- * Make region_size a multiple of page_size, using aligned as the start.
1090
- * As a result of this we might end up with a few extra pages at the end of
1091
- * the buffer; we will assign those to the last region.
1092
- */
1093
- region_size = (size - (aligned - buf)) / n_regions;
1094
- region_size = QEMU_ALIGN_DOWN(region_size, page_size);
1095
-
1096
- /* A region must have at least 2 pages; one code, one guard */
1097
- g_assert(region_size >= 2 * page_size);
1098
-
1099
- /* init the region struct */
1100
- qemu_mutex_init(&region.lock);
1101
- region.n = n_regions;
1102
- region.size = region_size - page_size;
1103
- region.stride = region_size;
1104
- region.start = buf;
1105
- region.start_aligned = aligned;
1106
- /* page-align the end, since its last page will be a guard page */
1107
- region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
1108
- /* account for that last guard page */
1109
- region.end -= page_size;
1110
-
1111
- /*
1112
- * Set guard pages in the rw buffer, as that's the one into which
1113
- * buffer overruns could occur. Do not set guard pages in the rx
1114
- * buffer -- let that one use hugepages throughout.
1115
- */
1116
- for (i = 0; i < region.n; i++) {
1117
- void *start, *end;
1118
-
1119
- tcg_region_bounds(i, &start, &end);
1120
-
1121
- /*
1122
- * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
1123
- * rejects a permission change from RWX -> NONE. Guard pages are
1124
- * nice for bug detection but are not essential; ignore any failure.
1125
- */
1126
- (void)qemu_mprotect_none(end, page_size);
1127
- }
1128
-
1129
- tcg_region_trees_init();
1130
-
1131
- /*
1132
- * Leave the initial context initialized to the first region.
1133
- * This will be the context into which we generate the prologue.
1134
- * It is also the only context for CONFIG_USER_ONLY.
1135
- */
1136
- tcg_region_initial_alloc__locked(&tcg_init_ctx);
1137
-}
1138
-
1139
-static void tcg_region_prologue_set(TCGContext *s)
1140
-{
1141
- /* Deduct the prologue from the first region. */
1142
- g_assert(region.start == s->code_gen_buffer);
1143
- region.start = s->code_ptr;
1144
-
1145
- /* Recompute boundaries of the first region. */
1146
- tcg_region_assign(s, 0);
1147
-
1148
- /* Register the balance of the buffer with gdb. */
1149
- tcg_register_jit(tcg_splitwx_to_rx(region.start),
1150
- region.end - region.start);
1151
-}
1152
-
1153
#ifdef CONFIG_DEBUG_TCG
1154
const void *tcg_splitwx_to_rx(void *rw)
1155
{
1156
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
1157
}
1158
#endif /* !CONFIG_USER_ONLY */
1159
1160
-/*
1161
- * Returns the size (in bytes) of all translated code (i.e. from all regions)
1162
- * currently in the cache.
1163
- * See also: tcg_code_capacity()
1164
- * Do not confuse with tcg_current_code_size(); that one applies to a single
1165
- * TCG context.
1166
- */
1167
-size_t tcg_code_size(void)
1168
-{
1169
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
1170
- unsigned int i;
1171
- size_t total;
1172
-
1173
- qemu_mutex_lock(&region.lock);
1174
- total = region.agg_size_full;
1175
- for (i = 0; i < n_ctxs; i++) {
1176
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
1177
- size_t size;
1178
-
1179
- size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
1180
- g_assert(size <= s->code_gen_buffer_size);
1181
- total += size;
1182
- }
1183
- qemu_mutex_unlock(&region.lock);
1184
- return total;
1185
-}
1186
-
1187
-/*
1188
- * Returns the code capacity (in bytes) of the entire cache, i.e. including all
1189
- * regions.
1190
- * See also: tcg_code_size()
1191
- */
1192
-size_t tcg_code_capacity(void)
1193
-{
1194
- size_t guard_size, capacity;
1195
-
1196
- /* no need for synchronization; these variables are set at init time */
1197
- guard_size = region.stride - region.size;
1198
- capacity = region.end + guard_size - region.start;
1199
- capacity -= region.n * (guard_size + TCG_HIGHWATER);
1200
- return capacity;
1201
-}
1202
-
1203
-size_t tcg_tb_phys_invalidate_count(void)
1204
-{
1205
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
1206
- unsigned int i;
1207
- size_t total = 0;
1208
-
1209
- for (i = 0; i < n_ctxs; i++) {
1210
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
1211
-
1212
- total += qatomic_read(&s->tb_phys_invalidate_count);
1213
- }
1214
- return total;
1215
-}
1216
-
1217
/* pool based memory allocation */
1218
void *tcg_malloc_internal(TCGContext *s, int size)
1219
{
1220
diff --git a/tcg/meson.build b/tcg/meson.build
1221
index XXXXXXX..XXXXXXX 100644
1222
--- a/tcg/meson.build
1223
+++ b/tcg/meson.build
1224
@@ -XXX,XX +XXX,XX @@ tcg_ss = ss.source_set()
1225
1226
tcg_ss.add(files(
1227
'optimize.c',
1228
+ 'region.c',
1229
'tcg.c',
1230
'tcg-common.c',
1231
'tcg-op.c',
1232
--
2.25.1

New patch
1
Prepare for tracking different masks by renaming this one.
1
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 142 +++++++++++++++++++++++++------------------------
9
1 file changed, 72 insertions(+), 70 deletions(-)
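
As a side note, z_mask reads as a known-zero mask: any bit clear in z_mask is guaranteed to be clear in the runtime value. A rough standalone sketch of how such masks combine for a few representative ops (plain C, simplified; the helper names are made up for illustration and are not QEMU code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* A bit clear in z_mask means the corresponding value bit is known zero. */
static uint64_t z_mask_and(uint64_t za, uint64_t zb)
{
    return za & zb;        /* AND keeps only bits that may be set in both */
}

static uint64_t z_mask_or(uint64_t za, uint64_t zb)
{
    return za | zb;        /* OR may set any bit set in either input */
}

static uint64_t z_mask_ext8u(uint64_t za)
{
    return za & 0xff;      /* zero-extension clears the high bits */
}

int main(void)
{
    uint64_t za = 0xffff;  /* value known to fit in 16 bits */
    uint64_t zb = 0x00f0;  /* value known to fit in bits [7:4] */

    printf("and:   %#" PRIx64 "\n", z_mask_and(za, zb));  /* 0xf0 */
    printf("or:    %#" PRIx64 "\n", z_mask_or(za, zb));   /* 0xffff */
    printf("ext8u: %#" PRIx64 "\n", z_mask_ext8u(za));    /* 0xff */
    return 0;
}
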
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
16
TCGTemp *prev_copy;
17
TCGTemp *next_copy;
18
uint64_t val;
19
- uint64_t mask;
20
+ uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
21
} TempOptInfo;
22
23
static inline TempOptInfo *ts_info(TCGTemp *ts)
24
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
25
ti->next_copy = ts;
26
ti->prev_copy = ts;
27
ti->is_const = false;
28
- ti->mask = -1;
29
+ ti->z_mask = -1;
30
}
31
32
static void reset_temp(TCGArg arg)
33
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
34
if (ts->kind == TEMP_CONST) {
35
ti->is_const = true;
36
ti->val = ts->val;
37
- ti->mask = ts->val;
38
+ ti->z_mask = ts->val;
39
if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
40
/* High bits of a 32-bit quantity are garbage. */
41
- ti->mask |= ~0xffffffffull;
42
+ ti->z_mask |= ~0xffffffffull;
43
}
44
} else {
45
ti->is_const = false;
46
- ti->mask = -1;
47
+ ti->z_mask = -1;
48
}
49
}
50
51
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
52
const TCGOpDef *def;
53
TempOptInfo *di;
54
TempOptInfo *si;
55
- uint64_t mask;
56
+ uint64_t z_mask;
57
TCGOpcode new_op;
58
59
if (ts_are_copies(dst_ts, src_ts)) {
60
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
61
op->args[0] = dst;
62
op->args[1] = src;
63
64
- mask = si->mask;
65
+ z_mask = si->z_mask;
66
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
67
/* High bits of the destination are now garbage. */
68
- mask |= ~0xffffffffull;
69
+ z_mask |= ~0xffffffffull;
70
}
71
- di->mask = mask;
72
+ di->z_mask = z_mask;
73
74
if (src_ts->type == dst_ts->type) {
75
TempOptInfo *ni = ts_info(si->next_copy);
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
77
}
78
79
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
80
- uint64_t mask, partmask, affected, tmp;
81
+ uint64_t z_mask, partmask, affected, tmp;
82
int nb_oargs, nb_iargs;
83
TCGOpcode opc = op->opc;
84
const TCGOpDef *def = &tcg_op_defs[opc];
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
87
/* Simplify using known-zero bits. Currently only ops with a single
88
output argument is supported. */
89
- mask = -1;
90
+ z_mask = -1;
91
affected = -1;
92
switch (opc) {
93
CASE_OP_32_64(ext8s):
94
- if ((arg_info(op->args[1])->mask & 0x80) != 0) {
95
+ if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
96
break;
97
}
98
QEMU_FALLTHROUGH;
99
CASE_OP_32_64(ext8u):
100
- mask = 0xff;
101
+ z_mask = 0xff;
102
goto and_const;
103
CASE_OP_32_64(ext16s):
104
- if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
105
+ if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
106
break;
107
}
108
QEMU_FALLTHROUGH;
109
CASE_OP_32_64(ext16u):
110
- mask = 0xffff;
111
+ z_mask = 0xffff;
112
goto and_const;
113
case INDEX_op_ext32s_i64:
114
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
115
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
116
break;
117
}
118
QEMU_FALLTHROUGH;
119
case INDEX_op_ext32u_i64:
120
- mask = 0xffffffffU;
121
+ z_mask = 0xffffffffU;
122
goto and_const;
123
124
CASE_OP_32_64(and):
125
- mask = arg_info(op->args[2])->mask;
126
+ z_mask = arg_info(op->args[2])->z_mask;
127
if (arg_is_const(op->args[2])) {
128
and_const:
129
- affected = arg_info(op->args[1])->mask & ~mask;
130
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
131
}
132
- mask = arg_info(op->args[1])->mask & mask;
133
+ z_mask = arg_info(op->args[1])->z_mask & z_mask;
134
break;
135
136
case INDEX_op_ext_i32_i64:
137
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
138
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
139
break;
140
}
141
QEMU_FALLTHROUGH;
142
case INDEX_op_extu_i32_i64:
143
/* We do not compute affected as it is a size changing op. */
144
- mask = (uint32_t)arg_info(op->args[1])->mask;
145
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
146
break;
147
148
CASE_OP_32_64(andc):
149
/* Known-zeros does not imply known-ones. Therefore unless
150
op->args[2] is constant, we can't infer anything from it. */
151
if (arg_is_const(op->args[2])) {
152
- mask = ~arg_info(op->args[2])->mask;
153
+ z_mask = ~arg_info(op->args[2])->z_mask;
154
goto and_const;
155
}
156
/* But we certainly know nothing outside args[1] may be set. */
157
- mask = arg_info(op->args[1])->mask;
158
+ z_mask = arg_info(op->args[1])->z_mask;
159
break;
160
161
case INDEX_op_sar_i32:
162
if (arg_is_const(op->args[2])) {
163
tmp = arg_info(op->args[2])->val & 31;
164
- mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
165
+ z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
166
}
167
break;
168
case INDEX_op_sar_i64:
169
if (arg_is_const(op->args[2])) {
170
tmp = arg_info(op->args[2])->val & 63;
171
- mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
172
+ z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
173
}
174
break;
175
176
case INDEX_op_shr_i32:
177
if (arg_is_const(op->args[2])) {
178
tmp = arg_info(op->args[2])->val & 31;
179
- mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
180
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
181
}
182
break;
183
case INDEX_op_shr_i64:
184
if (arg_is_const(op->args[2])) {
185
tmp = arg_info(op->args[2])->val & 63;
186
- mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
187
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
188
}
189
break;
190
191
case INDEX_op_extrl_i64_i32:
192
- mask = (uint32_t)arg_info(op->args[1])->mask;
193
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
194
break;
195
case INDEX_op_extrh_i64_i32:
196
- mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
197
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
198
break;
199
200
CASE_OP_32_64(shl):
201
if (arg_is_const(op->args[2])) {
202
tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
203
- mask = arg_info(op->args[1])->mask << tmp;
204
+ z_mask = arg_info(op->args[1])->z_mask << tmp;
205
}
206
break;
207
208
CASE_OP_32_64(neg):
209
/* Set to 1 all bits to the left of the rightmost. */
210
- mask = -(arg_info(op->args[1])->mask
211
- & -arg_info(op->args[1])->mask);
212
+ z_mask = -(arg_info(op->args[1])->z_mask
213
+ & -arg_info(op->args[1])->z_mask);
214
break;
215
216
CASE_OP_32_64(deposit):
217
- mask = deposit64(arg_info(op->args[1])->mask,
218
- op->args[3], op->args[4],
219
- arg_info(op->args[2])->mask);
220
+ z_mask = deposit64(arg_info(op->args[1])->z_mask,
221
+ op->args[3], op->args[4],
222
+ arg_info(op->args[2])->z_mask);
223
break;
224
225
CASE_OP_32_64(extract):
226
- mask = extract64(arg_info(op->args[1])->mask,
227
- op->args[2], op->args[3]);
228
+ z_mask = extract64(arg_info(op->args[1])->z_mask,
229
+ op->args[2], op->args[3]);
230
if (op->args[2] == 0) {
231
- affected = arg_info(op->args[1])->mask & ~mask;
232
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
233
}
234
break;
235
CASE_OP_32_64(sextract):
236
- mask = sextract64(arg_info(op->args[1])->mask,
237
- op->args[2], op->args[3]);
238
- if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
239
- affected = arg_info(op->args[1])->mask & ~mask;
240
+ z_mask = sextract64(arg_info(op->args[1])->z_mask,
241
+ op->args[2], op->args[3]);
242
+ if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
243
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
244
}
245
break;
246
247
CASE_OP_32_64(or):
248
CASE_OP_32_64(xor):
249
- mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
250
+ z_mask = arg_info(op->args[1])->z_mask
251
+ | arg_info(op->args[2])->z_mask;
252
break;
253
254
case INDEX_op_clz_i32:
255
case INDEX_op_ctz_i32:
256
- mask = arg_info(op->args[2])->mask | 31;
257
+ z_mask = arg_info(op->args[2])->z_mask | 31;
258
break;
259
260
case INDEX_op_clz_i64:
261
case INDEX_op_ctz_i64:
262
- mask = arg_info(op->args[2])->mask | 63;
263
+ z_mask = arg_info(op->args[2])->z_mask | 63;
264
break;
265
266
case INDEX_op_ctpop_i32:
267
- mask = 32 | 31;
268
+ z_mask = 32 | 31;
269
break;
270
case INDEX_op_ctpop_i64:
271
- mask = 64 | 63;
272
+ z_mask = 64 | 63;
273
break;
274
275
CASE_OP_32_64(setcond):
276
case INDEX_op_setcond2_i32:
277
- mask = 1;
278
+ z_mask = 1;
279
break;
280
281
CASE_OP_32_64(movcond):
282
- mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
283
+ z_mask = arg_info(op->args[3])->z_mask
284
+ | arg_info(op->args[4])->z_mask;
285
break;
286
287
CASE_OP_32_64(ld8u):
288
- mask = 0xff;
289
+ z_mask = 0xff;
290
break;
291
CASE_OP_32_64(ld16u):
292
- mask = 0xffff;
293
+ z_mask = 0xffff;
294
break;
295
case INDEX_op_ld32u_i64:
296
- mask = 0xffffffffu;
297
+ z_mask = 0xffffffffu;
298
break;
299
300
CASE_OP_32_64(qemu_ld):
301
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
302
MemOpIdx oi = op->args[nb_oargs + nb_iargs];
303
MemOp mop = get_memop(oi);
304
if (!(mop & MO_SIGN)) {
305
- mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
306
+ z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
307
}
308
}
309
break;
310
311
CASE_OP_32_64(bswap16):
312
- mask = arg_info(op->args[1])->mask;
313
- if (mask <= 0xffff) {
314
+ z_mask = arg_info(op->args[1])->z_mask;
315
+ if (z_mask <= 0xffff) {
316
op->args[2] |= TCG_BSWAP_IZ;
317
}
318
- mask = bswap16(mask);
319
+ z_mask = bswap16(z_mask);
320
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
321
case TCG_BSWAP_OZ:
322
break;
323
case TCG_BSWAP_OS:
324
- mask = (int16_t)mask;
325
+ z_mask = (int16_t)z_mask;
326
break;
327
default: /* undefined high bits */
328
- mask |= MAKE_64BIT_MASK(16, 48);
329
+ z_mask |= MAKE_64BIT_MASK(16, 48);
330
break;
331
}
332
break;
333
334
case INDEX_op_bswap32_i64:
335
- mask = arg_info(op->args[1])->mask;
336
- if (mask <= 0xffffffffu) {
337
+ z_mask = arg_info(op->args[1])->z_mask;
338
+ if (z_mask <= 0xffffffffu) {
339
op->args[2] |= TCG_BSWAP_IZ;
340
}
341
- mask = bswap32(mask);
342
+ z_mask = bswap32(z_mask);
343
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
344
case TCG_BSWAP_OZ:
345
break;
346
case TCG_BSWAP_OS:
347
- mask = (int32_t)mask;
348
+ z_mask = (int32_t)z_mask;
349
break;
350
default: /* undefined high bits */
351
- mask |= MAKE_64BIT_MASK(32, 32);
352
+ z_mask |= MAKE_64BIT_MASK(32, 32);
353
break;
354
}
355
break;
356
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
357
/* 32-bit ops generate 32-bit results. For the result is zero test
358
below, we can ignore high bits, but for further optimizations we
359
need to record that the high bits contain garbage. */
360
- partmask = mask;
361
+ partmask = z_mask;
362
if (!(def->flags & TCG_OPF_64BIT)) {
363
- mask |= ~(tcg_target_ulong)0xffffffffu;
364
+ z_mask |= ~(tcg_target_ulong)0xffffffffu;
365
partmask &= 0xffffffffu;
366
affected &= 0xffffffffu;
367
}
368
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
369
vs the high word of the input. */
370
do_setcond_high:
371
reset_temp(op->args[0]);
372
- arg_info(op->args[0])->mask = 1;
373
+ arg_info(op->args[0])->z_mask = 1;
374
op->opc = INDEX_op_setcond_i32;
375
op->args[1] = op->args[2];
376
op->args[2] = op->args[4];
377
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
378
}
379
do_setcond_low:
380
reset_temp(op->args[0]);
381
- arg_info(op->args[0])->mask = 1;
382
+ arg_info(op->args[0])->z_mask = 1;
383
op->opc = INDEX_op_setcond_i32;
384
op->args[2] = op->args[3];
385
op->args[3] = op->args[5];
386
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
387
/* Default case: we know nothing about operation (or were unable
388
to compute the operation result) so no propagation is done.
389
We trash everything if the operation is the end of a basic
390
- block, otherwise we only trash the output args. "mask" is
391
+ block, otherwise we only trash the output args. "z_mask" is
392
the non-zero bits mask for the first output arg. */
393
if (def->flags & TCG_OPF_BB_END) {
394
memset(&temps_used, 0, sizeof(temps_used));
395
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
396
/* Save the corresponding known-zero bits mask for the
397
first output argument (only one supported so far). */
398
if (i == 0) {
399
- arg_info(op->args[i])->mask = mask;
400
+ arg_info(op->args[i])->z_mask = z_mask;
401
}
402
}
403
}
404
--
2.25.1

New patch
1
Provide what will become a larger context for splitting
2
the very large tcg_optimize function.
1
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 77 ++++++++++++++++++++++++++------------------------
10
1 file changed, 40 insertions(+), 37 deletions(-)
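
Roughly, the idea is to gather all per-pass state into one struct so that helpers later split out of tcg_optimize need only a single parameter. A simplified standalone sketch of the shape (stand-in types, not the real QEMU definitions; the tcg back-pointer only appears later in the series):

#include <string.h>

/* Simplified stand-ins for the QEMU types, for illustration only. */
typedef struct TCGContext TCGContext;
typedef struct { unsigned long l[8]; } TCGTempSet;

/* Per-pass optimizer state, threaded through every helper. */
typedef struct OptContext {
    TCGContext *tcg;        /* owning context (added later in the series) */
    TCGTempSet temps_used;  /* which temps have been initialized this pass */
} OptContext;

/* A broken-out helper needs only the one pointer. */
void forget_all_temps(OptContext *ctx)
{
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
}

int main(void)
{
    OptContext ctx = { 0 };
    forget_all_temps(&ctx);
    return 0;
}
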
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
17
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
18
} TempOptInfo;
19
20
+typedef struct OptContext {
21
+ TCGTempSet temps_used;
22
+} OptContext;
23
+
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
25
{
26
return ts->state_ptr;
27
@@ -XXX,XX +XXX,XX @@ static void reset_temp(TCGArg arg)
28
}
29
30
/* Initialize and activate a temporary. */
31
-static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
32
+static void init_ts_info(OptContext *ctx, TCGTemp *ts)
33
{
34
size_t idx = temp_idx(ts);
35
TempOptInfo *ti;
36
37
- if (test_bit(idx, temps_used->l)) {
38
+ if (test_bit(idx, ctx->temps_used.l)) {
39
return;
40
}
41
- set_bit(idx, temps_used->l);
42
+ set_bit(idx, ctx->temps_used.l);
43
44
ti = ts->state_ptr;
45
if (ti == NULL) {
46
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
47
}
48
}
49
50
-static void init_arg_info(TCGTempSet *temps_used, TCGArg arg)
51
+static void init_arg_info(OptContext *ctx, TCGArg arg)
52
{
53
- init_ts_info(temps_used, arg_temp(arg));
54
+ init_ts_info(ctx, arg_temp(arg));
55
}
56
57
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
58
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
59
}
60
}
61
62
-static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
63
+static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
64
TCGOp *op, TCGArg dst, uint64_t val)
65
{
66
const TCGOpDef *def = &tcg_op_defs[op->opc];
67
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
68
69
/* Convert movi to mov with constant temp. */
70
tv = tcg_constant_internal(type, val);
71
- init_ts_info(temps_used, tv);
72
+ init_ts_info(ctx, tv);
73
tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
74
}
75
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
77
{
78
int nb_temps, nb_globals, i;
79
TCGOp *op, *op_next, *prev_mb = NULL;
80
- TCGTempSet temps_used;
81
+ OptContext ctx = {};
82
83
/* Array VALS has an element for each temp.
84
If this temp holds a constant then its value is kept in VALS' element.
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
nb_temps = s->nb_temps;
87
nb_globals = s->nb_globals;
88
89
- memset(&temps_used, 0, sizeof(temps_used));
90
for (i = 0; i < nb_temps; ++i) {
91
s->temps[i].state_ptr = NULL;
92
}
93
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
94
for (i = 0; i < nb_oargs + nb_iargs; i++) {
95
TCGTemp *ts = arg_temp(op->args[i]);
96
if (ts) {
97
- init_ts_info(&temps_used, ts);
98
+ init_ts_info(&ctx, ts);
99
}
100
}
101
} else {
102
nb_oargs = def->nb_oargs;
103
nb_iargs = def->nb_iargs;
104
for (i = 0; i < nb_oargs + nb_iargs; i++) {
105
- init_arg_info(&temps_used, op->args[i]);
106
+ init_arg_info(&ctx, op->args[i]);
107
}
108
}
109
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
CASE_OP_32_64(rotr):
112
if (arg_is_const(op->args[1])
113
&& arg_info(op->args[1])->val == 0) {
114
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
115
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
116
continue;
117
}
118
break;
119
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
120
121
if (partmask == 0) {
122
tcg_debug_assert(nb_oargs == 1);
123
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
124
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
125
continue;
126
}
127
if (affected == 0) {
128
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
129
CASE_OP_32_64(mulsh):
130
if (arg_is_const(op->args[2])
131
&& arg_info(op->args[2])->val == 0) {
132
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
133
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
134
continue;
135
}
136
break;
137
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
138
CASE_OP_32_64_VEC(sub):
139
CASE_OP_32_64_VEC(xor):
140
if (args_are_copies(op->args[1], op->args[2])) {
141
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
142
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
143
continue;
144
}
145
break;
146
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
147
if (arg_is_const(op->args[1])) {
148
tmp = arg_info(op->args[1])->val;
149
tmp = dup_const(TCGOP_VECE(op), tmp);
150
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
151
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
152
break;
153
}
154
goto do_default;
155
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
156
case INDEX_op_dup2_vec:
157
assert(TCG_TARGET_REG_BITS == 32);
158
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
159
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0],
160
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0],
161
deposit64(arg_info(op->args[1])->val, 32, 32,
162
arg_info(op->args[2])->val));
163
break;
164
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
165
case INDEX_op_extrh_i64_i32:
166
if (arg_is_const(op->args[1])) {
167
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
168
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
169
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
170
break;
171
}
172
goto do_default;
173
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
174
if (arg_is_const(op->args[1])) {
175
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
176
op->args[2]);
177
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
178
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
179
break;
180
}
181
goto do_default;
182
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
183
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
184
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
185
arg_info(op->args[2])->val);
186
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
187
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
188
break;
189
}
190
goto do_default;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
TCGArg v = arg_info(op->args[1])->val;
193
if (v != 0) {
194
tmp = do_constant_folding(opc, v, 0);
195
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
196
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
} else {
198
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
199
}
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
tmp = deposit64(arg_info(op->args[1])->val,
202
op->args[3], op->args[4],
203
arg_info(op->args[2])->val);
204
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
205
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
206
break;
207
}
208
goto do_default;
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
if (arg_is_const(op->args[1])) {
211
tmp = extract64(arg_info(op->args[1])->val,
212
op->args[2], op->args[3]);
213
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
214
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
215
break;
216
}
217
goto do_default;
218
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
219
if (arg_is_const(op->args[1])) {
220
tmp = sextract64(arg_info(op->args[1])->val,
221
op->args[2], op->args[3]);
222
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
223
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
224
break;
225
}
226
goto do_default;
227
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
228
tmp = (int32_t)(((uint32_t)v1 >> shr) |
229
((uint32_t)v2 << (32 - shr)));
230
}
231
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
232
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
233
break;
234
}
235
goto do_default;
236
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
237
tmp = do_constant_folding_cond(opc, op->args[1],
238
op->args[2], op->args[3]);
239
if (tmp != 2) {
240
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
241
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
242
break;
243
}
244
goto do_default;
245
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
246
op->args[1], op->args[2]);
247
if (tmp != 2) {
248
if (tmp) {
249
- memset(&temps_used, 0, sizeof(temps_used));
250
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
251
op->opc = INDEX_op_br;
252
op->args[0] = op->args[3];
253
} else {
254
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
255
256
rl = op->args[0];
257
rh = op->args[1];
258
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a);
259
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32));
260
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
261
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
262
break;
263
}
264
goto do_default;
265
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
266
267
rl = op->args[0];
268
rh = op->args[1];
269
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r);
270
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32));
271
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
272
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
273
break;
274
}
275
goto do_default;
276
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
277
if (tmp != 2) {
278
if (tmp) {
279
do_brcond_true:
280
- memset(&temps_used, 0, sizeof(temps_used));
281
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
282
op->opc = INDEX_op_br;
283
op->args[0] = op->args[5];
284
} else {
285
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
286
/* Simplify LT/GE comparisons vs zero to a single compare
287
vs the high word of the input. */
288
do_brcond_high:
289
- memset(&temps_used, 0, sizeof(temps_used));
290
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
op->opc = INDEX_op_brcond_i32;
292
op->args[0] = op->args[1];
293
op->args[1] = op->args[3];
294
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
295
goto do_default;
296
}
297
do_brcond_low:
298
- memset(&temps_used, 0, sizeof(temps_used));
299
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
300
op->opc = INDEX_op_brcond_i32;
301
op->args[1] = op->args[2];
302
op->args[2] = op->args[4];
303
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
304
op->args[5]);
305
if (tmp != 2) {
306
do_setcond_const:
307
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
308
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
309
} else if ((op->args[5] == TCG_COND_LT
310
|| op->args[5] == TCG_COND_GE)
311
&& arg_is_const(op->args[3])
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
313
if (!(tcg_call_flags(op)
314
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
315
for (i = 0; i < nb_globals; i++) {
316
- if (test_bit(i, temps_used.l)) {
317
+ if (test_bit(i, ctx.temps_used.l)) {
318
reset_ts(&s->temps[i]);
319
}
320
}
321
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
322
block, otherwise we only trash the output args. "z_mask" is
323
the non-zero bits mask for the first output arg. */
324
if (def->flags & TCG_OPF_BB_END) {
325
- memset(&temps_used, 0, sizeof(temps_used));
326
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
327
} else {
328
do_reset_output:
329
for (i = 0; i < nb_oargs; i++) {
330
--
2.25.1

New patch
1
Break the final cleanup clause out of the main switch
2
statement. When fully folding an opcode to mov/movi,
3
use "continue" to process the next opcode, else break
4
to fall into the final cleanup.
1
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
tcg/optimize.c | 190 ++++++++++++++++++++++++-------------------------
12
1 file changed, 94 insertions(+), 96 deletions(-)
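
The resulting loop shape, reduced to a standalone toy example (generic C, not the actual optimizer): a case that fully folds its op says "continue", everything else breaks out of the switch and falls into one shared cleanup block at the bottom of the loop.

#include <stdio.h>

int main(void)
{
    /* 0 stands for an op that folds completely; the others do not. */
    int ops[] = { 0, 1, 2 };

    for (int i = 0; i < 3; i++) {
        switch (ops[i]) {
        case 0:
            printf("op %d: fully folded\n", ops[i]);
            continue;   /* done with this op, skip the shared cleanup */
        default:
            break;      /* fall through to the shared cleanup below */
        }

        /* Shared cleanup, formerly duplicated behind a "do_default" label. */
        printf("op %d: reset output state\n", ops[i]);
    }
    return 0;
}
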
13
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
19
switch (opc) {
20
CASE_OP_32_64_VEC(mov):
21
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
22
- break;
23
+ continue;
24
25
case INDEX_op_dup_vec:
26
if (arg_is_const(op->args[1])) {
27
tmp = arg_info(op->args[1])->val;
28
tmp = dup_const(TCGOP_VECE(op), tmp);
29
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
30
- break;
31
+ continue;
32
}
33
- goto do_default;
34
+ break;
35
36
case INDEX_op_dup2_vec:
37
assert(TCG_TARGET_REG_BITS == 32);
38
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
tcg_opt_gen_movi(s, &ctx, op, op->args[0],
40
deposit64(arg_info(op->args[1])->val, 32, 32,
41
arg_info(op->args[2])->val));
42
- break;
43
+ continue;
44
} else if (args_are_copies(op->args[1], op->args[2])) {
45
op->opc = INDEX_op_dup_vec;
46
TCGOP_VECE(op) = MO_32;
47
nb_iargs = 1;
48
}
49
- goto do_default;
50
+ break;
51
52
CASE_OP_32_64(not):
53
CASE_OP_32_64(neg):
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
if (arg_is_const(op->args[1])) {
56
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
57
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
58
- break;
59
+ continue;
60
}
61
- goto do_default;
62
+ break;
63
64
CASE_OP_32_64(bswap16):
65
CASE_OP_32_64(bswap32):
66
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
67
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
68
op->args[2]);
69
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
70
- break;
71
+ continue;
72
}
73
- goto do_default;
74
+ break;
75
76
CASE_OP_32_64(add):
77
CASE_OP_32_64(sub):
78
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
79
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
80
arg_info(op->args[2])->val);
81
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
82
- break;
83
+ continue;
84
}
85
- goto do_default;
86
+ break;
87
88
CASE_OP_32_64(clz):
89
CASE_OP_32_64(ctz):
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
} else {
92
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
93
}
94
- break;
95
+ continue;
96
}
97
- goto do_default;
98
+ break;
99
100
CASE_OP_32_64(deposit):
101
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
102
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
103
op->args[3], op->args[4],
104
arg_info(op->args[2])->val);
105
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
106
- break;
107
+ continue;
108
}
109
- goto do_default;
110
+ break;
111
112
CASE_OP_32_64(extract):
113
if (arg_is_const(op->args[1])) {
114
tmp = extract64(arg_info(op->args[1])->val,
115
op->args[2], op->args[3]);
116
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
117
- break;
118
+ continue;
119
}
120
- goto do_default;
121
+ break;
122
123
CASE_OP_32_64(sextract):
124
if (arg_is_const(op->args[1])) {
125
tmp = sextract64(arg_info(op->args[1])->val,
126
op->args[2], op->args[3]);
127
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
128
- break;
129
+ continue;
130
}
131
- goto do_default;
132
+ break;
133
134
CASE_OP_32_64(extract2):
135
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
136
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
137
((uint32_t)v2 << (32 - shr)));
138
}
139
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
140
- break;
141
+ continue;
142
}
143
- goto do_default;
144
+ break;
145
146
CASE_OP_32_64(setcond):
147
tmp = do_constant_folding_cond(opc, op->args[1],
148
op->args[2], op->args[3]);
149
if (tmp != 2) {
150
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
151
- break;
152
+ continue;
153
}
154
- goto do_default;
155
+ break;
156
157
CASE_OP_32_64(brcond):
158
tmp = do_constant_folding_cond(opc, op->args[0],
159
op->args[1], op->args[2]);
160
- if (tmp != 2) {
161
- if (tmp) {
162
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
163
- op->opc = INDEX_op_br;
164
- op->args[0] = op->args[3];
165
- } else {
166
- tcg_op_remove(s, op);
167
- }
168
+ switch (tmp) {
169
+ case 0:
170
+ tcg_op_remove(s, op);
171
+ continue;
172
+ case 1:
173
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
174
+ op->opc = opc = INDEX_op_br;
175
+ op->args[0] = op->args[3];
176
break;
177
}
178
- goto do_default;
179
+ break;
180
181
CASE_OP_32_64(movcond):
182
tmp = do_constant_folding_cond(opc, op->args[1],
183
op->args[2], op->args[5]);
184
if (tmp != 2) {
185
tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
186
- break;
187
+ continue;
188
}
189
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
190
uint64_t tv = arg_info(op->args[3])->val;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
if (fv == 1 && tv == 0) {
193
cond = tcg_invert_cond(cond);
194
} else if (!(tv == 1 && fv == 0)) {
195
- goto do_default;
196
+ break;
197
}
198
op->args[3] = cond;
199
op->opc = opc = (opc == INDEX_op_movcond_i32
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
: INDEX_op_setcond_i64);
202
nb_iargs = 2;
203
}
204
- goto do_default;
205
+ break;
206
207
case INDEX_op_add2_i32:
208
case INDEX_op_sub2_i32:
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
rh = op->args[1];
211
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
212
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
213
- break;
214
+ continue;
215
}
216
- goto do_default;
217
+ break;
218
219
case INDEX_op_mulu2_i32:
220
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
221
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
222
rh = op->args[1];
223
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
224
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
225
- break;
226
+ continue;
227
}
228
- goto do_default;
229
+ break;
230
231
case INDEX_op_brcond2_i32:
232
tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
233
op->args[4]);
234
- if (tmp != 2) {
235
- if (tmp) {
236
- do_brcond_true:
237
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
238
- op->opc = INDEX_op_br;
239
- op->args[0] = op->args[5];
240
- } else {
241
+ if (tmp == 0) {
242
do_brcond_false:
243
- tcg_op_remove(s, op);
244
- }
245
- } else if ((op->args[4] == TCG_COND_LT
246
- || op->args[4] == TCG_COND_GE)
247
- && arg_is_const(op->args[2])
248
- && arg_info(op->args[2])->val == 0
249
- && arg_is_const(op->args[3])
250
- && arg_info(op->args[3])->val == 0) {
251
+ tcg_op_remove(s, op);
252
+ continue;
253
+ }
254
+ if (tmp == 1) {
255
+ do_brcond_true:
256
+ op->opc = opc = INDEX_op_br;
257
+ op->args[0] = op->args[5];
258
+ break;
259
+ }
260
+ if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
261
+ && arg_is_const(op->args[2])
262
+ && arg_info(op->args[2])->val == 0
263
+ && arg_is_const(op->args[3])
264
+ && arg_info(op->args[3])->val == 0) {
265
/* Simplify LT/GE comparisons vs zero to a single compare
266
vs the high word of the input. */
267
do_brcond_high:
268
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
269
- op->opc = INDEX_op_brcond_i32;
270
+ op->opc = opc = INDEX_op_brcond_i32;
271
op->args[0] = op->args[1];
272
op->args[1] = op->args[3];
273
op->args[2] = op->args[4];
274
op->args[3] = op->args[5];
275
- } else if (op->args[4] == TCG_COND_EQ) {
276
+ break;
277
+ }
278
+ if (op->args[4] == TCG_COND_EQ) {
279
/* Simplify EQ comparisons where one of the pairs
280
can be simplified. */
281
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
282
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
283
if (tmp == 0) {
284
goto do_brcond_false;
285
} else if (tmp != 1) {
286
- goto do_default;
287
+ break;
288
}
289
do_brcond_low:
290
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
292
op->args[1] = op->args[2];
293
op->args[2] = op->args[4];
294
op->args[3] = op->args[5];
295
- } else if (op->args[4] == TCG_COND_NE) {
296
+ break;
297
+ }
298
+ if (op->args[4] == TCG_COND_NE) {
299
/* Simplify NE comparisons where one of the pairs
300
can be simplified. */
301
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
302
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
303
} else if (tmp == 1) {
304
goto do_brcond_true;
305
}
306
- goto do_default;
307
- } else {
308
- goto do_default;
309
}
310
break;
311
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
313
if (tmp != 2) {
314
do_setcond_const:
315
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
316
- } else if ((op->args[5] == TCG_COND_LT
317
- || op->args[5] == TCG_COND_GE)
318
- && arg_is_const(op->args[3])
319
- && arg_info(op->args[3])->val == 0
320
- && arg_is_const(op->args[4])
321
- && arg_info(op->args[4])->val == 0) {
322
+ continue;
323
+ }
324
+ if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
325
+ && arg_is_const(op->args[3])
326
+ && arg_info(op->args[3])->val == 0
327
+ && arg_is_const(op->args[4])
328
+ && arg_info(op->args[4])->val == 0) {
329
/* Simplify LT/GE comparisons vs zero to a single compare
330
vs the high word of the input. */
331
do_setcond_high:
332
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
333
op->args[1] = op->args[2];
334
op->args[2] = op->args[4];
335
op->args[3] = op->args[5];
336
- } else if (op->args[5] == TCG_COND_EQ) {
337
+ break;
338
+ }
339
+ if (op->args[5] == TCG_COND_EQ) {
340
/* Simplify EQ comparisons where one of the pairs
341
can be simplified. */
342
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
343
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
344
if (tmp == 0) {
345
goto do_setcond_high;
346
} else if (tmp != 1) {
347
- goto do_default;
348
+ break;
349
}
350
do_setcond_low:
351
reset_temp(op->args[0]);
352
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
353
op->opc = INDEX_op_setcond_i32;
354
op->args[2] = op->args[3];
355
op->args[3] = op->args[5];
356
- } else if (op->args[5] == TCG_COND_NE) {
357
+ break;
358
+ }
359
+ if (op->args[5] == TCG_COND_NE) {
360
/* Simplify NE comparisons where one of the pairs
361
can be simplified. */
362
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
363
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
364
} else if (tmp == 1) {
365
goto do_setcond_const;
366
}
367
- goto do_default;
368
- } else {
369
- goto do_default;
370
}
371
break;
372
373
- case INDEX_op_call:
374
- if (!(tcg_call_flags(op)
375
+ default:
376
+ break;
377
+ }
378
+
379
+ /* Some of the folding above can change opc. */
380
+ opc = op->opc;
381
+ def = &tcg_op_defs[opc];
382
+ if (def->flags & TCG_OPF_BB_END) {
383
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
384
+ } else {
385
+ if (opc == INDEX_op_call &&
386
+ !(tcg_call_flags(op)
387
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
388
for (i = 0; i < nb_globals; i++) {
389
if (test_bit(i, ctx.temps_used.l)) {
390
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
391
}
392
}
393
}
394
- goto do_reset_output;
395
396
- default:
397
- do_default:
398
- /* Default case: we know nothing about operation (or were unable
399
- to compute the operation result) so no propagation is done.
400
- We trash everything if the operation is the end of a basic
401
- block, otherwise we only trash the output args. "z_mask" is
402
- the non-zero bits mask for the first output arg. */
403
- if (def->flags & TCG_OPF_BB_END) {
404
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
405
- } else {
406
- do_reset_output:
407
- for (i = 0; i < nb_oargs; i++) {
408
- reset_temp(op->args[i]);
409
- /* Save the corresponding known-zero bits mask for the
410
- first output argument (only one supported so far). */
411
- if (i == 0) {
412
- arg_info(op->args[i])->z_mask = z_mask;
413
- }
414
+ for (i = 0; i < nb_oargs; i++) {
415
+ reset_temp(op->args[i]);
416
+ /* Save the corresponding known-zero bits mask for the
417
+ first output argument (only one supported so far). */
418
+ if (i == 0) {
419
+ arg_info(op->args[i])->z_mask = z_mask;
420
}
421
}
422
- break;
423
}
424
425
/* Eliminate duplicate and redundant fence instructions. */
426
--
2.25.1

New patch
1
1
Adjust the interface to take the OptContext parameter instead
2
of TCGContext or both.
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 67 +++++++++++++++++++++++++-------------------------
9
1 file changed, 34 insertions(+), 33 deletions(-)
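
A minimal standalone sketch of the resulting calling convention (made-up stand-in types, not the QEMU ones): the pass context carries a back-pointer to the owning context, so a helper needs only the one argument.

#include <stdio.h>

typedef struct { const char *name; } TCGContextSketch;
typedef struct { TCGContextSketch *tcg; } OptContextSketch;

/* Reaches the owning context through ctx instead of a second argument. */
static void helper(OptContextSketch *ctx)
{
    printf("running on %s\n", ctx->tcg->name);
}

int main(void)
{
    TCGContextSketch s = { "tcg" };
    OptContextSketch ctx = { &s };

    helper(&ctx);
    return 0;
}
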
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
16
} TempOptInfo;
17
18
typedef struct OptContext {
19
+ TCGContext *tcg;
20
TCGTempSet temps_used;
21
} OptContext;
22
23
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
24
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
25
}
26
27
-static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
28
+static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
{
30
TCGTemp *dst_ts = arg_temp(dst);
31
TCGTemp *src_ts = arg_temp(src);
32
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
33
TCGOpcode new_op;
34
35
if (ts_are_copies(dst_ts, src_ts)) {
36
- tcg_op_remove(s, op);
37
+ tcg_op_remove(ctx->tcg, op);
38
return;
39
}
40
41
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
42
}
43
}
44
45
-static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
46
- TCGOp *op, TCGArg dst, uint64_t val)
47
+static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
48
+ TCGArg dst, uint64_t val)
49
{
50
const TCGOpDef *def = &tcg_op_defs[op->opc];
51
TCGType type;
52
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
53
/* Convert movi to mov with constant temp. */
54
tv = tcg_constant_internal(type, val);
55
init_ts_info(ctx, tv);
56
- tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
57
+ tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
58
}
59
60
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
62
{
63
int nb_temps, nb_globals, i;
64
TCGOp *op, *op_next, *prev_mb = NULL;
65
- OptContext ctx = {};
66
+ OptContext ctx = { .tcg = s };
67
68
/* Array VALS has an element for each temp.
69
If this temp holds a constant then its value is kept in VALS' element.
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
CASE_OP_32_64(rotr):
72
if (arg_is_const(op->args[1])
73
&& arg_info(op->args[1])->val == 0) {
74
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
75
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
76
continue;
77
}
78
break;
79
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
80
if (!arg_is_const(op->args[1])
81
&& arg_is_const(op->args[2])
82
&& arg_info(op->args[2])->val == 0) {
83
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
84
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
85
continue;
86
}
87
break;
88
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
89
if (!arg_is_const(op->args[1])
90
&& arg_is_const(op->args[2])
91
&& arg_info(op->args[2])->val == -1) {
92
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
93
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
94
continue;
95
}
96
break;
97
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
98
99
if (partmask == 0) {
100
tcg_debug_assert(nb_oargs == 1);
101
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
102
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
103
continue;
104
}
105
if (affected == 0) {
106
tcg_debug_assert(nb_oargs == 1);
107
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
108
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
109
continue;
110
}
111
112
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
113
CASE_OP_32_64(mulsh):
114
if (arg_is_const(op->args[2])
115
&& arg_info(op->args[2])->val == 0) {
116
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
117
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
118
continue;
119
}
120
break;
121
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
122
CASE_OP_32_64_VEC(or):
123
CASE_OP_32_64_VEC(and):
124
if (args_are_copies(op->args[1], op->args[2])) {
125
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
126
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
127
continue;
128
}
129
break;
130
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
131
CASE_OP_32_64_VEC(sub):
132
CASE_OP_32_64_VEC(xor):
133
if (args_are_copies(op->args[1], op->args[2])) {
134
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
135
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
136
continue;
137
}
138
break;
139
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
140
allocator where needed and possible. Also detect copies. */
141
switch (opc) {
142
CASE_OP_32_64_VEC(mov):
143
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
144
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
145
continue;
146
147
case INDEX_op_dup_vec:
148
if (arg_is_const(op->args[1])) {
149
tmp = arg_info(op->args[1])->val;
150
tmp = dup_const(TCGOP_VECE(op), tmp);
151
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
152
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
153
continue;
154
}
155
break;
156
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
157
case INDEX_op_dup2_vec:
158
assert(TCG_TARGET_REG_BITS == 32);
159
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
160
- tcg_opt_gen_movi(s, &ctx, op, op->args[0],
161
+ tcg_opt_gen_movi(&ctx, op, op->args[0],
162
deposit64(arg_info(op->args[1])->val, 32, 32,
163
arg_info(op->args[2])->val));
164
continue;
165
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
166
case INDEX_op_extrh_i64_i32:
167
if (arg_is_const(op->args[1])) {
168
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
169
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
170
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
171
continue;
172
}
173
break;
174
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
175
if (arg_is_const(op->args[1])) {
176
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
177
op->args[2]);
178
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
179
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
180
continue;
181
}
182
break;
183
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
184
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
185
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
186
arg_info(op->args[2])->val);
187
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
188
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
189
continue;
190
}
191
break;
192
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
193
TCGArg v = arg_info(op->args[1])->val;
194
if (v != 0) {
195
tmp = do_constant_folding(opc, v, 0);
196
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
198
} else {
199
- tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
200
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
201
}
202
continue;
203
}
204
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
205
tmp = deposit64(arg_info(op->args[1])->val,
206
op->args[3], op->args[4],
207
arg_info(op->args[2])->val);
208
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
209
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
210
continue;
211
}
212
break;
213
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
214
if (arg_is_const(op->args[1])) {
215
tmp = extract64(arg_info(op->args[1])->val,
216
op->args[2], op->args[3]);
217
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
218
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
219
continue;
220
}
221
break;
222
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
223
if (arg_is_const(op->args[1])) {
224
tmp = sextract64(arg_info(op->args[1])->val,
225
op->args[2], op->args[3]);
226
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
227
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
228
continue;
229
}
230
break;
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
tmp = (int32_t)(((uint32_t)v1 >> shr) |
233
((uint32_t)v2 << (32 - shr)));
234
}
235
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
236
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
237
continue;
238
}
239
break;
240
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
241
tmp = do_constant_folding_cond(opc, op->args[1],
242
op->args[2], op->args[3]);
243
if (tmp != 2) {
244
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
245
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
246
continue;
247
}
248
break;
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
250
tmp = do_constant_folding_cond(opc, op->args[1],
251
op->args[2], op->args[5]);
252
if (tmp != 2) {
253
- tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
254
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
255
continue;
256
}
257
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
258
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
259
260
rl = op->args[0];
261
rh = op->args[1];
262
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
263
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
264
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
265
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
266
continue;
267
}
268
break;
269
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
270
271
rl = op->args[0];
272
rh = op->args[1];
273
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
274
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
275
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
276
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
277
continue;
278
}
279
break;
280
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
281
op->args[5]);
282
if (tmp != 2) {
283
do_setcond_const:
284
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
285
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
286
continue;
287
}
288
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
289
--
2.25.1

New patch
1
This will expose the variable to subroutines that
2
will be broken out of tcg_optimize.
1
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 11 ++++++-----
10
1 file changed, 6 insertions(+), 5 deletions(-)
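
For context, the optimization that wants this state merges back-to-back barriers by OR-ing the later barrier's type mask into the earlier one. A standalone sketch of that idea (plain C; the flag values are invented for illustration):

#include <stdio.h>

/* Made-up barrier type flags, for illustration only. */
enum { BAR_LD_LD = 1, BAR_ST_ST = 2 };

int main(void)
{
    int prev_mb = BAR_LD_LD;   /* barrier already emitted */
    int next_mb = BAR_ST_ST;   /* redundant barrier that follows it */

    /* Two adjacent barriers collapse into one that is at least as strong
     * as both, the same way the optimizer folds the second barrier's
     * argument into the first. */
    prev_mb |= next_mb;
    printf("merged barrier mask: %d\n", prev_mb);   /* prints 3 */
    return 0;
}
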
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
17
18
typedef struct OptContext {
19
TCGContext *tcg;
20
+ TCGOp *prev_mb;
21
TCGTempSet temps_used;
22
} OptContext;
23
24
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
25
void tcg_optimize(TCGContext *s)
26
{
27
int nb_temps, nb_globals, i;
28
- TCGOp *op, *op_next, *prev_mb = NULL;
29
+ TCGOp *op, *op_next;
30
OptContext ctx = { .tcg = s };
31
32
/* Array VALS has an element for each temp.
33
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
}
35
36
/* Eliminate duplicate and redundant fence instructions. */
37
- if (prev_mb) {
38
+ if (ctx.prev_mb) {
39
switch (opc) {
40
case INDEX_op_mb:
41
/* Merge two barriers of the same type into one,
42
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
43
* barrier. This is stricter than specified but for
44
* the purposes of TCG is better than not optimizing.
45
*/
46
- prev_mb->args[0] |= op->args[0];
47
+ ctx.prev_mb->args[0] |= op->args[0];
48
tcg_op_remove(s, op);
49
break;
50
51
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
52
case INDEX_op_qemu_st_i64:
53
case INDEX_op_call:
54
/* Opcodes that touch guest memory stop the optimization. */
55
- prev_mb = NULL;
56
+ ctx.prev_mb = NULL;
57
break;
58
}
59
} else if (opc == INDEX_op_mb) {
60
- prev_mb = op;
61
+ ctx.prev_mb = op;
62
}
63
}
64
}
65
--
2.25.1

New patch
1
There was no real reason for calls to have separate code here.
2
Unify init for calls vs non-calls using the call path, which
3
handles TCG_CALL_DUMMY_ARG.
1
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
tcg/optimize.c | 25 +++++++++++--------------
11
1 file changed, 11 insertions(+), 14 deletions(-)
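
This works because a dummy call argument has no temporary behind it, so the shared loop can simply skip NULL entries. A standalone sketch with simplified stand-in types (not the QEMU code):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for a TCG temporary. */
typedef struct { int initialized; } Temp;

/* One init loop for calls and non-calls alike: NULL slots, which is how
 * a dummy call argument appears, are simply skipped. */
static void init_arguments(Temp **args, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        if (args[i]) {
            args[i]->initialized = 1;
        }
    }
}

int main(void)
{
    Temp a = { 0 }, b = { 0 };
    Temp *args[3] = { &a, NULL, &b };   /* middle slot plays the dummy */

    init_arguments(args, 3);
    printf("a=%d b=%d\n", a.initialized, b.initialized);   /* a=1 b=1 */
    return 0;
}
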
12
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
18
}
19
}
20
21
-static void init_arg_info(OptContext *ctx, TCGArg arg)
22
-{
23
- init_ts_info(ctx, arg_temp(arg));
24
-}
25
-
26
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
27
{
28
TCGTemp *i, *g, *l;
29
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
30
return false;
31
}
32
33
+static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
34
+{
35
+ for (int i = 0; i < nb_args; i++) {
36
+ TCGTemp *ts = arg_temp(op->args[i]);
37
+ if (ts) {
38
+ init_ts_info(ctx, ts);
39
+ }
40
+ }
41
+}
42
+
43
/* Propagate constants and copies, fold constant expressions. */
44
void tcg_optimize(TCGContext *s)
45
{
46
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
47
if (opc == INDEX_op_call) {
48
nb_oargs = TCGOP_CALLO(op);
49
nb_iargs = TCGOP_CALLI(op);
50
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
51
- TCGTemp *ts = arg_temp(op->args[i]);
52
- if (ts) {
53
- init_ts_info(&ctx, ts);
54
- }
55
- }
56
} else {
57
nb_oargs = def->nb_oargs;
58
nb_iargs = def->nb_iargs;
59
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
60
- init_arg_info(&ctx, op->args[i]);
61
- }
62
}
63
+ init_arguments(&ctx, op, nb_oargs + nb_iargs);
64
65
/* Do copy propagation */
66
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
67
--
2.25.1

1
For --enable-tcg-interpreter on Windows, we will need this.
1
Continue splitting tcg_optimize.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
include/qemu/osdep.h | 1 +
8
tcg/optimize.c | 22 ++++++++++++++--------
9
util/osdep.c | 9 +++++++++
9
1 file changed, 14 insertions(+), 8 deletions(-)
10
2 files changed, 10 insertions(+)
11
10
12
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/include/qemu/osdep.h
13
--- a/tcg/optimize.c
15
+++ b/include/qemu/osdep.h
14
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ void sigaction_invoke(struct sigaction *action,
15
@@ -XXX,XX +XXX,XX @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
17
#endif
16
}
18
19
int qemu_madvise(void *addr, size_t len, int advice);
20
+int qemu_mprotect_rw(void *addr, size_t size);
21
int qemu_mprotect_rwx(void *addr, size_t size);
22
int qemu_mprotect_none(void *addr, size_t size);
23
24
diff --git a/util/osdep.c b/util/osdep.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/util/osdep.c
27
+++ b/util/osdep.c
28
@@ -XXX,XX +XXX,XX @@ static int qemu_mprotect__osdep(void *addr, size_t size, int prot)
29
#endif
30
}
17
}
31
18
32
+int qemu_mprotect_rw(void *addr, size_t size)
19
+static void copy_propagate(OptContext *ctx, TCGOp *op,
20
+ int nb_oargs, int nb_iargs)
33
+{
21
+{
34
+#ifdef _WIN32
22
+ TCGContext *s = ctx->tcg;
35
+ return qemu_mprotect__osdep(addr, size, PAGE_READWRITE);
23
+
36
+#else
24
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
37
+ return qemu_mprotect__osdep(addr, size, PROT_READ | PROT_WRITE);
25
+ TCGTemp *ts = arg_temp(op->args[i]);
38
+#endif
26
+ if (ts && ts_is_copy(ts)) {
27
+ op->args[i] = temp_arg(find_better_copy(s, ts));
28
+ }
29
+ }
39
+}
30
+}
40
+
31
+
41
int qemu_mprotect_rwx(void *addr, size_t size)
32
/* Propagate constants and copies, fold constant expressions. */
33
void tcg_optimize(TCGContext *s)
42
{
34
{
43
#ifdef _WIN32
35
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
36
nb_iargs = def->nb_iargs;
37
}
38
init_arguments(&ctx, op, nb_oargs + nb_iargs);
39
-
40
- /* Do copy propagation */
41
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
42
- TCGTemp *ts = arg_temp(op->args[i]);
43
- if (ts && ts_is_copy(ts)) {
44
- op->args[i] = temp_arg(find_better_copy(s, ts));
45
- }
46
- }
47
+ copy_propagate(&ctx, op, nb_oargs, nb_iargs);
48
49
/* For commutative operations make constant second argument */
50
switch (opc) {
44
--
2.25.1

1
If qemu_get_host_physmem returns an odd number of pages,
1
Calls are special in that they have a variable number
2
then physmem / 8 will not be a multiple of the page size.
2
of arguments, and need to be able to clobber globals.
3
4
The following was observed on a gitlab runner:
5
6
ERROR qtest-arm/boot-serial-test - Bail out!
7
ERROR:../util/osdep.c:80:qemu_mprotect__osdep: \
8
assertion failed: (!(size & ~qemu_real_host_page_mask))
9
3
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
7
---
14
tcg/region.c | 47 +++++++++++++++++++++--------------------------
8
tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------
15
1 file changed, 21 insertions(+), 26 deletions(-)
9
1 file changed, 41 insertions(+), 22 deletions(-)
16
10
17
diff --git a/tcg/region.c b/tcg/region.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/region.c
13
--- a/tcg/optimize.c
20
+++ b/tcg/region.c
14
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
15
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
22
(DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
16
}
23
? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
17
}
24
18
25
-static size_t size_code_gen_buffer(size_t tb_size)
19
+static bool fold_call(OptContext *ctx, TCGOp *op)
26
-{
20
+{
27
- /* Size the buffer. */
21
+ TCGContext *s = ctx->tcg;
28
- if (tb_size == 0) {
22
+ int nb_oargs = TCGOP_CALLO(op);
29
- size_t phys_mem = qemu_get_host_physmem();
23
+ int nb_iargs = TCGOP_CALLI(op);
30
- if (phys_mem == 0) {
24
+ int flags, i;
31
- tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
25
+
32
- } else {
26
+ init_arguments(ctx, op, nb_oargs + nb_iargs);
33
- tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
27
+ copy_propagate(ctx, op, nb_oargs, nb_iargs);
34
- }
28
+
35
- }
29
+ /* If the function reads or writes globals, reset temp data. */
36
- if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
30
+ flags = tcg_call_flags(op);
37
- tb_size = MIN_CODE_GEN_BUFFER_SIZE;
31
+ if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
38
- }
32
+ int nb_globals = s->nb_globals;
39
- if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
33
+
40
- tb_size = MAX_CODE_GEN_BUFFER_SIZE;
34
+ for (i = 0; i < nb_globals; i++) {
41
- }
35
+ if (test_bit(i, ctx->temps_used.l)) {
42
- return tb_size;
36
+ reset_ts(&ctx->tcg->temps[i]);
43
-}
37
+ }
44
-
45
#ifdef __mips__
46
/*
47
* In order to use J and JAL within the code_gen_buffer, we require
48
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
49
*/
50
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
51
{
52
- size_t page_size;
53
+ const size_t page_size = qemu_real_host_page_size;
54
size_t region_size;
55
size_t i;
56
int have_prot;
57
58
- have_prot = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
59
- splitwx, &error_fatal);
60
+ /* Size the buffer. */
61
+ if (tb_size == 0) {
62
+ size_t phys_mem = qemu_get_host_physmem();
63
+ if (phys_mem == 0) {
64
+ tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
65
+ } else {
66
+ tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
67
+ tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
68
+ }
38
+ }
69
+ }
39
+ }
70
+ if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
40
+
71
+ tb_size = MIN_CODE_GEN_BUFFER_SIZE;
41
+ /* Reset temp data for outputs. */
72
+ }
42
+ for (i = 0; i < nb_oargs; i++) {
73
+ if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
43
+ reset_temp(op->args[i]);
74
+ tb_size = MAX_CODE_GEN_BUFFER_SIZE;
75
+ }
44
+ }
76
+
45
+
77
+ have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
46
+ /* Stop optimizing MB across calls. */
78
assert(have_prot >= 0);
47
+ ctx->prev_mb = NULL;
79
48
+ return true;
80
/* Request large pages for the buffer and the splitwx. */
49
+}
81
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
50
+
82
* As a result of this we might end up with a few extra pages at the end of
51
/* Propagate constants and copies, fold constant expressions. */
83
* the buffer; we will assign those to the last region.
52
void tcg_optimize(TCGContext *s)
84
*/
53
{
85
- region.n = tcg_n_regions(region.total_size, max_cpus);
54
- int nb_temps, nb_globals, i;
86
- page_size = qemu_real_host_page_size;
55
+ int nb_temps, i;
87
- region_size = region.total_size / region.n;
56
TCGOp *op, *op_next;
88
+ region.n = tcg_n_regions(tb_size, max_cpus);
57
OptContext ctx = { .tcg = s };
89
+ region_size = tb_size / region.n;
58
90
region_size = QEMU_ALIGN_DOWN(region_size, page_size);
59
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
60
available through the doubly linked circular list. */
92
/* A region must have at least 2 pages; one code, one guard */
61
62
nb_temps = s->nb_temps;
63
- nb_globals = s->nb_globals;
64
-
65
for (i = 0; i < nb_temps; ++i) {
66
s->temps[i].state_ptr = NULL;
67
}
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
69
uint64_t z_mask, partmask, affected, tmp;
70
int nb_oargs, nb_iargs;
71
TCGOpcode opc = op->opc;
72
- const TCGOpDef *def = &tcg_op_defs[opc];
73
+ const TCGOpDef *def;
74
75
- /* Count the arguments, and initialize the temps that are
76
- going to be used */
77
+ /* Calls are special. */
78
if (opc == INDEX_op_call) {
79
- nb_oargs = TCGOP_CALLO(op);
80
- nb_iargs = TCGOP_CALLI(op);
81
- } else {
82
- nb_oargs = def->nb_oargs;
83
- nb_iargs = def->nb_iargs;
84
+ fold_call(&ctx, op);
85
+ continue;
86
}
87
+
88
+ def = &tcg_op_defs[opc];
89
+ nb_oargs = def->nb_oargs;
90
+ nb_iargs = def->nb_iargs;
91
init_arguments(&ctx, op, nb_oargs + nb_iargs);
92
copy_propagate(&ctx, op, nb_oargs, nb_iargs);
93
94
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
95
if (def->flags & TCG_OPF_BB_END) {
96
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
97
} else {
98
- if (opc == INDEX_op_call &&
99
- !(tcg_call_flags(op)
100
- & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
101
- for (i = 0; i < nb_globals; i++) {
102
- if (test_bit(i, ctx.temps_used.l)) {
103
- reset_ts(&s->temps[i]);
104
- }
105
- }
106
- }
107
-
108
for (i = 0; i < nb_oargs; i++) {
109
reset_temp(op->args[i]);
110
/* Save the corresponding known-zero bits mask for the
111
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
112
case INDEX_op_qemu_st_i32:
113
case INDEX_op_qemu_st8_i32:
114
case INDEX_op_qemu_st_i64:
115
- case INDEX_op_call:
116
/* Opcodes that touch guest memory stop the optimization. */
117
ctx.prev_mb = NULL;
118
break;
93
--
119
--
94
2.25.1
120
2.25.1
95
121
96
122
New patch
1
Rather than try to keep these up-to-date across folding,
2
re-read nb_oargs at the end, after re-reading the opcode.
1
3
4
A couple of asserts need dropping, but that will take care
5
of itself as we split the function further.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
tcg/optimize.c | 14 ++++----------
12
1 file changed, 4 insertions(+), 10 deletions(-)
13
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
19
20
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
21
uint64_t z_mask, partmask, affected, tmp;
22
- int nb_oargs, nb_iargs;
23
TCGOpcode opc = op->opc;
24
const TCGOpDef *def;
25
26
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
27
}
28
29
def = &tcg_op_defs[opc];
30
- nb_oargs = def->nb_oargs;
31
- nb_iargs = def->nb_iargs;
32
- init_arguments(&ctx, op, nb_oargs + nb_iargs);
33
- copy_propagate(&ctx, op, nb_oargs, nb_iargs);
34
+ init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
35
+ copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
36
37
/* For commutative operations make constant second argument */
38
switch (opc) {
39
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
40
41
CASE_OP_32_64(qemu_ld):
42
{
43
- MemOpIdx oi = op->args[nb_oargs + nb_iargs];
44
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
45
MemOp mop = get_memop(oi);
46
if (!(mop & MO_SIGN)) {
47
z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
}
50
51
if (partmask == 0) {
52
- tcg_debug_assert(nb_oargs == 1);
53
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
54
continue;
55
}
56
if (affected == 0) {
57
- tcg_debug_assert(nb_oargs == 1);
58
tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
59
continue;
60
}
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
62
} else if (args_are_copies(op->args[1], op->args[2])) {
63
op->opc = INDEX_op_dup_vec;
64
TCGOP_VECE(op) = MO_32;
65
- nb_iargs = 1;
66
}
67
break;
68
69
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
70
op->opc = opc = (opc == INDEX_op_movcond_i32
71
? INDEX_op_setcond_i32
72
: INDEX_op_setcond_i64);
73
- nb_iargs = 2;
74
}
75
break;
76
77
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
78
if (def->flags & TCG_OPF_BB_END) {
79
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
80
} else {
81
+ int nb_oargs = def->nb_oargs;
82
for (i = 0; i < nb_oargs; i++) {
83
reset_temp(op->args[i]);
84
/* Save the corresponding known-zero bits mask for the
85
--
86
2.25.1
87
88
1
Start removing the include of hw/boards.h from tcg/.
1
Return -1 instead of 2 for failure, so that we can
2
Pass down the max_cpus value from tcg_init_machine,
2
use comparisons against 0 for all cases.
3
where we have the MachineState already.
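(Illustration only, not part of the patch: the return convention in miniature. A helper reports -1 for "cannot simplify" and 0/1 for a known result, so callers only ever compare against 0. All names below are invented for the example.)

#include <stdbool.h>
#include <stdio.h>

/* -1: could not fold; 0 or 1: the folded result. */
static int try_fold_cond(bool known, bool value)
{
    return known ? (value ? 1 : 0) : -1;
}

static void use_fold(bool known, bool value)
{
    int i = try_fold_cond(known, value);

    if (i == 0) {
        puts("constant false");
    } else if (i > 0) {
        puts("constant true");
    } else {
        puts("not simplified");    /* i < 0 */
    }
}

int main(void)
{
    use_fold(true, true);
    use_fold(true, false);
    use_fold(false, false);
    return 0;
}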
4
3
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
include/tcg/tcg.h | 2 +-
8
tcg/optimize.c | 145 +++++++++++++++++++++++++------------------------
11
tcg/tcg-internal.h | 2 +-
9
1 file changed, 74 insertions(+), 71 deletions(-)
12
accel/tcg/tcg-all.c | 10 +++++++++-
10
13
tcg/region.c | 32 +++++++++++---------------------
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
tcg/tcg.c | 10 ++++------
15
5 files changed, 26 insertions(+), 30 deletions(-)
16
17
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
18
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg.h
13
--- a/tcg/optimize.c
20
+++ b/include/tcg/tcg.h
14
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ static inline void *tcg_malloc(int size)
15
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
22
}
16
}
23
}
17
}
24
18
25
-void tcg_init(size_t tb_size, int splitwx);
19
-/* Return 2 if the condition can't be simplified, and the result
26
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
20
- of the condition (0 or 1) if it can */
27
void tcg_register_thread(void);
21
-static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
28
void tcg_prologue_init(TCGContext *s);
22
- TCGArg y, TCGCond c)
29
void tcg_func_start(TCGContext *s);
23
+/*
30
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
24
+ * Return -1 if the condition can't be simplified,
31
index XXXXXXX..XXXXXXX 100644
25
+ * and the result of the condition (0 or 1) if it can.
32
--- a/tcg/tcg-internal.h
26
+ */
33
+++ b/tcg/tcg-internal.h
27
+static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
34
@@ -XXX,XX +XXX,XX @@
28
+ TCGArg y, TCGCond c)
35
extern TCGContext **tcg_ctxs;
36
extern unsigned int n_tcg_ctxs;
37
38
-void tcg_region_init(size_t tb_size, int splitwx);
39
+void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus);
40
bool tcg_region_alloc(TCGContext *s);
41
void tcg_region_initial_alloc(TCGContext *s);
42
void tcg_region_prologue_set(TCGContext *s);
43
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/accel/tcg/tcg-all.c
46
+++ b/accel/tcg/tcg-all.c
47
@@ -XXX,XX +XXX,XX @@
48
#include "qemu/accel.h"
49
#include "qapi/qapi-builtin-visit.h"
50
#include "qemu/units.h"
51
+#if !defined(CONFIG_USER_ONLY)
52
+#include "hw/boards.h"
53
+#endif
54
#include "internal.h"
55
56
struct TCGState {
57
@@ -XXX,XX +XXX,XX @@ bool mttcg_enabled;
58
static int tcg_init_machine(MachineState *ms)
59
{
29
{
60
TCGState *s = TCG_STATE(current_accel());
30
uint64_t xv = arg_info(x)->val;
61
+#ifdef CONFIG_USER_ONLY
31
uint64_t yv = arg_info(y)->val;
62
+ unsigned max_cpus = 1;
32
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
63
+#else
33
case TCG_COND_GEU:
64
+ unsigned max_cpus = ms->smp.max_cpus;
34
return 1;
65
+#endif
35
default:
66
36
- return 2;
67
tcg_allowed = true;
37
+ return -1;
68
mttcg_enabled = s->mttcg_enabled;
38
}
69
39
}
70
page_init();
40
- return 2;
71
tb_htable_init();
41
+ return -1;
72
- tcg_init(s->tb_size * MiB, s->splitwx_enabled);
73
+ tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);
74
75
#if defined(CONFIG_SOFTMMU)
76
/*
77
diff --git a/tcg/region.c b/tcg/region.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/tcg/region.c
80
+++ b/tcg/region.c
81
@@ -XXX,XX +XXX,XX @@
82
#include "qapi/error.h"
83
#include "exec/exec-all.h"
84
#include "tcg/tcg.h"
85
-#if !defined(CONFIG_USER_ONLY)
86
-#include "hw/boards.h"
87
-#endif
88
#include "tcg-internal.h"
89
90
91
@@ -XXX,XX +XXX,XX @@ void tcg_region_reset_all(void)
92
tcg_region_tree_reset_all();
93
}
42
}
94
43
95
+static size_t tcg_n_regions(unsigned max_cpus)
44
-/* Return 2 if the condition can't be simplified, and the result
96
+{
45
- of the condition (0 or 1) if it can */
97
#ifdef CONFIG_USER_ONLY
46
-static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
98
-static size_t tcg_n_regions(void)
47
+/*
99
-{
48
+ * Return -1 if the condition can't be simplified,
100
return 1;
49
+ * and the result of the condition (0 or 1) if it can.
101
-}
50
+ */
102
#else
51
+static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
103
-/*
52
{
104
- * It is likely that some vCPUs will translate more code than others, so we
53
TCGArg al = p1[0], ah = p1[1];
105
- * first try to set more regions than max_cpus, with those regions being of
54
TCGArg bl = p2[0], bh = p2[1];
106
- * reasonable size. If that's not possible we make do by evenly dividing
55
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
107
- * the code_gen_buffer among the vCPUs.
56
if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
108
- */
57
return do_constant_folding_cond_eq(c);
109
-static size_t tcg_n_regions(void)
110
-{
111
+ /*
112
+ * It is likely that some vCPUs will translate more code than others,
113
+ * so we first try to set more regions than max_cpus, with those regions
114
+ * being of reasonable size. If that's not possible we make do by evenly
115
+ * dividing the code_gen_buffer among the vCPUs.
116
+ */
117
size_t i;
118
119
/* Use a single region if all we have is one vCPU thread */
120
-#if !defined(CONFIG_USER_ONLY)
121
- MachineState *ms = MACHINE(qdev_get_machine());
122
- unsigned int max_cpus = ms->smp.max_cpus;
123
-#endif
124
if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
125
return 1;
126
}
58
}
127
@@ -XXX,XX +XXX,XX @@ static size_t tcg_n_regions(void)
59
- return 2;
128
}
60
+ return -1;
129
/* If we can't, then just allocate one region per vCPU thread */
130
return max_cpus;
131
-}
132
#endif
133
+}
134
135
/*
136
* Minimum size of the code gen buffer. This number is randomly chosen,
137
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
138
* in practice. Multi-threaded guests share most if not all of their translated
139
* code, which makes parallel code generation less appealing than in softmmu.
140
*/
141
-void tcg_region_init(size_t tb_size, int splitwx)
142
+void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
143
{
144
void *buf, *aligned;
145
size_t size;
146
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx)
147
buf = tcg_init_ctx.code_gen_buffer;
148
size = tcg_init_ctx.code_gen_buffer_size;
149
page_size = qemu_real_host_page_size;
150
- n_regions = tcg_n_regions();
151
+ n_regions = tcg_n_regions(max_cpus);
152
153
/* The first region will be 'aligned - buf' bytes larger than the others */
154
aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
155
diff --git a/tcg/tcg.c b/tcg/tcg.c
156
index XXXXXXX..XXXXXXX 100644
157
--- a/tcg/tcg.c
158
+++ b/tcg/tcg.c
159
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s);
160
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
161
TCGReg reg, const char *name);
162
163
-static void tcg_context_init(void)
164
+static void tcg_context_init(unsigned max_cpus)
165
{
166
TCGContext *s = &tcg_init_ctx;
167
int op, total_args, n, i;
168
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(void)
169
tcg_ctxs = &tcg_ctx;
170
n_tcg_ctxs = 1;
171
#else
172
- MachineState *ms = MACHINE(qdev_get_machine());
173
- unsigned int max_cpus = ms->smp.max_cpus;
174
tcg_ctxs = g_new(TCGContext *, max_cpus);
175
#endif
176
177
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(void)
178
cpu_env = temp_tcgv_ptr(ts);
179
}
61
}
180
62
181
-void tcg_init(size_t tb_size, int splitwx)
63
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
182
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
183
{
65
break;
184
- tcg_context_init();
66
185
- tcg_region_init(tb_size, splitwx);
67
CASE_OP_32_64(setcond):
186
+ tcg_context_init(max_cpus);
68
- tmp = do_constant_folding_cond(opc, op->args[1],
187
+ tcg_region_init(tb_size, splitwx, max_cpus);
69
- op->args[2], op->args[3]);
188
}
70
- if (tmp != 2) {
189
71
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
190
/*
72
+ i = do_constant_folding_cond(opc, op->args[1],
73
+ op->args[2], op->args[3]);
74
+ if (i >= 0) {
75
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
76
continue;
77
}
78
break;
79
80
CASE_OP_32_64(brcond):
81
- tmp = do_constant_folding_cond(opc, op->args[0],
82
- op->args[1], op->args[2]);
83
- switch (tmp) {
84
- case 0:
85
+ i = do_constant_folding_cond(opc, op->args[0],
86
+ op->args[1], op->args[2]);
87
+ if (i == 0) {
88
tcg_op_remove(s, op);
89
continue;
90
- case 1:
91
+ } else if (i > 0) {
92
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
93
op->opc = opc = INDEX_op_br;
94
op->args[0] = op->args[3];
95
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
96
break;
97
98
CASE_OP_32_64(movcond):
99
- tmp = do_constant_folding_cond(opc, op->args[1],
100
- op->args[2], op->args[5]);
101
- if (tmp != 2) {
102
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
103
+ i = do_constant_folding_cond(opc, op->args[1],
104
+ op->args[2], op->args[5]);
105
+ if (i >= 0) {
106
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
107
continue;
108
}
109
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
break;
112
113
case INDEX_op_brcond2_i32:
114
- tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
115
- op->args[4]);
116
- if (tmp == 0) {
117
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2],
118
+ op->args[4]);
119
+ if (i == 0) {
120
do_brcond_false:
121
tcg_op_remove(s, op);
122
continue;
123
}
124
- if (tmp == 1) {
125
+ if (i > 0) {
126
do_brcond_true:
127
op->opc = opc = INDEX_op_br;
128
op->args[0] = op->args[5];
129
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
130
if (op->args[4] == TCG_COND_EQ) {
131
/* Simplify EQ comparisons where one of the pairs
132
can be simplified. */
133
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
134
- op->args[0], op->args[2],
135
- TCG_COND_EQ);
136
- if (tmp == 0) {
137
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
138
+ op->args[0], op->args[2],
139
+ TCG_COND_EQ);
140
+ if (i == 0) {
141
goto do_brcond_false;
142
- } else if (tmp == 1) {
143
+ } else if (i > 0) {
144
goto do_brcond_high;
145
}
146
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
147
- op->args[1], op->args[3],
148
- TCG_COND_EQ);
149
- if (tmp == 0) {
150
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
151
+ op->args[1], op->args[3],
152
+ TCG_COND_EQ);
153
+ if (i == 0) {
154
goto do_brcond_false;
155
- } else if (tmp != 1) {
156
+ } else if (i < 0) {
157
break;
158
}
159
do_brcond_low:
160
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
161
if (op->args[4] == TCG_COND_NE) {
162
/* Simplify NE comparisons where one of the pairs
163
can be simplified. */
164
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
165
- op->args[0], op->args[2],
166
- TCG_COND_NE);
167
- if (tmp == 0) {
168
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
169
+ op->args[0], op->args[2],
170
+ TCG_COND_NE);
171
+ if (i == 0) {
172
goto do_brcond_high;
173
- } else if (tmp == 1) {
174
+ } else if (i > 0) {
175
goto do_brcond_true;
176
}
177
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
178
- op->args[1], op->args[3],
179
- TCG_COND_NE);
180
- if (tmp == 0) {
181
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
182
+ op->args[1], op->args[3],
183
+ TCG_COND_NE);
184
+ if (i == 0) {
185
goto do_brcond_low;
186
- } else if (tmp == 1) {
187
+ } else if (i > 0) {
188
goto do_brcond_true;
189
}
190
}
191
break;
192
193
case INDEX_op_setcond2_i32:
194
- tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
195
- op->args[5]);
196
- if (tmp != 2) {
197
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3],
198
+ op->args[5]);
199
+ if (i >= 0) {
200
do_setcond_const:
201
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
202
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
203
continue;
204
}
205
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
206
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
207
if (op->args[5] == TCG_COND_EQ) {
208
/* Simplify EQ comparisons where one of the pairs
209
can be simplified. */
210
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
211
- op->args[1], op->args[3],
212
- TCG_COND_EQ);
213
- if (tmp == 0) {
214
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
215
+ op->args[1], op->args[3],
216
+ TCG_COND_EQ);
217
+ if (i == 0) {
218
goto do_setcond_const;
219
- } else if (tmp == 1) {
220
+ } else if (i > 0) {
221
goto do_setcond_high;
222
}
223
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
224
- op->args[2], op->args[4],
225
- TCG_COND_EQ);
226
- if (tmp == 0) {
227
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
228
+ op->args[2], op->args[4],
229
+ TCG_COND_EQ);
230
+ if (i == 0) {
231
goto do_setcond_high;
232
- } else if (tmp != 1) {
233
+ } else if (i < 0) {
234
break;
235
}
236
do_setcond_low:
237
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
238
if (op->args[5] == TCG_COND_NE) {
239
/* Simplify NE comparisons where one of the pairs
240
can be simplified. */
241
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
242
- op->args[1], op->args[3],
243
- TCG_COND_NE);
244
- if (tmp == 0) {
245
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
246
+ op->args[1], op->args[3],
247
+ TCG_COND_NE);
248
+ if (i == 0) {
249
goto do_setcond_high;
250
- } else if (tmp == 1) {
251
+ } else if (i > 0) {
252
goto do_setcond_const;
253
}
254
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
255
- op->args[2], op->args[4],
256
- TCG_COND_NE);
257
- if (tmp == 0) {
258
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
259
+ op->args[2], op->args[4],
260
+ TCG_COND_NE);
261
+ if (i == 0) {
262
goto do_setcond_low;
263
- } else if (tmp == 1) {
264
+ } else if (i > 0) {
265
goto do_setcond_const;
266
}
267
}
191
--
268
--
192
2.25.1
269
2.25.1
193
270
194
271
1
Compute the value using straight division and bounds,
1
This will allow callers to tail call to these functions
2
rather than a loop. Pass in tb_size rather than reading
2
and return true to indicate that processing is complete.
3
from tcg_init_ctx.code_gen_buffer_size.
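(Illustration only, not part of the patch: the same computation as a stand-alone sketch, with the 2 MiB region target and the max_cpus * 8 cap taken from the change; MIN() and the sample sizes are local to the example.)

#include <stddef.h>
#include <stdio.h>

#define MiB (1024 * 1024)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static size_t n_regions(size_t tb_size, unsigned max_cpus)
{
    size_t n = tb_size / (2 * MiB);          /* regions of >= 2 MiB each */

    if (n <= max_cpus) {
        return max_cpus;                     /* at least one region per vCPU */
    }
    return MIN(n, (size_t)max_cpus * 8);     /* but no more than 8 per vCPU */
}

int main(void)
{
    printf("%zu\n", n_regions(64 * MiB, 4));    /* 32: 64/2, under the 4*8 cap */
    printf("%zu\n", n_regions(1024 * MiB, 4));  /* 32: capped at 4*8 */
    printf("%zu\n", n_regions(4 * MiB, 4));     /* 4:  one region per vCPU */
    return 0;
}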
4
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
tcg/region.c | 29 ++++++++++++-----------------
9
tcg/optimize.c | 9 +++++----
10
1 file changed, 12 insertions(+), 17 deletions(-)
10
1 file changed, 5 insertions(+), 4 deletions(-)
11
11
12
diff --git a/tcg/region.c b/tcg/region.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/region.c
14
--- a/tcg/optimize.c
15
+++ b/tcg/region.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ void tcg_region_reset_all(void)
16
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
17
tcg_region_tree_reset_all();
17
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
18
}
18
}
19
19
20
-static size_t tcg_n_regions(unsigned max_cpus)
20
-static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
21
+static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
21
+static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
22
{
22
{
23
#ifdef CONFIG_USER_ONLY
23
TCGTemp *dst_ts = arg_temp(dst);
24
return 1;
24
TCGTemp *src_ts = arg_temp(src);
25
#else
25
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
26
+ size_t n_regions;
26
27
+
27
if (ts_are_copies(dst_ts, src_ts)) {
28
/*
28
tcg_op_remove(ctx->tcg, op);
29
* It is likely that some vCPUs will translate more code than others,
29
- return;
30
* so we first try to set more regions than max_cpus, with those regions
30
+ return true;
31
* being of reasonable size. If that's not possible we make do by evenly
32
* dividing the code_gen_buffer among the vCPUs.
33
*/
34
- size_t i;
35
-
36
/* Use a single region if all we have is one vCPU thread */
37
if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
38
return 1;
39
}
31
}
40
32
41
- /* Try to have more regions than max_cpus, with each region being >= 2 MB */
33
reset_ts(dst_ts);
42
- for (i = 8; i > 0; i--) {
34
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
43
- size_t regions_per_thread = i;
35
di->is_const = si->is_const;
44
- size_t region_size;
36
di->val = si->val;
45
-
46
- region_size = tcg_init_ctx.code_gen_buffer_size;
47
- region_size /= max_cpus * regions_per_thread;
48
-
49
- if (region_size >= 2 * 1024u * 1024) {
50
- return max_cpus * regions_per_thread;
51
- }
52
+ /*
53
+ * Try to have more regions than max_cpus, with each region being >= 2 MB.
54
+ * If we can't, then just allocate one region per vCPU thread.
55
+ */
56
+ n_regions = tb_size / (2 * MiB);
57
+ if (n_regions <= max_cpus) {
58
+ return max_cpus;
59
}
37
}
60
- /* If we can't, then just allocate one region per vCPU thread */
38
+ return true;
61
- return max_cpus;
62
+ return MIN(n_regions, max_cpus * 8);
63
#endif
64
}
39
}
65
40
66
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
41
-static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
67
buf = tcg_init_ctx.code_gen_buffer;
42
+static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
68
total_size = tcg_init_ctx.code_gen_buffer_size;
43
TCGArg dst, uint64_t val)
69
page_size = qemu_real_host_page_size;
44
{
70
- n_regions = tcg_n_regions(max_cpus);
45
const TCGOpDef *def = &tcg_op_defs[op->opc];
71
+ n_regions = tcg_n_regions(total_size, max_cpus);
46
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
72
47
/* Convert movi to mov with constant temp. */
73
/* The first region will be 'aligned - buf' bytes larger than the others */
48
tv = tcg_constant_internal(type, val);
74
aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
49
init_ts_info(ctx, tv);
50
- tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
51
+ return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
52
}
53
54
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
75
--
55
--
76
2.25.1
56
2.25.1
77
57
78
58
New patch
1
Copy z_mask into OptContext, for writeback to the
2
first output within the new function.
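(Illustration only, not part of the patch: a minimal sketch of carrying an in-flight mask in a context structure and writing it back to the first output only, mirroring the "only one supported so far" note in the code. Structure and function names are invented.)

#include <stdint.h>
#include <stdio.h>

struct out_info { uint64_t z_mask; };
struct opt_ctx  { uint64_t z_mask; };        /* in-flight value */

static void finish_op(struct opt_ctx *ctx, struct out_info *outs, int nb_outs)
{
    for (int i = 0; i < nb_outs; i++) {
        outs[i].z_mask = ~0ull;              /* reset: nothing known */
        if (i == 0) {
            outs[i].z_mask = ctx->z_mask;    /* only the first output is tracked */
        }
    }
}

int main(void)
{
    struct opt_ctx ctx = { .z_mask = 0xffffffffu };  /* e.g. a 32-bit zero-extension */
    struct out_info outs[2];

    finish_op(&ctx, outs, 2);
    printf("out0 z_mask = %#llx\n", (unsigned long long)outs[0].z_mask);
    printf("out1 z_mask = %#llx\n", (unsigned long long)outs[1].z_mask);
    return 0;
}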
1
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 49 +++++++++++++++++++++++++++++++++----------------
9
1 file changed, 33 insertions(+), 16 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
16
TCGContext *tcg;
17
TCGOp *prev_mb;
18
TCGTempSet temps_used;
19
+
20
+ /* In flight values from optimization. */
21
+ uint64_t z_mask;
22
} OptContext;
23
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
25
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
26
}
27
}
28
29
+static void finish_folding(OptContext *ctx, TCGOp *op)
30
+{
31
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
32
+ int i, nb_oargs;
33
+
34
+ /*
35
+ * For an opcode that ends a BB, reset all temp data.
36
+ * We do no cross-BB optimization.
37
+ */
38
+ if (def->flags & TCG_OPF_BB_END) {
39
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
40
+ ctx->prev_mb = NULL;
41
+ return;
42
+ }
43
+
44
+ nb_oargs = def->nb_oargs;
45
+ for (i = 0; i < nb_oargs; i++) {
46
+ reset_temp(op->args[i]);
47
+ /*
48
+ * Save the corresponding known-zero bits mask for the
49
+ * first output argument (only one supported so far).
50
+ */
51
+ if (i == 0) {
52
+ arg_info(op->args[i])->z_mask = ctx->z_mask;
53
+ }
54
+ }
55
+}
56
+
57
static bool fold_call(OptContext *ctx, TCGOp *op)
58
{
59
TCGContext *s = ctx->tcg;
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
61
partmask &= 0xffffffffu;
62
affected &= 0xffffffffu;
63
}
64
+ ctx.z_mask = z_mask;
65
66
if (partmask == 0) {
67
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
69
break;
70
}
71
72
- /* Some of the folding above can change opc. */
73
- opc = op->opc;
74
- def = &tcg_op_defs[opc];
75
- if (def->flags & TCG_OPF_BB_END) {
76
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
77
- } else {
78
- int nb_oargs = def->nb_oargs;
79
- for (i = 0; i < nb_oargs; i++) {
80
- reset_temp(op->args[i]);
81
- /* Save the corresponding known-zero bits mask for the
82
- first output argument (only one supported so far). */
83
- if (i == 0) {
84
- arg_info(op->args[i])->z_mask = z_mask;
85
- }
86
- }
87
- }
88
+ finish_folding(&ctx, op);
89
90
/* Eliminate duplicate and redundant fence instructions. */
91
if (ctx.prev_mb) {
92
--
93
2.25.1
94
95
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
accel/tcg/tcg-all.c | 3 ++-
6
tcg/optimize.c | 9 ++++++---
7
1 file changed, 2 insertions(+), 1 deletion(-)
7
1 file changed, 6 insertions(+), 3 deletions(-)
8
8
9
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/accel/tcg/tcg-all.c
11
--- a/tcg/optimize.c
12
+++ b/accel/tcg/tcg-all.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
14
#include "qemu/error-report.h"
14
uint64_t z_mask, partmask, affected, tmp;
15
#include "qemu/accel.h"
15
TCGOpcode opc = op->opc;
16
#include "qapi/qapi-builtin-visit.h"
16
const TCGOpDef *def;
17
+#include "qemu/units.h"
17
+ bool done = false;
18
#include "internal.h"
18
19
19
/* Calls are special. */
20
struct TCGState {
20
if (opc == INDEX_op_call) {
21
@@ -XXX,XX +XXX,XX @@ static int tcg_init_machine(MachineState *ms)
21
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
22
22
allocator where needed and possible. Also detect copies. */
23
page_init();
23
switch (opc) {
24
tb_htable_init();
24
CASE_OP_32_64_VEC(mov):
25
- tcg_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
25
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
26
+ tcg_init(s->tb_size * MiB, s->splitwx_enabled);
26
- continue;
27
27
+ done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
28
#if defined(CONFIG_SOFTMMU)
28
+ break;
29
/*
29
30
case INDEX_op_dup_vec:
31
if (arg_is_const(op->args[1])) {
32
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
33
break;
34
}
35
36
- finish_folding(&ctx, op);
37
+ if (!done) {
38
+ finish_folding(&ctx, op);
39
+ }
40
41
/* Eliminate duplicate and redundant fence instructions. */
42
if (ctx.prev_mb) {
30
--
43
--
31
2.25.1
44
2.25.1
32
45
33
46
1
Return output buffer and size via output pointer arguments,
1
This puts the separate mb optimization into the same framework
2
rather than returning size via tcg_ctx->code_gen_buffer_size.
2
as the others. While fold_qemu_{ld,st} are currently identical,
3
that won't last as more code gets moved.
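(Illustration only, not part of the patch: the "mb X; mb Y => mb X|Y" merge rule in miniature. The flag bits below are invented for the example; merging by OR can only strengthen the barrier.)

#include <stdio.h>

#define BAR_LD_LD  (1u << 0)
#define BAR_LD_ST  (1u << 1)
#define BAR_ST_LD  (1u << 2)
#define BAR_ST_ST  (1u << 3)

static unsigned merge_barriers(unsigned prev, unsigned next)
{
    return prev | next;     /* union of the orderings: at least as strong */
}

int main(void)
{
    unsigned acquire = BAR_LD_LD | BAR_LD_ST;   /* loads ordered before later ops */
    unsigned release = BAR_LD_ST | BAR_ST_ST;   /* earlier ops ordered before stores */
    unsigned merged  = merge_barriers(acquire, release);

    printf("merged barrier flags: %#x\n", merged);
    return 0;
}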
3
4
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
tcg/region.c | 19 +++++++++----------
9
tcg/optimize.c | 89 +++++++++++++++++++++++++++++---------------------
9
1 file changed, 9 insertions(+), 10 deletions(-)
10
1 file changed, 51 insertions(+), 38 deletions(-)
10
11
11
diff --git a/tcg/region.c b/tcg/region.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/region.c
14
--- a/tcg/optimize.c
14
+++ b/tcg/region.c
15
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static inline bool cross_256mb(void *addr, size_t size)
16
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
/*
17
* We weren't able to allocate a buffer without crossing that boundary,
18
* so make do with the larger portion of the buffer that doesn't cross.
19
- * Returns the new base of the buffer, and adjusts code_gen_buffer_size.
20
+ * Returns the new base and size of the buffer in *obuf and *osize.
21
*/
22
-static inline void *split_cross_256mb(void *buf1, size_t size1)
23
+static inline void split_cross_256mb(void **obuf, size_t *osize,
24
+ void *buf1, size_t size1)
25
{
26
void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
27
size_t size2 = buf1 + size1 - buf2;
28
@@ -XXX,XX +XXX,XX @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
29
buf1 = buf2;
30
}
31
32
- tcg_ctx->code_gen_buffer_size = size1;
33
- return buf1;
34
+ *obuf = buf1;
35
+ *osize = size1;
36
}
37
#endif
38
39
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
40
if (size > tb_size) {
41
size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
42
}
43
- tcg_ctx->code_gen_buffer_size = size;
44
45
#ifdef __mips__
46
if (cross_256mb(buf, size)) {
47
- buf = split_cross_256mb(buf, size);
48
- size = tcg_ctx->code_gen_buffer_size;
49
+ split_cross_256mb(&buf, &size, buf, size);
50
}
51
#endif
52
53
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
54
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
55
56
tcg_ctx->code_gen_buffer = buf;
57
+ tcg_ctx->code_gen_buffer_size = size;
58
return true;
17
return true;
59
}
18
}
60
#elif defined(_WIN32)
19
61
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
20
+static bool fold_mb(OptContext *ctx, TCGOp *op)
62
"allocate %zu bytes for jit buffer", size);
21
+{
63
return false;
22
+ /* Eliminate duplicate and redundant fence instructions. */
23
+ if (ctx->prev_mb) {
24
+ /*
25
+ * Merge two barriers of the same type into one,
26
+ * or a weaker barrier into a stronger one,
27
+ * or two weaker barriers into a stronger one.
28
+ * mb X; mb Y => mb X|Y
29
+ * mb; strl => mb; st
30
+ * ldaq; mb => ld; mb
31
+ * ldaq; strl => ld; mb; st
32
+ * Other combinations are also merged into a strong
33
+ * barrier. This is stricter than specified but for
34
+ * the purposes of TCG is better than not optimizing.
35
+ */
36
+ ctx->prev_mb->args[0] |= op->args[0];
37
+ tcg_op_remove(ctx->tcg, op);
38
+ } else {
39
+ ctx->prev_mb = op;
40
+ }
41
+ return true;
42
+}
43
+
44
+static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
45
+{
46
+ /* Opcodes that touch guest memory stop the mb optimization. */
47
+ ctx->prev_mb = NULL;
48
+ return false;
49
+}
50
+
51
+static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
52
+{
53
+ /* Opcodes that touch guest memory stop the mb optimization. */
54
+ ctx->prev_mb = NULL;
55
+ return false;
56
+}
57
+
58
/* Propagate constants and copies, fold constant expressions. */
59
void tcg_optimize(TCGContext *s)
60
{
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
62
}
63
break;
64
65
+ case INDEX_op_mb:
66
+ done = fold_mb(&ctx, op);
67
+ break;
68
+ case INDEX_op_qemu_ld_i32:
69
+ case INDEX_op_qemu_ld_i64:
70
+ done = fold_qemu_ld(&ctx, op);
71
+ break;
72
+ case INDEX_op_qemu_st_i32:
73
+ case INDEX_op_qemu_st8_i32:
74
+ case INDEX_op_qemu_st_i64:
75
+ done = fold_qemu_st(&ctx, op);
76
+ break;
77
+
78
default:
79
break;
80
}
81
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
82
if (!done) {
83
finish_folding(&ctx, op);
84
}
85
-
86
- /* Eliminate duplicate and redundant fence instructions. */
87
- if (ctx.prev_mb) {
88
- switch (opc) {
89
- case INDEX_op_mb:
90
- /* Merge two barriers of the same type into one,
91
- * or a weaker barrier into a stronger one,
92
- * or two weaker barriers into a stronger one.
93
- * mb X; mb Y => mb X|Y
94
- * mb; strl => mb; st
95
- * ldaq; mb => ld; mb
96
- * ldaq; strl => ld; mb; st
97
- * Other combinations are also merged into a strong
98
- * barrier. This is stricter than specified but for
99
- * the purposes of TCG is better than not optimizing.
100
- */
101
- ctx.prev_mb->args[0] |= op->args[0];
102
- tcg_op_remove(s, op);
103
- break;
104
-
105
- default:
106
- /* Opcodes that end the block stop the optimization. */
107
- if ((def->flags & TCG_OPF_BB_END) == 0) {
108
- break;
109
- }
110
- /* fallthru */
111
- case INDEX_op_qemu_ld_i32:
112
- case INDEX_op_qemu_ld_i64:
113
- case INDEX_op_qemu_st_i32:
114
- case INDEX_op_qemu_st8_i32:
115
- case INDEX_op_qemu_st_i64:
116
- /* Opcodes that touch guest memory stop the optimization. */
117
- ctx.prev_mb = NULL;
118
- break;
119
- }
120
- } else if (opc == INDEX_op_mb) {
121
- ctx.prev_mb = op;
122
- }
64
}
123
}
65
- tcg_ctx->code_gen_buffer_size = size;
66
67
#ifdef __mips__
68
if (cross_256mb(buf, size)) {
69
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
70
/* fallthru */
71
default:
72
/* Split the original buffer. Free the smaller half. */
73
- buf2 = split_cross_256mb(buf, size);
74
- size2 = tcg_ctx->code_gen_buffer_size;
75
+ split_cross_256mb(&buf2, &size2, buf, size);
76
if (buf == buf2) {
77
munmap(buf + size2, size - size2);
78
} else {
79
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
80
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
81
82
tcg_ctx->code_gen_buffer = buf;
83
+ tcg_ctx->code_gen_buffer_size = size;
84
return true;
85
}
124
}
86
87
--
125
--
88
2.25.1
126
2.25.1
89
127
90
128
1
Do not mess around with setting values within tcg_init_ctx.
1
Split out a whole bunch of placeholder functions, which are
2
Put the values into 'region' directly, which is where they
2
currently identical. That won't last as more code gets moved.
3
will live for the lifetime of the program.
3
4
Use CASE_OP_32_64_VEC for some logical operators that previously
5
missed the addition of vectors.
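(Illustration only, not part of the patch: a toy sketch of the resulting structure, one fold_<op> helper per opcode returning true when the op has been fully handled; all names below are invented.)

#include <stdbool.h>
#include <stdio.h>

struct toy_ctx { int dummy; };

static bool fold_const1(struct toy_ctx *ctx) { return false; }  /* placeholder */
static bool fold_const2(struct toy_ctx *ctx) { return false; }  /* placeholder */

static bool fold_add(struct toy_ctx *ctx) { return fold_const2(ctx); }
static bool fold_not(struct toy_ctx *ctx) { return fold_const1(ctx); }

int main(void)
{
    struct toy_ctx ctx = { 0 };
    bool done;

    done = fold_add(&ctx);          /* in the real code, a switch dispatches here */
    printf("add done=%d\n", done);
    done = fold_not(&ctx);
    printf("not done=%d\n", done);
    return 0;
}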
4
6
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
8
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
10
---
9
tcg/region.c | 64 ++++++++++++++++++++++------------------------------
11
tcg/optimize.c | 271 +++++++++++++++++++++++++++++++++++++++----------
10
1 file changed, 27 insertions(+), 37 deletions(-)
12
1 file changed, 219 insertions(+), 52 deletions(-)
11
13
12
diff --git a/tcg/region.c b/tcg/region.c
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/region.c
16
--- a/tcg/optimize.c
15
+++ b/tcg/region.c
17
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static size_t tree_size;
18
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
17
19
}
18
bool in_code_gen_buffer(const void *p)
20
}
21
22
+/*
23
+ * The fold_* functions return true when processing is complete,
24
+ * usually by folding the operation to a constant or to a copy,
25
+ * and calling tcg_opt_gen_{mov,movi}. They may do other things,
26
+ * like collect information about the value produced, for use in
27
+ * optimizing a subsequent operation.
28
+ *
29
+ * These first fold_* functions are all helpers, used by other
30
+ * folders for more specific operations.
31
+ */
32
+
33
+static bool fold_const1(OptContext *ctx, TCGOp *op)
34
+{
35
+ if (arg_is_const(op->args[1])) {
36
+ uint64_t t;
37
+
38
+ t = arg_info(op->args[1])->val;
39
+ t = do_constant_folding(op->opc, t, 0);
40
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
41
+ }
42
+ return false;
43
+}
44
+
45
+static bool fold_const2(OptContext *ctx, TCGOp *op)
46
+{
47
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
48
+ uint64_t t1 = arg_info(op->args[1])->val;
49
+ uint64_t t2 = arg_info(op->args[2])->val;
50
+
51
+ t1 = do_constant_folding(op->opc, t1, t2);
52
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
53
+ }
54
+ return false;
55
+}
56
+
57
+/*
58
+ * These outermost fold_<op> functions are sorted alphabetically.
59
+ */
60
+
61
+static bool fold_add(OptContext *ctx, TCGOp *op)
62
+{
63
+ return fold_const2(ctx, op);
64
+}
65
+
66
+static bool fold_and(OptContext *ctx, TCGOp *op)
67
+{
68
+ return fold_const2(ctx, op);
69
+}
70
+
71
+static bool fold_andc(OptContext *ctx, TCGOp *op)
72
+{
73
+ return fold_const2(ctx, op);
74
+}
75
+
76
static bool fold_call(OptContext *ctx, TCGOp *op)
19
{
77
{
20
- const TCGContext *s = &tcg_init_ctx;
78
TCGContext *s = ctx->tcg;
21
/*
79
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
22
* Much like it is valid to have a pointer to the byte past the
23
* end of an array (so long as you don't dereference it), allow
24
* a pointer to the byte past the end of the code gen buffer.
25
*/
26
- return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
27
+ return (size_t)(p - region.start_aligned) <= region.total_size;
28
}
29
30
#ifdef CONFIG_DEBUG_TCG
31
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
32
}
33
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
34
35
- tcg_ctx->code_gen_buffer = buf;
36
- tcg_ctx->code_gen_buffer_size = size;
37
+ region.start_aligned = buf;
38
+ region.total_size = size;
39
return true;
80
return true;
40
}
81
}
41
#elif defined(_WIN32)
82
42
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
83
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
43
return false;
84
+{
44
}
85
+ return fold_const1(ctx, op);
45
86
+}
46
- tcg_ctx->code_gen_buffer = buf;
87
+
47
- tcg_ctx->code_gen_buffer_size = size;
88
+static bool fold_divide(OptContext *ctx, TCGOp *op)
48
+ region.start_aligned = buf;
89
+{
49
+ region.total_size = size;
90
+ return fold_const2(ctx, op);
91
+}
92
+
93
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
94
+{
95
+ return fold_const2(ctx, op);
96
+}
97
+
98
+static bool fold_exts(OptContext *ctx, TCGOp *op)
99
+{
100
+ return fold_const1(ctx, op);
101
+}
102
+
103
+static bool fold_extu(OptContext *ctx, TCGOp *op)
104
+{
105
+ return fold_const1(ctx, op);
106
+}
107
+
108
static bool fold_mb(OptContext *ctx, TCGOp *op)
109
{
110
/* Eliminate duplicate and redundant fence instructions. */
111
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
50
return true;
112
return true;
51
}
113
}
52
#else
114
53
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
115
+static bool fold_mul(OptContext *ctx, TCGOp *op)
54
/* Request large pages for the buffer. */
116
+{
55
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
117
+ return fold_const2(ctx, op);
56
118
+}
57
- tcg_ctx->code_gen_buffer = buf;
119
+
58
- tcg_ctx->code_gen_buffer_size = size;
120
+static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
59
+ region.start_aligned = buf;
121
+{
60
+ region.total_size = size;
122
+ return fold_const2(ctx, op);
61
return true;
123
+}
124
+
125
+static bool fold_nand(OptContext *ctx, TCGOp *op)
126
+{
127
+ return fold_const2(ctx, op);
128
+}
129
+
130
+static bool fold_neg(OptContext *ctx, TCGOp *op)
131
+{
132
+ return fold_const1(ctx, op);
133
+}
134
+
135
+static bool fold_nor(OptContext *ctx, TCGOp *op)
136
+{
137
+ return fold_const2(ctx, op);
138
+}
139
+
140
+static bool fold_not(OptContext *ctx, TCGOp *op)
141
+{
142
+ return fold_const1(ctx, op);
143
+}
144
+
145
+static bool fold_or(OptContext *ctx, TCGOp *op)
146
+{
147
+ return fold_const2(ctx, op);
148
+}
149
+
150
+static bool fold_orc(OptContext *ctx, TCGOp *op)
151
+{
152
+ return fold_const2(ctx, op);
153
+}
154
+
155
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
156
{
157
/* Opcodes that touch guest memory stop the mb optimization. */
158
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
159
return false;
62
}
160
}
63
161
64
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
162
+static bool fold_remainder(OptContext *ctx, TCGOp *op)
65
return false;
163
+{
66
}
164
+ return fold_const2(ctx, op);
67
/* The size of the mapping may have been adjusted. */
165
+}
68
- size = tcg_ctx->code_gen_buffer_size;
166
+
69
- buf_rx = tcg_ctx->code_gen_buffer;
167
+static bool fold_shift(OptContext *ctx, TCGOp *op)
70
+ buf_rx = region.start_aligned;
168
+{
71
+ size = region.total_size;
169
+ return fold_const2(ctx, op);
72
#endif
170
+}
73
171
+
74
buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
172
+static bool fold_sub(OptContext *ctx, TCGOp *op)
75
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
173
+{
76
#endif
174
+ return fold_const2(ctx, op);
77
175
+}
78
close(fd);
176
+
79
- tcg_ctx->code_gen_buffer = buf_rw;
177
+static bool fold_xor(OptContext *ctx, TCGOp *op)
80
- tcg_ctx->code_gen_buffer_size = size;
178
+{
81
+ region.start_aligned = buf_rw;
179
+ return fold_const2(ctx, op);
82
+ region.total_size = size;
180
+}
83
tcg_splitwx_diff = buf_rx - buf_rw;
181
+
84
182
/* Propagate constants and copies, fold constant expressions. */
85
/* Request large pages for the buffer and the splitwx. */
183
void tcg_optimize(TCGContext *s)
86
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
87
return false;
88
}
89
90
- buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
91
+ buf_rw = region.start_aligned;
92
buf_rx = 0;
93
ret = mach_vm_remap(mach_task_self(),
94
&buf_rx,
95
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
96
*/
97
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
98
{
184
{
99
- void *buf, *aligned, *end;
185
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
100
- size_t total_size;
186
}
101
size_t page_size;
187
break;
102
size_t region_size;
188
103
- size_t n_regions;
189
- CASE_OP_32_64(not):
104
size_t i;
190
- CASE_OP_32_64(neg):
105
bool ok;
191
- CASE_OP_32_64(ext8s):
106
192
- CASE_OP_32_64(ext8u):
107
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
193
- CASE_OP_32_64(ext16s):
108
splitwx, &error_fatal);
194
- CASE_OP_32_64(ext16u):
109
assert(ok);
195
- CASE_OP_32_64(ctpop):
110
196
- case INDEX_op_ext32s_i64:
111
- buf = tcg_init_ctx.code_gen_buffer;
197
- case INDEX_op_ext32u_i64:
112
- total_size = tcg_init_ctx.code_gen_buffer_size;
198
- case INDEX_op_ext_i32_i64:
113
- page_size = qemu_real_host_page_size;
199
- case INDEX_op_extu_i32_i64:
114
- n_regions = tcg_n_regions(total_size, max_cpus);
200
- case INDEX_op_extrl_i64_i32:
201
- case INDEX_op_extrh_i64_i32:
202
- if (arg_is_const(op->args[1])) {
203
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
204
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
205
- continue;
206
- }
207
- break;
115
-
208
-
116
- /* The first region will be 'aligned - buf' bytes larger than the others */
209
CASE_OP_32_64(bswap16):
117
- aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
210
CASE_OP_32_64(bswap32):
118
- g_assert(aligned < tcg_init_ctx.code_gen_buffer + total_size);
211
case INDEX_op_bswap64_i64:
212
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
213
}
214
break;
215
216
- CASE_OP_32_64(add):
217
- CASE_OP_32_64(sub):
218
- CASE_OP_32_64(mul):
219
- CASE_OP_32_64(or):
220
- CASE_OP_32_64(and):
221
- CASE_OP_32_64(xor):
222
- CASE_OP_32_64(shl):
223
- CASE_OP_32_64(shr):
224
- CASE_OP_32_64(sar):
225
- CASE_OP_32_64(rotl):
226
- CASE_OP_32_64(rotr):
227
- CASE_OP_32_64(andc):
228
- CASE_OP_32_64(orc):
229
- CASE_OP_32_64(eqv):
230
- CASE_OP_32_64(nand):
231
- CASE_OP_32_64(nor):
232
- CASE_OP_32_64(muluh):
233
- CASE_OP_32_64(mulsh):
234
- CASE_OP_32_64(div):
235
- CASE_OP_32_64(divu):
236
- CASE_OP_32_64(rem):
237
- CASE_OP_32_64(remu):
238
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
239
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
240
- arg_info(op->args[2])->val);
241
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
242
- continue;
243
- }
244
- break;
119
-
245
-
120
/*
246
CASE_OP_32_64(clz):
121
* Make region_size a multiple of page_size, using aligned as the start.
247
CASE_OP_32_64(ctz):
122
* As a result of this we might end up with a few extra pages at the end of
248
if (arg_is_const(op->args[1])) {
123
* the buffer; we will assign those to the last region.
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
124
*/
250
}
125
- region_size = (total_size - (aligned - buf)) / n_regions;
251
break;
126
+ region.n = tcg_n_regions(region.total_size, max_cpus);
252
127
+ page_size = qemu_real_host_page_size;
253
+ default:
128
+ region_size = region.total_size / region.n;
254
+ break;
129
region_size = QEMU_ALIGN_DOWN(region_size, page_size);
255
+
130
256
+ /* ---------------------------------------------------------- */
131
/* A region must have at least 2 pages; one code, one guard */
257
+ /* Sorted alphabetically by opcode as much as possible. */
132
g_assert(region_size >= 2 * page_size);
258
+
133
+ region.stride = region_size;
259
+ CASE_OP_32_64_VEC(add):
134
+
260
+ done = fold_add(&ctx, op);
135
+ /* Reserve space for guard pages. */
261
+ break;
136
+ region.size = region_size - page_size;
262
+ CASE_OP_32_64_VEC(and):
137
+ region.total_size -= page_size;
263
+ done = fold_and(&ctx, op);
138
+
264
+ break;
139
+ /*
265
+ CASE_OP_32_64_VEC(andc):
140
+ * The first region will be smaller than the others, via the prologue,
266
+ done = fold_andc(&ctx, op);
141
+ * which has yet to be allocated. For now, the first region begins at
267
+ break;
142
+ * the page boundary.
268
+ CASE_OP_32_64(ctpop):
143
+ */
269
+ done = fold_ctpop(&ctx, op);
144
+ region.after_prologue = region.start_aligned;
270
+ break;
145
271
+ CASE_OP_32_64(div):
146
/* init the region struct */
272
+ CASE_OP_32_64(divu):
147
qemu_mutex_init(&region.lock);
273
+ done = fold_divide(&ctx, op);
148
- region.n = n_regions;
274
+ break;
149
- region.size = region_size - page_size;
275
+ CASE_OP_32_64(eqv):
150
- region.stride = region_size;
276
+ done = fold_eqv(&ctx, op);
151
- region.after_prologue = buf;
277
+ break;
152
- region.start_aligned = aligned;
278
+ CASE_OP_32_64(ext8s):
153
- /* page-align the end, since its last page will be a guard page */
279
+ CASE_OP_32_64(ext16s):
154
- end = QEMU_ALIGN_PTR_DOWN(buf + total_size, page_size);
280
+ case INDEX_op_ext32s_i64:
155
- /* account for that last guard page */
281
+ case INDEX_op_ext_i32_i64:
156
- end -= page_size;
282
+ done = fold_exts(&ctx, op);
157
- total_size = end - aligned;
283
+ break;
158
- region.total_size = total_size;
284
+ CASE_OP_32_64(ext8u):
159
285
+ CASE_OP_32_64(ext16u):
160
/*
286
+ case INDEX_op_ext32u_i64:
161
* Set guard pages in the rw buffer, as that's the one into which
287
+ case INDEX_op_extu_i32_i64:
288
+ case INDEX_op_extrl_i64_i32:
289
+ case INDEX_op_extrh_i64_i32:
290
+ done = fold_extu(&ctx, op);
291
+ break;
292
case INDEX_op_mb:
293
done = fold_mb(&ctx, op);
294
break;
295
+ CASE_OP_32_64(mul):
296
+ done = fold_mul(&ctx, op);
297
+ break;
298
+ CASE_OP_32_64(mulsh):
299
+ CASE_OP_32_64(muluh):
300
+ done = fold_mul_highpart(&ctx, op);
301
+ break;
302
+ CASE_OP_32_64(nand):
303
+ done = fold_nand(&ctx, op);
304
+ break;
305
+ CASE_OP_32_64(neg):
306
+ done = fold_neg(&ctx, op);
307
+ break;
308
+ CASE_OP_32_64(nor):
309
+ done = fold_nor(&ctx, op);
310
+ break;
311
+ CASE_OP_32_64_VEC(not):
312
+ done = fold_not(&ctx, op);
313
+ break;
314
+ CASE_OP_32_64_VEC(or):
315
+ done = fold_or(&ctx, op);
316
+ break;
317
+ CASE_OP_32_64_VEC(orc):
318
+ done = fold_orc(&ctx, op);
319
+ break;
320
case INDEX_op_qemu_ld_i32:
321
case INDEX_op_qemu_ld_i64:
322
done = fold_qemu_ld(&ctx, op);
323
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
324
case INDEX_op_qemu_st_i64:
325
done = fold_qemu_st(&ctx, op);
326
break;
327
-
328
- default:
329
+ CASE_OP_32_64(rem):
330
+ CASE_OP_32_64(remu):
331
+ done = fold_remainder(&ctx, op);
332
+ break;
333
+ CASE_OP_32_64(rotl):
334
+ CASE_OP_32_64(rotr):
335
+ CASE_OP_32_64(sar):
336
+ CASE_OP_32_64(shl):
337
+ CASE_OP_32_64(shr):
338
+ done = fold_shift(&ctx, op);
339
+ break;
340
+ CASE_OP_32_64_VEC(sub):
341
+ done = fold_sub(&ctx, op);
342
+ break;
343
+ CASE_OP_32_64_VEC(xor):
344
+ done = fold_xor(&ctx, op);
345
break;
346
}
347
162
--
348
--
163
2.25.1
349
2.25.1
164
350
165
351
1
These variables belong to the jit side, not the user side.
1
Reduce some code duplication by folding the NE and EQ cases.
2
3
Since tcg_init_ctx is no longer used outside of tcg/, move
4
the declaration to tcg-internal.h.
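(Illustration only, not part of the patch: the EQ/NE sharing trick in miniature. Evaluate the EQ form once and flip the 0/1 result with an inv flag for NE, keeping -1 for "unknown"; names below are invented.)

#include <stdio.h>

static int fold_eq(int known, int a, int b)
{
    return known ? (a == b) : -1;
}

static int fold_eq_or_ne(int is_ne, int known, int a, int b)
{
    int i = fold_eq(known, a, b);
    int inv = is_ne ? 1 : 0;

    if (i < 0) {
        return -1;          /* still unknown; xor with inv would be wrong here */
    }
    return i ^ inv;         /* shared path for both conditions */
}

int main(void)
{
    printf("EQ(3,3) = %d\n", fold_eq_or_ne(0, 1, 3, 3));  /* 1 */
    printf("NE(3,3) = %d\n", fold_eq_or_ne(1, 1, 3, 3));  /* 0 */
    printf("NE(?,?) = %d\n", fold_eq_or_ne(1, 0, 3, 4));  /* -1 */
    return 0;
}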
5
2
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
8
Suggested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
include/tcg/tcg.h | 1 -
7
tcg/optimize.c | 145 ++++++++++++++++++++++++-------------------------
12
tcg/tcg-internal.h | 1 +
8
1 file changed, 72 insertions(+), 73 deletions(-)
13
accel/tcg/translate-all.c | 3 ---
14
tcg/tcg.c | 3 +++
15
4 files changed, 4 insertions(+), 4 deletions(-)
16
9
17
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg.h
12
--- a/tcg/optimize.c
20
+++ b/include/tcg/tcg.h
13
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ static inline bool temp_readonly(TCGTemp *ts)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
22
return ts->kind >= TEMP_FIXED;
15
return fold_const2(ctx, op);
23
}
16
}
24
17
25
-extern TCGContext tcg_init_ctx;
18
+static bool fold_setcond2(OptContext *ctx, TCGOp *op)
26
extern __thread TCGContext *tcg_ctx;
19
+{
27
extern const void *tcg_code_gen_epilogue;
20
+ TCGCond cond = op->args[5];
28
extern uintptr_t tcg_splitwx_diff;
21
+ int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
29
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
22
+ int inv = 0;
30
index XXXXXXX..XXXXXXX 100644
31
--- a/tcg/tcg-internal.h
32
+++ b/tcg/tcg-internal.h
33
@@ -XXX,XX +XXX,XX @@
34
35
#define TCG_HIGHWATER 1024
36
37
+extern TCGContext tcg_init_ctx;
38
extern TCGContext **tcg_ctxs;
39
extern unsigned int tcg_cur_ctxs;
40
extern unsigned int tcg_max_ctxs;
41
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/accel/tcg/translate-all.c
44
+++ b/accel/tcg/translate-all.c
45
@@ -XXX,XX +XXX,XX @@ static int v_l2_levels;
46
47
static void *l1_map[V_L1_MAX_SIZE];
48
49
-/* code generation context */
50
-TCGContext tcg_init_ctx;
51
-__thread TCGContext *tcg_ctx;
52
TBContext tb_ctx;
53
54
static void page_table_config_init(void)
55
diff --git a/tcg/tcg.c b/tcg/tcg.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/tcg/tcg.c
58
+++ b/tcg/tcg.c
59
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
60
static int tcg_out_ldst_finalize(TCGContext *s);
61
#endif
62
63
+TCGContext tcg_init_ctx;
64
+__thread TCGContext *tcg_ctx;
65
+
23
+
66
TCGContext **tcg_ctxs;
24
+ if (i >= 0) {
67
unsigned int tcg_cur_ctxs;
25
+ goto do_setcond_const;
68
unsigned int tcg_max_ctxs;
26
+ }
27
+
28
+ switch (cond) {
29
+ case TCG_COND_LT:
30
+ case TCG_COND_GE:
31
+ /*
32
+ * Simplify LT/GE comparisons vs zero to a single compare
33
+ * vs the high word of the input.
34
+ */
35
+ if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
36
+ arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
37
+ goto do_setcond_high;
38
+ }
39
+ break;
40
+
41
+ case TCG_COND_NE:
42
+ inv = 1;
43
+ QEMU_FALLTHROUGH;
44
+ case TCG_COND_EQ:
45
+ /*
46
+ * Simplify EQ/NE comparisons where one of the pairs
47
+ * can be simplified.
48
+ */
49
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
50
+ op->args[3], cond);
51
+ switch (i ^ inv) {
52
+ case 0:
53
+ goto do_setcond_const;
54
+ case 1:
55
+ goto do_setcond_high;
56
+ }
57
+
58
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
59
+ op->args[4], cond);
60
+ switch (i ^ inv) {
61
+ case 0:
62
+ goto do_setcond_const;
63
+ case 1:
64
+ op->args[2] = op->args[3];
65
+ op->args[3] = cond;
66
+ op->opc = INDEX_op_setcond_i32;
67
+ break;
68
+ }
69
+ break;
70
+
71
+ default:
72
+ break;
73
+
74
+ do_setcond_high:
75
+ op->args[1] = op->args[2];
76
+ op->args[2] = op->args[4];
77
+ op->args[3] = cond;
78
+ op->opc = INDEX_op_setcond_i32;
79
+ break;
80
+ }
81
+ return false;
82
+
83
+ do_setcond_const:
84
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
85
+}
86
+
87
static bool fold_shift(OptContext *ctx, TCGOp *op)
88
{
89
return fold_const2(ctx, op);
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
}
92
break;
93
94
- case INDEX_op_setcond2_i32:
95
- i = do_constant_folding_cond2(&op->args[1], &op->args[3],
96
- op->args[5]);
97
- if (i >= 0) {
98
- do_setcond_const:
99
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
100
- continue;
101
- }
102
- if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
103
- && arg_is_const(op->args[3])
104
- && arg_info(op->args[3])->val == 0
105
- && arg_is_const(op->args[4])
106
- && arg_info(op->args[4])->val == 0) {
107
- /* Simplify LT/GE comparisons vs zero to a single compare
108
- vs the high word of the input. */
109
- do_setcond_high:
110
- reset_temp(op->args[0]);
111
- arg_info(op->args[0])->z_mask = 1;
112
- op->opc = INDEX_op_setcond_i32;
113
- op->args[1] = op->args[2];
114
- op->args[2] = op->args[4];
115
- op->args[3] = op->args[5];
116
- break;
117
- }
118
- if (op->args[5] == TCG_COND_EQ) {
119
- /* Simplify EQ comparisons where one of the pairs
120
- can be simplified. */
121
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
122
- op->args[1], op->args[3],
123
- TCG_COND_EQ);
124
- if (i == 0) {
125
- goto do_setcond_const;
126
- } else if (i > 0) {
127
- goto do_setcond_high;
128
- }
129
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
130
- op->args[2], op->args[4],
131
- TCG_COND_EQ);
132
- if (i == 0) {
133
- goto do_setcond_high;
134
- } else if (i < 0) {
135
- break;
136
- }
137
- do_setcond_low:
138
- reset_temp(op->args[0]);
139
- arg_info(op->args[0])->z_mask = 1;
140
- op->opc = INDEX_op_setcond_i32;
141
- op->args[2] = op->args[3];
142
- op->args[3] = op->args[5];
143
- break;
144
- }
145
- if (op->args[5] == TCG_COND_NE) {
146
- /* Simplify NE comparisons where one of the pairs
147
- can be simplified. */
148
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
149
- op->args[1], op->args[3],
150
- TCG_COND_NE);
151
- if (i == 0) {
152
- goto do_setcond_high;
153
- } else if (i > 0) {
154
- goto do_setcond_const;
155
- }
156
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
157
- op->args[2], op->args[4],
158
- TCG_COND_NE);
159
- if (i == 0) {
160
- goto do_setcond_low;
161
- } else if (i > 0) {
162
- goto do_setcond_const;
163
- }
164
- }
165
- break;
166
-
167
default:
168
break;
169
170
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
171
CASE_OP_32_64(shr):
172
done = fold_shift(&ctx, op);
173
break;
174
+ case INDEX_op_setcond2_i32:
175
+ done = fold_setcond2(&ctx, op);
176
+ break;
177
CASE_OP_32_64_VEC(sub):
178
done = fold_sub(&ctx, op);
179
break;
69
--
180
--
70
2.25.1
181
2.25.1
71
182
72
183
New patch
1
Reduce some code duplication by folding the NE and EQ cases.
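
The same trick as for setcond2 applies here; as a sketch (not part of
the patch), the shared EQ/NE leg in the new fold_brcond2() amounts to

    inv = (cond == TCG_COND_NE);
    i = do_constant_folding_cond(INDEX_op_brcond_i32,
                                 op->args[0], op->args[2], cond);
    switch (i ^ inv) {
    case 0:
        goto do_brcond_const;   /* whole result already known */
    case 1:
        goto do_brcond_high;    /* only the high words still matter */
    }

do_constant_folding_cond() returns -1/0/1, so when the low-word pair
tells us nothing (-1) neither case matches and the high-word pair is
tried next.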
1
2
3
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 159 +++++++++++++++++++++++++------------------------
7
1 file changed, 81 insertions(+), 78 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
14
return fold_const2(ctx, op);
15
}
16
17
+static bool fold_brcond2(OptContext *ctx, TCGOp *op)
18
+{
19
+ TCGCond cond = op->args[4];
20
+ int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
21
+ TCGArg label = op->args[5];
22
+ int inv = 0;
23
+
24
+ if (i >= 0) {
25
+ goto do_brcond_const;
26
+ }
27
+
28
+ switch (cond) {
29
+ case TCG_COND_LT:
30
+ case TCG_COND_GE:
31
+ /*
32
+ * Simplify LT/GE comparisons vs zero to a single compare
33
+ * vs the high word of the input.
34
+ */
35
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
36
+ arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
37
+ goto do_brcond_high;
38
+ }
39
+ break;
40
+
41
+ case TCG_COND_NE:
42
+ inv = 1;
43
+ QEMU_FALLTHROUGH;
44
+ case TCG_COND_EQ:
45
+ /*
46
+ * Simplify EQ/NE comparisons where one of the pairs
47
+ * can be simplified.
48
+ */
49
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
50
+ op->args[2], cond);
51
+ switch (i ^ inv) {
52
+ case 0:
53
+ goto do_brcond_const;
54
+ case 1:
55
+ goto do_brcond_high;
56
+ }
57
+
58
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
59
+ op->args[3], cond);
60
+ switch (i ^ inv) {
61
+ case 0:
62
+ goto do_brcond_const;
63
+ case 1:
64
+ op->opc = INDEX_op_brcond_i32;
65
+ op->args[1] = op->args[2];
66
+ op->args[2] = cond;
67
+ op->args[3] = label;
68
+ break;
69
+ }
70
+ break;
71
+
72
+ default:
73
+ break;
74
+
75
+ do_brcond_high:
76
+ op->opc = INDEX_op_brcond_i32;
77
+ op->args[0] = op->args[1];
78
+ op->args[1] = op->args[3];
79
+ op->args[2] = cond;
80
+ op->args[3] = label;
81
+ break;
82
+
83
+ do_brcond_const:
84
+ if (i == 0) {
85
+ tcg_op_remove(ctx->tcg, op);
86
+ return true;
87
+ }
88
+ op->opc = INDEX_op_br;
89
+ op->args[0] = label;
90
+ break;
91
+ }
92
+ return false;
93
+}
94
+
95
static bool fold_call(OptContext *ctx, TCGOp *op)
96
{
97
TCGContext *s = ctx->tcg;
98
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
99
}
100
break;
101
102
- case INDEX_op_brcond2_i32:
103
- i = do_constant_folding_cond2(&op->args[0], &op->args[2],
104
- op->args[4]);
105
- if (i == 0) {
106
- do_brcond_false:
107
- tcg_op_remove(s, op);
108
- continue;
109
- }
110
- if (i > 0) {
111
- do_brcond_true:
112
- op->opc = opc = INDEX_op_br;
113
- op->args[0] = op->args[5];
114
- break;
115
- }
116
- if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
117
- && arg_is_const(op->args[2])
118
- && arg_info(op->args[2])->val == 0
119
- && arg_is_const(op->args[3])
120
- && arg_info(op->args[3])->val == 0) {
121
- /* Simplify LT/GE comparisons vs zero to a single compare
122
- vs the high word of the input. */
123
- do_brcond_high:
124
- op->opc = opc = INDEX_op_brcond_i32;
125
- op->args[0] = op->args[1];
126
- op->args[1] = op->args[3];
127
- op->args[2] = op->args[4];
128
- op->args[3] = op->args[5];
129
- break;
130
- }
131
- if (op->args[4] == TCG_COND_EQ) {
132
- /* Simplify EQ comparisons where one of the pairs
133
- can be simplified. */
134
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
135
- op->args[0], op->args[2],
136
- TCG_COND_EQ);
137
- if (i == 0) {
138
- goto do_brcond_false;
139
- } else if (i > 0) {
140
- goto do_brcond_high;
141
- }
142
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
143
- op->args[1], op->args[3],
144
- TCG_COND_EQ);
145
- if (i == 0) {
146
- goto do_brcond_false;
147
- } else if (i < 0) {
148
- break;
149
- }
150
- do_brcond_low:
151
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
152
- op->opc = INDEX_op_brcond_i32;
153
- op->args[1] = op->args[2];
154
- op->args[2] = op->args[4];
155
- op->args[3] = op->args[5];
156
- break;
157
- }
158
- if (op->args[4] == TCG_COND_NE) {
159
- /* Simplify NE comparisons where one of the pairs
160
- can be simplified. */
161
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
162
- op->args[0], op->args[2],
163
- TCG_COND_NE);
164
- if (i == 0) {
165
- goto do_brcond_high;
166
- } else if (i > 0) {
167
- goto do_brcond_true;
168
- }
169
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
170
- op->args[1], op->args[3],
171
- TCG_COND_NE);
172
- if (i == 0) {
173
- goto do_brcond_low;
174
- } else if (i > 0) {
175
- goto do_brcond_true;
176
- }
177
- }
178
- break;
179
-
180
default:
181
break;
182
183
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
184
CASE_OP_32_64_VEC(andc):
185
done = fold_andc(&ctx, op);
186
break;
187
+ case INDEX_op_brcond2_i32:
188
+ done = fold_brcond2(&ctx, op);
189
+ break;
190
CASE_OP_32_64(ctpop):
191
done = fold_ctpop(&ctx, op);
192
break;
193
--
194
2.25.1
195
196
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 33 +++++++++++++++++++--------------
6
1 file changed, 19 insertions(+), 14 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_brcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGCond cond = op->args[2];
19
+ int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
20
+
21
+ if (i == 0) {
22
+ tcg_op_remove(ctx->tcg, op);
23
+ return true;
24
+ }
25
+ if (i > 0) {
26
+ op->opc = INDEX_op_br;
27
+ op->args[0] = op->args[3];
28
+ }
29
+ return false;
30
+}
31
+
32
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
33
{
34
TCGCond cond = op->args[4];
35
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
36
}
37
break;
38
39
- CASE_OP_32_64(brcond):
40
- i = do_constant_folding_cond(opc, op->args[0],
41
- op->args[1], op->args[2]);
42
- if (i == 0) {
43
- tcg_op_remove(s, op);
44
- continue;
45
- } else if (i > 0) {
46
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
47
- op->opc = opc = INDEX_op_br;
48
- op->args[0] = op->args[3];
49
- break;
50
- }
51
- break;
52
-
53
CASE_OP_32_64(movcond):
54
i = do_constant_folding_cond(opc, op->args[1],
55
op->args[2], op->args[5]);
56
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
57
CASE_OP_32_64_VEC(andc):
58
done = fold_andc(&ctx, op);
59
break;
60
+ CASE_OP_32_64(brcond):
61
+ done = fold_brcond(&ctx, op);
62
+ break;
63
case INDEX_op_brcond2_i32:
64
done = fold_brcond2(&ctx, op);
65
break;
66
--
67
2.25.1
68
69
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 23 ++++++++++++++---------
6
1 file changed, 14 insertions(+), 9 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_setcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGCond cond = op->args[3];
19
+ int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
20
+
21
+ if (i >= 0) {
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
+ }
24
+ return false;
25
+}
26
+
27
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
28
{
29
TCGCond cond = op->args[5];
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
31
}
32
break;
33
34
- CASE_OP_32_64(setcond):
35
- i = do_constant_folding_cond(opc, op->args[1],
36
- op->args[2], op->args[3]);
37
- if (i >= 0) {
38
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
39
- continue;
40
- }
41
- break;
42
-
43
CASE_OP_32_64(movcond):
44
i = do_constant_folding_cond(opc, op->args[1],
45
op->args[2], op->args[5]);
46
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
47
CASE_OP_32_64(shr):
48
done = fold_shift(&ctx, op);
49
break;
50
+ CASE_OP_32_64(setcond):
51
+ done = fold_setcond(&ctx, op);
52
+ break;
53
case INDEX_op_setcond2_i32:
54
done = fold_setcond2(&ctx, op);
55
break;
56
--
57
2.25.1
58
59
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 37 +++++++++++++++++++++----------------
6
1 file changed, 21 insertions(+), 16 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
19
+ uint32_t a = arg_info(op->args[2])->val;
20
+ uint32_t b = arg_info(op->args[3])->val;
21
+ uint64_t r = (uint64_t)a * b;
22
+ TCGArg rl, rh;
23
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
24
+
25
+ rl = op->args[0];
26
+ rh = op->args[1];
27
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
28
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
29
+ return true;
30
+ }
31
+ return false;
32
+}
33
+
34
static bool fold_nand(OptContext *ctx, TCGOp *op)
35
{
36
return fold_const2(ctx, op);
37
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
38
}
39
break;
40
41
- case INDEX_op_mulu2_i32:
42
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
43
- uint32_t a = arg_info(op->args[2])->val;
44
- uint32_t b = arg_info(op->args[3])->val;
45
- uint64_t r = (uint64_t)a * b;
46
- TCGArg rl, rh;
47
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
48
-
49
- rl = op->args[0];
50
- rh = op->args[1];
51
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
52
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
53
- continue;
54
- }
55
- break;
56
-
57
default:
58
break;
59
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
61
CASE_OP_32_64(muluh):
62
done = fold_mul_highpart(&ctx, op);
63
break;
64
+ case INDEX_op_mulu2_i32:
65
+ done = fold_mulu2_i32(&ctx, op);
66
+ break;
67
CASE_OP_32_64(nand):
68
done = fold_nand(&ctx, op);
69
break;
70
--
71
2.25.1
72
73
1
Give the field a name reflecting its actual meaning.
1
Add two additional helpers, fold_add2_i32 and fold_sub2_i32,
2
which will not be simple wrappers forever.
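
For illustration (not part of the patch), with all four input halves
constant the pair folds into two 32-bit constants computed in 64-bit
arithmetic, e.g.

    add2_i32  rl, rh, $0xffffffff, $0, $1, $0
        =>
    mov_i32   rl, $0      /* low half of 0xffffffff + 1 */
    mov_i32   rh, $1      /* carry into the high half */

fold_addsub2_i32() reassembles a = (ah << 32) | al and b likewise,
adds or subtracts, and writes the halves back with tcg_opt_gen_movi(),
inserting a second mov before the original op for the high part.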
2
3
3
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
tcg/region.c | 15 ++++++++-------
8
tcg/optimize.c | 70 +++++++++++++++++++++++++++++++-------------------
8
1 file changed, 8 insertions(+), 7 deletions(-)
9
1 file changed, 44 insertions(+), 26 deletions(-)
9
10
10
diff --git a/tcg/region.c b/tcg/region.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/region.c
13
--- a/tcg/optimize.c
13
+++ b/tcg/region.c
14
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ struct tcg_region_state {
15
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
15
QemuMutex lock;
16
return fold_const2(ctx, op);
16
17
}
17
/* fields set at init time */
18
18
- void *start;
19
+static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
19
void *start_aligned;
20
+{
20
+ void *after_prologue;
21
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
21
size_t n;
22
+ arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
22
size_t size; /* size of one region */
23
+ uint32_t al = arg_info(op->args[2])->val;
23
size_t stride; /* .size + guard size */
24
+ uint32_t ah = arg_info(op->args[3])->val;
24
@@ -XXX,XX +XXX,XX @@ static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
25
+ uint32_t bl = arg_info(op->args[4])->val;
25
end = start + region.size;
26
+ uint32_t bh = arg_info(op->args[5])->val;
26
27
+ uint64_t a = ((uint64_t)ah << 32) | al;
27
if (curr_region == 0) {
28
+ uint64_t b = ((uint64_t)bh << 32) | bl;
28
- start = region.start;
29
+ TCGArg rl, rh;
29
+ start = region.after_prologue;
30
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
30
}
31
+
31
/* The final region may have a few extra pages due to earlier rounding. */
32
+ if (add) {
32
if (curr_region == region.n - 1) {
33
+ a += b;
33
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
34
+ } else {
34
region.n = n_regions;
35
+ a -= b;
35
region.size = region_size - page_size;
36
+ }
36
region.stride = region_size;
37
+
37
- region.start = buf;
38
+ rl = op->args[0];
38
+ region.after_prologue = buf;
39
+ rh = op->args[1];
39
region.start_aligned = aligned;
40
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
40
/* page-align the end, since its last page will be a guard page */
41
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
41
end = QEMU_ALIGN_PTR_DOWN(buf + total_size, page_size);
42
+ return true;
42
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
43
+ }
43
void tcg_region_prologue_set(TCGContext *s)
44
+ return false;
45
+}
46
+
47
+static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
48
+{
49
+ return fold_addsub2_i32(ctx, op, true);
50
+}
51
+
52
static bool fold_and(OptContext *ctx, TCGOp *op)
44
{
53
{
45
/* Deduct the prologue from the first region. */
54
return fold_const2(ctx, op);
46
- g_assert(region.start == s->code_gen_buffer);
55
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
47
- region.start = s->code_ptr;
56
return fold_const2(ctx, op);
48
+ g_assert(region.start_aligned == s->code_gen_buffer);
49
+ region.after_prologue = s->code_ptr;
50
51
/* Recompute boundaries of the first region. */
52
tcg_region_assign(s, 0);
53
54
/* Register the balance of the buffer with gdb. */
55
- tcg_register_jit(tcg_splitwx_to_rx(region.start),
56
- region.start_aligned + region.total_size - region.start);
57
+ tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
58
+ region.start_aligned + region.total_size -
59
+ region.after_prologue);
60
}
57
}
61
58
62
/*
59
+static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
60
+{
61
+ return fold_addsub2_i32(ctx, op, false);
62
+}
63
+
64
static bool fold_xor(OptContext *ctx, TCGOp *op)
65
{
66
return fold_const2(ctx, op);
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
68
}
69
break;
70
71
- case INDEX_op_add2_i32:
72
- case INDEX_op_sub2_i32:
73
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
74
- && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
75
- uint32_t al = arg_info(op->args[2])->val;
76
- uint32_t ah = arg_info(op->args[3])->val;
77
- uint32_t bl = arg_info(op->args[4])->val;
78
- uint32_t bh = arg_info(op->args[5])->val;
79
- uint64_t a = ((uint64_t)ah << 32) | al;
80
- uint64_t b = ((uint64_t)bh << 32) | bl;
81
- TCGArg rl, rh;
82
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
83
-
84
- if (opc == INDEX_op_add2_i32) {
85
- a += b;
86
- } else {
87
- a -= b;
88
- }
89
-
90
- rl = op->args[0];
91
- rh = op->args[1];
92
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
93
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
94
- continue;
95
- }
96
- break;
97
98
default:
99
break;
100
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
101
CASE_OP_32_64_VEC(add):
102
done = fold_add(&ctx, op);
103
break;
104
+ case INDEX_op_add2_i32:
105
+ done = fold_add2_i32(&ctx, op);
106
+ break;
107
CASE_OP_32_64_VEC(and):
108
done = fold_and(&ctx, op);
109
break;
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
CASE_OP_32_64_VEC(sub):
112
done = fold_sub(&ctx, op);
113
break;
114
+ case INDEX_op_sub2_i32:
115
+ done = fold_sub2_i32(&ctx, op);
116
+ break;
117
CASE_OP_32_64_VEC(xor):
118
done = fold_xor(&ctx, op);
119
break;
63
--
120
--
64
2.25.1
121
2.25.1
65
122
66
123
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 56 ++++++++++++++++++++++++++++----------------------
6
1 file changed, 31 insertions(+), 25 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
13
return true;
14
}
15
16
+static bool fold_movcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGOpcode opc = op->opc;
19
+ TCGCond cond = op->args[5];
20
+ int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
21
+
22
+ if (i >= 0) {
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
+ }
25
+
26
+ if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
27
+ uint64_t tv = arg_info(op->args[3])->val;
28
+ uint64_t fv = arg_info(op->args[4])->val;
29
+
30
+ opc = (opc == INDEX_op_movcond_i32
31
+ ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
32
+
33
+ if (tv == 1 && fv == 0) {
34
+ op->opc = opc;
35
+ op->args[3] = cond;
36
+ } else if (fv == 1 && tv == 0) {
37
+ op->opc = opc;
38
+ op->args[3] = tcg_invert_cond(cond);
39
+ }
40
+ }
41
+ return false;
42
+}
43
+
44
static bool fold_mul(OptContext *ctx, TCGOp *op)
45
{
46
return fold_const2(ctx, op);
47
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
48
}
49
break;
50
51
- CASE_OP_32_64(movcond):
52
- i = do_constant_folding_cond(opc, op->args[1],
53
- op->args[2], op->args[5]);
54
- if (i >= 0) {
55
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
56
- continue;
57
- }
58
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
59
- uint64_t tv = arg_info(op->args[3])->val;
60
- uint64_t fv = arg_info(op->args[4])->val;
61
- TCGCond cond = op->args[5];
62
-
63
- if (fv == 1 && tv == 0) {
64
- cond = tcg_invert_cond(cond);
65
- } else if (!(tv == 1 && fv == 0)) {
66
- break;
67
- }
68
- op->args[3] = cond;
69
- op->opc = opc = (opc == INDEX_op_movcond_i32
70
- ? INDEX_op_setcond_i32
71
- : INDEX_op_setcond_i64);
72
- }
73
- break;
74
-
75
-
76
default:
77
break;
78
79
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
80
case INDEX_op_mb:
81
done = fold_mb(&ctx, op);
82
break;
83
+ CASE_OP_32_64(movcond):
84
+ done = fold_movcond(&ctx, op);
85
+ break;
86
CASE_OP_32_64(mul):
87
done = fold_mul(&ctx, op);
88
break;
89
--
90
2.25.1
91
92
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 39 ++++++++++++++++++++++-----------------
6
1 file changed, 22 insertions(+), 17 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_extract2(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
19
+ uint64_t v1 = arg_info(op->args[1])->val;
20
+ uint64_t v2 = arg_info(op->args[2])->val;
21
+ int shr = op->args[3];
22
+
23
+ if (op->opc == INDEX_op_extract2_i64) {
24
+ v1 >>= shr;
25
+ v2 <<= 64 - shr;
26
+ } else {
27
+ v1 = (uint32_t)v1 >> shr;
28
+ v2 = (int32_t)v2 << (32 - shr);
29
+ }
30
+ return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
31
+ }
32
+ return false;
33
+}
34
+
35
static bool fold_exts(OptContext *ctx, TCGOp *op)
36
{
37
return fold_const1(ctx, op);
38
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
}
40
break;
41
42
- CASE_OP_32_64(extract2):
43
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
44
- uint64_t v1 = arg_info(op->args[1])->val;
45
- uint64_t v2 = arg_info(op->args[2])->val;
46
- int shr = op->args[3];
47
-
48
- if (opc == INDEX_op_extract2_i64) {
49
- tmp = (v1 >> shr) | (v2 << (64 - shr));
50
- } else {
51
- tmp = (int32_t)(((uint32_t)v1 >> shr) |
52
- ((uint32_t)v2 << (32 - shr)));
53
- }
54
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
55
- continue;
56
- }
57
- break;
58
-
59
default:
60
break;
61
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
63
CASE_OP_32_64(eqv):
64
done = fold_eqv(&ctx, op);
65
break;
66
+ CASE_OP_32_64(extract2):
67
+ done = fold_extract2(&ctx, op);
68
+ break;
69
CASE_OP_32_64(ext8s):
70
CASE_OP_32_64(ext16s):
71
case INDEX_op_ext32s_i64:
72
--
73
2.25.1
74
75
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 48 ++++++++++++++++++++++++++++++------------------
6
1 file changed, 30 insertions(+), 18 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_extract(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t;
20
+
21
+ t = arg_info(op->args[1])->val;
22
+ t = extract64(t, op->args[2], op->args[3]);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return false;
26
+}
27
+
28
static bool fold_extract2(OptContext *ctx, TCGOp *op)
29
{
30
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
32
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
33
}
34
35
+static bool fold_sextract(OptContext *ctx, TCGOp *op)
36
+{
37
+ if (arg_is_const(op->args[1])) {
38
+ uint64_t t;
39
+
40
+ t = arg_info(op->args[1])->val;
41
+ t = sextract64(t, op->args[2], op->args[3]);
42
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
43
+ }
44
+ return false;
45
+}
46
+
47
static bool fold_shift(OptContext *ctx, TCGOp *op)
48
{
49
return fold_const2(ctx, op);
50
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
51
}
52
break;
53
54
- CASE_OP_32_64(extract):
55
- if (arg_is_const(op->args[1])) {
56
- tmp = extract64(arg_info(op->args[1])->val,
57
- op->args[2], op->args[3]);
58
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
59
- continue;
60
- }
61
- break;
62
-
63
- CASE_OP_32_64(sextract):
64
- if (arg_is_const(op->args[1])) {
65
- tmp = sextract64(arg_info(op->args[1])->val,
66
- op->args[2], op->args[3]);
67
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
68
- continue;
69
- }
70
- break;
71
-
72
default:
73
break;
74
75
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
76
CASE_OP_32_64(eqv):
77
done = fold_eqv(&ctx, op);
78
break;
79
+ CASE_OP_32_64(extract):
80
+ done = fold_extract(&ctx, op);
81
+ break;
82
CASE_OP_32_64(extract2):
83
done = fold_extract2(&ctx, op);
84
break;
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
case INDEX_op_setcond2_i32:
87
done = fold_setcond2(&ctx, op);
88
break;
89
+ CASE_OP_32_64(sextract):
90
+ done = fold_sextract(&ctx, op);
91
+ break;
92
CASE_OP_32_64_VEC(sub):
93
done = fold_sub(&ctx, op);
94
break;
95
--
96
2.25.1
97
98
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 25 +++++++++++++++----------
6
1 file changed, 15 insertions(+), 10 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
13
return fold_const1(ctx, op);
14
}
15
16
+static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
19
+ uint64_t t1 = arg_info(op->args[1])->val;
20
+ uint64_t t2 = arg_info(op->args[2])->val;
21
+
22
+ t1 = deposit64(t1, op->args[3], op->args[4], t2);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
24
+ }
25
+ return false;
26
+}
27
+
28
static bool fold_divide(OptContext *ctx, TCGOp *op)
29
{
30
return fold_const2(ctx, op);
31
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
32
}
33
break;
34
35
- CASE_OP_32_64(deposit):
36
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
37
- tmp = deposit64(arg_info(op->args[1])->val,
38
- op->args[3], op->args[4],
39
- arg_info(op->args[2])->val);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
44
-
45
default:
46
break;
47
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
CASE_OP_32_64(ctpop):
50
done = fold_ctpop(&ctx, op);
51
break;
52
+ CASE_OP_32_64(deposit):
53
+ done = fold_deposit(&ctx, op);
54
+ break;
55
CASE_OP_32_64(div):
56
CASE_OP_32_64(divu):
57
done = fold_divide(&ctx, op);
58
--
59
2.25.1
60
61
New patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 32 ++++++++++++++++++--------------
6
1 file changed, 18 insertions(+), 14 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
13
return true;
14
}
15
16
+static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+
21
+ if (t != 0) {
22
+ t = do_constant_folding(op->opc, t, 0);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
26
+ }
27
+ return false;
28
+}
29
+
30
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
31
{
32
return fold_const1(ctx, op);
33
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
}
35
break;
36
37
- CASE_OP_32_64(clz):
38
- CASE_OP_32_64(ctz):
39
- if (arg_is_const(op->args[1])) {
40
- TCGArg v = arg_info(op->args[1])->val;
41
- if (v != 0) {
42
- tmp = do_constant_folding(opc, v, 0);
43
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
44
- } else {
45
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
46
- }
47
- continue;
48
- }
49
- break;
50
-
51
default:
52
break;
53
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
case INDEX_op_brcond2_i32:
56
done = fold_brcond2(&ctx, op);
57
break;
58
+ CASE_OP_32_64(clz):
59
+ CASE_OP_32_64(ctz):
60
+ done = fold_count_zeros(&ctx, op);
61
+ break;
62
CASE_OP_32_64(ctpop):
63
done = fold_ctpop(&ctx, op);
64
break;
65
--
66
2.25.1
67
68
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
4
---
6
meson.build | 4 +---
5
tcg/optimize.c | 27 ++++++++++++++++-----------
7
fpu/meson.build | 1 +
6
1 file changed, 16 insertions(+), 11 deletions(-)
8
2 files changed, 2 insertions(+), 3 deletions(-)
9
create mode 100644 fpu/meson.build
10
7
11
diff --git a/meson.build b/meson.build
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/meson.build
10
--- a/tcg/optimize.c
14
+++ b/meson.build
11
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ subdir('softmmu')
12
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
13
return false;
17
common_ss.add(capstone)
14
}
18
specific_ss.add(files('cpu.c', 'disas.c', 'gdbstub.c'), capstone)
15
19
-specific_ss.add(when: 'CONFIG_TCG', if_true: files(
16
+static bool fold_bswap(OptContext *ctx, TCGOp *op)
20
- 'fpu/softfloat.c',
17
+{
21
-))
18
+ if (arg_is_const(op->args[1])) {
22
19
+ uint64_t t = arg_info(op->args[1])->val;
23
# Work around a gcc bug/misfeature wherein constant propagation looks
20
+
24
# through an alias:
21
+ t = do_constant_folding(op->opc, t, op->args[2]);
25
@@ -XXX,XX +XXX,XX @@ subdir('replay')
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
subdir('semihosting')
23
+ }
27
subdir('hw')
24
+ return false;
28
subdir('tcg')
25
+}
29
+subdir('fpu')
26
+
30
subdir('accel')
27
static bool fold_call(OptContext *ctx, TCGOp *op)
31
subdir('plugins')
28
{
32
subdir('bsd-user')
29
TCGContext *s = ctx->tcg;
33
diff --git a/fpu/meson.build b/fpu/meson.build
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
new file mode 100644
31
}
35
index XXXXXXX..XXXXXXX
32
break;
36
--- /dev/null
33
37
+++ b/fpu/meson.build
34
- CASE_OP_32_64(bswap16):
38
@@ -0,0 +1 @@
35
- CASE_OP_32_64(bswap32):
39
+specific_ss.add(when: 'CONFIG_TCG', if_true: files('softfloat.c'))
36
- case INDEX_op_bswap64_i64:
37
- if (arg_is_const(op->args[1])) {
38
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
39
- op->args[2]);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
44
-
45
default:
46
break;
47
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
case INDEX_op_brcond2_i32:
50
done = fold_brcond2(&ctx, op);
51
break;
52
+ CASE_OP_32_64(bswap16):
53
+ CASE_OP_32_64(bswap32):
54
+ case INDEX_op_bswap64_i64:
55
+ done = fold_bswap(&ctx, op);
56
+ break;
57
CASE_OP_32_64(clz):
58
CASE_OP_32_64(ctz):
59
done = fold_count_zeros(&ctx, op);
40
--
60
--
41
2.25.1
61
2.25.1
42
62
43
63
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
4
---
6
meson.build | 8 +-------
5
tcg/optimize.c | 53 +++++++++++++++++++++++++++++---------------------
7
tcg/meson.build | 13 +++++++++++++
6
1 file changed, 31 insertions(+), 22 deletions(-)
8
2 files changed, 14 insertions(+), 7 deletions(-)
9
create mode 100644 tcg/meson.build
10
7
11
diff --git a/meson.build b/meson.build
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/meson.build
10
--- a/tcg/optimize.c
14
+++ b/meson.build
11
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ common_ss.add(capstone)
12
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
16
specific_ss.add(files('cpu.c', 'disas.c', 'gdbstub.c'), capstone)
13
return fold_const2(ctx, op);
17
specific_ss.add(when: 'CONFIG_TCG', if_true: files(
14
}
18
'fpu/softfloat.c',
15
19
- 'tcg/optimize.c',
16
+static bool fold_dup(OptContext *ctx, TCGOp *op)
20
- 'tcg/tcg-common.c',
17
+{
21
- 'tcg/tcg-op-gvec.c',
18
+ if (arg_is_const(op->args[1])) {
22
- 'tcg/tcg-op-vec.c',
19
+ uint64_t t = arg_info(op->args[1])->val;
23
- 'tcg/tcg-op.c',
20
+ t = dup_const(TCGOP_VECE(op), t);
24
- 'tcg/tcg.c',
21
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
25
))
22
+ }
26
-specific_ss.add(when: 'CONFIG_TCG_INTERPRETER', if_true: files('tcg/tci.c'))
23
+ return false;
27
24
+}
28
# Work around a gcc bug/misfeature wherein constant propagation looks
29
# through an alias:
30
@@ -XXX,XX +XXX,XX @@ subdir('net')
31
subdir('replay')
32
subdir('semihosting')
33
subdir('hw')
34
+subdir('tcg')
35
subdir('accel')
36
subdir('plugins')
37
subdir('bsd-user')
38
diff --git a/tcg/meson.build b/tcg/meson.build
39
new file mode 100644
40
index XXXXXXX..XXXXXXX
41
--- /dev/null
42
+++ b/tcg/meson.build
43
@@ -XXX,XX +XXX,XX @@
44
+tcg_ss = ss.source_set()
45
+
25
+
46
+tcg_ss.add(files(
26
+static bool fold_dup2(OptContext *ctx, TCGOp *op)
47
+ 'optimize.c',
27
+{
48
+ 'tcg.c',
28
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
49
+ 'tcg-common.c',
29
+ uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
50
+ 'tcg-op.c',
30
+ arg_info(op->args[2])->val);
51
+ 'tcg-op-gvec.c',
31
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
52
+ 'tcg-op-vec.c',
32
+ }
53
+))
54
+tcg_ss.add(when: 'CONFIG_TCG_INTERPRETER', if_true: files('tci.c'))
55
+
33
+
56
+specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
34
+ if (args_are_copies(op->args[1], op->args[2])) {
35
+ op->opc = INDEX_op_dup_vec;
36
+ TCGOP_VECE(op) = MO_32;
37
+ }
38
+ return false;
39
+}
40
+
41
static bool fold_eqv(OptContext *ctx, TCGOp *op)
42
{
43
return fold_const2(ctx, op);
44
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
45
done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
46
break;
47
48
- case INDEX_op_dup_vec:
49
- if (arg_is_const(op->args[1])) {
50
- tmp = arg_info(op->args[1])->val;
51
- tmp = dup_const(TCGOP_VECE(op), tmp);
52
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
53
- continue;
54
- }
55
- break;
56
-
57
- case INDEX_op_dup2_vec:
58
- assert(TCG_TARGET_REG_BITS == 32);
59
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
60
- tcg_opt_gen_movi(&ctx, op, op->args[0],
61
- deposit64(arg_info(op->args[1])->val, 32, 32,
62
- arg_info(op->args[2])->val));
63
- continue;
64
- } else if (args_are_copies(op->args[1], op->args[2])) {
65
- op->opc = INDEX_op_dup_vec;
66
- TCGOP_VECE(op) = MO_32;
67
- }
68
- break;
69
-
70
default:
71
break;
72
73
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
74
CASE_OP_32_64(divu):
75
done = fold_divide(&ctx, op);
76
break;
77
+ case INDEX_op_dup_vec:
78
+ done = fold_dup(&ctx, op);
79
+ break;
80
+ case INDEX_op_dup2_vec:
81
+ done = fold_dup2(&ctx, op);
82
+ break;
83
CASE_OP_32_64(eqv):
84
done = fold_eqv(&ctx, op);
85
break;
57
--
86
--
58
2.25.1
87
2.25.1
59
88
60
89
1
Finish the divorce of tcg/ from hw/, and do not take
1
This is the final entry in the main switch that was in a
2
the max cpu value from MachineState; just remember what
2
different form. After this, we have the option to convert
3
we were passed in tcg_init.
3
the switch into a function dispatch table.
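
A hypothetical sketch of such a dispatch table (nothing in this series
implements it; FoldFn is an invented name):

    typedef bool (*FoldFn)(OptContext *ctx, TCGOp *op);

    static const FoldFn fold_table[NB_OPS] = {
        [INDEX_op_mb]          = fold_mb,
        [INDEX_op_qemu_ld_i32] = fold_qemu_ld,
        /* ... one entry per opcode ... */
    };

    FoldFn fn = fold_table[op->opc];
    done = fn ? fn(&ctx, op) : false;

The CASE_OP_32_64() groupings would have to be expanded into per-opcode
entries for this to work.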
4
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
8
---
10
tcg/tcg-internal.h | 3 ++-
9
tcg/optimize.c | 27 ++++++++++++++-------------
11
tcg/region.c | 6 +++---
10
1 file changed, 14 insertions(+), 13 deletions(-)
12
tcg/tcg.c | 23 ++++++++++-------------
13
3 files changed, 15 insertions(+), 17 deletions(-)
14
11
15
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/tcg-internal.h
14
--- a/tcg/optimize.c
18
+++ b/tcg/tcg-internal.h
15
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
20
#define TCG_HIGHWATER 1024
17
return true;
21
18
}
22
extern TCGContext **tcg_ctxs;
19
23
-extern unsigned int n_tcg_ctxs;
20
+static bool fold_mov(OptContext *ctx, TCGOp *op)
24
+extern unsigned int tcg_cur_ctxs;
21
+{
25
+extern unsigned int tcg_max_ctxs;
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
26
23
+}
27
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus);
24
+
28
bool tcg_region_alloc(TCGContext *s);
25
static bool fold_movcond(OptContext *ctx, TCGOp *op)
29
diff --git a/tcg/region.c b/tcg/region.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/tcg/region.c
32
+++ b/tcg/region.c
33
@@ -XXX,XX +XXX,XX @@ void tcg_region_initial_alloc(TCGContext *s)
34
/* Call from a safe-work context */
35
void tcg_region_reset_all(void)
36
{
26
{
37
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
27
TCGOpcode opc = op->opc;
38
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
28
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
unsigned int i;
29
break;
40
30
}
41
qemu_mutex_lock(&region.lock);
31
42
@@ -XXX,XX +XXX,XX @@ void tcg_region_prologue_set(TCGContext *s)
32
- /* Propagate constants through copy operations and do constant
43
*/
33
- folding. Constants will be substituted to arguments by register
44
size_t tcg_code_size(void)
34
- allocator where needed and possible. Also detect copies. */
45
{
35
+ /*
46
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
36
+ * Process each opcode.
47
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
37
+ * Sorted alphabetically by opcode as much as possible.
48
unsigned int i;
38
+ */
49
size_t total;
39
switch (opc) {
50
40
- CASE_OP_32_64_VEC(mov):
51
@@ -XXX,XX +XXX,XX @@ size_t tcg_code_capacity(void)
41
- done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
52
42
- break;
53
size_t tcg_tb_phys_invalidate_count(void)
54
{
55
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
56
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
57
unsigned int i;
58
size_t total = 0;
59
60
diff --git a/tcg/tcg.c b/tcg/tcg.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/tcg/tcg.c
63
+++ b/tcg/tcg.c
64
@@ -XXX,XX +XXX,XX @@
65
#define NO_CPU_IO_DEFS
66
67
#include "exec/exec-all.h"
68
-
43
-
69
-#if !defined(CONFIG_USER_ONLY)
44
- default:
70
-#include "hw/boards.h"
45
- break;
71
-#endif
72
-
46
-
73
#include "tcg/tcg-op.h"
47
- /* ---------------------------------------------------------- */
74
48
- /* Sorted alphabetically by opcode as much as possible. */
75
#if UINTPTR_MAX == UINT32_MAX
49
-
76
@@ -XXX,XX +XXX,XX @@ static int tcg_out_ldst_finalize(TCGContext *s);
50
CASE_OP_32_64_VEC(add):
77
#endif
51
done = fold_add(&ctx, op);
78
52
break;
79
TCGContext **tcg_ctxs;
53
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
80
-unsigned int n_tcg_ctxs;
54
case INDEX_op_mb:
81
+unsigned int tcg_cur_ctxs;
55
done = fold_mb(&ctx, op);
82
+unsigned int tcg_max_ctxs;
56
break;
83
TCGv_env cpu_env = 0;
57
+ CASE_OP_32_64_VEC(mov):
84
const void *tcg_code_gen_epilogue;
58
+ done = fold_mov(&ctx, op);
85
uintptr_t tcg_splitwx_diff;
59
+ break;
86
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
60
CASE_OP_32_64(movcond):
87
#else
61
done = fold_movcond(&ctx, op);
88
void tcg_register_thread(void)
62
break;
89
{
63
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
90
- MachineState *ms = MACHINE(qdev_get_machine());
64
CASE_OP_32_64_VEC(xor):
91
TCGContext *s = g_malloc(sizeof(*s));
65
done = fold_xor(&ctx, op);
92
unsigned int i, n;
66
break;
93
67
+ default:
94
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
68
+ break;
95
}
69
}
96
70
97
/* Claim an entry in tcg_ctxs */
71
if (!done) {
98
- n = qatomic_fetch_inc(&n_tcg_ctxs);
99
- g_assert(n < ms->smp.max_cpus);
100
+ n = qatomic_fetch_inc(&tcg_cur_ctxs);
101
+ g_assert(n < tcg_max_ctxs);
102
qatomic_set(&tcg_ctxs[n], s);
103
104
if (n > 0) {
105
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(unsigned max_cpus)
106
*/
107
#ifdef CONFIG_USER_ONLY
108
tcg_ctxs = &tcg_ctx;
109
- n_tcg_ctxs = 1;
110
+ tcg_cur_ctxs = 1;
111
+ tcg_max_ctxs = 1;
112
#else
113
- tcg_ctxs = g_new(TCGContext *, max_cpus);
114
+ tcg_max_ctxs = max_cpus;
115
+ tcg_ctxs = g_new0(TCGContext *, max_cpus);
116
#endif
117
118
tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
119
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
120
static inline
121
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
122
{
123
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
124
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
125
unsigned int i;
126
127
for (i = 0; i < n_ctxs; i++) {
128
@@ -XXX,XX +XXX,XX @@ void tcg_dump_op_count(void)
129
130
int64_t tcg_cpu_exec_time(void)
131
{
132
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
133
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
134
unsigned int i;
135
int64_t ret = 0;
136
137
--
72
--
138
2.25.1
73
2.25.1
139
74
140
75
1
A size is easier to work with than an end point,
1
Pull the "op r, a, a => movi r, 0" optimization into a function,
2
particularly during initial buffer allocation.
2
and use it in the outer opcode fold functions.
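
For example (illustration only):

    sub_i32   r, a, a    =>  movi_i32  r, $0
    xor_i64   r, b, b    =>  movi_i64  r, $0
    andc_i32  r, c, c    =>  movi_i32  r, $0

fold_xx_to_i(ctx, op, 0) carries the args_are_copies() check, so the
per-opcode functions become fold_const2() plus this one call.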
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
tcg/region.c | 30 ++++++++++++++++++------------
8
tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
9
1 file changed, 18 insertions(+), 12 deletions(-)
9
1 file changed, 24 insertions(+), 17 deletions(-)
10
10
11
diff --git a/tcg/region.c b/tcg/region.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/region.c
13
--- a/tcg/optimize.c
14
+++ b/tcg/region.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ struct tcg_region_state {
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
16
/* fields set at init time */
16
return false;
17
void *start;
17
}
18
void *start_aligned;
18
19
- void *end;
19
+/* If the binary operation has both arguments equal, fold to @i. */
20
size_t n;
20
+static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
21
size_t size; /* size of one region */
21
+{
22
size_t stride; /* .size + guard size */
22
+ if (args_are_copies(op->args[1], op->args[2])) {
23
+ size_t total_size; /* size of entire buffer, >= n * stride */
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
24
+ }
25
/* fields protected by the lock */
25
+ return false;
26
size_t current; /* current region index */
26
+}
27
@@ -XXX,XX +XXX,XX @@ static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
27
+
28
if (curr_region == 0) {
28
/*
29
start = region.start;
29
* These outermost fold_<op> functions are sorted alphabetically.
30
}
31
+ /* The final region may have a few extra pages due to earlier rounding. */
32
if (curr_region == region.n - 1) {
33
- end = region.end;
34
+ end = region.start_aligned + region.total_size;
35
}
36
37
*pstart = start;
38
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
39
*/
30
*/
40
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
31
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
32
33
static bool fold_andc(OptContext *ctx, TCGOp *op)
41
{
34
{
42
- void *buf, *aligned;
35
- return fold_const2(ctx, op);
43
- size_t size;
36
+ if (fold_const2(ctx, op) ||
44
+ void *buf, *aligned, *end;
37
+ fold_xx_to_i(ctx, op, 0)) {
45
+ size_t total_size;
38
+ return true;
46
size_t page_size;
39
+ }
47
size_t region_size;
40
+ return false;
48
size_t n_regions;
49
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
50
assert(ok);
51
52
buf = tcg_init_ctx.code_gen_buffer;
53
- size = tcg_init_ctx.code_gen_buffer_size;
54
+ total_size = tcg_init_ctx.code_gen_buffer_size;
55
page_size = qemu_real_host_page_size;
56
n_regions = tcg_n_regions(max_cpus);
57
58
/* The first region will be 'aligned - buf' bytes larger than the others */
59
aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
60
- g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
61
+ g_assert(aligned < tcg_init_ctx.code_gen_buffer + total_size);
62
+
63
/*
64
* Make region_size a multiple of page_size, using aligned as the start.
65
* As a result of this we might end up with a few extra pages at the end of
66
* the buffer; we will assign those to the last region.
67
*/
68
- region_size = (size - (aligned - buf)) / n_regions;
69
+ region_size = (total_size - (aligned - buf)) / n_regions;
70
region_size = QEMU_ALIGN_DOWN(region_size, page_size);
71
72
/* A region must have at least 2 pages; one code, one guard */
73
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
74
region.start = buf;
75
region.start_aligned = aligned;
76
/* page-align the end, since its last page will be a guard page */
77
- region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
78
+ end = QEMU_ALIGN_PTR_DOWN(buf + total_size, page_size);
79
/* account for that last guard page */
80
- region.end -= page_size;
81
+ end -= page_size;
82
+ total_size = end - aligned;
83
+ region.total_size = total_size;
84
85
/*
86
* Set guard pages in the rw buffer, as that's the one into which
87
@@ -XXX,XX +XXX,XX @@ void tcg_region_prologue_set(TCGContext *s)
88
89
/* Register the balance of the buffer with gdb. */
90
tcg_register_jit(tcg_splitwx_to_rx(region.start),
91
- region.end - region.start);
92
+ region.start_aligned + region.total_size - region.start);
93
}
41
}
94
42
95
/*
43
static bool fold_brcond(OptContext *ctx, TCGOp *op)
96
@@ -XXX,XX +XXX,XX @@ size_t tcg_code_capacity(void)
44
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
97
45
98
/* no need for synchronization; these variables are set at init time */
46
static bool fold_sub(OptContext *ctx, TCGOp *op)
99
guard_size = region.stride - region.size;
47
{
100
- capacity = region.end + guard_size - region.start;
48
- return fold_const2(ctx, op);
101
- capacity -= region.n * (guard_size + TCG_HIGHWATER);
49
+ if (fold_const2(ctx, op) ||
102
+ capacity = region.total_size;
50
+ fold_xx_to_i(ctx, op, 0)) {
103
+ capacity -= (region.n - 1) * guard_size;
51
+ return true;
104
+ capacity -= region.n * TCG_HIGHWATER;
52
+ }
105
+
53
+ return false;
106
return capacity;
107
}
54
}
108
55
56
static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
57
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
58
59
static bool fold_xor(OptContext *ctx, TCGOp *op)
60
{
61
- return fold_const2(ctx, op);
62
+ if (fold_const2(ctx, op) ||
63
+ fold_xx_to_i(ctx, op, 0)) {
64
+ return true;
65
+ }
66
+ return false;
67
}
68
69
/* Propagate constants and copies, fold constant expressions. */
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
break;
72
}
73
74
- /* Simplify expression for "op r, a, a => movi r, 0" cases */
75
- switch (opc) {
76
- CASE_OP_32_64_VEC(andc):
77
- CASE_OP_32_64_VEC(sub):
78
- CASE_OP_32_64_VEC(xor):
79
- if (args_are_copies(op->args[1], op->args[2])) {
80
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
81
- continue;
82
- }
83
- break;
84
- default:
85
- break;
86
- }
87
-
88
/*
89
* Process each opcode.
90
* Sorted alphabetically by opcode as much as possible.
109
--
91
--
110
2.25.1
92
2.25.1
111
93
112
94
1
It consists of one function call and has only one caller.
1
Pull the "op r, a, a => mov r, a" optimization into a function,
2
and use it in the outer opcode fold functions.
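
For example (illustration only):

    and_i32  r, a, a    =>  mov_i32  r, a
    or_i64   r, b, b    =>  mov_i64  r, b

fold_xx_to_x() is the copy-producing counterpart of fold_xx_to_i(), and
the new comment block spells out the intended ordering inside each fold
function: constant results first, then copies, then information about
the result value.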
2
3
3
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
accel/tcg/translate-all.c | 7 +------
8
tcg/optimize.c | 39 ++++++++++++++++++++++++---------------
9
1 file changed, 1 insertion(+), 6 deletions(-)
9
1 file changed, 24 insertions(+), 15 deletions(-)
10
10
11
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/translate-all.c
13
--- a/tcg/optimize.c
14
+++ b/accel/tcg/translate-all.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void page_table_config_init(void)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
16
assert(v_l2_levels >= 0);
16
return false;
17
}
17
}
18
18
19
-static void cpu_gen_init(void)
19
+/* If the binary operation has both arguments equal, fold to identity. */
20
-{
20
+static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
21
- tcg_context_init(&tcg_init_ctx);
21
+{
22
-}
22
+ if (args_are_copies(op->args[1], op->args[2])) {
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
24
+ }
25
+ return false;
26
+}
27
+
28
/*
29
* These outermost fold_<op> functions are sorted alphabetically.
30
+ *
31
+ * The ordering of the transformations should be:
32
+ * 1) those that produce a constant
33
+ * 2) those that produce a copy
34
+ * 3) those that produce information about the result value.
35
*/
36
37
static bool fold_add(OptContext *ctx, TCGOp *op)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
39
40
static bool fold_and(OptContext *ctx, TCGOp *op)
41
{
42
- return fold_const2(ctx, op);
43
+ if (fold_const2(ctx, op) ||
44
+ fold_xx_to_x(ctx, op)) {
45
+ return true;
46
+ }
47
+ return false;
48
}
49
50
static bool fold_andc(OptContext *ctx, TCGOp *op)
51
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
52
53
static bool fold_or(OptContext *ctx, TCGOp *op)
54
{
55
- return fold_const2(ctx, op);
56
+ if (fold_const2(ctx, op) ||
57
+ fold_xx_to_x(ctx, op)) {
58
+ return true;
59
+ }
60
+ return false;
61
}
62
63
static bool fold_orc(OptContext *ctx, TCGOp *op)
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
65
break;
66
}
67
68
- /* Simplify expression for "op r, a, a => mov r, a" cases */
69
- switch (opc) {
70
- CASE_OP_32_64_VEC(or):
71
- CASE_OP_32_64_VEC(and):
72
- if (args_are_copies(op->args[1], op->args[2])) {
73
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
74
- continue;
75
- }
76
- break;
77
- default:
78
- break;
79
- }
23
-
80
-
24
/* Encode VAL as a signed leb128 sequence at P.
81
/*
25
Return P incremented past the encoded value. */
82
* Process each opcode.
26
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
83
* Sorted alphabetically by opcode as much as possible.
27
@@ -XXX,XX +XXX,XX @@ void tcg_exec_init(unsigned long tb_size, int splitwx)
28
bool ok;
29
30
tcg_allowed = true;
31
- cpu_gen_init();
32
+ tcg_context_init(&tcg_init_ctx);
33
page_init();
34
tb_htable_init();
35
36
--
84
--
37
2.25.1
85
2.25.1
38
86
39
87
1
There is only one caller, and shortly we will need access
1
Pull the "op r, a, 0 => movi r, 0" optimization into a function,
2
to the MachineState, which tcg_init_machine already has.
2
and use it in the outer opcode fold functions.
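
Schematically (illustrative operands, not a real TCG dump):

    and_i32 dst, src, $0x0     becomes   movi_i32 dst, $0x0
    mul_i32 dst, src, $0x0     becomes   movi_i32 dst, $0x0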
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
accel/tcg/internal.h | 2 ++
8
tcg/optimize.c | 38 ++++++++++++++++++++------------------
9
include/sysemu/tcg.h | 2 --
9
1 file changed, 20 insertions(+), 18 deletions(-)
10
accel/tcg/tcg-all.c | 16 +++++++++++++++-
11
accel/tcg/translate-all.c | 21 ++-------------------
12
bsd-user/main.c | 2 +-
13
5 files changed, 20 insertions(+), 23 deletions(-)
14
10
15
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/internal.h
13
--- a/tcg/optimize.c
18
+++ b/accel/tcg/internal.h
14
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
20
int cflags);
21
22
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
23
+void page_init(void);
24
+void tb_htable_init(void);
25
26
#endif /* ACCEL_TCG_INTERNAL_H */
27
diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h
28
index XXXXXXX..XXXXXXX 100644
29
--- a/include/sysemu/tcg.h
30
+++ b/include/sysemu/tcg.h
31
@@ -XXX,XX +XXX,XX @@
32
#ifndef SYSEMU_TCG_H
33
#define SYSEMU_TCG_H
34
35
-void tcg_exec_init(unsigned long tb_size, int splitwx);
36
-
37
#ifdef CONFIG_TCG
38
extern bool tcg_allowed;
39
#define tcg_enabled() (tcg_allowed)
40
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/accel/tcg/tcg-all.c
43
+++ b/accel/tcg/tcg-all.c
44
@@ -XXX,XX +XXX,XX @@
45
#include "qemu/error-report.h"
46
#include "qemu/accel.h"
47
#include "qapi/qapi-builtin-visit.h"
48
+#include "internal.h"
49
50
struct TCGState {
51
AccelState parent_obj;
52
@@ -XXX,XX +XXX,XX @@ static int tcg_init_machine(MachineState *ms)
53
{
54
TCGState *s = TCG_STATE(current_accel());
55
56
- tcg_exec_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
57
+ tcg_allowed = true;
58
mttcg_enabled = s->mttcg_enabled;
59
+
60
+ page_init();
61
+ tb_htable_init();
62
+ tcg_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
63
+
64
+#if defined(CONFIG_SOFTMMU)
65
+ /*
66
+ * There's no guest base to take into account, so go ahead and
67
+ * initialize the prologue now.
68
+ */
69
+ tcg_prologue_init(tcg_ctx);
70
+#endif
71
+
72
return 0;
73
}
74
75
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/accel/tcg/translate-all.c
78
+++ b/accel/tcg/translate-all.c
79
@@ -XXX,XX +XXX,XX @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
80
return false;
16
return false;
81
}
17
}
82
18
83
-static void page_init(void)
19
+/* If the binary operation has second argument @i, fold to @i. */
84
+void page_init(void)
20
+static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
21
+{
22
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
+ }
25
+ return false;
26
+}
27
+
28
/* If the binary operation has both arguments equal, fold to @i. */
29
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
85
{
30
{
86
page_size_init();
31
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
87
page_table_config_init();
32
static bool fold_and(OptContext *ctx, TCGOp *op)
88
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
33
{
89
a->page_addr[1] == b->page_addr[1];
34
if (fold_const2(ctx, op) ||
35
+ fold_xi_to_i(ctx, op, 0) ||
36
fold_xx_to_x(ctx, op)) {
37
return true;
38
}
39
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
40
41
static bool fold_mul(OptContext *ctx, TCGOp *op)
42
{
43
- return fold_const2(ctx, op);
44
+ if (fold_const2(ctx, op) ||
45
+ fold_xi_to_i(ctx, op, 0)) {
46
+ return true;
47
+ }
48
+ return false;
90
}
49
}
91
50
92
-static void tb_htable_init(void)
51
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
93
+void tb_htable_init(void)
94
{
52
{
95
unsigned int mode = QHT_MODE_AUTO_RESIZE;
53
- return fold_const2(ctx, op);
96
54
+ if (fold_const2(ctx, op) ||
97
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
55
+ fold_xi_to_i(ctx, op, 0)) {
56
+ return true;
57
+ }
58
+ return false;
98
}
59
}
99
60
100
-/* Must be called before using the QEMU cpus. 'tb_size' is the size
61
static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
101
- (in bytes) allocated to the translation buffer. Zero means default
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
102
- size. */
63
continue;
103
-void tcg_exec_init(unsigned long tb_size, int splitwx)
64
}
104
-{
65
105
- tcg_allowed = true;
66
- /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
106
- page_init();
67
- switch (opc) {
107
- tb_htable_init();
68
- CASE_OP_32_64_VEC(and):
108
- tcg_init(tb_size, splitwx);
69
- CASE_OP_32_64_VEC(mul):
70
- CASE_OP_32_64(muluh):
71
- CASE_OP_32_64(mulsh):
72
- if (arg_is_const(op->args[2])
73
- && arg_info(op->args[2])->val == 0) {
74
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
75
- continue;
76
- }
77
- break;
78
- default:
79
- break;
80
- }
109
-
81
-
110
-#if defined(CONFIG_SOFTMMU)
82
/*
111
- /* There's no guest base to take into account, so go ahead and
83
* Process each opcode.
112
- initialize the prologue now. */
84
* Sorted alphabetically by opcode as much as possible.
113
- tcg_prologue_init(tcg_ctx);
114
-#endif
115
-}
116
-
117
/* call with @p->lock held */
118
static inline void invalidate_page_bitmap(PageDesc *p)
119
{
120
diff --git a/bsd-user/main.c b/bsd-user/main.c
121
index XXXXXXX..XXXXXXX 100644
122
--- a/bsd-user/main.c
123
+++ b/bsd-user/main.c
124
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
125
envlist_free(envlist);
126
127
/*
128
- * Now that page sizes are configured in tcg_exec_init() we can do
129
+ * Now that page sizes are configured we can do
130
* proper page alignment for guest_base.
131
*/
132
guest_base = HOST_PAGE_ALIGN(guest_base);
133
--
85
--
134
2.25.1
86
2.25.1
135
87
136
88
1
There's a change in mprotect() behaviour [1] in the latest macOS
1
Compute the type of the operation early.
2
on M1 and it's not yet clear if it's going to be fixed by Apple.
2
3
3
There are at least 4 places that used a def->flags ladder
4
In this case, instead of changing permissions of N guard pages,
4
to determine the type of the operation being optimized.
5
we change permissions of N rwx regions. The same number of
5
6
syscalls are required either way.
6
There were two places that assumed !TCG_OPF_64BIT means
7
7
TCG_TYPE_I32, and so could potentially compute incorrect
8
[1] https://gist.github.com/hikalium/75ae822466ee4da13cbbe486498a191f
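
As a rough C sketch of the reversed order (hypothetical helper and layout,
not the actual tcg/region.c code, which also has to deal with MAP_JIT and
splitwx; assumes each region body is followed by a single guard page):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Reserve the whole buffer with no permissions, then grant RWX to
     * each region body.  Guard pages are simply left as PROT_NONE, so
     * the RWX -> NONE transition that macOS rejects never happens.
     * Error handling is omitted for brevity. */
    static void *alloc_rwx_regions(size_t n_regions, size_t region_size,
                                   size_t page_size)
    {
        size_t total = n_regions * (region_size + page_size);
        char *buf = mmap(NULL, total, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            return NULL;
        }
        for (size_t i = 0; i < n_regions; i++) {
            mprotect(buf + i * (region_size + page_size), region_size,
                     PROT_READ | PROT_WRITE | PROT_EXEC);
        }
        return buf;
    }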
8
results for vector operations.
9
9
10
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
10
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
12
---
13
tcg/region.c | 19 +++++++++----------
13
tcg/optimize.c | 149 +++++++++++++++++++++++++++++--------------------
14
1 file changed, 9 insertions(+), 10 deletions(-)
14
1 file changed, 89 insertions(+), 60 deletions(-)
15
15
16
diff --git a/tcg/region.c b/tcg/region.c
16
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/region.c
18
--- a/tcg/optimize.c
19
+++ b/tcg/region.c
19
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
20
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
21
error_free_or_abort(errp);
21
22
}
22
/* In flight values from optimization. */
23
23
uint64_t z_mask;
24
- prot = PROT_READ | PROT_WRITE | PROT_EXEC;
24
+ TCGType type;
25
+ /*
25
} OptContext;
26
+ * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
26
27
+ * rejects a permission change from RWX -> NONE when reserving the
27
static inline TempOptInfo *ts_info(TCGTemp *ts)
28
+ * guard pages later. We can go the other way with the same number
28
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
+ * of syscalls, so always begin with PROT_NONE.
29
{
30
+ */
30
TCGTemp *dst_ts = arg_temp(dst);
31
+ prot = PROT_NONE;
31
TCGTemp *src_ts = arg_temp(src);
32
flags = MAP_PRIVATE | MAP_ANONYMOUS;
32
- const TCGOpDef *def;
33
-#ifdef CONFIG_TCG_INTERPRETER
33
TempOptInfo *di;
34
- /* The tcg interpreter does not need execute permission. */
34
TempOptInfo *si;
35
- prot = PROT_READ | PROT_WRITE;
35
uint64_t z_mask;
36
-#elif defined(CONFIG_DARWIN)
36
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
37
+#ifdef CONFIG_DARWIN
37
reset_ts(dst_ts);
38
/* Applicable to both iOS and macOS (Apple Silicon). */
38
di = ts_info(dst_ts);
39
if (!splitwx) {
39
si = ts_info(src_ts);
40
flags |= MAP_JIT;
40
- def = &tcg_op_defs[op->opc];
41
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
41
- if (def->flags & TCG_OPF_VECTOR) {
42
}
42
- new_op = INDEX_op_mov_vec;
43
- } else if (def->flags & TCG_OPF_64BIT) {
44
- new_op = INDEX_op_mov_i64;
45
- } else {
46
+
47
+ switch (ctx->type) {
48
+ case TCG_TYPE_I32:
49
new_op = INDEX_op_mov_i32;
50
+ break;
51
+ case TCG_TYPE_I64:
52
+ new_op = INDEX_op_mov_i64;
53
+ break;
54
+ case TCG_TYPE_V64:
55
+ case TCG_TYPE_V128:
56
+ case TCG_TYPE_V256:
57
+ /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
58
+ new_op = INDEX_op_mov_vec;
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
}
63
op->opc = new_op;
64
- /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
65
op->args[0] = dst;
66
op->args[1] = src;
67
68
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
69
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
70
TCGArg dst, uint64_t val)
71
{
72
- const TCGOpDef *def = &tcg_op_defs[op->opc];
73
- TCGType type;
74
- TCGTemp *tv;
75
-
76
- if (def->flags & TCG_OPF_VECTOR) {
77
- type = TCGOP_VECL(op) + TCG_TYPE_V64;
78
- } else if (def->flags & TCG_OPF_64BIT) {
79
- type = TCG_TYPE_I64;
80
- } else {
81
- type = TCG_TYPE_I32;
82
- }
83
-
84
/* Convert movi to mov with constant temp. */
85
- tv = tcg_constant_internal(type, val);
86
+ TCGTemp *tv = tcg_constant_internal(ctx->type, val);
87
+
88
init_ts_info(ctx, tv);
89
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
90
}
91
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
92
}
93
}
94
95
-static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
96
+static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
97
+ uint64_t x, uint64_t y)
98
{
99
- const TCGOpDef *def = &tcg_op_defs[op];
100
uint64_t res = do_constant_folding_2(op, x, y);
101
- if (!(def->flags & TCG_OPF_64BIT)) {
102
+ if (type == TCG_TYPE_I32) {
103
res = (int32_t)res;
104
}
105
return res;
106
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
107
* Return -1 if the condition can't be simplified,
108
* and the result of the condition (0 or 1) if it can.
109
*/
110
-static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
111
+static int do_constant_folding_cond(TCGType type, TCGArg x,
112
TCGArg y, TCGCond c)
113
{
114
uint64_t xv = arg_info(x)->val;
115
uint64_t yv = arg_info(y)->val;
116
117
if (arg_is_const(x) && arg_is_const(y)) {
118
- const TCGOpDef *def = &tcg_op_defs[op];
119
- tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
120
- if (def->flags & TCG_OPF_64BIT) {
121
- return do_constant_folding_cond_64(xv, yv, c);
122
- } else {
123
+ switch (type) {
124
+ case TCG_TYPE_I32:
125
return do_constant_folding_cond_32(xv, yv, c);
126
+ case TCG_TYPE_I64:
127
+ return do_constant_folding_cond_64(xv, yv, c);
128
+ default:
129
+ /* Only scalar comparisons are optimizable */
130
+ return -1;
43
}
131
}
44
if (have_prot != 0) {
132
} else if (args_are_copies(x, y)) {
45
- /*
133
return do_constant_folding_cond_eq(c);
46
- * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
134
@@ -XXX,XX +XXX,XX @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
47
- * rejects a permission change from RWX -> NONE. Guard pages are
135
uint64_t t;
48
- * nice for bug detection but are not essential; ignore any failure.
136
49
- */
137
t = arg_info(op->args[1])->val;
50
+ /* Guard pages are nice for bug detection but are not essential. */
138
- t = do_constant_folding(op->opc, t, 0);
51
(void)qemu_mprotect_none(end, page_size);
139
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
140
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
141
}
142
return false;
143
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
144
uint64_t t1 = arg_info(op->args[1])->val;
145
uint64_t t2 = arg_info(op->args[2])->val;
146
147
- t1 = do_constant_folding(op->opc, t1, t2);
148
+ t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
149
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
150
}
151
return false;
152
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
153
static bool fold_brcond(OptContext *ctx, TCGOp *op)
154
{
155
TCGCond cond = op->args[2];
156
- int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
157
+ int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
158
159
if (i == 0) {
160
tcg_op_remove(ctx->tcg, op);
161
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
162
* Simplify EQ/NE comparisons where one of the pairs
163
* can be simplified.
164
*/
165
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
166
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
167
op->args[2], cond);
168
switch (i ^ inv) {
169
case 0:
170
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
171
goto do_brcond_high;
52
}
172
}
53
}
173
174
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
175
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
176
op->args[3], cond);
177
switch (i ^ inv) {
178
case 0:
179
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
180
if (arg_is_const(op->args[1])) {
181
uint64_t t = arg_info(op->args[1])->val;
182
183
- t = do_constant_folding(op->opc, t, op->args[2]);
184
+ t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
185
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
186
}
187
return false;
188
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
189
uint64_t t = arg_info(op->args[1])->val;
190
191
if (t != 0) {
192
- t = do_constant_folding(op->opc, t, 0);
193
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
194
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
195
}
196
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
197
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
198
199
static bool fold_movcond(OptContext *ctx, TCGOp *op)
200
{
201
- TCGOpcode opc = op->opc;
202
TCGCond cond = op->args[5];
203
- int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
204
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
205
206
if (i >= 0) {
207
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
208
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
209
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
210
uint64_t tv = arg_info(op->args[3])->val;
211
uint64_t fv = arg_info(op->args[4])->val;
212
+ TCGOpcode opc;
213
214
- opc = (opc == INDEX_op_movcond_i32
215
- ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
216
+ switch (ctx->type) {
217
+ case TCG_TYPE_I32:
218
+ opc = INDEX_op_setcond_i32;
219
+ break;
220
+ case TCG_TYPE_I64:
221
+ opc = INDEX_op_setcond_i64;
222
+ break;
223
+ default:
224
+ g_assert_not_reached();
225
+ }
226
227
if (tv == 1 && fv == 0) {
228
op->opc = opc;
229
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
230
static bool fold_setcond(OptContext *ctx, TCGOp *op)
231
{
232
TCGCond cond = op->args[3];
233
- int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
234
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
235
236
if (i >= 0) {
237
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
238
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
239
* Simplify EQ/NE comparisons where one of the pairs
240
* can be simplified.
241
*/
242
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
243
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
244
op->args[3], cond);
245
switch (i ^ inv) {
246
case 0:
247
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
248
goto do_setcond_high;
249
}
250
251
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
252
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
253
op->args[4], cond);
254
switch (i ^ inv) {
255
case 0:
256
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
257
init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
258
copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
259
260
+ /* Pre-compute the type of the operation. */
261
+ if (def->flags & TCG_OPF_VECTOR) {
262
+ ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
263
+ } else if (def->flags & TCG_OPF_64BIT) {
264
+ ctx.type = TCG_TYPE_I64;
265
+ } else {
266
+ ctx.type = TCG_TYPE_I32;
267
+ }
268
+
269
/* For commutative operations make constant second argument */
270
switch (opc) {
271
CASE_OP_32_64_VEC(add):
272
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
273
/* Proceed with possible constant folding. */
274
break;
275
}
276
- if (opc == INDEX_op_sub_i32) {
277
+ switch (ctx.type) {
278
+ case TCG_TYPE_I32:
279
neg_op = INDEX_op_neg_i32;
280
have_neg = TCG_TARGET_HAS_neg_i32;
281
- } else if (opc == INDEX_op_sub_i64) {
282
+ break;
283
+ case TCG_TYPE_I64:
284
neg_op = INDEX_op_neg_i64;
285
have_neg = TCG_TARGET_HAS_neg_i64;
286
- } else if (TCG_TARGET_HAS_neg_vec) {
287
- TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
288
- unsigned vece = TCGOP_VECE(op);
289
- neg_op = INDEX_op_neg_vec;
290
- have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
291
- } else {
292
break;
293
+ case TCG_TYPE_V64:
294
+ case TCG_TYPE_V128:
295
+ case TCG_TYPE_V256:
296
+ neg_op = INDEX_op_neg_vec;
297
+ have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
298
+ TCGOP_VECE(op)) > 0;
299
+ break;
300
+ default:
301
+ g_assert_not_reached();
302
}
303
if (!have_neg) {
304
break;
305
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
306
TCGOpcode not_op;
307
bool have_not;
308
309
- if (def->flags & TCG_OPF_VECTOR) {
310
- not_op = INDEX_op_not_vec;
311
- have_not = TCG_TARGET_HAS_not_vec;
312
- } else if (def->flags & TCG_OPF_64BIT) {
313
- not_op = INDEX_op_not_i64;
314
- have_not = TCG_TARGET_HAS_not_i64;
315
- } else {
316
+ switch (ctx.type) {
317
+ case TCG_TYPE_I32:
318
not_op = INDEX_op_not_i32;
319
have_not = TCG_TARGET_HAS_not_i32;
320
+ break;
321
+ case TCG_TYPE_I64:
322
+ not_op = INDEX_op_not_i64;
323
+ have_not = TCG_TARGET_HAS_not_i64;
324
+ break;
325
+ case TCG_TYPE_V64:
326
+ case TCG_TYPE_V128:
327
+ case TCG_TYPE_V256:
328
+ not_op = INDEX_op_not_vec;
329
+ have_not = TCG_TARGET_HAS_not_vec;
330
+ break;
331
+ default:
332
+ g_assert_not_reached();
333
}
334
if (!have_not) {
335
break;
336
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
337
below, we can ignore high bits, but for further optimizations we
338
need to record that the high bits contain garbage. */
339
partmask = z_mask;
340
- if (!(def->flags & TCG_OPF_64BIT)) {
341
+ if (ctx.type == TCG_TYPE_I32) {
342
z_mask |= ~(tcg_target_ulong)0xffffffffu;
343
partmask &= 0xffffffffu;
344
affected &= 0xffffffffu;
54
--
345
--
55
2.25.1
346
2.25.1
56
347
57
348
1
Remove the ifdef ladder and move each define into the
1
Split out the conditional conversion from a more complex logical
2
appropriate header file.
2
operation to a simple NOT. Create a couple more helpers to make
3
this easy for the outer-most logical operations.
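
For example (schematic; operands invented), when the host supports the
corresponding not opcode:

    xor_i32 dst, src, $-1      becomes   not_i32 dst, src
    nor_i32 dst, src, $0x0     becomes   not_i32 dst, src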
3
4
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
tcg/aarch64/tcg-target.h | 1 +
8
tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
9
tcg/arm/tcg-target.h | 1 +
9
1 file changed, 86 insertions(+), 72 deletions(-)
10
tcg/i386/tcg-target.h | 2 ++
10
11
tcg/mips/tcg-target.h | 6 ++++++
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
tcg/ppc/tcg-target.h | 2 ++
13
tcg/riscv/tcg-target.h | 1 +
14
tcg/s390/tcg-target.h | 3 +++
15
tcg/sparc/tcg-target.h | 1 +
16
tcg/tci/tcg-target.h | 1 +
17
tcg/region.c | 33 +++++----------------------------
18
10 files changed, 23 insertions(+), 28 deletions(-)
19
20
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
21
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/aarch64/tcg-target.h
13
--- a/tcg/optimize.c
23
+++ b/tcg/aarch64/tcg-target.h
14
+++ b/tcg/optimize.c
24
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
25
16
return false;
26
#define TCG_TARGET_INSN_UNIT_SIZE 4
17
}
27
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 24
28
+#define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
29
#undef TCG_TARGET_STACK_GROWSUP
30
31
typedef enum {
32
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/arm/tcg-target.h
35
+++ b/tcg/arm/tcg-target.h
36
@@ -XXX,XX +XXX,XX @@ extern int arm_arch;
37
#undef TCG_TARGET_STACK_GROWSUP
38
#define TCG_TARGET_INSN_UNIT_SIZE 4
39
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16
40
+#define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX
41
42
typedef enum {
43
TCG_REG_R0 = 0,
44
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/i386/tcg-target.h
47
+++ b/tcg/i386/tcg-target.h
48
@@ -XXX,XX +XXX,XX @@
49
#ifdef __x86_64__
50
# define TCG_TARGET_REG_BITS 64
51
# define TCG_TARGET_NB_REGS 32
52
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
53
#else
54
# define TCG_TARGET_REG_BITS 32
55
# define TCG_TARGET_NB_REGS 24
56
+# define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX
57
#endif
58
59
typedef enum {
60
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
61
index XXXXXXX..XXXXXXX 100644
62
--- a/tcg/mips/tcg-target.h
63
+++ b/tcg/mips/tcg-target.h
64
@@ -XXX,XX +XXX,XX @@
65
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16
66
#define TCG_TARGET_NB_REGS 32
67
18
68
+/*
19
+/*
69
+ * We have a 256MB branch region, but leave room to make sure the
20
+ * Convert @op to NOT, if NOT is supported by the host.
70
+ * main executable is also within that region.
21
+ * Return true if the conversion is successful, which will still
22
+ * indicate that the processing is complete.
71
+ */
23
+ */
72
+#define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
24
+static bool fold_not(OptContext *ctx, TCGOp *op);
73
+
25
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
74
typedef enum {
26
+{
75
TCG_REG_ZERO = 0,
27
+ TCGOpcode not_op;
76
TCG_REG_AT,
28
+ bool have_not;
77
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
29
+
78
index XXXXXXX..XXXXXXX 100644
30
+ switch (ctx->type) {
79
--- a/tcg/ppc/tcg-target.h
31
+ case TCG_TYPE_I32:
80
+++ b/tcg/ppc/tcg-target.h
32
+ not_op = INDEX_op_not_i32;
81
@@ -XXX,XX +XXX,XX @@
33
+ have_not = TCG_TARGET_HAS_not_i32;
82
34
+ break;
83
#ifdef _ARCH_PPC64
35
+ case TCG_TYPE_I64:
84
# define TCG_TARGET_REG_BITS 64
36
+ not_op = INDEX_op_not_i64;
85
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
37
+ have_not = TCG_TARGET_HAS_not_i64;
86
#else
38
+ break;
87
# define TCG_TARGET_REG_BITS 32
39
+ case TCG_TYPE_V64:
88
+# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
40
+ case TCG_TYPE_V128:
89
#endif
41
+ case TCG_TYPE_V256:
90
42
+ not_op = INDEX_op_not_vec;
91
#define TCG_TARGET_NB_REGS 64
43
+ have_not = TCG_TARGET_HAS_not_vec;
92
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
44
+ break;
93
index XXXXXXX..XXXXXXX 100644
45
+ default:
94
--- a/tcg/riscv/tcg-target.h
46
+ g_assert_not_reached();
95
+++ b/tcg/riscv/tcg-target.h
47
+ }
96
@@ -XXX,XX +XXX,XX @@
48
+ if (have_not) {
97
#define TCG_TARGET_INSN_UNIT_SIZE 4
49
+ op->opc = not_op;
98
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 20
50
+ op->args[1] = op->args[idx];
99
#define TCG_TARGET_NB_REGS 32
51
+ return fold_not(ctx, op);
100
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
52
+ }
101
53
+ return false;
102
typedef enum {
54
+}
103
TCG_REG_ZERO,
55
+
104
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
56
+/* If the binary operation has first argument @i, fold to NOT. */
105
index XXXXXXX..XXXXXXX 100644
57
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
106
--- a/tcg/s390/tcg-target.h
58
+{
107
+++ b/tcg/s390/tcg-target.h
59
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
108
@@ -XXX,XX +XXX,XX @@
60
+ return fold_to_not(ctx, op, 2);
109
#define TCG_TARGET_INSN_UNIT_SIZE 2
61
+ }
110
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 19
62
+ return false;
111
63
+}
112
+/* We have a +- 4GB range on the branches; leave some slop. */
64
+
113
+#define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
65
/* If the binary operation has second argument @i, fold to @i. */
114
+
66
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
115
typedef enum TCGReg {
67
{
116
TCG_REG_R0 = 0,
68
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
117
TCG_REG_R1,
69
return false;
118
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
70
}
119
index XXXXXXX..XXXXXXX 100644
71
120
--- a/tcg/sparc/tcg-target.h
72
+/* If the binary operation has second argument @i, fold to NOT. */
121
+++ b/tcg/sparc/tcg-target.h
73
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
122
@@ -XXX,XX +XXX,XX @@
74
+{
123
#define TCG_TARGET_INSN_UNIT_SIZE 4
75
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
124
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32
76
+ return fold_to_not(ctx, op, 1);
125
#define TCG_TARGET_NB_REGS 32
77
+ }
126
+#define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
78
+ return false;
127
79
+}
128
typedef enum {
80
+
129
TCG_REG_G0 = 0,
81
/* If the binary operation has both arguments equal, fold to @i. */
130
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
82
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
131
index XXXXXXX..XXXXXXX 100644
83
{
132
--- a/tcg/tci/tcg-target.h
84
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
133
+++ b/tcg/tci/tcg-target.h
85
static bool fold_andc(OptContext *ctx, TCGOp *op)
134
@@ -XXX,XX +XXX,XX @@
86
{
135
#define TCG_TARGET_INTERPRETER 1
87
if (fold_const2(ctx, op) ||
136
#define TCG_TARGET_INSN_UNIT_SIZE 1
88
- fold_xx_to_i(ctx, op, 0)) {
137
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32
89
+ fold_xx_to_i(ctx, op, 0) ||
138
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
90
+ fold_ix_to_not(ctx, op, -1)) {
139
91
return true;
140
#if UINTPTR_MAX == UINT32_MAX
92
}
141
# define TCG_TARGET_REG_BITS 32
93
return false;
142
diff --git a/tcg/region.c b/tcg/region.c
94
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
143
index XXXXXXX..XXXXXXX 100644
95
144
--- a/tcg/region.c
96
static bool fold_eqv(OptContext *ctx, TCGOp *op)
145
+++ b/tcg/region.c
97
{
146
@@ -XXX,XX +XXX,XX @@ static size_t tcg_n_regions(unsigned max_cpus)
98
- return fold_const2(ctx, op);
147
/*
99
+ if (fold_const2(ctx, op) ||
148
* Minimum size of the code gen buffer. This number is randomly chosen,
100
+ fold_xi_to_not(ctx, op, 0)) {
149
* but not so small that we can't have a fair number of TB's live.
101
+ return true;
150
+ *
102
+ }
151
+ * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
103
+ return false;
152
+ * Unless otherwise indicated, this is constrained by the range of
104
}
153
+ * direct branches on the host cpu, as used by the TCG implementation
105
154
+ * of goto_tb.
106
static bool fold_extract(OptContext *ctx, TCGOp *op)
155
*/
107
@@ -XXX,XX +XXX,XX @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
156
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
108
157
109
static bool fold_nand(OptContext *ctx, TCGOp *op)
158
-/*
110
{
159
- * Maximum size of the code gen buffer we'd like to use. Unless otherwise
111
- return fold_const2(ctx, op);
160
- * indicated, this is constrained by the range of direct branches on the
112
+ if (fold_const2(ctx, op) ||
161
- * host cpu, as used by the TCG implementation of goto_tb.
113
+ fold_xi_to_not(ctx, op, -1)) {
162
- */
114
+ return true;
163
-#if defined(__x86_64__)
115
+ }
164
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
116
+ return false;
165
-#elif defined(__sparc__)
117
}
166
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
118
167
-#elif defined(__powerpc64__)
119
static bool fold_neg(OptContext *ctx, TCGOp *op)
168
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
120
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
169
-#elif defined(__powerpc__)
121
170
-# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
122
static bool fold_nor(OptContext *ctx, TCGOp *op)
171
-#elif defined(__aarch64__)
123
{
172
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
124
- return fold_const2(ctx, op);
173
-#elif defined(__s390x__)
125
+ if (fold_const2(ctx, op) ||
174
- /* We have a +- 4GB range on the branches; leave some slop. */
126
+ fold_xi_to_not(ctx, op, 0)) {
175
-# define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
127
+ return true;
176
-#elif defined(__mips__)
128
+ }
177
- /*
129
+ return false;
178
- * We have a 256MB branch region, but leave room to make sure the
130
}
179
- * main executable is also within that region.
131
180
- */
132
static bool fold_not(OptContext *ctx, TCGOp *op)
181
-# define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
133
{
182
-#else
134
- return fold_const1(ctx, op);
183
-# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
135
+ if (fold_const1(ctx, op)) {
184
-#endif
136
+ return true;
137
+ }
138
+
139
+ /* Because of fold_to_not, we want to always return true, via finish. */
140
+ finish_folding(ctx, op);
141
+ return true;
142
}
143
144
static bool fold_or(OptContext *ctx, TCGOp *op)
145
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
146
147
static bool fold_orc(OptContext *ctx, TCGOp *op)
148
{
149
- return fold_const2(ctx, op);
150
+ if (fold_const2(ctx, op) ||
151
+ fold_ix_to_not(ctx, op, 0)) {
152
+ return true;
153
+ }
154
+ return false;
155
}
156
157
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
158
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
159
static bool fold_xor(OptContext *ctx, TCGOp *op)
160
{
161
if (fold_const2(ctx, op) ||
162
- fold_xx_to_i(ctx, op, 0)) {
163
+ fold_xx_to_i(ctx, op, 0) ||
164
+ fold_xi_to_not(ctx, op, -1)) {
165
return true;
166
}
167
return false;
168
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
169
}
170
}
171
break;
172
- CASE_OP_32_64_VEC(xor):
173
- CASE_OP_32_64(nand):
174
- if (!arg_is_const(op->args[1])
175
- && arg_is_const(op->args[2])
176
- && arg_info(op->args[2])->val == -1) {
177
- i = 1;
178
- goto try_not;
179
- }
180
- break;
181
- CASE_OP_32_64(nor):
182
- if (!arg_is_const(op->args[1])
183
- && arg_is_const(op->args[2])
184
- && arg_info(op->args[2])->val == 0) {
185
- i = 1;
186
- goto try_not;
187
- }
188
- break;
189
- CASE_OP_32_64_VEC(andc):
190
- if (!arg_is_const(op->args[2])
191
- && arg_is_const(op->args[1])
192
- && arg_info(op->args[1])->val == -1) {
193
- i = 2;
194
- goto try_not;
195
- }
196
- break;
197
- CASE_OP_32_64_VEC(orc):
198
- CASE_OP_32_64(eqv):
199
- if (!arg_is_const(op->args[2])
200
- && arg_is_const(op->args[1])
201
- && arg_info(op->args[1])->val == 0) {
202
- i = 2;
203
- goto try_not;
204
- }
205
- break;
206
- try_not:
207
- {
208
- TCGOpcode not_op;
209
- bool have_not;
185
-
210
-
186
#if TCG_TARGET_REG_BITS == 32
211
- switch (ctx.type) {
187
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
212
- case TCG_TYPE_I32:
188
#ifdef CONFIG_USER_ONLY
213
- not_op = INDEX_op_not_i32;
214
- have_not = TCG_TARGET_HAS_not_i32;
215
- break;
216
- case TCG_TYPE_I64:
217
- not_op = INDEX_op_not_i64;
218
- have_not = TCG_TARGET_HAS_not_i64;
219
- break;
220
- case TCG_TYPE_V64:
221
- case TCG_TYPE_V128:
222
- case TCG_TYPE_V256:
223
- not_op = INDEX_op_not_vec;
224
- have_not = TCG_TARGET_HAS_not_vec;
225
- break;
226
- default:
227
- g_assert_not_reached();
228
- }
229
- if (!have_not) {
230
- break;
231
- }
232
- op->opc = not_op;
233
- reset_temp(op->args[0]);
234
- op->args[1] = op->args[i];
235
- continue;
236
- }
237
default:
238
break;
239
}
189
--
240
--
190
2.25.1
241
2.25.1
191
242
192
243
1
Perform both tcg_context_init and tcg_region_init.
1
Even though there is only one user, place this more complex
2
Do not leave this split to the caller.
2
conversion into its own helper.
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
include/tcg/tcg.h | 3 +--
7
tcg/optimize.c | 89 ++++++++++++++++++++++++++------------------------
9
tcg/tcg-internal.h | 1 +
8
1 file changed, 47 insertions(+), 42 deletions(-)
10
accel/tcg/translate-all.c | 3 +--
11
tcg/tcg.c | 9 ++++++++-
12
4 files changed, 11 insertions(+), 5 deletions(-)
13
9
14
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/include/tcg/tcg.h
12
--- a/tcg/optimize.c
17
+++ b/include/tcg/tcg.h
13
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ void *tcg_malloc_internal(TCGContext *s, int size);
14
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
19
void tcg_pool_reset(TCGContext *s);
15
20
TranslationBlock *tcg_tb_alloc(TCGContext *s);
16
static bool fold_neg(OptContext *ctx, TCGOp *op)
21
17
{
22
-void tcg_region_init(size_t tb_size, int splitwx);
18
- return fold_const1(ctx, op);
23
void tb_destroy(TranslationBlock *tb);
19
+ if (fold_const1(ctx, op)) {
24
void tcg_region_reset_all(void);
20
+ return true;
25
21
+ }
26
@@ -XXX,XX +XXX,XX @@ static inline void *tcg_malloc(int size)
22
+ /*
27
}
23
+ * Because of fold_sub_to_neg, we want to always return true,
24
+ * via finish_folding.
25
+ */
26
+ finish_folding(ctx, op);
27
+ return true;
28
}
28
}
29
29
30
-void tcg_context_init(TCGContext *s);
30
static bool fold_nor(OptContext *ctx, TCGOp *op)
31
+void tcg_init(size_t tb_size, int splitwx);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
32
void tcg_register_thread(void);
32
return fold_const2(ctx, op);
33
void tcg_prologue_init(TCGContext *s);
34
void tcg_func_start(TCGContext *s);
35
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/tcg/tcg-internal.h
38
+++ b/tcg/tcg-internal.h
39
@@ -XXX,XX +XXX,XX @@
40
extern TCGContext **tcg_ctxs;
41
extern unsigned int n_tcg_ctxs;
42
43
+void tcg_region_init(size_t tb_size, int splitwx);
44
bool tcg_region_alloc(TCGContext *s);
45
void tcg_region_initial_alloc(TCGContext *s);
46
void tcg_region_prologue_set(TCGContext *s);
47
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/accel/tcg/translate-all.c
50
+++ b/accel/tcg/translate-all.c
51
@@ -XXX,XX +XXX,XX @@ static void tb_htable_init(void)
52
void tcg_exec_init(unsigned long tb_size, int splitwx)
53
{
54
tcg_allowed = true;
55
- tcg_context_init(&tcg_init_ctx);
56
page_init();
57
tb_htable_init();
58
- tcg_region_init(tb_size, splitwx);
59
+ tcg_init(tb_size, splitwx);
60
61
#if defined(CONFIG_SOFTMMU)
62
/* There's no guest base to take into account, so go ahead and
63
diff --git a/tcg/tcg.c b/tcg/tcg.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/tcg.c
66
+++ b/tcg/tcg.c
67
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s);
68
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
69
TCGReg reg, const char *name);
70
71
-void tcg_context_init(TCGContext *s)
72
+static void tcg_context_init(void)
73
{
74
+ TCGContext *s = &tcg_init_ctx;
75
int op, total_args, n, i;
76
TCGOpDef *def;
77
TCGArgConstraint *args_ct;
78
@@ -XXX,XX +XXX,XX @@ void tcg_context_init(TCGContext *s)
79
cpu_env = temp_tcgv_ptr(ts);
80
}
33
}
81
34
82
+void tcg_init(size_t tb_size, int splitwx)
35
+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
83
+{
36
+{
84
+ tcg_context_init();
37
+ TCGOpcode neg_op;
85
+ tcg_region_init(tb_size, splitwx);
38
+ bool have_neg;
39
+
40
+ if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
41
+ return false;
42
+ }
43
+
44
+ switch (ctx->type) {
45
+ case TCG_TYPE_I32:
46
+ neg_op = INDEX_op_neg_i32;
47
+ have_neg = TCG_TARGET_HAS_neg_i32;
48
+ break;
49
+ case TCG_TYPE_I64:
50
+ neg_op = INDEX_op_neg_i64;
51
+ have_neg = TCG_TARGET_HAS_neg_i64;
52
+ break;
53
+ case TCG_TYPE_V64:
54
+ case TCG_TYPE_V128:
55
+ case TCG_TYPE_V256:
56
+ neg_op = INDEX_op_neg_vec;
57
+ have_neg = (TCG_TARGET_HAS_neg_vec &&
58
+ tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
+ }
63
+ if (have_neg) {
64
+ op->opc = neg_op;
65
+ op->args[1] = op->args[2];
66
+ return fold_neg(ctx, op);
67
+ }
68
+ return false;
86
+}
69
+}
87
+
70
+
88
/*
71
static bool fold_sub(OptContext *ctx, TCGOp *op)
89
* Allocate TBs right before their corresponding translated code, making
72
{
90
* sure that TBs and code are on different cache lines.
73
if (fold_const2(ctx, op) ||
74
- fold_xx_to_i(ctx, op, 0)) {
75
+ fold_xx_to_i(ctx, op, 0) ||
76
+ fold_sub_to_neg(ctx, op)) {
77
return true;
78
}
79
return false;
80
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
81
continue;
82
}
83
break;
84
- CASE_OP_32_64_VEC(sub):
85
- {
86
- TCGOpcode neg_op;
87
- bool have_neg;
88
-
89
- if (arg_is_const(op->args[2])) {
90
- /* Proceed with possible constant folding. */
91
- break;
92
- }
93
- switch (ctx.type) {
94
- case TCG_TYPE_I32:
95
- neg_op = INDEX_op_neg_i32;
96
- have_neg = TCG_TARGET_HAS_neg_i32;
97
- break;
98
- case TCG_TYPE_I64:
99
- neg_op = INDEX_op_neg_i64;
100
- have_neg = TCG_TARGET_HAS_neg_i64;
101
- break;
102
- case TCG_TYPE_V64:
103
- case TCG_TYPE_V128:
104
- case TCG_TYPE_V256:
105
- neg_op = INDEX_op_neg_vec;
106
- have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
107
- TCGOP_VECE(op)) > 0;
108
- break;
109
- default:
110
- g_assert_not_reached();
111
- }
112
- if (!have_neg) {
113
- break;
114
- }
115
- if (arg_is_const(op->args[1])
116
- && arg_info(op->args[1])->val == 0) {
117
- op->opc = neg_op;
118
- reset_temp(op->args[0]);
119
- op->args[1] = op->args[2];
120
- continue;
121
- }
122
- }
123
- break;
124
default:
125
break;
126
}
91
--
127
--
92
2.25.1
128
2.25.1
93
129
94
130
1
This has only one user, but will make more sense after some
1
Pull the "op r, a, i => mov r, a" optimization into a function,
2
code motion.
2
and use it in the outer-most logical operations.
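
Schematically (illustrative operands):

    add_i32 dst, src, $0x0     becomes   mov_i32 dst, src
    and_i32 dst, src, $-1      becomes   mov_i32 dst, src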
3
4
Always leave the tcg_init_ctx initialized to the first region,
5
in preparation for tcg_prologue_init(). This also requires
6
that we don't re-allocate the region for the first cpu, lest
7
we hit the assertion for the total number of regions allocated.
8
3
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
6
---
13
tcg/tcg.c | 37 ++++++++++++++++++++++---------------
7
tcg/optimize.c | 61 +++++++++++++++++++++-----------------------------
14
1 file changed, 22 insertions(+), 15 deletions(-)
8
1 file changed, 26 insertions(+), 35 deletions(-)
15
9
16
diff --git a/tcg/tcg.c b/tcg/tcg.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/tcg.c
12
--- a/tcg/optimize.c
19
+++ b/tcg/tcg.c
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(void)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
21
15
return false;
22
tcg_region_trees_init();
16
}
23
17
24
- /* In user-mode we support only one ctx, so do the initial allocation now */
18
+/* If the binary operation has second argument @i, fold to identity. */
25
-#ifdef CONFIG_USER_ONLY
19
+static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
26
- tcg_region_initial_alloc__locked(tcg_ctx);
20
+{
27
-#endif
21
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
28
+ /*
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
29
+ * Leave the initial context initialized to the first region.
23
+ }
30
+ * This will be the context into which we generate the prologue.
24
+ return false;
31
+ * It is also the only context for CONFIG_USER_ONLY.
32
+ */
33
+ tcg_region_initial_alloc__locked(&tcg_init_ctx);
34
+}
25
+}
35
+
26
+
36
+static void tcg_region_prologue_set(TCGContext *s)
27
/* If the binary operation has second argument @i, fold to NOT. */
37
+{
28
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
38
+ /* Deduct the prologue from the first region. */
29
{
39
+ g_assert(region.start == s->code_gen_buffer);
30
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
40
+ region.start = s->code_ptr;
31
41
+
32
static bool fold_add(OptContext *ctx, TCGOp *op)
42
+ /* Recompute boundaries of the first region. */
33
{
43
+ tcg_region_assign(s, 0);
34
- return fold_const2(ctx, op);
44
+
35
+ if (fold_const2(ctx, op) ||
45
+ /* Register the balance of the buffer with gdb. */
36
+ fold_xi_to_x(ctx, op, 0)) {
46
+ tcg_register_jit(tcg_splitwx_to_rx(region.start),
37
+ return true;
47
+ region.end - region.start);
38
+ }
39
+ return false;
48
}
40
}
49
41
50
#ifdef CONFIG_DEBUG_TCG
42
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
51
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
43
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
52
44
{
53
if (n > 0) {
45
if (fold_const2(ctx, op) ||
54
alloc_tcg_plugin_context(s);
46
fold_xi_to_i(ctx, op, 0) ||
55
+ tcg_region_initial_alloc(s);
47
+ fold_xi_to_x(ctx, op, -1) ||
48
fold_xx_to_x(ctx, op)) {
49
return true;
56
}
50
}
57
51
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
58
tcg_ctx = s;
52
{
59
- tcg_region_initial_alloc(s);
53
if (fold_const2(ctx, op) ||
54
fold_xx_to_i(ctx, op, 0) ||
55
+ fold_xi_to_x(ctx, op, 0) ||
56
fold_ix_to_not(ctx, op, -1)) {
57
return true;
58
}
59
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
60
static bool fold_eqv(OptContext *ctx, TCGOp *op)
61
{
62
if (fold_const2(ctx, op) ||
63
+ fold_xi_to_x(ctx, op, -1) ||
64
fold_xi_to_not(ctx, op, 0)) {
65
return true;
66
}
67
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
68
static bool fold_or(OptContext *ctx, TCGOp *op)
69
{
70
if (fold_const2(ctx, op) ||
71
+ fold_xi_to_x(ctx, op, 0) ||
72
fold_xx_to_x(ctx, op)) {
73
return true;
74
}
75
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
76
static bool fold_orc(OptContext *ctx, TCGOp *op)
77
{
78
if (fold_const2(ctx, op) ||
79
+ fold_xi_to_x(ctx, op, -1) ||
80
fold_ix_to_not(ctx, op, 0)) {
81
return true;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
84
85
static bool fold_shift(OptContext *ctx, TCGOp *op)
86
{
87
- return fold_const2(ctx, op);
88
+ if (fold_const2(ctx, op) ||
89
+ fold_xi_to_x(ctx, op, 0)) {
90
+ return true;
91
+ }
92
+ return false;
60
}
93
}
61
#endif /* !CONFIG_USER_ONLY */
94
62
95
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
63
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
96
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
64
{
97
{
65
size_t prologue_size;
98
if (fold_const2(ctx, op) ||
66
99
fold_xx_to_i(ctx, op, 0) ||
67
- /* Put the prologue at the beginning of code_gen_buffer. */
100
+ fold_xi_to_x(ctx, op, 0) ||
68
- tcg_region_assign(s, 0);
101
fold_sub_to_neg(ctx, op)) {
69
s->code_ptr = s->code_gen_ptr;
102
return true;
70
s->code_buf = s->code_gen_ptr;
103
}
71
s->data_gen_ptr = NULL;
104
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
72
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
105
{
73
(uintptr_t)s->code_buf, prologue_size);
106
if (fold_const2(ctx, op) ||
74
#endif
107
fold_xx_to_i(ctx, op, 0) ||
75
108
+ fold_xi_to_x(ctx, op, 0) ||
76
- /* Deduct the prologue from the first region. */
109
fold_xi_to_not(ctx, op, -1)) {
77
- region.start = s->code_ptr;
110
return true;
111
}
112
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
113
break;
114
}
115
116
- /* Simplify expression for "op r, a, const => mov r, a" cases */
117
- switch (opc) {
118
- CASE_OP_32_64_VEC(add):
119
- CASE_OP_32_64_VEC(sub):
120
- CASE_OP_32_64_VEC(or):
121
- CASE_OP_32_64_VEC(xor):
122
- CASE_OP_32_64_VEC(andc):
123
- CASE_OP_32_64(shl):
124
- CASE_OP_32_64(shr):
125
- CASE_OP_32_64(sar):
126
- CASE_OP_32_64(rotl):
127
- CASE_OP_32_64(rotr):
128
- if (!arg_is_const(op->args[1])
129
- && arg_is_const(op->args[2])
130
- && arg_info(op->args[2])->val == 0) {
131
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
132
- continue;
133
- }
134
- break;
135
- CASE_OP_32_64_VEC(and):
136
- CASE_OP_32_64_VEC(orc):
137
- CASE_OP_32_64(eqv):
138
- if (!arg_is_const(op->args[1])
139
- && arg_is_const(op->args[2])
140
- && arg_info(op->args[2])->val == -1) {
141
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
142
- continue;
143
- }
144
- break;
145
- default:
146
- break;
147
- }
78
-
148
-
79
- /* Recompute boundaries of the first region. */
149
/* Simplify using known-zero bits. Currently only ops with a single
80
- tcg_region_assign(s, 0);
150
output argument is supported. */
81
-
151
z_mask = -1;
82
- tcg_register_jit(tcg_splitwx_to_rx(region.start),
83
- region.end - region.start);
84
+ tcg_region_prologue_set(s);
85
86
#ifdef DEBUG_DISAS
87
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
88
--
152
--
89
2.25.1
153
2.25.1
90
154
91
155
1
We shortly want to use tcg_init for something else.
1
Pull the "op r, 0, b => movi r, 0" optimization into a function,
2
Since the hook is called init_machine, match that.
2
and use it in fold_shift.
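
Schematically (illustrative operands):

    shl_i32 dst, $0x0, count   becomes   movi_i32 dst, $0x0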
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
accel/tcg/tcg-all.c | 4 ++--
8
tcg/optimize.c | 28 ++++++++++------------------
10
1 file changed, 2 insertions(+), 2 deletions(-)
9
1 file changed, 10 insertions(+), 18 deletions(-)
11
10
12
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/tcg-all.c
13
--- a/tcg/optimize.c
15
+++ b/accel/tcg/tcg-all.c
14
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static void tcg_accel_instance_init(Object *obj)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
17
16
return false;
18
bool mttcg_enabled;
17
}
19
18
20
-static int tcg_init(MachineState *ms)
19
+/* If the binary operation has first argument @i, fold to @i. */
21
+static int tcg_init_machine(MachineState *ms)
20
+static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
21
+{
22
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
+ }
25
+ return false;
26
+}
27
+
28
/* If the binary operation has first argument @i, fold to NOT. */
29
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
22
{
30
{
23
TCGState *s = TCG_STATE(current_accel());
31
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
24
32
static bool fold_shift(OptContext *ctx, TCGOp *op)
25
@@ -XXX,XX +XXX,XX @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
26
{
33
{
27
AccelClass *ac = ACCEL_CLASS(oc);
34
if (fold_const2(ctx, op) ||
28
ac->name = "tcg";
35
+ fold_ix_to_i(ctx, op, 0) ||
29
- ac->init_machine = tcg_init;
36
fold_xi_to_x(ctx, op, 0)) {
30
+ ac->init_machine = tcg_init_machine;
37
return true;
31
ac->allowed = &tcg_allowed;
38
}
32
39
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
33
object_class_property_add_str(oc, "thread",
40
break;
41
}
42
43
- /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
44
- and "sub r, 0, a => neg r, a" case. */
45
- switch (opc) {
46
- CASE_OP_32_64(shl):
47
- CASE_OP_32_64(shr):
48
- CASE_OP_32_64(sar):
49
- CASE_OP_32_64(rotl):
50
- CASE_OP_32_64(rotr):
51
- if (arg_is_const(op->args[1])
52
- && arg_info(op->args[1])->val == 0) {
53
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
54
- continue;
55
- }
56
- break;
57
- default:
58
- break;
59
- }
60
-
61
/* Simplify using known-zero bits. Currently only ops with a single
62
output argument is supported. */
63
z_mask = -1;
34
--
64
--
35
2.25.1
65
2.25.1
36
66
37
67
1
Shortly, the full code_gen_buffer will only be visible
1
Move all of the known-zero optimizations into the per-opcode
2
to region.c, so move in_code_gen_buffer out-of-line.
2
functions. Use fold_masks when there is a possibility of the
3
3
result being determined, and simply set ctx->z_mask otherwise.
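
As a worked example of the bookkeeping (values invented): for an AND whose
inputs have z_mask 0x00ff and 0x0ff0, the result's z_mask is 0x00f0, since a
result bit can be non-zero only where both inputs may be non-zero.  Had the
intersection been 0, fold_masks would replace the op with a constant 0, and
an a_mask of 0 means the result is provably equal to the first input, so the
op becomes a simple move.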
4
Move the debugging versions of tcg_splitwx_to_{rx,rw}
5
to region.c as well, so that the compiler gets to see
6
the implementation of in_code_gen_buffer.
7
8
This leaves exactly one use of in_code_gen_buffer outside
9
of region.c, in cpu_restore_state, which, being on the
10
exception path, is not performance critical.
11
4
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
8
---
16
include/tcg/tcg.h | 11 +----------
9
tcg/optimize.c | 545 ++++++++++++++++++++++++++-----------------------
17
tcg/region.c | 34 ++++++++++++++++++++++++++++++++++
10
1 file changed, 294 insertions(+), 251 deletions(-)
18
tcg/tcg.c | 23 -----------------------
19
3 files changed, 35 insertions(+), 33 deletions(-)
20
11
21
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
22
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
23
--- a/include/tcg/tcg.h
14
--- a/tcg/optimize.c
24
+++ b/include/tcg/tcg.h
15
+++ b/tcg/optimize.c
25
@@ -XXX,XX +XXX,XX @@ extern const void *tcg_code_gen_epilogue;
16
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
26
extern uintptr_t tcg_splitwx_diff;
17
TCGTempSet temps_used;
27
extern TCGv_env cpu_env;
18
28
19
/* In flight values from optimization. */
29
-static inline bool in_code_gen_buffer(const void *p)
20
- uint64_t z_mask;
30
-{
21
+ uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
31
- const TCGContext *s = &tcg_init_ctx;
22
+ uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
32
- /*
23
TCGType type;
33
- * Much like it is valid to have a pointer to the byte past the
24
} OptContext;
34
- * end of an array (so long as you don't dereference it), allow
25
35
- * a pointer to the byte past the end of the code gen buffer.
26
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
36
- */
27
return false;
37
- return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
28
}
38
-}
29
39
+bool in_code_gen_buffer(const void *p);
30
+static bool fold_masks(OptContext *ctx, TCGOp *op)
40
41
#ifdef CONFIG_DEBUG_TCG
42
const void *tcg_splitwx_to_rx(void *rw);
43
diff --git a/tcg/region.c b/tcg/region.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/region.c
46
+++ b/tcg/region.c
47
@@ -XXX,XX +XXX,XX @@ static struct tcg_region_state region;
48
static void *region_trees;
49
static size_t tree_size;
50
51
+bool in_code_gen_buffer(const void *p)
52
+{
31
+{
53
+ const TCGContext *s = &tcg_init_ctx;
32
+ uint64_t a_mask = ctx->a_mask;
33
+ uint64_t z_mask = ctx->z_mask;
34
+
54
+ /*
35
+ /*
55
+ * Much like it is valid to have a pointer to the byte past the
36
+ * 32-bit ops generate 32-bit results. For the result is zero test
56
+ * end of an array (so long as you don't dereference it), allow
37
+ * below, we can ignore high bits, but for further optimizations we
57
+ * a pointer to the byte past the end of the code gen buffer.
38
+ * need to record that the high bits contain garbage.
58
+ */
39
+ */
59
+ return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
40
+ if (ctx->type == TCG_TYPE_I32) {
41
+ ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
42
+ a_mask &= MAKE_64BIT_MASK(0, 32);
43
+ z_mask &= MAKE_64BIT_MASK(0, 32);
44
+ }
45
+
46
+ if (z_mask == 0) {
47
+ return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
48
+ }
49
+ if (a_mask == 0) {
50
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
51
+ }
52
+ return false;
60
+}
53
+}
61
+
54
+
62
+#ifdef CONFIG_DEBUG_TCG
55
/*
63
+const void *tcg_splitwx_to_rx(void *rw)
56
* Convert @op to NOT, if NOT is supported by the host.
57
* Return true if the conversion is successful, which will still
58
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
59
60
static bool fold_and(OptContext *ctx, TCGOp *op)
61
{
62
+ uint64_t z1, z2;
63
+
64
if (fold_const2(ctx, op) ||
65
fold_xi_to_i(ctx, op, 0) ||
66
fold_xi_to_x(ctx, op, -1) ||
67
fold_xx_to_x(ctx, op)) {
68
return true;
69
}
70
- return false;
71
+
72
+ z1 = arg_info(op->args[1])->z_mask;
73
+ z2 = arg_info(op->args[2])->z_mask;
74
+ ctx->z_mask = z1 & z2;
75
+
76
+ /*
77
+ * Known-zeros does not imply known-ones. Therefore unless
78
+ * arg2 is constant, we can't infer affected bits from it.
79
+ */
80
+ if (arg_is_const(op->args[2])) {
81
+ ctx->a_mask = z1 & ~z2;
82
+ }
83
+
84
+ return fold_masks(ctx, op);
85
}
86
87
static bool fold_andc(OptContext *ctx, TCGOp *op)
88
{
89
+ uint64_t z1;
90
+
91
if (fold_const2(ctx, op) ||
92
fold_xx_to_i(ctx, op, 0) ||
93
fold_xi_to_x(ctx, op, 0) ||
94
fold_ix_to_not(ctx, op, -1)) {
95
return true;
96
}
97
- return false;
98
+
99
+ z1 = arg_info(op->args[1])->z_mask;
100
+
101
+ /*
102
+ * Known-zeros does not imply known-ones. Therefore unless
103
+ * arg2 is constant, we can't infer anything from it.
104
+ */
105
+ if (arg_is_const(op->args[2])) {
106
+ uint64_t z2 = ~arg_info(op->args[2])->z_mask;
107
+ ctx->a_mask = z1 & ~z2;
108
+ z1 &= z2;
109
+ }
110
+ ctx->z_mask = z1;
111
+
112
+ return fold_masks(ctx, op);
113
}
114
115
static bool fold_brcond(OptContext *ctx, TCGOp *op)
116
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
117
118
static bool fold_bswap(OptContext *ctx, TCGOp *op)
119
{
120
+ uint64_t z_mask, sign;
121
+
122
if (arg_is_const(op->args[1])) {
123
uint64_t t = arg_info(op->args[1])->val;
124
125
t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
126
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
127
}
128
- return false;
129
+
130
+ z_mask = arg_info(op->args[1])->z_mask;
131
+ switch (op->opc) {
132
+ case INDEX_op_bswap16_i32:
133
+ case INDEX_op_bswap16_i64:
134
+ z_mask = bswap16(z_mask);
135
+ sign = INT16_MIN;
136
+ break;
137
+ case INDEX_op_bswap32_i32:
138
+ case INDEX_op_bswap32_i64:
139
+ z_mask = bswap32(z_mask);
140
+ sign = INT32_MIN;
141
+ break;
142
+ case INDEX_op_bswap64_i64:
143
+ z_mask = bswap64(z_mask);
144
+ sign = INT64_MIN;
145
+ break;
146
+ default:
147
+ g_assert_not_reached();
148
+ }
149
+
150
+ switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
151
+ case TCG_BSWAP_OZ:
152
+ break;
153
+ case TCG_BSWAP_OS:
154
+ /* If the sign bit may be 1, force all the bits above to 1. */
155
+ if (z_mask & sign) {
156
+ z_mask |= sign;
157
+ }
158
+ break;
159
+ default:
160
+ /* The high bits are undefined: force all bits above the sign to 1. */
161
+ z_mask |= sign << 1;
162
+ break;
163
+ }
164
+ ctx->z_mask = z_mask;
165
+
166
+ return fold_masks(ctx, op);
167
}
168
169
static bool fold_call(OptContext *ctx, TCGOp *op)
170
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
171
172
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
173
{
174
+ uint64_t z_mask;
175
+
176
if (arg_is_const(op->args[1])) {
177
uint64_t t = arg_info(op->args[1])->val;
178
179
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
180
}
181
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
182
}
183
+
184
+ switch (ctx->type) {
185
+ case TCG_TYPE_I32:
186
+ z_mask = 31;
187
+ break;
188
+ case TCG_TYPE_I64:
189
+ z_mask = 63;
190
+ break;
191
+ default:
192
+ g_assert_not_reached();
193
+ }
194
+ ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
195
+
196
return false;
197
}
198
199
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
200
{
201
- return fold_const1(ctx, op);
202
+ if (fold_const1(ctx, op)) {
203
+ return true;
204
+ }
205
+
206
+ switch (ctx->type) {
207
+ case TCG_TYPE_I32:
208
+ ctx->z_mask = 32 | 31;
209
+ break;
210
+ case TCG_TYPE_I64:
211
+ ctx->z_mask = 64 | 63;
212
+ break;
213
+ default:
214
+ g_assert_not_reached();
215
+ }
216
+ return false;
217
}
218
219
static bool fold_deposit(OptContext *ctx, TCGOp *op)
220
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
221
t1 = deposit64(t1, op->args[3], op->args[4], t2);
222
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
223
}
224
+
225
+ ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
226
+ op->args[3], op->args[4],
227
+ arg_info(op->args[2])->z_mask);
228
return false;
229
}
230
231
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
232
233
static bool fold_extract(OptContext *ctx, TCGOp *op)
234
{
235
+ uint64_t z_mask_old, z_mask;
236
+
237
if (arg_is_const(op->args[1])) {
238
uint64_t t;
239
240
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
241
t = extract64(t, op->args[2], op->args[3]);
242
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
243
}
244
- return false;
245
+
246
+ z_mask_old = arg_info(op->args[1])->z_mask;
247
+ z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
248
+ if (op->args[2] == 0) {
249
+ ctx->a_mask = z_mask_old ^ z_mask;
250
+ }
251
+ ctx->z_mask = z_mask;
252
+
253
+ return fold_masks(ctx, op);
254
}
255
256
static bool fold_extract2(OptContext *ctx, TCGOp *op)
257
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
258
259
static bool fold_exts(OptContext *ctx, TCGOp *op)
260
{
261
- return fold_const1(ctx, op);
262
+ uint64_t z_mask_old, z_mask, sign;
263
+ bool type_change = false;
264
+
265
+ if (fold_const1(ctx, op)) {
266
+ return true;
267
+ }
268
+
269
+ z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
270
+
271
+ switch (op->opc) {
272
+ CASE_OP_32_64(ext8s):
273
+ sign = INT8_MIN;
274
+ z_mask = (uint8_t)z_mask;
275
+ break;
276
+ CASE_OP_32_64(ext16s):
277
+ sign = INT16_MIN;
278
+ z_mask = (uint16_t)z_mask;
279
+ break;
280
+ case INDEX_op_ext_i32_i64:
281
+ type_change = true;
282
+ QEMU_FALLTHROUGH;
283
+ case INDEX_op_ext32s_i64:
284
+ sign = INT32_MIN;
285
+ z_mask = (uint32_t)z_mask;
286
+ break;
287
+ default:
288
+ g_assert_not_reached();
289
+ }
290
+
291
+ if (z_mask & sign) {
292
+ z_mask |= sign;
293
+ } else if (!type_change) {
294
+ ctx->a_mask = z_mask_old ^ z_mask;
295
+ }
296
+ ctx->z_mask = z_mask;
297
+
298
+ return fold_masks(ctx, op);
299
}
300
301
static bool fold_extu(OptContext *ctx, TCGOp *op)
302
{
303
- return fold_const1(ctx, op);
304
+ uint64_t z_mask_old, z_mask;
305
+ bool type_change = false;
306
+
307
+ if (fold_const1(ctx, op)) {
308
+ return true;
309
+ }
310
+
311
+ z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
312
+
313
+ switch (op->opc) {
314
+ CASE_OP_32_64(ext8u):
315
+ z_mask = (uint8_t)z_mask;
316
+ break;
317
+ CASE_OP_32_64(ext16u):
318
+ z_mask = (uint16_t)z_mask;
319
+ break;
320
+ case INDEX_op_extrl_i64_i32:
321
+ case INDEX_op_extu_i32_i64:
322
+ type_change = true;
323
+ QEMU_FALLTHROUGH;
324
+ case INDEX_op_ext32u_i64:
325
+ z_mask = (uint32_t)z_mask;
326
+ break;
327
+ case INDEX_op_extrh_i64_i32:
328
+ type_change = true;
329
+ z_mask >>= 32;
330
+ break;
331
+ default:
332
+ g_assert_not_reached();
333
+ }
334
+
335
+ ctx->z_mask = z_mask;
336
+ if (!type_change) {
337
+ ctx->a_mask = z_mask_old ^ z_mask;
338
+ }
339
+ return fold_masks(ctx, op);
340
}
341
342
static bool fold_mb(OptContext *ctx, TCGOp *op)
343
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
344
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
345
}
346
347
+ ctx->z_mask = arg_info(op->args[3])->z_mask
348
+ | arg_info(op->args[4])->z_mask;
349
+
350
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
351
uint64_t tv = arg_info(op->args[3])->val;
352
uint64_t fv = arg_info(op->args[4])->val;
353
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
354
355
static bool fold_neg(OptContext *ctx, TCGOp *op)
356
{
357
+ uint64_t z_mask;
358
+
359
if (fold_const1(ctx, op)) {
360
return true;
361
}
362
+
363
+ /* Set to 1 all bits to the left of the rightmost. */
364
+ z_mask = arg_info(op->args[1])->z_mask;
365
+ ctx->z_mask = -(z_mask & -z_mask);
366
+
367
/*
368
* Because of fold_sub_to_neg, we want to always return true,
369
* via finish_folding.
370
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
371
fold_xx_to_x(ctx, op)) {
372
return true;
373
}
374
- return false;
375
+
376
+ ctx->z_mask = arg_info(op->args[1])->z_mask
377
+ | arg_info(op->args[2])->z_mask;
378
+ return fold_masks(ctx, op);
379
}
380
381
static bool fold_orc(OptContext *ctx, TCGOp *op)
382
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
383
384
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
385
{
386
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
387
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
388
+ MemOp mop = get_memop(oi);
389
+ int width = 8 * memop_size(mop);
390
+
391
+ if (!(mop & MO_SIGN) && width < 64) {
392
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
393
+ }
394
+
395
/* Opcodes that touch guest memory stop the mb optimization. */
396
ctx->prev_mb = NULL;
397
return false;
398
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
399
if (i >= 0) {
400
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
401
}
402
+
403
+ ctx->z_mask = 1;
404
return false;
405
}
406
407
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
408
op->opc = INDEX_op_setcond_i32;
409
break;
410
}
411
+
412
+ ctx->z_mask = 1;
413
return false;
414
415
do_setcond_const:
416
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
417
418
static bool fold_sextract(OptContext *ctx, TCGOp *op)
419
{
420
+ int64_t z_mask_old, z_mask;
421
+
422
if (arg_is_const(op->args[1])) {
423
uint64_t t;
424
425
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
426
t = sextract64(t, op->args[2], op->args[3]);
427
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
428
}
429
- return false;
430
+
431
+ z_mask_old = arg_info(op->args[1])->z_mask;
432
+ z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
433
+ if (op->args[2] == 0 && z_mask >= 0) {
434
+ ctx->a_mask = z_mask_old ^ z_mask;
435
+ }
436
+ ctx->z_mask = z_mask;
437
+
438
+ return fold_masks(ctx, op);
439
}
440
441
static bool fold_shift(OptContext *ctx, TCGOp *op)
442
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
443
fold_xi_to_x(ctx, op, 0)) {
444
return true;
445
}
446
+
447
+ if (arg_is_const(op->args[2])) {
448
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type,
449
+ arg_info(op->args[1])->z_mask,
450
+ arg_info(op->args[2])->val);
451
+ return fold_masks(ctx, op);
452
+ }
453
return false;
454
}
455
456
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
457
return fold_addsub2_i32(ctx, op, false);
458
}
459
460
+static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
64
+{
461
+{
65
+ /* Pass NULL pointers unchanged. */
462
+ /* We can't do any folding with a load, but we can record bits. */
66
+ if (rw) {
463
+ switch (op->opc) {
67
+ g_assert(in_code_gen_buffer(rw));
464
+ CASE_OP_32_64(ld8u):
68
+ rw += tcg_splitwx_diff;
465
+ ctx->z_mask = MAKE_64BIT_MASK(0, 8);
69
+ }
466
+ break;
70
+ return rw;
467
+ CASE_OP_32_64(ld16u):
468
+ ctx->z_mask = MAKE_64BIT_MASK(0, 16);
469
+ break;
470
+ case INDEX_op_ld32u_i64:
471
+ ctx->z_mask = MAKE_64BIT_MASK(0, 32);
472
+ break;
473
+ default:
474
+ g_assert_not_reached();
475
+ }
476
+ return false;
71
+}
477
+}
72
+
478
+
73
+void *tcg_splitwx_to_rw(const void *rx)
479
static bool fold_xor(OptContext *ctx, TCGOp *op)
74
+{
480
{
75
+ /* Pass NULL pointers unchanged. */
481
if (fold_const2(ctx, op) ||
76
+ if (rx) {
482
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
77
+ rx -= tcg_splitwx_diff;
483
fold_xi_to_not(ctx, op, -1)) {
78
+ /* Assert that we end with a pointer in the rw region. */
484
return true;
79
+ g_assert(in_code_gen_buffer(rx));
485
}
80
+ }
486
- return false;
81
+ return (void *)rx;
487
+
82
+}
488
+ ctx->z_mask = arg_info(op->args[1])->z_mask
83
+#endif /* CONFIG_DEBUG_TCG */
489
+ | arg_info(op->args[2])->z_mask;
84
+
490
+ return fold_masks(ctx, op);
85
/* compare a pointer @ptr and a tb_tc @s */
491
}
86
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
492
87
{
493
/* Propagate constants and copies, fold constant expressions. */
88
diff --git a/tcg/tcg.c b/tcg/tcg.c
494
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
89
index XXXXXXX..XXXXXXX 100644
495
}
90
--- a/tcg/tcg.c
496
91
+++ b/tcg/tcg.c
497
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
92
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef constraint_sets[] = {
498
- uint64_t z_mask, partmask, affected, tmp;
93
499
TCGOpcode opc = op->opc;
94
#include "tcg-target.c.inc"
500
const TCGOpDef *def;
95
501
bool done = false;
96
-#ifdef CONFIG_DEBUG_TCG
502
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
97
-const void *tcg_splitwx_to_rx(void *rw)
503
break;
98
-{
504
}
99
- /* Pass NULL pointers unchanged. */
505
100
- if (rw) {
506
- /* Simplify using known-zero bits. Currently only ops with a single
101
- g_assert(in_code_gen_buffer(rw));
507
- output argument is supported. */
102
- rw += tcg_splitwx_diff;
508
- z_mask = -1;
103
- }
509
- affected = -1;
104
- return rw;
510
- switch (opc) {
105
-}
511
- CASE_OP_32_64(ext8s):
106
-
512
- if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
107
-void *tcg_splitwx_to_rw(const void *rx)
513
- break;
108
-{
514
- }
109
- /* Pass NULL pointers unchanged. */
515
- QEMU_FALLTHROUGH;
110
- if (rx) {
516
- CASE_OP_32_64(ext8u):
111
- rx -= tcg_splitwx_diff;
517
- z_mask = 0xff;
112
- /* Assert that we end with a pointer in the rw region. */
518
- goto and_const;
113
- g_assert(in_code_gen_buffer(rx));
519
- CASE_OP_32_64(ext16s):
114
- }
520
- if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
115
- return (void *)rx;
521
- break;
116
-}
522
- }
117
-#endif /* CONFIG_DEBUG_TCG */
523
- QEMU_FALLTHROUGH;
118
-
524
- CASE_OP_32_64(ext16u):
119
static void alloc_tcg_plugin_context(TCGContext *s)
525
- z_mask = 0xffff;
120
{
526
- goto and_const;
121
#ifdef CONFIG_PLUGIN
527
- case INDEX_op_ext32s_i64:
528
- if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
529
- break;
530
- }
531
- QEMU_FALLTHROUGH;
532
- case INDEX_op_ext32u_i64:
533
- z_mask = 0xffffffffU;
534
- goto and_const;
535
-
536
- CASE_OP_32_64(and):
537
- z_mask = arg_info(op->args[2])->z_mask;
538
- if (arg_is_const(op->args[2])) {
539
- and_const:
540
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
541
- }
542
- z_mask = arg_info(op->args[1])->z_mask & z_mask;
543
- break;
544
-
545
- case INDEX_op_ext_i32_i64:
546
- if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
547
- break;
548
- }
549
- QEMU_FALLTHROUGH;
550
- case INDEX_op_extu_i32_i64:
551
- /* We do not compute affected as it is a size changing op. */
552
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
553
- break;
554
-
555
- CASE_OP_32_64(andc):
556
- /* Known-zeros does not imply known-ones. Therefore unless
557
- op->args[2] is constant, we can't infer anything from it. */
558
- if (arg_is_const(op->args[2])) {
559
- z_mask = ~arg_info(op->args[2])->z_mask;
560
- goto and_const;
561
- }
562
- /* But we certainly know nothing outside args[1] may be set. */
563
- z_mask = arg_info(op->args[1])->z_mask;
564
- break;
565
-
566
- case INDEX_op_sar_i32:
567
- if (arg_is_const(op->args[2])) {
568
- tmp = arg_info(op->args[2])->val & 31;
569
- z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
570
- }
571
- break;
572
- case INDEX_op_sar_i64:
573
- if (arg_is_const(op->args[2])) {
574
- tmp = arg_info(op->args[2])->val & 63;
575
- z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
576
- }
577
- break;
578
-
579
- case INDEX_op_shr_i32:
580
- if (arg_is_const(op->args[2])) {
581
- tmp = arg_info(op->args[2])->val & 31;
582
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
583
- }
584
- break;
585
- case INDEX_op_shr_i64:
586
- if (arg_is_const(op->args[2])) {
587
- tmp = arg_info(op->args[2])->val & 63;
588
- z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
589
- }
590
- break;
591
-
592
- case INDEX_op_extrl_i64_i32:
593
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
594
- break;
595
- case INDEX_op_extrh_i64_i32:
596
- z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
597
- break;
598
-
599
- CASE_OP_32_64(shl):
600
- if (arg_is_const(op->args[2])) {
601
- tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
602
- z_mask = arg_info(op->args[1])->z_mask << tmp;
603
- }
604
- break;
605
-
606
- CASE_OP_32_64(neg):
607
- /* Set to 1 all bits to the left of the rightmost. */
608
- z_mask = -(arg_info(op->args[1])->z_mask
609
- & -arg_info(op->args[1])->z_mask);
610
- break;
611
-
612
- CASE_OP_32_64(deposit):
613
- z_mask = deposit64(arg_info(op->args[1])->z_mask,
614
- op->args[3], op->args[4],
615
- arg_info(op->args[2])->z_mask);
616
- break;
617
-
618
- CASE_OP_32_64(extract):
619
- z_mask = extract64(arg_info(op->args[1])->z_mask,
620
- op->args[2], op->args[3]);
621
- if (op->args[2] == 0) {
622
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
623
- }
624
- break;
625
- CASE_OP_32_64(sextract):
626
- z_mask = sextract64(arg_info(op->args[1])->z_mask,
627
- op->args[2], op->args[3]);
628
- if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
629
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
630
- }
631
- break;
632
-
633
- CASE_OP_32_64(or):
634
- CASE_OP_32_64(xor):
635
- z_mask = arg_info(op->args[1])->z_mask
636
- | arg_info(op->args[2])->z_mask;
637
- break;
638
-
639
- case INDEX_op_clz_i32:
640
- case INDEX_op_ctz_i32:
641
- z_mask = arg_info(op->args[2])->z_mask | 31;
642
- break;
643
-
644
- case INDEX_op_clz_i64:
645
- case INDEX_op_ctz_i64:
646
- z_mask = arg_info(op->args[2])->z_mask | 63;
647
- break;
648
-
649
- case INDEX_op_ctpop_i32:
650
- z_mask = 32 | 31;
651
- break;
652
- case INDEX_op_ctpop_i64:
653
- z_mask = 64 | 63;
654
- break;
655
-
656
- CASE_OP_32_64(setcond):
657
- case INDEX_op_setcond2_i32:
658
- z_mask = 1;
659
- break;
660
-
661
- CASE_OP_32_64(movcond):
662
- z_mask = arg_info(op->args[3])->z_mask
663
- | arg_info(op->args[4])->z_mask;
664
- break;
665
-
666
- CASE_OP_32_64(ld8u):
667
- z_mask = 0xff;
668
- break;
669
- CASE_OP_32_64(ld16u):
670
- z_mask = 0xffff;
671
- break;
672
- case INDEX_op_ld32u_i64:
673
- z_mask = 0xffffffffu;
674
- break;
675
-
676
- CASE_OP_32_64(qemu_ld):
677
- {
678
- MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
679
- MemOp mop = get_memop(oi);
680
- if (!(mop & MO_SIGN)) {
681
- z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
682
- }
683
- }
684
- break;
685
-
686
- CASE_OP_32_64(bswap16):
687
- z_mask = arg_info(op->args[1])->z_mask;
688
- if (z_mask <= 0xffff) {
689
- op->args[2] |= TCG_BSWAP_IZ;
690
- }
691
- z_mask = bswap16(z_mask);
692
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
693
- case TCG_BSWAP_OZ:
694
- break;
695
- case TCG_BSWAP_OS:
696
- z_mask = (int16_t)z_mask;
697
- break;
698
- default: /* undefined high bits */
699
- z_mask |= MAKE_64BIT_MASK(16, 48);
700
- break;
701
- }
702
- break;
703
-
704
- case INDEX_op_bswap32_i64:
705
- z_mask = arg_info(op->args[1])->z_mask;
706
- if (z_mask <= 0xffffffffu) {
707
- op->args[2] |= TCG_BSWAP_IZ;
708
- }
709
- z_mask = bswap32(z_mask);
710
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
711
- case TCG_BSWAP_OZ:
712
- break;
713
- case TCG_BSWAP_OS:
714
- z_mask = (int32_t)z_mask;
715
- break;
716
- default: /* undefined high bits */
717
- z_mask |= MAKE_64BIT_MASK(32, 32);
718
- break;
719
- }
720
- break;
721
-
722
- default:
723
- break;
724
- }
725
-
726
- /* 32-bit ops generate 32-bit results. For the result is zero test
727
- below, we can ignore high bits, but for further optimizations we
728
- need to record that the high bits contain garbage. */
729
- partmask = z_mask;
730
- if (ctx.type == TCG_TYPE_I32) {
731
- z_mask |= ~(tcg_target_ulong)0xffffffffu;
732
- partmask &= 0xffffffffu;
733
- affected &= 0xffffffffu;
734
- }
735
- ctx.z_mask = z_mask;
736
-
737
- if (partmask == 0) {
738
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
739
- continue;
740
- }
741
- if (affected == 0) {
742
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
743
- continue;
744
- }
745
+ /* Assume all bits affected, and no bits known zero. */
746
+ ctx.a_mask = -1;
747
+ ctx.z_mask = -1;
748
749
/*
750
* Process each opcode.
751
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
752
case INDEX_op_extrh_i64_i32:
753
done = fold_extu(&ctx, op);
754
break;
755
+ CASE_OP_32_64(ld8u):
756
+ CASE_OP_32_64(ld16u):
757
+ case INDEX_op_ld32u_i64:
758
+ done = fold_tcg_ld(&ctx, op);
759
+ break;
760
case INDEX_op_mb:
761
done = fold_mb(&ctx, op);
762
break;
122
--
763
--
123
2.25.1
764
2.25.1
124
765
125
766
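The fold_and/fold_andc/fold_masks hunks above all rest on the same bit-mask bookkeeping: z_mask has a clear bit wherever the result is certainly zero, and a_mask has a clear bit wherever the result is guaranteed to equal the first input. A minimal stand-alone sketch of that algebra, in plain C with invented helper names (not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* z_mask: a clear bit means "this result bit is certainly 0". */
static uint64_t zmask_and(uint64_t z1, uint64_t z2)
{
    return z1 & z2;            /* a bit can be 1 only if both inputs allow it */
}

static uint64_t zmask_or(uint64_t z1, uint64_t z2)
{
    return z1 | z2;            /* a bit can be 1 if either input allows it */
}

int main(void)
{
    uint64_t z1 = 0xff;        /* arg1 known to fit in 8 bits */
    uint64_t z2 = 0x0f;        /* arg2 is the constant 0x0f */

    /* "Affected" bits for AND with a constant: bits arg1 might have set
     * but the constant certainly clears.  If this is 0, the AND cannot
     * change arg1 and can be replaced by a plain move. */
    uint64_t a_mask = z1 & ~z2;

    printf("and: z_mask=%#llx\n", (unsigned long long)zmask_and(z1, z2));
    printf("or:  z_mask=%#llx\n", (unsigned long long)zmask_or(z1, z2));
    printf("and: a_mask=%#llx\n", (unsigned long long)a_mask);
    return 0;
}

With z1 = 0xff and the constant 0x0f, the AND's a_mask comes out nonzero (0xf0), so the operation is not reducible to a copy; had the constant been 0xff it would be, which is exactly the a_mask == 0 case handled by fold_masks.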
1
Instead of delaying tcg_region_init until after tcg_prologue_init
1
Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
2
is complete, do tcg_region_init first and let tcg_prologue_init
2
and muls2_i64.
3
shrink the first region by the size of the generated prologue.
4
3
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
accel/tcg/tcg-all.c | 11 ---------
8
tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
10
accel/tcg/translate-all.c | 3 +++
9
1 file changed, 35 insertions(+), 9 deletions(-)
11
bsd-user/main.c | 1 -
12
linux-user/main.c | 1 -
13
tcg/tcg.c | 52 ++++++++++++++-------------------------
14
5 files changed, 22 insertions(+), 46 deletions(-)
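Before the hunks below, a rough sketch of what fold_multiply2 computes when both multiplicands are constant: form the full double-width product, then split it into the low and high destination words. Plain C, illustrative only; QEMU itself uses its mulu64/muls64 helpers for the 64x64 cases.

#include <stdint.h>
#include <assert.h>

/* mulu2_i32: full 32x32->64 product, split into two 32-bit outputs. */
static void mulu2_i32(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi)
{
    uint64_t p = (uint64_t)a * b;
    *lo = (uint32_t)p;
    *hi = (uint32_t)(p >> 32);
}

/* muls2_i32: the same, but the product is signed. */
static void muls2_i32(int32_t a, int32_t b, int32_t *lo, int32_t *hi)
{
    int64_t p = (int64_t)a * b;
    *lo = (int32_t)(uint32_t)p;
    *hi = (int32_t)((uint64_t)p >> 32);
}

int main(void)
{
    uint32_t lo, hi;
    int32_t slo, shi;

    mulu2_i32(0xffffffffu, 2, &lo, &hi);
    assert(lo == 0xfffffffeu && hi == 1);

    muls2_i32(-1, 2, &slo, &shi);
    assert(slo == -2 && shi == -1);    /* high word carries the sign */
    return 0;
}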
15
10
16
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
18
--- a/accel/tcg/tcg-all.c
13
--- a/tcg/optimize.c
19
+++ b/accel/tcg/tcg-all.c
14
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ static int tcg_init(MachineState *ms)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
21
16
return false;
22
tcg_exec_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
23
mttcg_enabled = s->mttcg_enabled;
24
-
25
- /*
26
- * Initialize TCG regions only for softmmu.
27
- *
28
- * This needs to be done later for user mode, because the prologue
29
- * generation needs to be delayed so that GUEST_BASE is already set.
30
- */
31
-#ifndef CONFIG_USER_ONLY
32
- tcg_region_init();
33
-#endif /* !CONFIG_USER_ONLY */
34
-
35
return 0;
36
}
17
}
37
18
38
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
19
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
39
index XXXXXXX..XXXXXXX 100644
20
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
40
--- a/accel/tcg/translate-all.c
21
{
41
+++ b/accel/tcg/translate-all.c
22
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
42
@@ -XXX,XX +XXX,XX @@ void tcg_exec_init(unsigned long tb_size, int splitwx)
23
- uint32_t a = arg_info(op->args[2])->val;
43
splitwx, &error_fatal);
24
- uint32_t b = arg_info(op->args[3])->val;
44
assert(ok);
25
- uint64_t r = (uint64_t)a * b;
45
26
+ uint64_t a = arg_info(op->args[2])->val;
46
+ /* TODO: allocating regions is hand-in-glove with code_gen_buffer. */
27
+ uint64_t b = arg_info(op->args[3])->val;
47
+ tcg_region_init();
28
+ uint64_t h, l;
29
TCGArg rl, rh;
30
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
31
+ TCGOp *op2;
48
+
32
+
49
#if defined(CONFIG_SOFTMMU)
33
+ switch (op->opc) {
50
/* There's no guest base to take into account, so go ahead and
34
+ case INDEX_op_mulu2_i32:
51
initialize the prologue now. */
35
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
52
diff --git a/bsd-user/main.c b/bsd-user/main.c
36
+ h = (int32_t)(l >> 32);
53
index XXXXXXX..XXXXXXX 100644
37
+ l = (int32_t)l;
54
--- a/bsd-user/main.c
38
+ break;
55
+++ b/bsd-user/main.c
39
+ case INDEX_op_muls2_i32:
56
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
40
+ l = (int64_t)(int32_t)a * (int32_t)b;
57
* the real value of GUEST_BASE into account.
41
+ h = l >> 32;
58
*/
42
+ l = (int32_t)l;
59
tcg_prologue_init(tcg_ctx);
43
+ break;
60
- tcg_region_init();
44
+ case INDEX_op_mulu2_i64:
61
45
+ mulu64(&l, &h, a, b);
62
/* build Task State */
46
+ break;
63
memset(ts, 0, sizeof(TaskState));
47
+ case INDEX_op_muls2_i64:
64
diff --git a/linux-user/main.c b/linux-user/main.c
48
+ muls64(&l, &h, a, b);
65
index XXXXXXX..XXXXXXX 100644
49
+ break;
66
--- a/linux-user/main.c
50
+ default:
67
+++ b/linux-user/main.c
51
+ g_assert_not_reached();
68
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
52
+ }
69
generating the prologue until now so that the prologue can take
53
70
the real value of GUEST_BASE into account. */
54
rl = op->args[0];
71
tcg_prologue_init(tcg_ctx);
55
rh = op->args[1];
72
- tcg_region_init();
56
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
73
57
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
74
target_cpu_copy_regs(env, regs);
58
+
75
59
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
76
diff --git a/tcg/tcg.c b/tcg/tcg.c
60
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
77
index XXXXXXX..XXXXXXX 100644
61
+
78
--- a/tcg/tcg.c
62
+ tcg_opt_gen_movi(ctx, op, rl, l);
79
+++ b/tcg/tcg.c
63
+ tcg_opt_gen_movi(ctx, op2, rh, h);
80
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tcg_tb_alloc(TCGContext *s)
64
return true;
81
82
void tcg_prologue_init(TCGContext *s)
83
{
84
- size_t prologue_size, total_size;
85
- void *buf0, *buf1;
86
+ size_t prologue_size;
87
88
/* Put the prologue at the beginning of code_gen_buffer. */
89
- buf0 = s->code_gen_buffer;
90
- total_size = s->code_gen_buffer_size;
91
- s->code_ptr = buf0;
92
- s->code_buf = buf0;
93
+ tcg_region_assign(s, 0);
94
+ s->code_ptr = s->code_gen_ptr;
95
+ s->code_buf = s->code_gen_ptr;
96
s->data_gen_ptr = NULL;
97
98
- /*
99
- * The region trees are not yet configured, but tcg_splitwx_to_rx
100
- * needs the bounds for an assert.
101
- */
102
- region.start = buf0;
103
- region.end = buf0 + total_size;
104
-
105
#ifndef CONFIG_TCG_INTERPRETER
106
- tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(buf0);
107
+ tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
108
#endif
109
110
- /* Compute a high-water mark, at which we voluntarily flush the buffer
111
- and start over. The size here is arbitrary, significantly larger
112
- than we expect the code generation for any one opcode to require. */
113
- s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);
114
-
115
#ifdef TCG_TARGET_NEED_POOL_LABELS
116
s->pool_labels = NULL;
117
#endif
118
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
119
}
65
}
120
#endif
66
return false;
121
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
122
- buf1 = s->code_ptr;
68
CASE_OP_32_64(muluh):
123
+ prologue_size = tcg_current_code_size(s);
69
done = fold_mul_highpart(&ctx, op);
124
+
70
break;
125
#ifndef CONFIG_TCG_INTERPRETER
71
- case INDEX_op_mulu2_i32:
126
- flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(buf0), (uintptr_t)buf0,
72
- done = fold_mulu2_i32(&ctx, op);
127
- tcg_ptr_byte_diff(buf1, buf0));
73
+ CASE_OP_32_64(muls2):
128
+ flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
74
+ CASE_OP_32_64(mulu2):
129
+ (uintptr_t)s->code_buf, prologue_size);
75
+ done = fold_multiply2(&ctx, op);
130
#endif
76
break;
131
77
CASE_OP_32_64(nand):
132
- /* Deduct the prologue from the buffer. */
78
done = fold_nand(&ctx, op);
133
- prologue_size = tcg_current_code_size(s);
134
- s->code_gen_ptr = buf1;
135
- s->code_gen_buffer = buf1;
136
- s->code_buf = buf1;
137
- total_size -= prologue_size;
138
- s->code_gen_buffer_size = total_size;
139
+ /* Deduct the prologue from the first region. */
140
+ region.start = s->code_ptr;
141
142
- tcg_register_jit(tcg_splitwx_to_rx(s->code_gen_buffer), total_size);
143
+ /* Recompute boundaries of the first region. */
144
+ tcg_region_assign(s, 0);
145
+
146
+ tcg_register_jit(tcg_splitwx_to_rx(region.start),
147
+ region.end - region.start);
148
149
#ifdef DEBUG_DISAS
150
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
151
FILE *logfile = qemu_log_lock();
152
qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
153
if (s->data_gen_ptr) {
154
- size_t code_size = s->data_gen_ptr - buf0;
155
+ size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
156
size_t data_size = prologue_size - code_size;
157
size_t i;
158
159
- log_disas(buf0, code_size);
160
+ log_disas(s->code_gen_ptr, code_size);
161
162
for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
163
if (sizeof(tcg_target_ulong) == 8) {
164
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
165
}
166
}
167
} else {
168
- log_disas(buf0, prologue_size);
169
+ log_disas(s->code_gen_ptr, prologue_size);
170
}
171
qemu_log("\n");
172
qemu_log_flush();
173
--
79
--
174
2.25.1
80
2.25.1
175
81
176
82
1
All callers immediately assert on error, so move the assert
1
Rename to fold_addsub2.
2
into the function itself.
2
Use Int128 to implement the wider operation.
3
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
tcg/tcg.c | 19 ++++++-------------
9
tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++----------------
10
1 file changed, 6 insertions(+), 13 deletions(-)
10
1 file changed, 44 insertions(+), 21 deletions(-)
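The Int128 usage below is conceptually just an add or subtract with carry across two 64-bit halves. A self-contained sketch of that operation, assuming nothing beyond standard C (not QEMU code):

#include <stdint.h>
#include <assert.h>

/* 128-bit add/sub expressed on (low, high) 64-bit pairs, as fold_addsub2
 * now does via Int128.  Illustrative only. */
static void add128(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh,
                   uint64_t *rl, uint64_t *rh)
{
    uint64_t l = al + bl;
    *rl = l;
    *rh = ah + bh + (l < al);      /* carry out of the low half */
}

static void sub128(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh,
                   uint64_t *rl, uint64_t *rh)
{
    *rl = al - bl;
    *rh = ah - bh - (al < bl);     /* borrow from the low half */
}

int main(void)
{
    uint64_t rl, rh;

    add128(UINT64_MAX, 0, 1, 0, &rl, &rh);
    assert(rl == 0 && rh == 1);            /* carry propagates */

    sub128(0, 1, 1, 0, &rl, &rh);
    assert(rl == UINT64_MAX && rh == 0);   /* borrow propagates */
    return 0;
}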
11
11
12
diff --git a/tcg/tcg.c b/tcg/tcg.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/tcg.c
14
--- a/tcg/optimize.c
15
+++ b/tcg/tcg.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static bool tcg_region_alloc(TCGContext *s)
16
@@ -XXX,XX +XXX,XX @@
17
* Perform a context's first region allocation.
18
* This function does _not_ increment region.agg_size_full.
19
*/
17
*/
20
-static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
18
21
+static void tcg_region_initial_alloc__locked(TCGContext *s)
19
#include "qemu/osdep.h"
20
+#include "qemu/int128.h"
21
#include "tcg/tcg-op.h"
22
#include "tcg-internal.h"
23
24
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
25
return false;
26
}
27
28
-static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
29
+static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
22
{
30
{
23
- return tcg_region_alloc__locked(s);
31
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
24
+ bool err = tcg_region_alloc__locked(s);
32
arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
25
+ g_assert(!err);
33
- uint32_t al = arg_info(op->args[2])->val;
34
- uint32_t ah = arg_info(op->args[3])->val;
35
- uint32_t bl = arg_info(op->args[4])->val;
36
- uint32_t bh = arg_info(op->args[5])->val;
37
- uint64_t a = ((uint64_t)ah << 32) | al;
38
- uint64_t b = ((uint64_t)bh << 32) | bl;
39
+ uint64_t al = arg_info(op->args[2])->val;
40
+ uint64_t ah = arg_info(op->args[3])->val;
41
+ uint64_t bl = arg_info(op->args[4])->val;
42
+ uint64_t bh = arg_info(op->args[5])->val;
43
TCGArg rl, rh;
44
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
45
+ TCGOp *op2;
46
47
- if (add) {
48
- a += b;
49
+ if (ctx->type == TCG_TYPE_I32) {
50
+ uint64_t a = deposit64(al, 32, 32, ah);
51
+ uint64_t b = deposit64(bl, 32, 32, bh);
52
+
53
+ if (add) {
54
+ a += b;
55
+ } else {
56
+ a -= b;
57
+ }
58
+
59
+ al = sextract64(a, 0, 32);
60
+ ah = sextract64(a, 32, 32);
61
} else {
62
- a -= b;
63
+ Int128 a = int128_make128(al, ah);
64
+ Int128 b = int128_make128(bl, bh);
65
+
66
+ if (add) {
67
+ a = int128_add(a, b);
68
+ } else {
69
+ a = int128_sub(a, b);
70
+ }
71
+
72
+ al = int128_getlo(a);
73
+ ah = int128_gethi(a);
74
}
75
76
rl = op->args[0];
77
rh = op->args[1];
78
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
79
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
80
+
81
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
82
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
83
+
84
+ tcg_opt_gen_movi(ctx, op, rl, al);
85
+ tcg_opt_gen_movi(ctx, op2, rh, ah);
86
return true;
87
}
88
return false;
26
}
89
}
27
90
28
/* Call from a safe-work context */
91
-static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
29
@@ -XXX,XX +XXX,XX @@ void tcg_region_reset_all(void)
92
+static bool fold_add2(OptContext *ctx, TCGOp *op)
30
93
{
31
for (i = 0; i < n_ctxs; i++) {
94
- return fold_addsub2_i32(ctx, op, true);
32
TCGContext *s = qatomic_read(&tcg_ctxs[i]);
95
+ return fold_addsub2(ctx, op, true);
33
- bool err = tcg_region_initial_alloc__locked(s);
34
-
35
- g_assert(!err);
36
+ tcg_region_initial_alloc__locked(s);
37
}
38
qemu_mutex_unlock(&region.lock);
39
40
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(void)
41
42
/* In user-mode we support only one ctx, so do the initial allocation now */
43
#ifdef CONFIG_USER_ONLY
44
- {
45
- bool err = tcg_region_initial_alloc__locked(tcg_ctx);
46
-
47
- g_assert(!err);
48
- }
49
+ tcg_region_initial_alloc__locked(tcg_ctx);
50
#endif
51
}
96
}
52
97
53
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
98
static bool fold_and(OptContext *ctx, TCGOp *op)
54
MachineState *ms = MACHINE(qdev_get_machine());
99
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
55
TCGContext *s = g_malloc(sizeof(*s));
100
return false;
56
unsigned int i, n;
57
- bool err;
58
59
*s = tcg_init_ctx;
60
61
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
62
63
tcg_ctx = s;
64
qemu_mutex_lock(&region.lock);
65
- err = tcg_region_initial_alloc__locked(tcg_ctx);
66
- g_assert(!err);
67
+ tcg_region_initial_alloc__locked(s);
68
qemu_mutex_unlock(&region.lock);
69
}
101
}
70
#endif /* !CONFIG_USER_ONLY */
102
103
-static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
104
+static bool fold_sub2(OptContext *ctx, TCGOp *op)
105
{
106
- return fold_addsub2_i32(ctx, op, false);
107
+ return fold_addsub2(ctx, op, false);
108
}
109
110
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
111
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
112
CASE_OP_32_64_VEC(add):
113
done = fold_add(&ctx, op);
114
break;
115
- case INDEX_op_add2_i32:
116
- done = fold_add2_i32(&ctx, op);
117
+ CASE_OP_32_64(add2):
118
+ done = fold_add2(&ctx, op);
119
break;
120
CASE_OP_32_64_VEC(and):
121
done = fold_and(&ctx, op);
122
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
123
CASE_OP_32_64_VEC(sub):
124
done = fold_sub(&ctx, op);
125
break;
126
- case INDEX_op_sub2_i32:
127
- done = fold_sub2_i32(&ctx, op);
128
+ CASE_OP_32_64(sub2):
129
+ done = fold_sub2(&ctx, op);
130
break;
131
CASE_OP_32_64_VEC(xor):
132
done = fold_xor(&ctx, op);
71
--
133
--
72
2.25.1
134
2.25.1
73
135
74
136
diff view generated by jsdifflib
1
Introduce a function to remove everything emitted
1
Most of these are handled by creating a fold_const2_commutative
2
since a given point.
2
to handle all of the binary operators. The rest were already
3
3
handled on a case-by-case basis in the switch, and have their
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
own fold function in which to place the call.
5
6
We now have only one major switch on TCGOpcode.
7
8
Introduce NO_DEST and a block comment for swap_commutative in
9
order to make the handling of brcond and movcond opcodes cleaner.
10
11
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
13
---
7
include/tcg/tcg.h | 10 ++++++++++
14
tcg/optimize.c | 142 ++++++++++++++++++++++++-------------------------
8
tcg/tcg.c | 13 +++++++++++++
15
1 file changed, 70 insertions(+), 72 deletions(-)
9
2 files changed, 23 insertions(+)
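The patch below hinges on one small canonicalization: for a commutative operation, move a constant operand into the second slot, and when the operands of a brcond/setcond/movcond are exchanged, swap the comparison condition to match. A stripped-down sketch of the swap itself, with toy types standing in for TCG's (hypothetical, not the real interface):

#include <stdbool.h>
#include <stdio.h>

/* Toy operand: either a constant or a "register" number. */
typedef struct {
    bool is_const;
    long val;
} Arg;

/* Put a constant first operand into the second slot so later folding
 * only has to check args[2] for constants.  Returns true on swap, in
 * which case a comparison condition would also have to be swapped
 * (e.g. LT becomes GT), as fold_brcond/fold_setcond do above. */
static bool swap_commutative(Arg *p1, Arg *p2)
{
    if (p1->is_const && !p2->is_const) {
        Arg t = *p1;
        *p1 = *p2;
        *p2 = t;
        return true;
    }
    return false;
}

int main(void)
{
    Arg a = { .is_const = true,  .val = 42 };
    Arg b = { .is_const = false, .val = 3 };   /* pretend: register r3 */

    bool swapped = swap_commutative(&a, &b);
    printf("swapped=%d, second operand is %s\n",
           swapped, b.is_const ? "constant" : "register");
    return 0;
}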
16
10
17
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
12
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
13
--- a/include/tcg/tcg.h
19
--- a/tcg/optimize.c
14
+++ b/include/tcg/tcg.h
20
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ void tcg_op_remove(TCGContext *s, TCGOp *op);
21
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
16
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
22
return -1;
17
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);
23
}
18
24
19
+/**
25
+/**
20
+ * tcg_remove_ops_after:
26
+ * swap_commutative:
21
+ * @op: target operation
27
+ * @dest: TCGArg of the destination argument, or NO_DEST.
28
+ * @p1: first paired argument
29
+ * @p2: second paired argument
22
+ *
30
+ *
23
+ * Discard any opcodes emitted since @op. Expected usage is to save
31
+ * If *@p1 is a constant and *@p2 is not, swap.
24
+ * a starting point with tcg_last_op(), speculatively emit opcodes,
32
+ * If *@p2 matches @dest, swap.
25
+ * then decide whether or not to keep those opcodes after the fact.
33
+ * Return true if a swap was performed.
26
+ */
34
+ */
27
+void tcg_remove_ops_after(TCGOp *op);
35
+
28
+
36
+#define NO_DEST temp_arg(NULL)
29
void tcg_optimize(TCGContext *s);
37
+
30
38
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
31
/* Allocate a new temporary and initialize it with a constant. */
39
{
32
diff --git a/tcg/tcg.c b/tcg/tcg.c
40
TCGArg a1 = *p1, a2 = *p2;
33
index XXXXXXX..XXXXXXX 100644
41
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
34
--- a/tcg/tcg.c
42
return false;
35
+++ b/tcg/tcg.c
36
@@ -XXX,XX +XXX,XX @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
37
#endif
38
}
43
}
39
44
40
+void tcg_remove_ops_after(TCGOp *op)
45
+static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
41
+{
46
+{
42
+ TCGContext *s = tcg_ctx;
47
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
43
+
48
+ return fold_const2(ctx, op);
44
+ while (true) {
45
+ TCGOp *last = tcg_last_op();
46
+ if (last == op) {
47
+ return;
48
+ }
49
+ tcg_op_remove(s, last);
50
+ }
51
+}
49
+}
52
+
50
+
53
static TCGOp *tcg_op_alloc(TCGOpcode opc)
51
static bool fold_masks(OptContext *ctx, TCGOp *op)
54
{
52
{
55
TCGContext *s = tcg_ctx;
53
uint64_t a_mask = ctx->a_mask;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
55
56
static bool fold_add(OptContext *ctx, TCGOp *op)
57
{
58
- if (fold_const2(ctx, op) ||
59
+ if (fold_const2_commutative(ctx, op) ||
60
fold_xi_to_x(ctx, op, 0)) {
61
return true;
62
}
63
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
64
65
static bool fold_add2(OptContext *ctx, TCGOp *op)
66
{
67
+ /* Note that the high and low parts may be independently swapped. */
68
+ swap_commutative(op->args[0], &op->args[2], &op->args[4]);
69
+ swap_commutative(op->args[1], &op->args[3], &op->args[5]);
70
+
71
return fold_addsub2(ctx, op, true);
72
}
73
74
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
75
{
76
uint64_t z1, z2;
77
78
- if (fold_const2(ctx, op) ||
79
+ if (fold_const2_commutative(ctx, op) ||
80
fold_xi_to_i(ctx, op, 0) ||
81
fold_xi_to_x(ctx, op, -1) ||
82
fold_xx_to_x(ctx, op)) {
83
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
TCGCond cond = op->args[2];
87
- int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
88
+ int i;
89
90
+ if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
91
+ op->args[2] = cond = tcg_swap_cond(cond);
92
+ }
93
+
94
+ i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
95
if (i == 0) {
96
tcg_op_remove(ctx->tcg, op);
97
return true;
98
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
99
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
100
{
101
TCGCond cond = op->args[4];
102
- int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
103
TCGArg label = op->args[5];
104
- int inv = 0;
105
+ int i, inv = 0;
106
107
+ if (swap_commutative2(&op->args[0], &op->args[2])) {
108
+ op->args[4] = cond = tcg_swap_cond(cond);
109
+ }
110
+
111
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
112
if (i >= 0) {
113
goto do_brcond_const;
114
}
115
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
116
117
static bool fold_eqv(OptContext *ctx, TCGOp *op)
118
{
119
- if (fold_const2(ctx, op) ||
120
+ if (fold_const2_commutative(ctx, op) ||
121
fold_xi_to_x(ctx, op, -1) ||
122
fold_xi_to_not(ctx, op, 0)) {
123
return true;
124
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
125
static bool fold_movcond(OptContext *ctx, TCGOp *op)
126
{
127
TCGCond cond = op->args[5];
128
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
129
+ int i;
130
131
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
132
+ op->args[5] = cond = tcg_swap_cond(cond);
133
+ }
134
+ /*
135
+ * Canonicalize the "false" input reg to match the destination reg so
136
+ * that the tcg backend can implement a "move if true" operation.
137
+ */
138
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
139
+ op->args[5] = cond = tcg_invert_cond(cond);
140
+ }
141
+
142
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
143
if (i >= 0) {
144
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
145
}
146
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
147
148
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
149
{
150
- if (fold_const2(ctx, op) ||
151
+ if (fold_const2_commutative(ctx, op) ||
152
fold_xi_to_i(ctx, op, 0)) {
153
return true;
154
}
155
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
156
157
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
158
{
159
+ swap_commutative(op->args[0], &op->args[2], &op->args[3]);
160
+
161
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
162
uint64_t a = arg_info(op->args[2])->val;
163
uint64_t b = arg_info(op->args[3])->val;
164
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
165
166
static bool fold_nand(OptContext *ctx, TCGOp *op)
167
{
168
- if (fold_const2(ctx, op) ||
169
+ if (fold_const2_commutative(ctx, op) ||
170
fold_xi_to_not(ctx, op, -1)) {
171
return true;
172
}
173
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
174
175
static bool fold_nor(OptContext *ctx, TCGOp *op)
176
{
177
- if (fold_const2(ctx, op) ||
178
+ if (fold_const2_commutative(ctx, op) ||
179
fold_xi_to_not(ctx, op, 0)) {
180
return true;
181
}
182
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
183
184
static bool fold_or(OptContext *ctx, TCGOp *op)
185
{
186
- if (fold_const2(ctx, op) ||
187
+ if (fold_const2_commutative(ctx, op) ||
188
fold_xi_to_x(ctx, op, 0) ||
189
fold_xx_to_x(ctx, op)) {
190
return true;
191
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
192
static bool fold_setcond(OptContext *ctx, TCGOp *op)
193
{
194
TCGCond cond = op->args[3];
195
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
196
+ int i;
197
198
+ if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
199
+ op->args[3] = cond = tcg_swap_cond(cond);
200
+ }
201
+
202
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
203
if (i >= 0) {
204
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
205
}
206
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
207
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
208
{
209
TCGCond cond = op->args[5];
210
- int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
211
- int inv = 0;
212
+ int i, inv = 0;
213
214
+ if (swap_commutative2(&op->args[1], &op->args[3])) {
215
+ op->args[5] = cond = tcg_swap_cond(cond);
216
+ }
217
+
218
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
219
if (i >= 0) {
220
goto do_setcond_const;
221
}
222
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
223
224
static bool fold_xor(OptContext *ctx, TCGOp *op)
225
{
226
- if (fold_const2(ctx, op) ||
227
+ if (fold_const2_commutative(ctx, op) ||
228
fold_xx_to_i(ctx, op, 0) ||
229
fold_xi_to_x(ctx, op, 0) ||
230
fold_xi_to_not(ctx, op, -1)) {
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
ctx.type = TCG_TYPE_I32;
233
}
234
235
- /* For commutative operations make constant second argument */
236
- switch (opc) {
237
- CASE_OP_32_64_VEC(add):
238
- CASE_OP_32_64_VEC(mul):
239
- CASE_OP_32_64_VEC(and):
240
- CASE_OP_32_64_VEC(or):
241
- CASE_OP_32_64_VEC(xor):
242
- CASE_OP_32_64(eqv):
243
- CASE_OP_32_64(nand):
244
- CASE_OP_32_64(nor):
245
- CASE_OP_32_64(muluh):
246
- CASE_OP_32_64(mulsh):
247
- swap_commutative(op->args[0], &op->args[1], &op->args[2]);
248
- break;
249
- CASE_OP_32_64(brcond):
250
- if (swap_commutative(-1, &op->args[0], &op->args[1])) {
251
- op->args[2] = tcg_swap_cond(op->args[2]);
252
- }
253
- break;
254
- CASE_OP_32_64(setcond):
255
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
256
- op->args[3] = tcg_swap_cond(op->args[3]);
257
- }
258
- break;
259
- CASE_OP_32_64(movcond):
260
- if (swap_commutative(-1, &op->args[1], &op->args[2])) {
261
- op->args[5] = tcg_swap_cond(op->args[5]);
262
- }
263
- /* For movcond, we canonicalize the "false" input reg to match
264
- the destination reg so that the tcg backend can implement
265
- a "move if true" operation. */
266
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
267
- op->args[5] = tcg_invert_cond(op->args[5]);
268
- }
269
- break;
270
- CASE_OP_32_64(add2):
271
- swap_commutative(op->args[0], &op->args[2], &op->args[4]);
272
- swap_commutative(op->args[1], &op->args[3], &op->args[5]);
273
- break;
274
- CASE_OP_32_64(mulu2):
275
- CASE_OP_32_64(muls2):
276
- swap_commutative(op->args[0], &op->args[2], &op->args[3]);
277
- break;
278
- case INDEX_op_brcond2_i32:
279
- if (swap_commutative2(&op->args[0], &op->args[2])) {
280
- op->args[4] = tcg_swap_cond(op->args[4]);
281
- }
282
- break;
283
- case INDEX_op_setcond2_i32:
284
- if (swap_commutative2(&op->args[1], &op->args[3])) {
285
- op->args[5] = tcg_swap_cond(op->args[5]);
286
- }
287
- break;
288
- default:
289
- break;
290
- }
291
-
292
/* Assume all bits affected, and no bits known zero. */
293
ctx.a_mask = -1;
294
ctx.z_mask = -1;
56
--
295
--
57
2.25.1
296
2.25.1
58
297
59
298
1
Move the call out of the N versions of alloc_code_gen_buffer
1
This "garbage" setting pre-dates the addition of the type
2
and into tcg_region_init.
2
changing opcodes INDEX_op_ext_i32_i64, INDEX_op_extu_i32_i64,
3
and INDEX_op_extr{l,h}_i64_i32.
4
5
So now we have definitive points at which to adjust z_mask
6
to eliminate such bits from the 32-bit operands.
3
7
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
11
---
8
tcg/region.c | 14 +++++++-------
12
tcg/optimize.c | 35 ++++++++++++++++-------------------
9
1 file changed, 7 insertions(+), 7 deletions(-)
13
1 file changed, 16 insertions(+), 19 deletions(-)
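The (int32_t) casts below maintain one invariant: a 32-bit value, and any mask derived from it, is kept sign-extended when stored in a 64-bit slot, matching how constants are represented elsewhere in the optimizer. A tiny illustration of that canonical form (not QEMU code):

#include <stdint.h>
#include <assert.h>

/* Canonicalize a 32-bit quantity held in a 64-bit slot: the upper
 * 32 bits must be a copy of bit 31. */
static uint64_t canon_i32(uint64_t v)
{
    return (uint64_t)(int64_t)(int32_t)v;
}

int main(void)
{
    /* 0x80000000 as a 32-bit value: its canonical 64-bit form has the
     * high 32 bits set, so masks computed from it stay consistent. */
    assert(canon_i32(0x80000000u) == 0xffffffff80000000ull);
    /* Values with bit 31 clear are unchanged. */
    assert(canon_i32(0x7fffffffu) == 0x7fffffffull);
    return 0;
}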
10
14
11
diff --git a/tcg/region.c b/tcg/region.c
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/region.c
17
--- a/tcg/optimize.c
14
+++ b/tcg/region.c
18
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
19
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
16
error_setg_errno(errp, errno, "mprotect of jit buffer");
20
ti->is_const = true;
17
return false;
21
ti->val = ts->val;
18
}
22
ti->z_mask = ts->val;
19
- qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
23
- if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
20
24
- /* High bits of a 32-bit quantity are garbage. */
21
region.start_aligned = buf;
25
- ti->z_mask |= ~0xffffffffull;
22
region.total_size = size;
26
- }
23
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_anon(size_t size, int prot,
27
} else {
24
}
28
ti->is_const = false;
25
#endif
29
ti->z_mask = -1;
26
30
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
27
- /* Request large pages for the buffer. */
31
TCGTemp *src_ts = arg_temp(src);
28
- qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
32
TempOptInfo *di;
29
-
33
TempOptInfo *si;
30
region.start_aligned = buf;
34
- uint64_t z_mask;
31
region.total_size = size;
35
TCGOpcode new_op;
32
return prot;
36
33
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
37
if (ts_are_copies(dst_ts, src_ts)) {
34
region.total_size = size;
38
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
35
tcg_splitwx_diff = buf_rx - buf_rw;
39
op->args[0] = dst;
36
40
op->args[1] = src;
37
- /* Request large pages for the buffer and the splitwx. */
41
38
- qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
42
- z_mask = si->z_mask;
39
- qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
43
- if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
40
return PROT_READ | PROT_WRITE;
44
- /* High bits of the destination are now garbage. */
41
45
- z_mask |= ~0xffffffffull;
42
fail_rx:
46
- }
43
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
47
- di->z_mask = z_mask;
44
splitwx, &error_fatal);
48
+ di->z_mask = si->z_mask;
45
assert(have_prot >= 0);
49
46
50
if (src_ts->type == dst_ts->type) {
47
+ /* Request large pages for the buffer and the splitwx. */
51
TempOptInfo *ni = ts_info(si->next_copy);
48
+ qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
52
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
49
+ if (tcg_splitwx_diff) {
53
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
50
+ qemu_madvise(region.start_aligned + tcg_splitwx_diff,
54
TCGArg dst, uint64_t val)
51
+ region.total_size, QEMU_MADV_HUGEPAGE);
55
{
56
- /* Convert movi to mov with constant temp. */
57
- TCGTemp *tv = tcg_constant_internal(ctx->type, val);
58
+ TCGTemp *tv;
59
60
+ if (ctx->type == TCG_TYPE_I32) {
61
+ val = (int32_t)val;
52
+ }
62
+ }
53
+
63
+
64
+ /* Convert movi to mov with constant temp. */
65
+ tv = tcg_constant_internal(ctx->type, val);
66
init_ts_info(ctx, tv);
67
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
68
}
69
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
70
uint64_t z_mask = ctx->z_mask;
71
54
/*
72
/*
55
* Make region_size a multiple of page_size, using aligned as the start.
73
- * 32-bit ops generate 32-bit results. For the result is zero test
56
* As a result of this we might end up with a few extra pages at the end of
74
- * below, we can ignore high bits, but for further optimizations we
75
- * need to record that the high bits contain garbage.
76
+ * 32-bit ops generate 32-bit results, which for the purpose of
77
+ * simplifying tcg are sign-extended. Certainly that's how we
78
+ * represent our constants elsewhere. Note that the bits will
79
+ * be reset properly for a 64-bit value when encountering the
80
+ * type changing opcodes.
81
*/
82
if (ctx->type == TCG_TYPE_I32) {
83
- ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
84
- a_mask &= MAKE_64BIT_MASK(0, 32);
85
- z_mask &= MAKE_64BIT_MASK(0, 32);
86
+ a_mask = (int32_t)a_mask;
87
+ z_mask = (int32_t)z_mask;
88
+ ctx->z_mask = z_mask;
89
}
90
91
if (z_mask == 0) {
57
--
92
--
58
2.25.1
93
2.25.1
59
94
60
95
New patch
1
Recognize the constant function for or-complement.
1
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 1 +
9
1 file changed, 1 insertion(+)
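This patch and the three that follow each encode one small algebraic identity. As a sanity check of those identities on ordinary integers (illustrative only, not QEMU code):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint64_t x = 0x1234abcd5678ef01ull;

    assert((x | ~x) == UINT64_MAX);   /* orc(x, x) folds to the constant -1 */
    assert(x * 1 == x);               /* mul by 1 is the identity */
    assert(x / 1 == x);               /* div by 1 is the identity */
    assert(x % x == 0);               /* rem(x, x) folds to the constant 0 */
    return 0;
}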
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
16
static bool fold_orc(OptContext *ctx, TCGOp *op)
17
{
18
if (fold_const2(ctx, op) ||
19
+ fold_xx_to_i(ctx, op, -1) ||
20
fold_xi_to_x(ctx, op, -1) ||
21
fold_ix_to_not(ctx, op, 0)) {
22
return true;
23
--
24
2.25.1
25
26
1
From: "Jose R. Ziviani" <jziviani@suse.de>
1
Recognize the identity function for low-part multiply.
2
2
3
Commit 5e8892db93 fixed several function signatures but tcg_out_op for
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
4
arm is missing. This patch fixes it as well.
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Jose R. Ziviani <jziviani@suse.de>
7
Message-Id: <20210610224450.23425-1-jziviani@suse.de>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
tcg/arm/tcg-target.c.inc | 3 ++-
8
tcg/optimize.c | 3 ++-
11
1 file changed, 2 insertions(+), 1 deletion(-)
9
1 file changed, 2 insertions(+), 1 deletion(-)
12
10
13
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/arm/tcg-target.c.inc
13
--- a/tcg/optimize.c
16
+++ b/tcg/arm/tcg-target.c.inc
14
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
18
static void tcg_out_epilogue(TCGContext *s);
16
static bool fold_mul(OptContext *ctx, TCGOp *op)
19
20
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
21
- const TCGArg *args, const int *const_args)
22
+ const TCGArg args[TCG_MAX_OP_ARGS],
23
+ const int const_args[TCG_MAX_OP_ARGS])
24
{
17
{
25
TCGArg a0, a1, a2, a3, a4, a5;
18
if (fold_const2(ctx, op) ||
26
int c;
19
- fold_xi_to_i(ctx, op, 0)) {
20
+ fold_xi_to_i(ctx, op, 0) ||
21
+ fold_xi_to_x(ctx, op, 1)) {
22
return true;
23
}
24
return false;
27
--
25
--
28
2.25.1
26
2.25.1
29
27
30
28
New patch
1
Recognize the identity function for division.
1
2
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 6 +++++-
9
1 file changed, 5 insertions(+), 1 deletion(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
16
17
static bool fold_divide(OptContext *ctx, TCGOp *op)
18
{
19
- return fold_const2(ctx, op);
20
+ if (fold_const2(ctx, op) ||
21
+ fold_xi_to_x(ctx, op, 1)) {
22
+ return true;
23
+ }
24
+ return false;
25
}
26
27
static bool fold_dup(OptContext *ctx, TCGOp *op)
28
--
29
2.25.1
30
31
New patch
1
Recognize the constant function for remainder.
1
2
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 +++++-
8
1 file changed, 5 insertions(+), 1 deletion(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
15
16
static bool fold_remainder(OptContext *ctx, TCGOp *op)
17
{
18
- return fold_const2(ctx, op);
19
+ if (fold_const2(ctx, op) ||
20
+ fold_xx_to_i(ctx, op, 0)) {
21
+ return true;
22
+ }
23
+ return false;
24
}
25
26
static bool fold_setcond(OptContext *ctx, TCGOp *op)
27
--
28
2.25.1
29
30
1
Do not handle protections on a case-by-case basis in the
1
Certain targets, like riscv, produce signed 32-bit results.
2
various alloc_code_gen_buffer instances; do it within a
2
This can lead to lots of redundant extensions as values are
3
single loop in tcg_region_init.
3
manipulated.
4
5
Begin by tracking only the obvious sign-extensions, and
6
converting them to simple copies when possible.
4
7
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
11
---
9
tcg/region.c | 45 +++++++++++++++++++++++++++++++--------------
12
tcg/optimize.c | 123 ++++++++++++++++++++++++++++++++++++++++---------
10
1 file changed, 31 insertions(+), 14 deletions(-)
13
1 file changed, 102 insertions(+), 21 deletions(-)
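The new s_mask below is a left-aligned run of bits that are guaranteed to equal the sign bit. Its derivation from a known-zeros mask can be restated outside QEMU as follows; clz64 here is a portable stand-in for the helper in qemu/host-utils.h, and the whole thing is a sketch rather than the real code:

#include <stdint.h>
#include <assert.h>

/* Count leading zeros (64 for v == 0). */
static int clz64(uint64_t v)
{
    int n = 0;
    if (v == 0) {
        return 64;
    }
    while (!(v & (1ull << 63))) {
        v <<= 1;
        n++;
    }
    return n;
}

/* If the top clz bits are known zero, the value is non-negative and at
 * least clz-1 bits below the sign bit are guaranteed copies of it. */
static uint64_t smask_from_zmask(uint64_t zmask)
{
    int rep = clz64(zmask);
    if (rep == 0) {
        return 0;
    }
    rep -= 1;
    return ~(~0ull >> rep);
}

int main(void)
{
    /* A zero-extended 16-bit load: bits 16..63 known zero. */
    assert(smask_from_zmask(0xffff) == ~(~0ull >> 47));

    /* No knowledge about the msb means no sign information at all. */
    assert(smask_from_zmask(~0ull) == 0);
    return 0;
}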
11
14
12
diff --git a/tcg/region.c b/tcg/region.c
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/region.c
17
--- a/tcg/optimize.c
15
+++ b/tcg/region.c
18
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
19
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
17
}
20
TCGTemp *next_copy;
18
#endif
21
uint64_t val;
19
22
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
20
- if (qemu_mprotect_rwx(buf, size)) {
23
+ uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
21
- error_setg_errno(errp, errno, "mprotect of jit buffer");
24
} TempOptInfo;
22
- return false;
25
26
typedef struct OptContext {
27
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
28
/* In flight values from optimization. */
29
uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
30
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
31
+ uint64_t s_mask; /* mask of clrsb(value) bits */
32
TCGType type;
33
} OptContext;
34
35
+/* Calculate the smask for a specific value. */
36
+static uint64_t smask_from_value(uint64_t value)
37
+{
38
+ int rep = clrsb64(value);
39
+ return ~(~0ull >> rep);
40
+}
41
+
42
+/*
43
+ * Calculate the smask for a given set of known-zeros.
44
+ * If there are lots of zeros on the left, we can consider the remainder
45
+ * an unsigned field, and thus the corresponding signed field is one bit
46
+ * larger.
47
+ */
48
+static uint64_t smask_from_zmask(uint64_t zmask)
49
+{
50
+ /*
51
+ * Only the 0 bits are significant for zmask, thus the msb itself
52
+ * must be zero, else we have no sign information.
53
+ */
54
+ int rep = clz64(zmask);
55
+ if (rep == 0) {
56
+ return 0;
57
+ }
58
+ rep -= 1;
59
+ return ~(~0ull >> rep);
60
+}
61
+
62
static inline TempOptInfo *ts_info(TCGTemp *ts)
63
{
64
return ts->state_ptr;
65
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
66
ti->prev_copy = ts;
67
ti->is_const = false;
68
ti->z_mask = -1;
69
+ ti->s_mask = 0;
70
}
71
72
static void reset_temp(TCGArg arg)
73
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
74
ti->is_const = true;
75
ti->val = ts->val;
76
ti->z_mask = ts->val;
77
+ ti->s_mask = smask_from_value(ts->val);
78
} else {
79
ti->is_const = false;
80
ti->z_mask = -1;
81
+ ti->s_mask = 0;
82
}
83
}
84
85
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
86
op->args[1] = src;
87
88
di->z_mask = si->z_mask;
89
+ di->s_mask = si->s_mask;
90
91
if (src_ts->type == dst_ts->type) {
92
TempOptInfo *ni = ts_info(si->next_copy);
93
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
94
95
nb_oargs = def->nb_oargs;
96
for (i = 0; i < nb_oargs; i++) {
97
- reset_temp(op->args[i]);
98
+ TCGTemp *ts = arg_temp(op->args[i]);
99
+ reset_ts(ts);
100
/*
101
- * Save the corresponding known-zero bits mask for the
102
+ * Save the corresponding known-zero/sign bits mask for the
103
* first output argument (only one supported so far).
104
*/
105
if (i == 0) {
106
- arg_info(op->args[i])->z_mask = ctx->z_mask;
107
+ ts_info(ts)->z_mask = ctx->z_mask;
108
+ ts_info(ts)->s_mask = ctx->s_mask;
109
}
110
}
111
}
112
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
113
{
114
uint64_t a_mask = ctx->a_mask;
115
uint64_t z_mask = ctx->z_mask;
116
+ uint64_t s_mask = ctx->s_mask;
117
118
/*
119
* 32-bit ops generate 32-bit results, which for the purpose of
120
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
121
if (ctx->type == TCG_TYPE_I32) {
122
a_mask = (int32_t)a_mask;
123
z_mask = (int32_t)z_mask;
124
+ s_mask |= MAKE_64BIT_MASK(32, 32);
125
ctx->z_mask = z_mask;
126
+ ctx->s_mask = s_mask;
127
}
128
129
if (z_mask == 0) {
130
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
131
132
static bool fold_bswap(OptContext *ctx, TCGOp *op)
133
{
134
- uint64_t z_mask, sign;
135
+ uint64_t z_mask, s_mask, sign;
136
137
if (arg_is_const(op->args[1])) {
138
uint64_t t = arg_info(op->args[1])->val;
139
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
140
}
141
142
z_mask = arg_info(op->args[1])->z_mask;
143
+
144
switch (op->opc) {
145
case INDEX_op_bswap16_i32:
146
case INDEX_op_bswap16_i64:
147
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
148
default:
149
g_assert_not_reached();
150
}
151
+ s_mask = smask_from_zmask(z_mask);
152
153
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
154
case TCG_BSWAP_OZ:
155
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
156
/* If the sign bit may be 1, force all the bits above to 1. */
157
if (z_mask & sign) {
158
z_mask |= sign;
159
+ s_mask = sign << 1;
160
}
161
break;
162
default:
163
/* The high bits are undefined: force all bits above the sign to 1. */
164
z_mask |= sign << 1;
165
+ s_mask = 0;
166
break;
167
}
168
ctx->z_mask = z_mask;
169
+ ctx->s_mask = s_mask;
170
171
return fold_masks(ctx, op);
172
}
173
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
174
static bool fold_extract(OptContext *ctx, TCGOp *op)
175
{
176
uint64_t z_mask_old, z_mask;
177
+ int pos = op->args[2];
178
+ int len = op->args[3];
179
180
if (arg_is_const(op->args[1])) {
181
uint64_t t;
182
183
t = arg_info(op->args[1])->val;
184
- t = extract64(t, op->args[2], op->args[3]);
185
+ t = extract64(t, pos, len);
186
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
187
}
188
189
z_mask_old = arg_info(op->args[1])->z_mask;
190
- z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
191
- if (op->args[2] == 0) {
192
+ z_mask = extract64(z_mask_old, pos, len);
193
+ if (pos == 0) {
194
ctx->a_mask = z_mask_old ^ z_mask;
195
}
196
ctx->z_mask = z_mask;
197
+ ctx->s_mask = smask_from_zmask(z_mask);
198
199
return fold_masks(ctx, op);
200
}
201
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
202
203
static bool fold_exts(OptContext *ctx, TCGOp *op)
204
{
205
- uint64_t z_mask_old, z_mask, sign;
206
+ uint64_t s_mask_old, s_mask, z_mask, sign;
207
bool type_change = false;
208
209
if (fold_const1(ctx, op)) {
210
return true;
211
}
212
213
- z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
214
+ z_mask = arg_info(op->args[1])->z_mask;
215
+ s_mask = arg_info(op->args[1])->s_mask;
216
+ s_mask_old = s_mask;
217
218
switch (op->opc) {
219
CASE_OP_32_64(ext8s):
220
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
221
222
if (z_mask & sign) {
223
z_mask |= sign;
224
- } else if (!type_change) {
225
- ctx->a_mask = z_mask_old ^ z_mask;
226
}
227
+ s_mask |= sign << 1;
228
+
229
ctx->z_mask = z_mask;
230
+ ctx->s_mask = s_mask;
231
+ if (!type_change) {
232
+ ctx->a_mask = s_mask & ~s_mask_old;
233
+ }
234
235
return fold_masks(ctx, op);
236
}
237
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
238
}
239
240
ctx->z_mask = z_mask;
241
+ ctx->s_mask = smask_from_zmask(z_mask);
242
if (!type_change) {
243
ctx->a_mask = z_mask_old ^ z_mask;
244
}
245
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
246
MemOp mop = get_memop(oi);
247
int width = 8 * memop_size(mop);
248
249
- if (!(mop & MO_SIGN) && width < 64) {
250
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
251
+ if (width < 64) {
252
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
253
+ if (!(mop & MO_SIGN)) {
254
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
255
+ ctx->s_mask <<= 1;
256
+ }
257
}
258
259
/* Opcodes that touch guest memory stop the mb optimization. */
260
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
261
262
static bool fold_sextract(OptContext *ctx, TCGOp *op)
263
{
264
- int64_t z_mask_old, z_mask;
265
+ uint64_t z_mask, s_mask, s_mask_old;
266
+ int pos = op->args[2];
267
+ int len = op->args[3];
268
269
if (arg_is_const(op->args[1])) {
270
uint64_t t;
271
272
t = arg_info(op->args[1])->val;
273
- t = sextract64(t, op->args[2], op->args[3]);
274
+ t = sextract64(t, pos, len);
275
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
276
}
277
278
- z_mask_old = arg_info(op->args[1])->z_mask;
279
- z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
280
- if (op->args[2] == 0 && z_mask >= 0) {
281
- ctx->a_mask = z_mask_old ^ z_mask;
23
- }
282
- }
24
-
283
+ z_mask = arg_info(op->args[1])->z_mask;
25
region.start_aligned = buf;
284
+ z_mask = sextract64(z_mask, pos, len);
26
region.total_size = size;
285
ctx->z_mask = z_mask;
27
286
28
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
287
+ s_mask_old = arg_info(op->args[1])->s_mask;
29
{
288
+ s_mask = sextract64(s_mask_old, pos, len);
30
const size_t page_size = qemu_real_host_page_size;
289
+ s_mask |= MAKE_64BIT_MASK(len, 64 - len);
31
size_t region_size;
290
+ ctx->s_mask = s_mask;
32
- size_t i;
291
+
33
- int have_prot;
292
+ if (pos == 0) {
34
+ int have_prot, need_prot;
293
+ ctx->a_mask = s_mask & ~s_mask_old;
35
36
/* Size the buffer. */
37
if (tb_size == 0) {
38
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
39
* Set guard pages in the rw buffer, as that's the one into which
40
* buffer overruns could occur. Do not set guard pages in the rx
41
* buffer -- let that one use hugepages throughout.
42
+ * Work with the page protections set up with the initial mapping.
43
*/
44
- for (i = 0; i < region.n; i++) {
45
+ need_prot = PAGE_READ | PAGE_WRITE;
46
+#ifndef CONFIG_TCG_INTERPRETER
47
+ if (tcg_splitwx_diff == 0) {
48
+ need_prot |= PAGE_EXEC;
49
+ }
294
+ }
50
+#endif
295
+
51
+ for (size_t i = 0, n = region.n; i < n; i++) {
296
return fold_masks(ctx, op);
52
void *start, *end;
297
}
53
298
54
tcg_region_bounds(i, &start, &end);
299
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
55
+ if (have_prot != need_prot) {
300
{
56
+ int rc;
301
/* We can't do any folding with a load, but we can record bits. */
57
302
switch (op->opc) {
58
- /*
303
+ CASE_OP_32_64(ld8s):
59
- * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
304
+ ctx->s_mask = MAKE_64BIT_MASK(8, 56);
60
- * rejects a permission change from RWX -> NONE. Guard pages are
305
+ break;
61
- * nice for bug detection but are not essential; ignore any failure.
306
CASE_OP_32_64(ld8u):
62
- */
307
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
63
- (void)qemu_mprotect_none(end, page_size);
308
+ ctx->s_mask = MAKE_64BIT_MASK(9, 55);
64
+ if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
309
+ break;
65
+ rc = qemu_mprotect_rwx(start, end - start);
310
+ CASE_OP_32_64(ld16s):
66
+ } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
311
+ ctx->s_mask = MAKE_64BIT_MASK(16, 48);
67
+ rc = qemu_mprotect_rw(start, end - start);
312
break;
68
+ } else {
313
CASE_OP_32_64(ld16u):
69
+ g_assert_not_reached();
314
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
70
+ }
315
+ ctx->s_mask = MAKE_64BIT_MASK(17, 47);
71
+ if (rc) {
316
+ break;
72
+ error_setg_errno(&error_fatal, errno,
317
+ case INDEX_op_ld32s_i64:
73
+ "mprotect of jit buffer");
318
+ ctx->s_mask = MAKE_64BIT_MASK(32, 32);
74
+ }
319
break;
75
+ }
320
case INDEX_op_ld32u_i64:
76
+ if (have_prot != 0) {
321
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
77
+ /*
322
+ ctx->s_mask = MAKE_64BIT_MASK(33, 31);
78
+ * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
323
break;
79
+ * rejects a permission change from RWX -> NONE. Guard pages are
324
default:
80
+ * nice for bug detection but are not essential; ignore any failure.
325
g_assert_not_reached();
81
+ */
326
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
82
+ (void)qemu_mprotect_none(end, page_size);
327
ctx.type = TCG_TYPE_I32;
83
+ }
328
}
84
}
329
85
330
- /* Assume all bits affected, and no bits known zero. */
86
tcg_region_trees_init();
331
+ /* Assume all bits affected, no bits known zero, no sign reps. */
332
ctx.a_mask = -1;
333
ctx.z_mask = -1;
334
+ ctx.s_mask = 0;
335
336
/*
337
* Process each opcode.
338
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
339
case INDEX_op_extrh_i64_i32:
340
done = fold_extu(&ctx, op);
341
break;
342
+ CASE_OP_32_64(ld8s):
343
CASE_OP_32_64(ld8u):
344
+ CASE_OP_32_64(ld16s):
345
CASE_OP_32_64(ld16u):
346
+ case INDEX_op_ld32s_i64:
347
case INDEX_op_ld32u_i64:
348
done = fold_tcg_ld(&ctx, op);
349
break;
87
--
350
--
88
2.25.1
351
2.25.1
89
352
90
353
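To make the s_mask encoding in the patch above concrete, here is a standalone check (not QEMU code, using the GCC/Clang builtin in place of clz64) of smask_from_zmask(): a value with known-zero mask 0xff has at least 55 redundant copies of its (zero) sign bit, matching the MAKE_64BIT_MASK(9, 55) the patch uses for ld8u.

/* Standalone check, not QEMU code: s_mask derived from a known-zeros mask. */
#include <assert.h>
#include <stdint.h>

static uint64_t smask_from_zmask_sketch(uint64_t zmask)
{
    /* Assumes zmask != 0; QEMU's clz64() additionally defines clz64(0) == 64. */
    int rep = __builtin_clzll(zmask);
    if (rep == 0) {
        return 0;                     /* msb may be set: no sign information */
    }
    rep -= 1;
    return ~(~0ull >> rep);           /* left-aligned mask of rep bits */
}

int main(void)
{
    assert(smask_from_zmask_sketch(0xff)   == 0xfffffffffffffe00ull); /* ld8u  */
    assert(smask_from_zmask_sketch(0xffff) == 0xfffffffffffe0000ull); /* ld16u */
    return 0;
}
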
1
Typo in the conversion to FloatParts64.
1
Sign repetitions are perforce all identical, whether they are 1 or 0.
2
Bitwise operations preserve the relative quantity of the repetitions.
2
3
3
Fixes: 572c4d862ff2
4
Fixes: Coverity CID 1457457
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Message-Id: <20210607223812.110596-1-richard.henderson@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
8
---
10
fpu/softfloat.c | 2 +-
9
tcg/optimize.c | 29 +++++++++++++++++++++++++++++
11
1 file changed, 1 insertion(+), 1 deletion(-)
10
1 file changed, 29 insertions(+)
12
11
13
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
15
--- a/fpu/softfloat.c
14
--- a/tcg/optimize.c
16
+++ b/fpu/softfloat.c
15
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
16
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
18
17
z2 = arg_info(op->args[2])->z_mask;
19
float_raise(float_flag_inexact, status);
18
ctx->z_mask = z1 & z2;
20
19
21
- float64_unpack_canonical(&xnp, float64_ln2, status);
20
+ /*
22
+ float64_unpack_canonical(&tp, float64_ln2, status);
21
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
23
xp = *parts_mul(&xp, &tp, status);
22
+ * Bitwise operations preserve the relative quantity of the repetitions.
24
xnp = xp;
23
+ */
24
+ ctx->s_mask = arg_info(op->args[1])->s_mask
25
+ & arg_info(op->args[2])->s_mask;
26
+
27
/*
28
* Known-zeros does not imply known-ones. Therefore unless
29
* arg2 is constant, we can't infer affected bits from it.
30
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
31
}
32
ctx->z_mask = z1;
33
34
+ ctx->s_mask = arg_info(op->args[1])->s_mask
35
+ & arg_info(op->args[2])->s_mask;
36
return fold_masks(ctx, op);
37
}
38
39
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
40
fold_xi_to_not(ctx, op, 0)) {
41
return true;
42
}
43
+
44
+ ctx->s_mask = arg_info(op->args[1])->s_mask
45
+ & arg_info(op->args[2])->s_mask;
46
return false;
47
}
48
49
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
50
51
ctx->z_mask = arg_info(op->args[3])->z_mask
52
| arg_info(op->args[4])->z_mask;
53
+ ctx->s_mask = arg_info(op->args[3])->s_mask
54
+ & arg_info(op->args[4])->s_mask;
55
56
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
57
uint64_t tv = arg_info(op->args[3])->val;
58
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
59
fold_xi_to_not(ctx, op, -1)) {
60
return true;
61
}
62
+
63
+ ctx->s_mask = arg_info(op->args[1])->s_mask
64
+ & arg_info(op->args[2])->s_mask;
65
return false;
66
}
67
68
@@ -XXX,XX +XXX,XX @@ static bool fold_nor(OptContext *ctx, TCGOp *op)
69
fold_xi_to_not(ctx, op, 0)) {
70
return true;
71
}
72
+
73
+ ctx->s_mask = arg_info(op->args[1])->s_mask
74
+ & arg_info(op->args[2])->s_mask;
75
return false;
76
}
77
78
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
79
return true;
80
}
81
82
+ ctx->s_mask = arg_info(op->args[1])->s_mask;
83
+
84
/* Because of fold_to_not, we want to always return true, via finish. */
85
finish_folding(ctx, op);
86
return true;
87
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
88
89
ctx->z_mask = arg_info(op->args[1])->z_mask
90
| arg_info(op->args[2])->z_mask;
91
+ ctx->s_mask = arg_info(op->args[1])->s_mask
92
+ & arg_info(op->args[2])->s_mask;
93
return fold_masks(ctx, op);
94
}
95
96
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
97
fold_ix_to_not(ctx, op, 0)) {
98
return true;
99
}
100
+
101
+ ctx->s_mask = arg_info(op->args[1])->s_mask
102
+ & arg_info(op->args[2])->s_mask;
103
return false;
104
}
105
106
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
107
108
ctx->z_mask = arg_info(op->args[1])->z_mask
109
| arg_info(op->args[2])->z_mask;
110
+ ctx->s_mask = arg_info(op->args[1])->s_mask
111
+ & arg_info(op->args[2])->s_mask;
112
return fold_masks(ctx, op);
113
}
25
114
26
--
115
--
27
2.25.1
116
2.25.1
28
117
29
118
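A standalone illustration (not part of the patch) of why intersecting the two operands' s_mask is safe for bitwise ops: the intersection of two left-aligned sign masks is simply the smaller one, i.e. the weaker of the two sign-repetition guarantees, and the bitwise result still honours it.

/* Standalone sketch, not QEMU code. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    int64_t a = -112;                   /* 0xffffffffffffff90: sign-extended from 8 bits  */
    int64_t b = 0x1234;                 /* sign-extended (trivially) from 16 bits          */

    uint64_t s_a = ~0ull << 8;          /* a: at least 56 sign-bit copies */
    uint64_t s_b = ~0ull << 16;         /* b: at least 48 sign-bit copies */
    uint64_t s_r = s_a & s_b;           /* result keeps only the weaker guarantee */
    assert(s_r == (~0ull << 16));

    /* a & b is indeed still correctly sign-extended from bit 15. */
    int64_t r = a & b;                  /* 0x1210 */
    assert(((r << 48) >> 48) == r);     /* assumes arithmetic >> on int64_t */
    return 0;
}
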
1
Change the interface from a boolean error indication to a
1
The result is either 0 or 1, which means that we have
2
negative error vs a non-negative protection. For the moment
2
a 2 bit signed result, and thus 62 bits of sign.
3
this is only an interface change, not making use of the new data.
3
For clarity, use the smask_from_zmask function.
4
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
tcg/region.c | 63 +++++++++++++++++++++++++++-------------------------
9
tcg/optimize.c | 2 ++
10
1 file changed, 33 insertions(+), 30 deletions(-)
10
1 file changed, 2 insertions(+)
11
11
12
diff --git a/tcg/region.c b/tcg/region.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/region.c
14
--- a/tcg/optimize.c
15
+++ b/tcg/region.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static inline void split_cross_256mb(void **obuf, size_t *osize,
16
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
17
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
18
__attribute__((aligned(CODE_GEN_ALIGN)));
19
20
-static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
21
+static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
22
{
23
void *buf, *end;
24
size_t size;
25
26
if (splitwx > 0) {
27
error_setg(errp, "jit split-wx not supported");
28
- return false;
29
+ return -1;
30
}
17
}
31
18
32
/* page-align the beginning and end of the buffer */
19
ctx->z_mask = 1;
33
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
20
+ ctx->s_mask = smask_from_zmask(1);
34
21
return false;
35
region.start_aligned = buf;
36
region.total_size = size;
37
- return true;
38
+
39
+ return PROT_READ | PROT_WRITE;
40
}
22
}
41
#elif defined(_WIN32)
23
42
-static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
43
+static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
44
{
45
void *buf;
46
47
if (splitwx > 0) {
48
error_setg(errp, "jit split-wx not supported");
49
- return false;
50
+ return -1;
51
}
25
}
52
26
53
buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
27
ctx->z_mask = 1;
54
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
28
+ ctx->s_mask = smask_from_zmask(1);
55
29
return false;
56
region.start_aligned = buf;
30
57
region.total_size = size;
31
do_setcond_const:
58
- return true;
59
+
60
+ return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
61
}
62
#else
63
-static bool alloc_code_gen_buffer_anon(size_t size, int prot,
64
- int flags, Error **errp)
65
+static int alloc_code_gen_buffer_anon(size_t size, int prot,
66
+ int flags, Error **errp)
67
{
68
void *buf;
69
70
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
71
if (buf == MAP_FAILED) {
72
error_setg_errno(errp, errno,
73
"allocate %zu bytes for jit buffer", size);
74
- return false;
75
+ return -1;
76
}
77
78
#ifdef __mips__
79
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
80
81
region.start_aligned = buf;
82
region.total_size = size;
83
- return true;
84
+ return prot;
85
}
86
87
#ifndef CONFIG_TCG_INTERPRETER
88
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
89
90
#ifdef __mips__
91
/* Find space for the RX mapping, vs the 256MiB regions. */
92
- if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
93
- MAP_PRIVATE | MAP_ANONYMOUS |
94
- MAP_NORESERVE, errp)) {
95
+ if (alloc_code_gen_buffer_anon(size, PROT_NONE,
96
+ MAP_PRIVATE | MAP_ANONYMOUS |
97
+ MAP_NORESERVE, errp) < 0) {
98
return false;
99
}
100
/* The size of the mapping may have been adjusted. */
101
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
102
/* Request large pages for the buffer and the splitwx. */
103
qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
104
qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
105
- return true;
106
+ return PROT_READ | PROT_WRITE;
107
108
fail_rx:
109
error_setg_errno(errp, errno, "failed to map shared memory for execute");
110
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
111
if (fd >= 0) {
112
close(fd);
113
}
114
- return false;
115
+ return -1;
116
}
117
#endif /* CONFIG_POSIX */
118
119
@@ -XXX,XX +XXX,XX @@ extern kern_return_t mach_vm_remap(vm_map_t target_task,
120
vm_prot_t *max_protection,
121
vm_inherit_t inheritance);
122
123
-static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
124
+static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
125
{
126
kern_return_t ret;
127
mach_vm_address_t buf_rw, buf_rx;
128
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
129
/* Map the read-write portion via normal anon memory. */
130
if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
131
MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
132
- return false;
133
+ return -1;
134
}
135
136
buf_rw = region.start_aligned;
137
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
138
/* TODO: Convert "ret" to a human readable error message. */
139
error_setg(errp, "vm_remap for jit splitwx failed");
140
munmap((void *)buf_rw, size);
141
- return false;
142
+ return -1;
143
}
144
145
if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
146
error_setg_errno(errp, errno, "mprotect for jit splitwx");
147
munmap((void *)buf_rx, size);
148
munmap((void *)buf_rw, size);
149
- return false;
150
+ return -1;
151
}
152
153
tcg_splitwx_diff = buf_rx - buf_rw;
154
- return true;
155
+ return PROT_READ | PROT_WRITE;
156
}
157
#endif /* CONFIG_DARWIN */
158
#endif /* CONFIG_TCG_INTERPRETER */
159
160
-static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
161
+static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
162
{
163
#ifndef CONFIG_TCG_INTERPRETER
164
# ifdef CONFIG_DARWIN
165
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
166
# endif
167
#endif
168
error_setg(errp, "jit split-wx not supported");
169
- return false;
170
+ return -1;
171
}
172
173
-static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
174
+static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
175
{
176
ERRP_GUARD();
177
int prot, flags;
178
179
if (splitwx) {
180
- if (alloc_code_gen_buffer_splitwx(size, errp)) {
181
- return true;
182
+ prot = alloc_code_gen_buffer_splitwx(size, errp);
183
+ if (prot >= 0) {
184
+ return prot;
185
}
186
/*
187
* If splitwx force-on (1), fail;
188
* if splitwx default-on (-1), fall through to splitwx off.
189
*/
190
if (splitwx > 0) {
191
- return false;
192
+ return -1;
193
}
194
error_free_or_abort(errp);
195
}
196
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
197
size_t page_size;
198
size_t region_size;
199
size_t i;
200
- bool ok;
201
+ int have_prot;
202
203
- ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
204
- splitwx, &error_fatal);
205
- assert(ok);
206
+ have_prot = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
207
+ splitwx, &error_fatal);
208
+ assert(have_prot >= 0);
209
210
/*
211
* Make region_size a multiple of page_size, using aligned as the start.
212
--
32
--
213
2.25.1
33
2.25.1
214
34
215
35
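A standalone check (not QEMU code) of the number quoted in the setcond patch above: with z_mask == 1 the result occupies 2 signed bits, so smask_from_zmask(1) reports 62 sign-bit copies.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t z_mask = 1;                     /* a setcond result is 0 or 1 */
    int rep = __builtin_clzll(z_mask) - 1;   /* 63 leading zeros, minus the sign bit */
    uint64_t s_mask = ~(~0ull >> rep);       /* bits 2..63 */
    assert(rep == 62 && s_mask == ~3ull);
    return 0;
}
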
1
At some point during the development of tcg_constant_*, I changed
1
The results are generally 6 bit unsigned values, though
2
my mind about whether such temps should be able to be passed to
2
the count-leading/trailing-zeros ops may produce any value
3
tcg_temp_free_*. The final version committed allows this, but the
3
for a zero input.
4
commentary was not updated to match.
5
4
6
Fixes: c0522136adf
7
Reported-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
8
---
12
include/tcg/tcg.h | 3 ++-
9
tcg/optimize.c | 3 ++-
13
1 file changed, 2 insertions(+), 1 deletion(-)
10
1 file changed, 2 insertions(+), 1 deletion(-)
14
11
15
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
17
--- a/include/tcg/tcg.h
14
--- a/tcg/optimize.c
18
+++ b/include/tcg/tcg.h
15
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
16
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
20
17
g_assert_not_reached();
21
/*
18
}
22
* Locate or create a read-only temporary that is a constant.
19
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
23
- * This kind of temporary need not and should not be freed.
20
-
24
+ * This kind of temporary need not be freed, but for convenience
21
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
25
+ * will be silently ignored by tcg_temp_free_*.
22
return false;
26
*/
23
}
27
TCGTemp *tcg_constant_internal(TCGType type, int64_t val);
24
25
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
26
default:
27
g_assert_not_reached();
28
}
29
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
30
return false;
31
}
28
32
29
--
33
--
30
2.25.1
34
2.25.1
31
35
32
36
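Illustration only (not part of the patch): the TCG clz/ctz ops take a second operand that is returned verbatim for a zero input, so the result's known-zero mask already ORs in that operand's z_mask; the added line simply derives s_mask from that combined z_mask. A standalone sketch with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t count_zmask    = 0x3f;   /* clz of a nonzero 64-bit value: 0..63 */
    uint64_t fallback_zmask = 64;     /* e.g. a constant 64 returned for input 0 */
    uint64_t z_mask = count_zmask | fallback_zmask;   /* 0x7f */

    int rep = __builtin_clzll(z_mask) - 1;            /* 56 */
    uint64_t s_mask = ~(~0ull >> rep);                /* bits 8..63 */
    assert(z_mask == 0x7f && s_mask == ~0xffull);
    return 0;
}
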
1
This has only one user, and currently needs an ifdef,
1
For constant shifts, we can simply shift the s_mask.
2
but will make more sense after some code motion.
3
2
3
For variable shifts, we know that sar does not reduce
4
the s_mask, which helps for sequences like
5
6
ext32s_i64 t, in
7
sar_i64 t, t, v
8
ext32s_i64 out, t
9
10
allowing the final extend to be eliminated.
11
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
13
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
15
---
8
tcg/tcg.c | 13 ++++++++++---
16
tcg/optimize.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++---
9
1 file changed, 10 insertions(+), 3 deletions(-)
17
1 file changed, 47 insertions(+), 3 deletions(-)
10
18
11
diff --git a/tcg/tcg.c b/tcg/tcg.c
19
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/tcg.c
21
--- a/tcg/optimize.c
14
+++ b/tcg/tcg.c
22
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void tcg_region_initial_alloc__locked(TCGContext *s)
23
@@ -XXX,XX +XXX,XX @@ static uint64_t smask_from_zmask(uint64_t zmask)
16
g_assert(!err);
24
return ~(~0ull >> rep);
17
}
25
}
18
26
19
+#ifndef CONFIG_USER_ONLY
27
+/*
20
+static void tcg_region_initial_alloc(TCGContext *s)
28
+ * Recreate a properly left-aligned smask after manipulation.
29
+ * Some bit-shuffling, particularly shifts and rotates, may
30
+ * retain sign bits on the left, but may scatter disconnected
31
+ * sign bits on the right. Retain only what remains to the left.
32
+ */
33
+static uint64_t smask_from_smask(int64_t smask)
21
+{
34
+{
22
+ qemu_mutex_lock(&region.lock);
35
+ /* Only the 1 bits are significant for smask */
23
+ tcg_region_initial_alloc__locked(s);
36
+ return smask_from_zmask(~smask);
24
+ qemu_mutex_unlock(&region.lock);
25
+}
37
+}
26
+#endif
27
+
38
+
28
/* Call from a safe-work context */
39
static inline TempOptInfo *ts_info(TCGTemp *ts)
29
void tcg_region_reset_all(void)
30
{
40
{
31
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
41
return ts->state_ptr;
42
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
43
44
static bool fold_shift(OptContext *ctx, TCGOp *op)
45
{
46
+ uint64_t s_mask, z_mask, sign;
47
+
48
if (fold_const2(ctx, op) ||
49
fold_ix_to_i(ctx, op, 0) ||
50
fold_xi_to_x(ctx, op, 0)) {
51
return true;
32
}
52
}
33
53
34
tcg_ctx = s;
54
+ s_mask = arg_info(op->args[1])->s_mask;
35
- qemu_mutex_lock(&region.lock);
55
+ z_mask = arg_info(op->args[1])->z_mask;
36
- tcg_region_initial_alloc__locked(s);
56
+
37
- qemu_mutex_unlock(&region.lock);
57
if (arg_is_const(op->args[2])) {
38
+ tcg_region_initial_alloc(s);
58
- ctx->z_mask = do_constant_folding(op->opc, ctx->type,
59
- arg_info(op->args[1])->z_mask,
60
- arg_info(op->args[2])->val);
61
+ int sh = arg_info(op->args[2])->val;
62
+
63
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
64
+
65
+ s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
66
+ ctx->s_mask = smask_from_smask(s_mask);
67
+
68
return fold_masks(ctx, op);
69
}
70
+
71
+ switch (op->opc) {
72
+ CASE_OP_32_64(sar):
73
+ /*
74
+ * Arithmetic right shift will not reduce the number of
75
+ * input sign repetitions.
76
+ */
77
+ ctx->s_mask = s_mask;
78
+ break;
79
+ CASE_OP_32_64(shr):
80
+ /*
81
+ * If the sign bit is known zero, then logical right shift
82
+ * will not reduce the number of input sign repetitions.
83
+ */
84
+ sign = (s_mask & -s_mask) >> 1;
85
+ if (!(z_mask & sign)) {
86
+ ctx->s_mask = s_mask;
87
+ }
88
+ break;
89
+ default:
90
+ break;
91
+ }
92
+
93
return false;
39
}
94
}
40
#endif /* !CONFIG_USER_ONLY */
41
95
42
--
96
--
43
2.25.1
97
2.25.1
44
98
45
99
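Illustration only (not part of the patch): a quick standalone check (GCC/Clang builtins, arithmetic >> for signed values assumed) that arithmetic right shift never decreases the count of redundant sign bits, which is the property fold_shift relies on to keep the input's s_mask for sar and thereby let the trailing ext32s fold away.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* v is already sign-extended from 32 bits, so clrsb(v) >= 32. */
    int64_t v = (int64_t)INT32_MIN + 1;        /* 0xffffffff80000001 */
    int before = __builtin_clrsbll(v);

    for (int sh = 0; sh < 64; sh++) {
        /* assumes >> on a negative int64_t is an arithmetic shift (sar) */
        assert(__builtin_clrsbll(v >> sh) >= before);
    }
    return 0;
}
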