The following changes since commit c52d69e7dbaaed0ffdef8125e79218672c30161d:

  Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20211027' into staging (2021-10-27 11:45:18 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211027

for you to fetch changes up to 820c025f0dcacf2f3c12735b1f162893fbfa7bc6:

  tcg/optimize: Propagate sign info for shifting (2021-10-27 17:11:23 -0700)

----------------------------------------------------------------
Improvements to qemu/int128
Fixes for 128/64 division.
Cleanup tcg/optimize.c
Optimize redundant sign extensions

----------------------------------------------------------------
Frédéric Pétrot (1):
      qemu/int128: Add int128_{not,xor}

Luis Pires (4):
      host-utils: move checks out of divu128/divs128
      host-utils: move udiv_qrnnd() to host-utils
      host-utils: add 128-bit quotient support to divu128/divs128
      host-utils: add unit tests for divu128/divs128

Richard Henderson (51):
      tcg/optimize: Rename "mask" to "z_mask"
      tcg/optimize: Split out OptContext
      tcg/optimize: Remove do_default label
      tcg/optimize: Change tcg_opt_gen_{mov,movi} interface
      tcg/optimize: Move prev_mb into OptContext
      tcg/optimize: Split out init_arguments
      tcg/optimize: Split out copy_propagate
      tcg/optimize: Split out fold_call
      tcg/optimize: Drop nb_oargs, nb_iargs locals
      tcg/optimize: Change fail return for do_constant_folding_cond*
      tcg/optimize: Return true from tcg_opt_gen_{mov,movi}
      tcg/optimize: Split out finish_folding
      tcg/optimize: Use a boolean to avoid a mass of continues
      tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}
      tcg/optimize: Split out fold_const{1,2}
      tcg/optimize: Split out fold_setcond2
      tcg/optimize: Split out fold_brcond2
      tcg/optimize: Split out fold_brcond
      tcg/optimize: Split out fold_setcond
      tcg/optimize: Split out fold_mulu2_i32
      tcg/optimize: Split out fold_addsub2_i32
      tcg/optimize: Split out fold_movcond
      tcg/optimize: Split out fold_extract2
      tcg/optimize: Split out fold_extract, fold_sextract
      tcg/optimize: Split out fold_deposit
      tcg/optimize: Split out fold_count_zeros
      tcg/optimize: Split out fold_bswap
      tcg/optimize: Split out fold_dup, fold_dup2
      tcg/optimize: Split out fold_mov
      tcg/optimize: Split out fold_xx_to_i
      tcg/optimize: Split out fold_xx_to_x
      tcg/optimize: Split out fold_xi_to_i
      tcg/optimize: Add type to OptContext
      tcg/optimize: Split out fold_to_not
      tcg/optimize: Split out fold_sub_to_neg
      tcg/optimize: Split out fold_xi_to_x
      tcg/optimize: Split out fold_ix_to_i
      tcg/optimize: Split out fold_masks
      tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
      tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops
      tcg/optimize: Sink commutative operand swapping into fold functions
      tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values
      tcg/optimize: Use fold_xx_to_i for orc
      tcg/optimize: Use fold_xi_to_x for mul
      tcg/optimize: Use fold_xi_to_x for div
      tcg/optimize: Use fold_xx_to_i for rem
      tcg/optimize: Optimize sign extensions
      tcg/optimize: Propagate sign info for logical operations
      tcg/optimize: Propagate sign info for setcond
      tcg/optimize: Propagate sign info for bit counting
      tcg/optimize: Propagate sign info for shifting

 include/fpu/softfloat-macros.h |   82 --
 include/hw/clock.h             |    5 +-
 include/qemu/host-utils.h      |  121 +-
 include/qemu/int128.h          |   20 +
 target/ppc/int_helper.c        |   23 +-
 tcg/optimize.c                 | 2644 ++++++++++++++++++++++++----------------
 tests/unit/test-div128.c       |  197 +++
 util/host-utils.c              |  147 ++-
 tests/unit/meson.build         |    1 +
 9 files changed, 2053 insertions(+), 1187 deletions(-)
 create mode 100644 tests/unit/test-div128.c

From: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>

Addition of not and xor on 128-bit integers.

Signed-off-by: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
Co-authored-by: Fabien Portas <fabien.portas@grenoble-inp.org>
Message-Id: <20211025122818.168890-3-frederic.petrot@univ-grenoble-alpes.fr>
[rth: Split out logical operations.]
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/int128.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
     return a;
 }

+static inline Int128 int128_not(Int128 a)
+{
+    return ~a;
+}
+
 static inline Int128 int128_and(Int128 a, Int128 b)
 {
     return a & b;
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
     return a | b;
 }

+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+    return a ^ b;
+}
+
 static inline Int128 int128_rshift(Int128 a, int n)
 {
     return a >> n;
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
     return int128_make128(a, (a < 0) ? -1 : 0);
 }

+static inline Int128 int128_not(Int128 a)
+{
+    return int128_make128(~a.lo, ~a.hi);
+}
+
 static inline Int128 int128_and(Int128 a, Int128 b)
 {
     return int128_make128(a.lo & b.lo, a.hi & b.hi);
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
     return int128_make128(a.lo | b.lo, a.hi | b.hi);
 }

+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+    return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
+}
+
 static inline Int128 int128_rshift(Int128 a, int n)
 {
     int64_t h;
--
2.25.1
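
As a usage sketch for the new helpers (illustrative only; this example is not
part of the patch, and int128_demorgan_check is a hypothetical name), both
Int128 representations now compose with the existing accessors:

    /* Sketch: ~(a & b) == (~a | ~b), and x ^ x == 0 for any x.
     * Works with either the native __int128 or the struct fallback.
     */
    #include "qemu/int128.h"

    static bool int128_demorgan_check(Int128 a, Int128 b)
    {
        Int128 lhs = int128_not(int128_and(a, b));
        Int128 rhs = int128_or(int128_not(a), int128_not(b));

        return int128_eq(lhs, rhs) &&
               int128_eq(int128_xor(lhs, rhs), int128_make64(0));
    }
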
From: Luis Pires <luis.pires@eldorado.org.br>

In preparation for changing the divu128/divs128 implementations
to allow for quotients larger than 64 bits, move the div-by-zero
and overflow checks to the callers.

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-2-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/clock.h        |  5 +++--
 include/qemu/host-utils.h | 34 ++++++++++++---------------------
 target/ppc/int_helper.c   | 14 +++++++++-----
 util/host-utils.c         | 40 ++++++++++++++++++---------------------
 4 files changed, 42 insertions(+), 51 deletions(-)

diff --git a/include/hw/clock.h b/include/hw/clock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
         return 0;
     }
     /*
-     * Ignore divu128() return value as we've caught div-by-zero and don't
-     * need different behaviour for overflow.
+     * BUG: when CONFIG_INT128 is not defined, the current implementation of
+     * divu128 does not return a valid truncated quotient, so the result will
+     * be wrong.
      */
     divu128(&lo, &hi, clk->period);
     return lo;
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     return (__int128_t)a * b / c;
 }

-static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
-    if (divisor == 0) {
-        return 1;
-    } else {
-        __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
-        __uint128_t result = dividend / divisor;
-        *plow = result;
-        *phigh = dividend % divisor;
-        return result > UINT64_MAX;
-    }
+    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
+    __uint128_t result = dividend / divisor;
+    *plow = result;
+    *phigh = dividend % divisor;
 }

-static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
 {
-    if (divisor == 0) {
-        return 1;
-    } else {
-        __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
-        __int128_t result = dividend / divisor;
-        *plow = result;
-        *phigh = dividend % divisor;
-        return result != *plow;
-    }
+    __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+    __int128_t result = dividend / divisor;
+    *plow = result;
+    *phigh = dividend % divisor;
 }
 #else
 void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
 void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);

 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
     uint64_t rt = 0;
     int overflow = 0;

-    overflow = divu128(&rt, &ra, rb);
-
-    if (unlikely(overflow)) {
+    if (unlikely(rb == 0 || ra >= rb)) {
+        overflow = 1;
         rt = 0; /* Undefined */
+    } else {
+        divu128(&rt, &ra, rb);
     }

     if (oe) {
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
     int64_t rt = 0;
     int64_t ra = (int64_t)rau;
     int64_t rb = (int64_t)rbu;
-    int overflow = divs128(&rt, &ra, rb);
+    int overflow = 0;

-    if (unlikely(overflow)) {
+    if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
+        overflow = 1;
         rt = 0; /* Undefined */
+    } else {
+        divs128(&rt, &ra, rb);
     }

     if (oe) {
diff --git a/util/host-utils.c b/util/host-utils.c
index XXXXXXX..XXXXXXX 100644
--- a/util/host-utils.c
+++ b/util/host-utils.c
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
     *phigh = rh;
 }

-/* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */
-/* quotient exceeds 64 bits). Otherwise returns quotient via plow and */
-/* remainder via phigh. */
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+/*
+ * Unsigned 128-by-64 division. Returns quotient via plow and
+ * remainder via phigh.
+ * The result must fit in 64 bits (plow) - otherwise, the result
+ * is undefined.
+ * This function will cause a division by zero if passed a zero divisor.
+ */
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
     uint64_t dhi = *phigh;
     uint64_t dlo = *plow;
     unsigned i;
     uint64_t carry = 0;

-    if (divisor == 0) {
-        return 1;
-    } else if (dhi == 0) {
+    if (divisor == 0 || dhi == 0) {
         *plow = dlo / divisor;
         *phigh = dlo % divisor;
-        return 0;
-    } else if (dhi >= divisor) {
-        return 1;
     } else {

         for (i = 0; i < 64; i++) {
@@ -XXX,XX +XXX,XX @@ int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)

         *plow = dlo;
         *phigh = dhi;
-        return 0;
     }
 }

-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+/*
+ * Signed 128-by-64 division. Returns quotient via plow and
+ * remainder via phigh.
+ * The result must fit in 64 bits (plow) - otherwise, the result
+ * is undefined.
+ * This function will cause a division by zero if passed a zero divisor.
+ */
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
 {
     int sgn_dvdnd = *phigh < 0;
     int sgn_divsr = divisor < 0;
-    int overflow = 0;

     if (sgn_dvdnd) {
         *plow = ~(*plow);
@@ -XXX,XX +XXX,XX @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
         divisor = 0 - divisor;
     }

-    overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
+    divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);

     if (sgn_dvdnd ^ sgn_divsr) {
         *plow = 0 - *plow;
     }
-
-    if (!overflow) {
-        if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) {
-            overflow = 1;
-        }
-    }
-
-    return overflow;
 }
 #endif
--
2.25.1
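
As an illustration of the caller-side contract this introduces (a hedged
sketch; checked_divu128 is a hypothetical wrapper, not a QEMU function):
for an unsigned 128/64 division the quotient fits in 64 bits exactly when
the high half of the dividend is below the divisor, since
dividend < divisor * 2^64 iff *phigh < divisor.

    static bool checked_divu128(uint64_t *plow, uint64_t *phigh,
                                uint64_t divisor)
    {
        if (divisor == 0 || *phigh >= divisor) {
            return false;   /* div-by-zero, or quotient > UINT64_MAX */
        }
        /* At this point in the series: quotient -> *plow, remainder -> *phigh */
        divu128(plow, phigh, divisor);
        return true;
    }

This is the same test the ppc helpers above now perform with
rb == 0 || ra >= rb.
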
From: Luis Pires <luis.pires@eldorado.org.br>

Move udiv_qrnnd() from include/fpu/softfloat-macros.h to host-utils,
so it can be reused by divu128().

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-3-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat-macros.h | 82 ----------------------------------
 include/qemu/host-utils.h      | 81 +++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+), 82 deletions(-)

diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-macros.h
+++ b/include/fpu/softfloat-macros.h
@@ -XXX,XX +XXX,XX @@
  * so some portions are provided under:
  *  the SoftFloat-2a license
  *  the BSD license
- *  GPL-v2-or-later
  *
  * Any future contributions to this file after December 1st 2014 will be
  * taken to be licensed under the Softfloat-2a license unless specifically
@@ -XXX,XX +XXX,XX @@ this code that are retained.
  * THE POSSIBILITY OF SUCH DAMAGE.
  */

-/* Portions of this work are licensed under the terms of the GNU GPL,
- * version 2 or later. See the COPYING file in the top-level directory.
- */
-
 #ifndef FPU_SOFTFLOAT_MACROS_H
 #define FPU_SOFTFLOAT_MACROS_H

@@ -XXX,XX +XXX,XX @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)

 }

-/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
- * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
- *
- * Licensed under the GPLv2/LGPLv3
- */
-static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
-                                  uint64_t n0, uint64_t d)
-{
-#if defined(__x86_64__)
-    uint64_t q;
-    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
-    return q;
-#elif defined(__s390x__) && !defined(__clang__)
-    /* Need to use a TImode type to get an even register pair for DLGR. */
-    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
-    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
-    *r = n >> 64;
-    return n;
-#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
-    /* From Power ISA 2.06, programming note for divdeu. */
-    uint64_t q1, q2, Q, r1, r2, R;
-    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
-        : "=&r"(q1), "=r"(q2)
-        : "r"(n1), "r"(n0), "r"(d));
-    r1 = -(q1 * d);       /* low part of (n1<<64) - (q1 * d) */
-    r2 = n0 - (q2 * d);
-    Q = q1 + q2;
-    R = r1 + r2;
-    if (R >= d || R < r2) { /* overflow implies R > d */
-        Q += 1;
-        R -= d;
-    }
-    *r = R;
-    return Q;
-#else
-    uint64_t d0, d1, q0, q1, r1, r0, m;
-
-    d0 = (uint32_t)d;
-    d1 = d >> 32;
-
-    r1 = n1 % d1;
-    q1 = n1 / d1;
-    m = q1 * d0;
-    r1 = (r1 << 32) | (n0 >> 32);
-    if (r1 < m) {
-        q1 -= 1;
-        r1 += d;
-        if (r1 >= d) {
-            if (r1 < m) {
-                q1 -= 1;
-                r1 += d;
-            }
-        }
-    }
-    r1 -= m;
-
-    r0 = r1 % d1;
-    q0 = r1 / d1;
-    m = q0 * d0;
-    r0 = (r0 << 32) | (uint32_t)n0;
-    if (r0 < m) {
-        q0 -= 1;
-        r0 += d;
-        if (r0 >= d) {
-            if (r0 < m) {
-                q0 -= 1;
-                r0 += d;
-            }
-        }
-    }
-    r0 -= m;
-
-    *r = r0;
-    return (q1 << 32) | q0;
-#endif
-}
-
 /*----------------------------------------------------------------------------
 | Returns an approximation to the square root of the 32-bit significand given
 | by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@
  * THE SOFTWARE.
  */

+/* Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
 #ifndef HOST_UTILS_H
 #define HOST_UTILS_H

@@ -XXX,XX +XXX,XX @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
 */
 void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);

+/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
+ * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
+ *
+ * Licensed under the GPLv2/LGPLv3
+ */
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
+                                  uint64_t n0, uint64_t d)
+{
+#if defined(__x86_64__)
+    uint64_t q;
+    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
+    return q;
+#elif defined(__s390x__) && !defined(__clang__)
+    /* Need to use a TImode type to get an even register pair for DLGR. */
+    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
+    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
+    *r = n >> 64;
+    return n;
+#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
+    /* From Power ISA 2.06, programming note for divdeu. */
+    uint64_t q1, q2, Q, r1, r2, R;
+    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
+        : "=&r"(q1), "=r"(q2)
+        : "r"(n1), "r"(n0), "r"(d));
+    r1 = -(q1 * d);       /* low part of (n1<<64) - (q1 * d) */
+    r2 = n0 - (q2 * d);
+    Q = q1 + q2;
+    R = r1 + r2;
+    if (R >= d || R < r2) { /* overflow implies R > d */
+        Q += 1;
+        R -= d;
+    }
+    *r = R;
+    return Q;
+#else
+    uint64_t d0, d1, q0, q1, r1, r0, m;
+
+    d0 = (uint32_t)d;
+    d1 = d >> 32;
+
+    r1 = n1 % d1;
+    q1 = n1 / d1;
+    m = q1 * d0;
+    r1 = (r1 << 32) | (n0 >> 32);
+    if (r1 < m) {
+        q1 -= 1;
+        r1 += d;
+        if (r1 >= d) {
+            if (r1 < m) {
+                q1 -= 1;
+                r1 += d;
+            }
+        }
+    }
+    r1 -= m;
+
+    r0 = r1 % d1;
+    q0 = r1 / d1;
+    m = q0 * d0;
+    r0 = (r0 << 32) | (uint32_t)n0;
+    if (r0 < m) {
+        q0 -= 1;
+        r0 += d;
+        if (r0 >= d) {
+            if (r0 < m) {
+                q0 -= 1;
+                r0 += d;
+            }
+        }
+    }
+    r0 -= m;
+
+    *r = r0;
+    return (q1 << 32) | q0;
+#endif
+}
+
 #endif
--
2.25.1
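
A minimal usage sketch of the relocated helper (illustrative only;
div128by64_example is a hypothetical name). udiv_qrnnd() divides the
128-bit value n1:n0 by d and, as in the GMP original, assumes n1 < d so
that the 64-bit quotient cannot overflow:

    /* Quotient of ((n1 << 64) | n0) / d is returned; remainder via *r. */
    static uint64_t div128by64_example(uint64_t n1, uint64_t n0, uint64_t d,
                                       uint64_t *r)
    {
        assert(n1 < d);   /* precondition: quotient must fit in 64 bits */
        return udiv_qrnnd(r, n1, n0, d);
    }

divu128() establishes this precondition in the next patch by normalizing
the divisor and, when needed, splitting off the high quotient word first.
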
From: Luis Pires <luis.pires@eldorado.org.br>

These will be used to implement new decimal floating point
instructions from Power ISA 3.1.

The remainder is now returned directly by divu128/divs128,
freeing up phigh to receive the high 64 bits of the quotient.

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-4-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/clock.h        |   6 +-
 include/qemu/host-utils.h |  20 ++--
 target/ppc/int_helper.c   |   9 +--
 util/host-utils.c         | 133 +++++++++++++++++++++++++-------------
 4 files changed, 108 insertions(+), 60 deletions(-)

diff --git a/include/hw/clock.h b/include/hw/clock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
     if (clk->period == 0) {
         return 0;
     }
-    /*
-     * BUG: when CONFIG_INT128 is not defined, the current implementation of
-     * divu128 does not return a valid truncated quotient, so the result will
-     * be wrong.
-     */
+
     divu128(&lo, &hi, clk->period);
     return lo;
 }
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     return (__int128_t)a * b / c;
 }

-static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
+                               uint64_t divisor)
 {
     __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
     __uint128_t result = dividend / divisor;
+
     *plow = result;
-    *phigh = dividend % divisor;
+    *phigh = result >> 64;
+    return dividend % divisor;
 }

-static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
+                              int64_t divisor)
 {
-    __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
     __int128_t result = dividend / divisor;
+
     *plow = result;
-    *phigh = dividend % divisor;
+    *phigh = result >> 64;
+    return dividend % divisor;
 }
 #else
 void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
 void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);

 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)

 uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
 {
-    int64_t rt = 0;
+    uint64_t rt = 0;
     int64_t ra = (int64_t)rau;
     int64_t rb = (int64_t)rbu;
     int overflow = 0;
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
     int cr;
     uint64_t lo_value;
     uint64_t hi_value;
+    uint64_t rem;
     ppc_avr_t ret = { .u64 = { 0, 0 } };

     if (b->VsrSD(0) < 0) {
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
          * In that case, we leave r unchanged.
          */
     } else {
-        divu128(&lo_value, &hi_value, 1000000000000000ULL);
+        rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);

-        for (i = 1; i < 16; hi_value /= 10, i++) {
-            bcd_put_digit(&ret, hi_value % 10, i);
+        for (i = 1; i < 16; rem /= 10, i++) {
+            bcd_put_digit(&ret, rem % 10, i);
         }

         for (; i < 32; lo_value /= 10, i++) {
diff --git a/util/host-utils.c b/util/host-utils.c
index XXXXXXX..XXXXXXX 100644
--- a/util/host-utils.c
+++ b/util/host-utils.c
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
 }

 /*
- * Unsigned 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Unsigned 128-by-64 division.
+ * Returns the remainder.
+ * Returns quotient via plow and phigh.
+ * Also returns the remainder via the function return value.
 */
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
     uint64_t dhi = *phigh;
     uint64_t dlo = *plow;
-    unsigned i;
-    uint64_t carry = 0;
+    uint64_t rem, dhighest;
+    int sh;

     if (divisor == 0 || dhi == 0) {
         *plow = dlo / divisor;
-        *phigh = dlo % divisor;
+        *phigh = 0;
+        return dlo % divisor;
     } else {
+        sh = clz64(divisor);

-        for (i = 0; i < 64; i++) {
-            carry = dhi >> 63;
-            dhi = (dhi << 1) | (dlo >> 63);
-            if (carry || (dhi >= divisor)) {
-                dhi -= divisor;
-                carry = 1;
-            } else {
-                carry = 0;
+        if (dhi < divisor) {
+            if (sh != 0) {
+                /* normalize the divisor, shifting the dividend accordingly */
+                divisor <<= sh;
+                dhi = (dhi << sh) | (dlo >> (64 - sh));
+                dlo <<= sh;
             }
-            dlo = (dlo << 1) | carry;
+
+            *phigh = 0;
+            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
+        } else {
+            if (sh != 0) {
+                /* normalize the divisor, shifting the dividend accordingly */
+                divisor <<= sh;
+                dhighest = dhi >> (64 - sh);
+                dhi = (dhi << sh) | (dlo >> (64 - sh));
+                dlo <<= sh;
+
+                *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
+            } else {
+                /**
+                 * dhi >= divisor
+                 * Since the MSB of divisor is set (sh == 0),
+                 * (dhi - divisor) < divisor
+                 *
+                 * Thus, the high part of the quotient is 1, and we can
+                 * calculate the low part with a single call to udiv_qrnnd
+                 * after subtracting divisor from dhi
+                 */
+                dhi -= divisor;
+                *phigh = 1;
+            }
+
+            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
         }

-        *plow = dlo;
-        *phigh = dhi;
+        /*
+         * since the dividend/divisor might have been normalized,
+         * the remainder might also have to be shifted back
+         */
+        return rem >> sh;
     }
 }

 /*
- * Signed 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Signed 128-by-64 division.
+ * Returns quotient via plow and phigh.
+ * Also returns the remainder via the function return value.
 */
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
 {
-    int sgn_dvdnd = *phigh < 0;
-    int sgn_divsr = divisor < 0;
+    bool neg_quotient = false, neg_remainder = false;
+    uint64_t unsig_hi = *phigh, unsig_lo = *plow;
+    uint64_t rem;

-    if (sgn_dvdnd) {
-        *plow = ~(*plow);
-        *phigh = ~(*phigh);
-        if (*plow == (int64_t)-1) {
+    if (*phigh < 0) {
+        neg_quotient = !neg_quotient;
+        neg_remainder = !neg_remainder;
+
+        if (unsig_lo == 0) {
+            unsig_hi = -unsig_hi;
+        } else {
+            unsig_hi = ~unsig_hi;
+            unsig_lo = -unsig_lo;
+        }
+    }
+
+    if (divisor < 0) {
+        neg_quotient = !neg_quotient;
+
+        divisor = -divisor;
+    }
+
+    rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);
+
+    if (neg_quotient) {
+        if (unsig_lo == 0) {
+            *phigh = -unsig_hi;
             *plow = 0;
-            (*phigh)++;
-        } else {
-            (*plow)++;
-        }
+        } else {
+            *phigh = ~unsig_hi;
+            *plow = -unsig_lo;
+        }
+    } else {
+        *phigh = unsig_hi;
+        *plow = unsig_lo;
     }

-    if (sgn_divsr) {
-        divisor = 0 - divisor;
-    }
-
-    divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
-
-    if (sgn_dvdnd ^ sgn_divsr) {
-        *plow = 0 - *plow;
+    if (neg_remainder) {
+        return -rem;
+    } else {
+        return rem;
     }
 }
 #endif
--
2.25.1
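
A worked example of the new interface (illustrative only): dividing
2^65 + 5 by 2 yields the 65-bit quotient 2^64 + 2 with remainder 1, which
the old interface could not represent; phigh now carries the quotient's
high word and the remainder is the return value:

    uint64_t lo = 5;   /* dividend low 64 bits; value is 2^65 + 5 */
    uint64_t hi = 2;   /* dividend high 64 bits */
    uint64_t rem;

    rem = divu128(&lo, &hi, 2);
    /* Afterwards:
     *   lo  == 2   (low 64 bits of the quotient 2^64 + 2)
     *   hi  == 1   (high 64 bits of the quotient)
     *   rem == 1
     */
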
New patch
1
1
From: Luis Pires <luis.pires@eldorado.org.br>
2
3
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20211025191154.350831-5-luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tests/unit/test-div128.c | 197 +++++++++++++++++++++++++++++++++++++++
9
tests/unit/meson.build | 1 +
10
2 files changed, 198 insertions(+)
11
create mode 100644 tests/unit/test-div128.c
12
13
diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c
14
new file mode 100644
15
index XXXXXXX..XXXXXXX
16
--- /dev/null
17
+++ b/tests/unit/test-div128.c
18
@@ -XXX,XX +XXX,XX @@
19
+/*
20
+ * Test 128-bit division functions
21
+ *
22
+ * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
23
+ *
24
+ * This library is free software; you can redistribute it and/or
25
+ * modify it under the terms of the GNU Lesser General Public
26
+ * License as published by the Free Software Foundation; either
27
+ * version 2.1 of the License, or (at your option) any later version.
28
+ *
29
+ * This library is distributed in the hope that it will be useful,
30
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
31
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
32
+ * Lesser General Public License for more details.
33
+ *
34
+ * You should have received a copy of the GNU Lesser General Public
35
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
36
+ */
37
+
38
+#include "qemu/osdep.h"
39
+#include "qemu/host-utils.h"
40
+
41
+typedef struct {
42
+ uint64_t high;
43
+ uint64_t low;
44
+ uint64_t rhigh;
45
+ uint64_t rlow;
46
+ uint64_t divisor;
47
+ uint64_t remainder;
48
+} test_data_unsigned;
49
+
50
+typedef struct {
51
+ int64_t high;
52
+ uint64_t low;
53
+ int64_t rhigh;
54
+ uint64_t rlow;
55
+ int64_t divisor;
56
+ int64_t remainder;
57
+} test_data_signed;
58
+
59
+static const test_data_unsigned test_table_unsigned[] = {
60
+ /* Dividend fits in 64 bits */
61
+ { 0x0000000000000000ULL, 0x0000000000000000ULL,
62
+ 0x0000000000000000ULL, 0x0000000000000000ULL,
63
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
64
+ { 0x0000000000000000ULL, 0x0000000000000001ULL,
65
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
66
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
67
+ { 0x0000000000000000ULL, 0x0000000000000003ULL,
68
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
69
+ 0x0000000000000002ULL, 0x0000000000000001ULL},
70
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
71
+ 0x0000000000000000ULL, 0x8000000000000000ULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
+ { 0x0000000000000000ULL, 0xa000000000000000ULL,
+ 0x0000000000000000ULL, 0x0000000000000002ULL,
+ 0x4000000000000000ULL, 0x2000000000000000ULL},
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
+ 0x8000000000000000ULL, 0x0000000000000000ULL},
+
+ /* Dividend > 64 bits, with MSB 0 */
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+ 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+ 0x0000000000000001ULL, 0x000000000000000dULL,
+ 0x123456789abcdefeULL, 0x03456789abcdf03bULL},
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+ 0x0123456789abcdefULL, 0xeefedcba98765432ULL,
+ 0x0000000000000010ULL, 0x0000000000000001ULL},
+
+ /* Dividend > 64 bits, with MSB 1 */
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0feeddccbbaa9988ULL, 0x7766554433221100ULL,
+ 0x0000000000000010ULL, 0x000000000000000fULL},
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL,
+ 0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL},
+
+ /**
+ * Divisor == 64 bits, with MSB 1
+ * and high 64 bits of dividend >= divisor
+ * (for testing normalization)
+ */
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0000000000000001ULL, 0xfddbb9977553310aULL,
+ 0x8000000000000001ULL, 0x78899aabbccddf05ULL},
+
+ /* Dividend > 64 bits, divisor almost as big */
+ { 0x0000000000000001ULL, 0x23456789abcdef01ULL,
+ 0x0000000000000000ULL, 0x000000000000000fULL,
+ 0x123456789abcdefeULL, 0x123456789abcde1fULL},
+};
+
+static const test_data_signed test_table_signed[] = {
+ /* Positive dividend, positive/negative divisors */
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000001LL, 0x0000000000000000LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
+ 0x0000000000000002LL, 0x0000000000000000LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
+ 0x0000000000000008LL, 0x0000000000000006LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
+ 0xfffffffffffffff8LL, 0x0000000000000006LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x000000000000550dULL,
+ 0x0000000000000237LL, 0x0000000000000183LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
+ 0xfffffffffffffdc9LL, 0x0000000000000183LL},
+
+ /* Negative dividend, positive/negative divisors */
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000001LL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
+ 0x0000000000000002LL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
+ 0x0000000000000008LL, 0xfffffffffffffffaLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
+ 0xfffffffffffffff8LL, 0xfffffffffffffffaLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
+ 0x0000000000000237LL, 0xfffffffffffffe7dLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x000000000000550dULL,
+ 0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL},
+};
+
+static void test_divu128(void)
+{
+ int i;
+ uint64_t rem;
+ test_data_unsigned tmp;
+
+ for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) {
+ tmp = test_table_unsigned[i];
+
+ rem = divu128(&tmp.low, &tmp.high, tmp.divisor);
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
+ g_assert_cmpuint(rem, ==, tmp.remainder);
+ }
+}
+
+static void test_divs128(void)
+{
+ int i;
+ int64_t rem;
+ test_data_signed tmp;
+
+ for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) {
+ tmp = test_table_signed[i];
+
+ rem = divs128(&tmp.low, &tmp.high, tmp.divisor);
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
+ g_assert_cmpuint(rem, ==, tmp.remainder);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/host-utils/test_divu128", test_divu128);
+ g_test_add_func("/host-utils/test_divs128", test_divs128);
+ return g_test_run();
+}
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -XXX,XX +XXX,XX @@ tests = {
 # all code tested by test-x86-cpuid is inside topology.h
 'test-x86-cpuid': [],
 'test-cutils': [],
+ 'test-div128': [],
 'test-shift128': [],
 'test-mul64': [],
 # all code tested by test-int128 is inside int128.h
--
2.25.1
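(A minimal usage sketch, not part of the patch, of the call convention
the tests above exercise: the 128-bit dividend is passed as low/high
halves, divu128() overwrites them in place with the 128-bit quotient,
and the remainder is the return value. The wrapping main() and the
sample operands below are illustrative only.)

    #include "qemu/osdep.h"       /* pulls in stdio/inttypes */
    #include "qemu/host-utils.h"  /* divu128(), divs128() */

    int main(void)
    {
        uint64_t low = 0xefedcba987654321ULL;   /* dividend bits 0..63 */
        uint64_t high = 0x123456789abcdefeULL;  /* dividend bits 64..127 */

        /* Divide the 128-bit value by 13; quotient replaces low/high. */
        uint64_t rem = divu128(&low, &high, 0x000000000000000dULL);

        printf("q = 0x%016" PRIx64 "%016" PRIx64 ", r = 0x%" PRIx64 "\n",
               high, low, rem);
        return 0;
    }

divs128() follows the same in-place convention for signed operands.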
diff view generated by jsdifflib
Old series:
The separate suffixed functions were used to construct
some do_##insn function switched on mmu_idx. The interface
is exactly identical to the *_mmuidx_ra functions. Replace
them directly and remove the constructions.

Cc: Aurelien Jarno <aurelien@aurel32.net>
Cc: Aleksandar Rikalo <aleksandar.rikalo@rt-rk.com>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/cpu.h | 4 -
 target/mips/op_helper.c | 182 +++++++++++++---------------------------
 2 files changed, 60 insertions(+), 126 deletions(-)

New series:
Prepare for tracking different masks by renaming this one.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 142 +++++++++++++++++++++++++------------------------
 1 file changed, 72 insertions(+), 70 deletions(-)
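(Illustrative sketch, not from the patch, of what the renamed field
tracks. Per the new comment, a z_mask bit is 0 only where the value bit
is known to be 0, so a result bit can be non-zero only where z_mask is
1. The helper names below are made up; the combination rules mirror the
tcg/optimize.c hunks quoted further down.)

    /* AND: a bit can be set only if it can be set in both inputs. */
    static uint64_t z_mask_and(uint64_t za, uint64_t zb)
    {
        return za & zb;
    }

    /* OR, XOR: a bit can be set if it can be set in either input. */
    static uint64_t z_mask_or_xor(uint64_t za, uint64_t zb)
    {
        return za | zb;
    }

    /* ext8u: the result is confined to the low eight bits. */
    static uint64_t z_mask_ext8u(uint64_t za)
    {
        return za & 0xff;
    }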
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
19
--- a/target/mips/cpu.h
13
--- a/tcg/optimize.c
20
+++ b/target/mips/cpu.h
14
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
22
* MMU modes definitions. We carefully match the indices with our
16
TCGTemp *prev_copy;
23
* hflags layout.
17
TCGTemp *next_copy;
24
*/
18
uint64_t val;
25
-#define MMU_MODE0_SUFFIX _kernel
19
- uint64_t mask;
26
-#define MMU_MODE1_SUFFIX _super
20
+ uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
27
-#define MMU_MODE2_SUFFIX _user
21
} TempOptInfo;
28
-#define MMU_MODE3_SUFFIX _error
22
29
#define MMU_USER_IDX 2
23
static inline TempOptInfo *ts_info(TCGTemp *ts)
30
24
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
31
static inline int hflags_mmu_index(uint32_t hflags)
25
ti->next_copy = ts;
32
diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c
26
ti->prev_copy = ts;
33
index XXXXXXX..XXXXXXX 100644
27
ti->is_const = false;
34
--- a/target/mips/op_helper.c
28
- ti->mask = -1;
35
+++ b/target/mips/op_helper.c
29
+ ti->z_mask = -1;
36
@@ -XXX,XX +XXX,XX @@ static void raise_exception(CPUMIPSState *env, uint32_t exception)
37
do_raise_exception(env, exception, 0);
38
}
30
}
39
31
40
-#if defined(CONFIG_USER_ONLY)
32
static void reset_temp(TCGArg arg)
41
-#define HELPER_LD(name, insn, type) \
33
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
42
-static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
34
if (ts->kind == TEMP_CONST) {
43
- int mem_idx, uintptr_t retaddr) \
35
ti->is_const = true;
44
-{ \
36
ti->val = ts->val;
45
- return (type) cpu_##insn##_data_ra(env, addr, retaddr); \
37
- ti->mask = ts->val;
46
-}
38
+ ti->z_mask = ts->val;
47
-#else
39
if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
48
-#define HELPER_LD(name, insn, type) \
40
/* High bits of a 32-bit quantity are garbage. */
49
-static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
41
- ti->mask |= ~0xffffffffull;
50
- int mem_idx, uintptr_t retaddr) \
42
+ ti->z_mask |= ~0xffffffffull;
51
-{ \
43
}
52
- switch (mem_idx) { \
44
} else {
53
- case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr); \
45
ti->is_const = false;
54
- case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
46
- ti->mask = -1;
55
- default: \
47
+ ti->z_mask = -1;
56
- case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
57
- case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr); \
58
- } \
59
-}
60
-#endif
61
-HELPER_LD(lw, ldl, int32_t)
62
-#if defined(TARGET_MIPS64)
63
-HELPER_LD(ld, ldq, int64_t)
64
-#endif
65
-#undef HELPER_LD
66
-
67
-#if defined(CONFIG_USER_ONLY)
68
-#define HELPER_ST(name, insn, type) \
69
-static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
70
- type val, int mem_idx, uintptr_t retaddr) \
71
-{ \
72
- cpu_##insn##_data_ra(env, addr, val, retaddr); \
73
-}
74
-#else
75
-#define HELPER_ST(name, insn, type) \
76
-static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
77
- type val, int mem_idx, uintptr_t retaddr) \
78
-{ \
79
- switch (mem_idx) { \
80
- case 0: \
81
- cpu_##insn##_kernel_ra(env, addr, val, retaddr); \
82
- break; \
83
- case 1: \
84
- cpu_##insn##_super_ra(env, addr, val, retaddr); \
85
- break; \
86
- default: \
87
- case 2: \
88
- cpu_##insn##_user_ra(env, addr, val, retaddr); \
89
- break; \
90
- case 3: \
91
- cpu_##insn##_error_ra(env, addr, val, retaddr); \
92
- break; \
93
- } \
94
-}
95
-#endif
96
-HELPER_ST(sb, stb, uint8_t)
97
-HELPER_ST(sw, stl, uint32_t)
98
-#if defined(TARGET_MIPS64)
99
-HELPER_ST(sd, stq, uint64_t)
100
-#endif
101
-#undef HELPER_ST
102
-
103
/* 64 bits arithmetic for 32 bits hosts */
104
static inline uint64_t get_HILO(CPUMIPSState *env)
105
{
106
@@ -XXX,XX +XXX,XX @@ target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
107
} \
108
env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC()); \
109
env->lladdr = arg; \
110
- env->llval = do_##insn(env, arg, mem_idx, GETPC()); \
111
+ env->llval = cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC()); \
112
return env->llval; \
113
}
114
-HELPER_LD_ATOMIC(ll, lw, 0x3)
115
+HELPER_LD_ATOMIC(ll, ldl, 0x3)
116
#ifdef TARGET_MIPS64
117
-HELPER_LD_ATOMIC(lld, ld, 0x7)
118
+HELPER_LD_ATOMIC(lld, ldq, 0x7)
119
#endif
120
#undef HELPER_LD_ATOMIC
121
#endif
122
@@ -XXX,XX +XXX,XX @@ HELPER_LD_ATOMIC(lld, ld, 0x7)
123
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
124
int mem_idx)
125
{
126
- do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
127
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
128
129
if (GET_LMASK(arg2) <= 2) {
130
- do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx,
131
- GETPC());
132
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16),
133
+ mem_idx, GETPC());
134
}
135
136
if (GET_LMASK(arg2) <= 1) {
137
- do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx,
138
- GETPC());
139
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8),
140
+ mem_idx, GETPC());
141
}
142
143
if (GET_LMASK(arg2) == 0) {
144
- do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx,
145
- GETPC());
146
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1,
147
+ mem_idx, GETPC());
148
}
48
}
149
}
49
}
150
50
151
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
51
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
152
int mem_idx)
52
const TCGOpDef *def;
153
{
53
TempOptInfo *di;
154
- do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
54
TempOptInfo *si;
155
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
55
- uint64_t mask;
156
56
+ uint64_t z_mask;
157
if (GET_LMASK(arg2) >= 1) {
57
TCGOpcode new_op;
158
- do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
58
159
- GETPC());
59
if (ts_are_copies(dst_ts, src_ts)) {
160
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
60
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
161
+ mem_idx, GETPC());
61
op->args[0] = dst;
62
op->args[1] = src;
63
64
- mask = si->mask;
65
+ z_mask = si->z_mask;
66
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
67
/* High bits of the destination are now garbage. */
68
- mask |= ~0xffffffffull;
69
+ z_mask |= ~0xffffffffull;
162
}
70
}
163
71
- di->mask = mask;
164
if (GET_LMASK(arg2) >= 2) {
72
+ di->z_mask = z_mask;
165
- do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
73
166
- GETPC());
74
if (src_ts->type == dst_ts->type) {
167
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
75
TempOptInfo *ni = ts_info(si->next_copy);
168
+ mem_idx, GETPC());
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
169
}
77
}
170
78
171
if (GET_LMASK(arg2) == 3) {
79
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
172
- do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
80
- uint64_t mask, partmask, affected, tmp;
173
- GETPC());
81
+ uint64_t z_mask, partmask, affected, tmp;
174
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
82
int nb_oargs, nb_iargs;
175
+ mem_idx, GETPC());
83
TCGOpcode opc = op->opc;
176
}
84
const TCGOpDef *def = &tcg_op_defs[opc];
177
}
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
178
86
179
@@ -XXX,XX +XXX,XX @@ void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
87
/* Simplify using known-zero bits. Currently only ops with a single
180
void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
88
output argument is supported. */
181
int mem_idx)
89
- mask = -1;
182
{
90
+ z_mask = -1;
183
- do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
91
affected = -1;
184
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
92
switch (opc) {
185
93
CASE_OP_32_64(ext8s):
186
if (GET_LMASK64(arg2) <= 6) {
94
- if ((arg_info(op->args[1])->mask & 0x80) != 0) {
187
- do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx,
95
+ if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
188
- GETPC());
96
break;
189
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48),
97
}
190
+ mem_idx, GETPC());
98
QEMU_FALLTHROUGH;
191
}
99
CASE_OP_32_64(ext8u):
192
100
- mask = 0xff;
193
if (GET_LMASK64(arg2) <= 5) {
101
+ z_mask = 0xff;
194
- do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx,
102
goto and_const;
195
- GETPC());
103
CASE_OP_32_64(ext16s):
196
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40),
104
- if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
197
+ mem_idx, GETPC());
105
+ if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
198
}
106
break;
199
107
}
200
if (GET_LMASK64(arg2) <= 4) {
108
QEMU_FALLTHROUGH;
201
- do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx,
109
CASE_OP_32_64(ext16u):
202
- GETPC());
110
- mask = 0xffff;
203
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32),
111
+ z_mask = 0xffff;
204
+ mem_idx, GETPC());
112
goto and_const;
205
}
113
case INDEX_op_ext32s_i64:
206
114
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
207
if (GET_LMASK64(arg2) <= 3) {
115
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
208
- do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx,
116
break;
209
- GETPC());
117
}
210
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24),
118
QEMU_FALLTHROUGH;
211
+ mem_idx, GETPC());
119
case INDEX_op_ext32u_i64:
212
}
120
- mask = 0xffffffffU;
213
121
+ z_mask = 0xffffffffU;
214
if (GET_LMASK64(arg2) <= 2) {
122
goto and_const;
215
- do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx,
123
216
- GETPC());
124
CASE_OP_32_64(and):
217
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16),
125
- mask = arg_info(op->args[2])->mask;
218
+ mem_idx, GETPC());
126
+ z_mask = arg_info(op->args[2])->z_mask;
219
}
127
if (arg_is_const(op->args[2])) {
220
128
and_const:
221
if (GET_LMASK64(arg2) <= 1) {
129
- affected = arg_info(op->args[1])->mask & ~mask;
222
- do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx,
130
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
223
- GETPC());
131
}
224
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8),
132
- mask = arg_info(op->args[1])->mask & mask;
225
+ mem_idx, GETPC());
133
+ z_mask = arg_info(op->args[1])->z_mask & z_mask;
226
}
134
break;
227
135
228
if (GET_LMASK64(arg2) <= 0) {
136
case INDEX_op_ext_i32_i64:
229
- do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx,
137
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
230
- GETPC());
138
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
231
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1,
139
break;
232
+ mem_idx, GETPC());
140
}
233
}
141
QEMU_FALLTHROUGH;
234
}
142
case INDEX_op_extu_i32_i64:
235
143
/* We do not compute affected as it is a size changing op. */
236
void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
144
- mask = (uint32_t)arg_info(op->args[1])->mask;
237
int mem_idx)
145
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
238
{
146
break;
239
- do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
147
240
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
148
CASE_OP_32_64(andc):
241
149
/* Known-zeros does not imply known-ones. Therefore unless
242
if (GET_LMASK64(arg2) >= 1) {
150
op->args[2] is constant, we can't infer anything from it. */
243
- do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
151
if (arg_is_const(op->args[2])) {
244
- GETPC());
152
- mask = ~arg_info(op->args[2])->mask;
245
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
153
+ z_mask = ~arg_info(op->args[2])->z_mask;
246
+ mem_idx, GETPC());
154
goto and_const;
247
}
155
}
248
156
/* But we certainly know nothing outside args[1] may be set. */
249
if (GET_LMASK64(arg2) >= 2) {
157
- mask = arg_info(op->args[1])->mask;
250
- do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
158
+ z_mask = arg_info(op->args[1])->z_mask;
251
- GETPC());
159
break;
252
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
160
253
+ mem_idx, GETPC());
161
case INDEX_op_sar_i32:
254
}
162
if (arg_is_const(op->args[2])) {
255
163
tmp = arg_info(op->args[2])->val & 31;
256
if (GET_LMASK64(arg2) >= 3) {
164
- mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
257
- do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
165
+ z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
258
- GETPC());
166
}
259
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
167
break;
260
+ mem_idx, GETPC());
168
case INDEX_op_sar_i64:
261
}
169
if (arg_is_const(op->args[2])) {
262
170
tmp = arg_info(op->args[2])->val & 63;
263
if (GET_LMASK64(arg2) >= 4) {
171
- mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
264
- do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx,
172
+ z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
265
- GETPC());
173
}
266
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32),
174
break;
267
+ mem_idx, GETPC());
175
268
}
176
case INDEX_op_shr_i32:
269
177
if (arg_is_const(op->args[2])) {
270
if (GET_LMASK64(arg2) >= 5) {
178
tmp = arg_info(op->args[2])->val & 31;
271
- do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx,
179
- mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
272
- GETPC());
180
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
273
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40),
181
}
274
+ mem_idx, GETPC());
182
break;
275
}
183
case INDEX_op_shr_i64:
276
184
if (arg_is_const(op->args[2])) {
277
if (GET_LMASK64(arg2) >= 6) {
185
tmp = arg_info(op->args[2])->val & 63;
278
- do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx,
186
- mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
279
- GETPC());
187
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
280
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48),
188
}
281
+ mem_idx, GETPC());
189
break;
282
}
190
283
191
case INDEX_op_extrl_i64_i32:
284
if (GET_LMASK64(arg2) == 7) {
192
- mask = (uint32_t)arg_info(op->args[1])->mask;
285
- do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx,
193
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
286
- GETPC());
194
break;
287
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56),
195
case INDEX_op_extrh_i64_i32:
288
+ mem_idx, GETPC());
196
- mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
289
}
197
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
290
}
198
break;
291
#endif /* TARGET_MIPS64 */
199
292
@@ -XXX,XX +XXX,XX @@ void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
200
CASE_OP_32_64(shl):
293
201
if (arg_is_const(op->args[2])) {
294
for (i = 0; i < base_reglist; i++) {
202
tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
295
env->active_tc.gpr[multiple_regs[i]] =
203
- mask = arg_info(op->args[1])->mask << tmp;
296
- (target_long)do_lw(env, addr, mem_idx, GETPC());
204
+ z_mask = arg_info(op->args[1])->z_mask << tmp;
297
+ (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
205
}
298
addr += 4;
206
break;
207
208
CASE_OP_32_64(neg):
209
/* Set to 1 all bits to the left of the rightmost. */
210
- mask = -(arg_info(op->args[1])->mask
211
- & -arg_info(op->args[1])->mask);
212
+ z_mask = -(arg_info(op->args[1])->z_mask
213
+ & -arg_info(op->args[1])->z_mask);
214
break;
215
216
CASE_OP_32_64(deposit):
217
- mask = deposit64(arg_info(op->args[1])->mask,
218
- op->args[3], op->args[4],
219
- arg_info(op->args[2])->mask);
220
+ z_mask = deposit64(arg_info(op->args[1])->z_mask,
221
+ op->args[3], op->args[4],
222
+ arg_info(op->args[2])->z_mask);
223
break;
224
225
CASE_OP_32_64(extract):
226
- mask = extract64(arg_info(op->args[1])->mask,
227
- op->args[2], op->args[3]);
228
+ z_mask = extract64(arg_info(op->args[1])->z_mask,
229
+ op->args[2], op->args[3]);
230
if (op->args[2] == 0) {
231
- affected = arg_info(op->args[1])->mask & ~mask;
232
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
233
}
234
break;
235
CASE_OP_32_64(sextract):
236
- mask = sextract64(arg_info(op->args[1])->mask,
237
- op->args[2], op->args[3]);
238
- if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
239
- affected = arg_info(op->args[1])->mask & ~mask;
240
+ z_mask = sextract64(arg_info(op->args[1])->z_mask,
241
+ op->args[2], op->args[3]);
242
+ if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
243
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
244
}
245
break;
246
247
CASE_OP_32_64(or):
248
CASE_OP_32_64(xor):
249
- mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
250
+ z_mask = arg_info(op->args[1])->z_mask
251
+ | arg_info(op->args[2])->z_mask;
252
break;
253
254
case INDEX_op_clz_i32:
255
case INDEX_op_ctz_i32:
256
- mask = arg_info(op->args[2])->mask | 31;
257
+ z_mask = arg_info(op->args[2])->z_mask | 31;
258
break;
259
260
case INDEX_op_clz_i64:
261
case INDEX_op_ctz_i64:
262
- mask = arg_info(op->args[2])->mask | 63;
263
+ z_mask = arg_info(op->args[2])->z_mask | 63;
264
break;
265
266
case INDEX_op_ctpop_i32:
267
- mask = 32 | 31;
268
+ z_mask = 32 | 31;
269
break;
270
case INDEX_op_ctpop_i64:
271
- mask = 64 | 63;
272
+ z_mask = 64 | 63;
273
break;
274
275
CASE_OP_32_64(setcond):
276
case INDEX_op_setcond2_i32:
277
- mask = 1;
278
+ z_mask = 1;
279
break;
280
281
CASE_OP_32_64(movcond):
282
- mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
283
+ z_mask = arg_info(op->args[3])->z_mask
284
+ | arg_info(op->args[4])->z_mask;
285
break;
286
287
CASE_OP_32_64(ld8u):
288
- mask = 0xff;
289
+ z_mask = 0xff;
290
break;
291
CASE_OP_32_64(ld16u):
292
- mask = 0xffff;
293
+ z_mask = 0xffff;
294
break;
295
case INDEX_op_ld32u_i64:
296
- mask = 0xffffffffu;
297
+ z_mask = 0xffffffffu;
298
break;
299
300
CASE_OP_32_64(qemu_ld):
301
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
302
MemOpIdx oi = op->args[nb_oargs + nb_iargs];
303
MemOp mop = get_memop(oi);
304
if (!(mop & MO_SIGN)) {
305
- mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
306
+ z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
307
}
308
}
309
break;
310
311
CASE_OP_32_64(bswap16):
312
- mask = arg_info(op->args[1])->mask;
313
- if (mask <= 0xffff) {
314
+ z_mask = arg_info(op->args[1])->z_mask;
315
+ if (z_mask <= 0xffff) {
316
op->args[2] |= TCG_BSWAP_IZ;
317
}
318
- mask = bswap16(mask);
319
+ z_mask = bswap16(z_mask);
320
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
321
case TCG_BSWAP_OZ:
322
break;
323
case TCG_BSWAP_OS:
324
- mask = (int16_t)mask;
325
+ z_mask = (int16_t)z_mask;
326
break;
327
default: /* undefined high bits */
328
- mask |= MAKE_64BIT_MASK(16, 48);
329
+ z_mask |= MAKE_64BIT_MASK(16, 48);
330
break;
331
}
332
break;
333
334
case INDEX_op_bswap32_i64:
335
- mask = arg_info(op->args[1])->mask;
336
- if (mask <= 0xffffffffu) {
337
+ z_mask = arg_info(op->args[1])->z_mask;
338
+ if (z_mask <= 0xffffffffu) {
339
op->args[2] |= TCG_BSWAP_IZ;
340
}
341
- mask = bswap32(mask);
342
+ z_mask = bswap32(z_mask);
343
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
344
case TCG_BSWAP_OZ:
345
break;
346
case TCG_BSWAP_OS:
347
- mask = (int32_t)mask;
348
+ z_mask = (int32_t)z_mask;
349
break;
350
default: /* undefined high bits */
351
- mask |= MAKE_64BIT_MASK(32, 32);
352
+ z_mask |= MAKE_64BIT_MASK(32, 32);
353
break;
354
}
355
break;
356
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
357
/* 32-bit ops generate 32-bit results. For the result is zero test
358
below, we can ignore high bits, but for further optimizations we
359
need to record that the high bits contain garbage. */
360
- partmask = mask;
361
+ partmask = z_mask;
362
if (!(def->flags & TCG_OPF_64BIT)) {
363
- mask |= ~(tcg_target_ulong)0xffffffffu;
364
+ z_mask |= ~(tcg_target_ulong)0xffffffffu;
365
partmask &= 0xffffffffu;
366
affected &= 0xffffffffu;
299
}
367
}
300
}
368
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
301
369
vs the high word of the input. */
302
if (do_r31) {
370
do_setcond_high:
303
- env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx,
371
reset_temp(op->args[0]);
304
- GETPC());
372
- arg_info(op->args[0])->mask = 1;
305
+ env->active_tc.gpr[31] =
373
+ arg_info(op->args[0])->z_mask = 1;
306
+ (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
374
op->opc = INDEX_op_setcond_i32;
307
}
375
op->args[1] = op->args[2];
308
}
376
op->args[2] = op->args[4];
309
377
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
310
@@ -XXX,XX +XXX,XX @@ void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
378
}
311
target_ulong i;
379
do_setcond_low:
312
380
reset_temp(op->args[0]);
313
for (i = 0; i < base_reglist; i++) {
381
- arg_info(op->args[0])->mask = 1;
314
- do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
382
+ arg_info(op->args[0])->z_mask = 1;
315
- GETPC());
383
op->opc = INDEX_op_setcond_i32;
316
+ cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
384
op->args[2] = op->args[3];
317
+ mem_idx, GETPC());
385
op->args[3] = op->args[5];
318
addr += 4;
386
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
319
}
387
/* Default case: we know nothing about operation (or were unable
320
}
388
to compute the operation result) so no propagation is done.
321
389
We trash everything if the operation is the end of a basic
322
if (do_r31) {
390
- block, otherwise we only trash the output args. "mask" is
323
- do_sw(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
391
+ block, otherwise we only trash the output args. "z_mask" is
324
+ cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
392
the non-zero bits mask for the first output arg. */
325
}
393
if (def->flags & TCG_OPF_BB_END) {
326
}
394
memset(&temps_used, 0, sizeof(temps_used));
327
395
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
328
@@ -XXX,XX +XXX,XX @@ void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
396
/* Save the corresponding known-zero bits mask for the
329
target_ulong i;
397
first output argument (only one supported so far). */
330
398
if (i == 0) {
331
for (i = 0; i < base_reglist; i++) {
399
- arg_info(op->args[i])->mask = mask;
332
- env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx,
400
+ arg_info(op->args[i])->z_mask = z_mask;
333
- GETPC());
401
}
334
+ env->active_tc.gpr[multiple_regs[i]] =
402
}
335
+ cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
403
}
336
addr += 8;
337
}
338
}
339
340
if (do_r31) {
341
- env->active_tc.gpr[31] = do_ld(env, addr, mem_idx, GETPC());
342
+ env->active_tc.gpr[31] =
343
+ cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
344
}
345
}
346
347
@@ -XXX,XX +XXX,XX @@ void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
348
target_ulong i;
349
350
for (i = 0; i < base_reglist; i++) {
351
- do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
352
- GETPC());
353
+ cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
354
+ mem_idx, GETPC());
355
addr += 8;
356
}
357
}
358
359
if (do_r31) {
360
- do_sd(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
361
+ cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
362
}
363
}
364
#endif
365
--
2.20.1

--
2.25.1
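(Sketch of the conversion pattern in the old-series patch above; the
do_sb_old/do_sb_new wrapper names are invented for illustration. The
hand-rolled switch over MMU-mode-suffixed accessors, generated by the
removed HELPER_ST macro, collapses into a single *_mmuidx_ra call that
takes the mmu index as an ordinary argument.)

    /* Before: dispatch on mem_idx by hand, one accessor per MMU mode. */
    static inline void do_sb_old(CPUMIPSState *env, target_ulong addr,
                                 uint8_t val, int mem_idx, uintptr_t ra)
    {
        switch (mem_idx) {
        case 0: cpu_stb_kernel_ra(env, addr, val, ra); break;
        case 1: cpu_stb_super_ra(env, addr, val, ra); break;
        default:
        case 2: cpu_stb_user_ra(env, addr, val, ra); break;
        case 3: cpu_stb_error_ra(env, addr, val, ra); break;
        }
    }

    /* After: the mmu index is just another argument. */
    static inline void do_sb_new(CPUMIPSState *env, target_ulong addr,
                                 uint8_t val, int mem_idx, uintptr_t ra)
    {
        cpu_stb_mmuidx_ra(env, addr, val, mem_idx, ra);
    }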
diff view generated by jsdifflib
Old series:
The generated functions aside from *_real are unused.
The *_real functions have a couple of users in mem_helper.c;
use *_mmuidx_ra instead, with MMU_REAL_IDX.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Use *_mmuidx_ra directly, without intermediate macros.
---
 target/s390x/cpu.h | 5 -----
 target/s390x/mem_helper.c | 10 +++++-----
 2 files changed, 5 insertions(+), 10 deletions(-)

New series:
Provide what will become a larger context for splitting
the very large tcg_optimize function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 77 ++++++++++++++++++++++++++------------------------
 1 file changed, 40 insertions(+), 37 deletions(-)
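(Sketch of the direction described in the new-series message above,
using names from the diff that follows; the example() wrapper is
hypothetical. Per-run optimizer state moves into an OptContext so
helpers take one pointer instead of a loose TCGTempSet, and later
patches can grow the context without touching every call site again.)

    typedef struct OptContext {
        TCGTempSet temps_used;   /* which temps have been initialized */
    } OptContext;

    static void example(TCGTemp *ts)
    {
        OptContext ctx = {};
        /* was: init_ts_info(&temps_used, ts) */
        init_ts_info(&ctx, ts);
    }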
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
18
--- a/target/s390x/cpu.h
14
--- a/tcg/optimize.c
19
+++ b/target/s390x/cpu.h
15
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
21
17
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
22
#define TARGET_INSN_START_EXTRA_WORDS 2
18
} TempOptInfo;
23
19
24
-#define MMU_MODE0_SUFFIX _primary
20
+typedef struct OptContext {
25
-#define MMU_MODE1_SUFFIX _secondary
21
+ TCGTempSet temps_used;
26
-#define MMU_MODE2_SUFFIX _home
22
+} OptContext;
27
-#define MMU_MODE3_SUFFIX _real
23
+
28
-
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
29
#define MMU_USER_IDX 0
25
{
30
26
return ts->state_ptr;
31
#define S390_MAX_CPUS 248
27
@@ -XXX,XX +XXX,XX @@ static void reset_temp(TCGArg arg)
32
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
28
}
33
index XXXXXXX..XXXXXXX 100644
29
34
--- a/target/s390x/mem_helper.c
30
/* Initialize and activate a temporary. */
35
+++ b/target/s390x/mem_helper.c
31
-static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
36
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
32
+static void init_ts_info(OptContext *ctx, TCGTemp *ts)
37
real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
33
{
38
34
size_t idx = temp_idx(ts);
39
for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
35
TempOptInfo *ti;
40
- cpu_stq_real_ra(env, real_addr + i, 0, ra);
36
41
+ cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra);
37
- if (test_bit(idx, temps_used->l)) {
38
+ if (test_bit(idx, ctx->temps_used.l)) {
39
return;
42
}
40
}
43
41
- set_bit(idx, temps_used->l);
44
return 0;
42
+ set_bit(idx, ctx->temps_used.l);
45
@@ -XXX,XX +XXX,XX @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
43
46
for (i = 0; i < entries; i++) {
44
ti = ts->state_ptr;
47
/* addresses are not wrapped in 24/31bit mode but table index is */
45
if (ti == NULL) {
48
raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
46
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
49
- entry = cpu_ldq_real_ra(env, raddr, ra);
47
}
50
+ entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra);
48
}
51
if (!(entry & REGION_ENTRY_I)) {
49
52
/* we are allowed to not store if already invalid */
50
-static void init_arg_info(TCGTempSet *temps_used, TCGArg arg)
53
entry |= REGION_ENTRY_I;
51
+static void init_arg_info(OptContext *ctx, TCGArg arg)
54
- cpu_stq_real_ra(env, raddr, entry, ra);
52
{
55
+ cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra);
53
- init_ts_info(temps_used, arg_temp(arg));
54
+ init_ts_info(ctx, arg_temp(arg));
55
}
56
57
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
58
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
59
}
60
}
61
62
-static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
63
+static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
64
TCGOp *op, TCGArg dst, uint64_t val)
65
{
66
const TCGOpDef *def = &tcg_op_defs[op->opc];
67
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
68
69
/* Convert movi to mov with constant temp. */
70
tv = tcg_constant_internal(type, val);
71
- init_ts_info(temps_used, tv);
72
+ init_ts_info(ctx, tv);
73
tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
74
}
75
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
77
{
78
int nb_temps, nb_globals, i;
79
TCGOp *op, *op_next, *prev_mb = NULL;
80
- TCGTempSet temps_used;
81
+ OptContext ctx = {};
82
83
/* Array VALS has an element for each temp.
84
If this temp holds a constant then its value is kept in VALS' element.
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
nb_temps = s->nb_temps;
87
nb_globals = s->nb_globals;
88
89
- memset(&temps_used, 0, sizeof(temps_used));
90
for (i = 0; i < nb_temps; ++i) {
91
s->temps[i].state_ptr = NULL;
92
}
93
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
94
for (i = 0; i < nb_oargs + nb_iargs; i++) {
95
TCGTemp *ts = arg_temp(op->args[i]);
96
if (ts) {
97
- init_ts_info(&temps_used, ts);
98
+ init_ts_info(&ctx, ts);
99
}
100
}
101
} else {
102
nb_oargs = def->nb_oargs;
103
nb_iargs = def->nb_iargs;
104
for (i = 0; i < nb_oargs + nb_iargs; i++) {
105
- init_arg_info(&temps_used, op->args[i]);
106
+ init_arg_info(&ctx, op->args[i]);
56
}
107
}
57
}
108
}
58
}
109
59
@@ -XXX,XX +XXX,XX @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
60
pte_addr += VADDR_PAGE_TX(vaddr) * 8;
111
CASE_OP_32_64(rotr):
61
112
if (arg_is_const(op->args[1])
62
/* Mark the page table entry as invalid */
113
&& arg_info(op->args[1])->val == 0) {
63
- pte = cpu_ldq_real_ra(env, pte_addr, ra);
114
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
64
+ pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
115
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
65
pte |= PAGE_ENTRY_I;
116
continue;
66
- cpu_stq_real_ra(env, pte_addr, pte, ra);
117
}
67
+ cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);
118
break;
68
119
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
69
/* XXX we exploit the fact that Linux passes the exact virtual
120
70
address here - it's not obliged to! */
121
if (partmask == 0) {
122
tcg_debug_assert(nb_oargs == 1);
123
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
124
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
125
continue;
126
}
127
if (affected == 0) {
128
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
129
CASE_OP_32_64(mulsh):
130
if (arg_is_const(op->args[2])
131
&& arg_info(op->args[2])->val == 0) {
132
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
133
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
134
continue;
135
}
136
break;
137
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
138
CASE_OP_32_64_VEC(sub):
139
CASE_OP_32_64_VEC(xor):
140
if (args_are_copies(op->args[1], op->args[2])) {
141
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
142
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
143
continue;
144
}
145
break;
146
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
147
if (arg_is_const(op->args[1])) {
148
tmp = arg_info(op->args[1])->val;
149
tmp = dup_const(TCGOP_VECE(op), tmp);
150
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
151
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
152
break;
153
}
154
goto do_default;
155
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
156
case INDEX_op_dup2_vec:
157
assert(TCG_TARGET_REG_BITS == 32);
158
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
159
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0],
160
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0],
161
deposit64(arg_info(op->args[1])->val, 32, 32,
162
arg_info(op->args[2])->val));
163
break;
164
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
165
case INDEX_op_extrh_i64_i32:
166
if (arg_is_const(op->args[1])) {
167
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
168
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
169
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
170
break;
171
}
172
goto do_default;
173
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
174
if (arg_is_const(op->args[1])) {
175
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
176
op->args[2]);
177
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
178
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
179
break;
180
}
181
goto do_default;
182
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
183
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
184
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
185
arg_info(op->args[2])->val);
186
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
187
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
188
break;
189
}
190
goto do_default;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
TCGArg v = arg_info(op->args[1])->val;
193
if (v != 0) {
194
tmp = do_constant_folding(opc, v, 0);
195
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
196
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
} else {
198
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
199
}
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
tmp = deposit64(arg_info(op->args[1])->val,
202
op->args[3], op->args[4],
203
arg_info(op->args[2])->val);
204
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
205
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
206
break;
207
}
208
goto do_default;
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
if (arg_is_const(op->args[1])) {
211
tmp = extract64(arg_info(op->args[1])->val,
212
op->args[2], op->args[3]);
213
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
214
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
215
break;
216
}
217
goto do_default;
218
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
219
if (arg_is_const(op->args[1])) {
220
tmp = sextract64(arg_info(op->args[1])->val,
221
op->args[2], op->args[3]);
222
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
223
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
224
break;
225
}
226
goto do_default;
227
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
228
tmp = (int32_t)(((uint32_t)v1 >> shr) |
229
((uint32_t)v2 << (32 - shr)));
230
}
231
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
232
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
233
break;
234
}
235
goto do_default;
236
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
237
tmp = do_constant_folding_cond(opc, op->args[1],
238
op->args[2], op->args[3]);
239
if (tmp != 2) {
240
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
241
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
242
break;
243
}
244
goto do_default;
245
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
246
op->args[1], op->args[2]);
247
if (tmp != 2) {
248
if (tmp) {
249
- memset(&temps_used, 0, sizeof(temps_used));
250
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
251
op->opc = INDEX_op_br;
252
op->args[0] = op->args[3];
253
} else {
254
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
255
256
rl = op->args[0];
257
rh = op->args[1];
258
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a);
259
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32));
260
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
261
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
262
break;
263
}
264
goto do_default;
265
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
266
267
rl = op->args[0];
268
rh = op->args[1];
269
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r);
270
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32));
271
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
272
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
273
break;
274
}
275
goto do_default;
276
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
277
if (tmp != 2) {
278
if (tmp) {
279
do_brcond_true:
280
- memset(&temps_used, 0, sizeof(temps_used));
281
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
282
op->opc = INDEX_op_br;
283
op->args[0] = op->args[5];
284
} else {
285
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
286
/* Simplify LT/GE comparisons vs zero to a single compare
287
vs the high word of the input. */
288
do_brcond_high:
289
- memset(&temps_used, 0, sizeof(temps_used));
290
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
op->opc = INDEX_op_brcond_i32;
292
op->args[0] = op->args[1];
293
op->args[1] = op->args[3];
294
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
295
goto do_default;
296
}
297
do_brcond_low:
298
- memset(&temps_used, 0, sizeof(temps_used));
299
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
300
op->opc = INDEX_op_brcond_i32;
301
op->args[1] = op->args[2];
302
op->args[2] = op->args[4];
303
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
304
op->args[5]);
305
if (tmp != 2) {
306
do_setcond_const:
307
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
308
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
309
} else if ((op->args[5] == TCG_COND_LT
310
|| op->args[5] == TCG_COND_GE)
311
&& arg_is_const(op->args[3])
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
313
if (!(tcg_call_flags(op)
314
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
315
for (i = 0; i < nb_globals; i++) {
316
- if (test_bit(i, temps_used.l)) {
317
+ if (test_bit(i, ctx.temps_used.l)) {
318
reset_ts(&s->temps[i]);
319
}
320
}
321
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
322
block, otherwise we only trash the output args. "z_mask" is
323
the non-zero bits mask for the first output arg. */
324
if (def->flags & TCG_OPF_BB_END) {
325
- memset(&temps_used, 0, sizeof(temps_used));
326
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
327
} else {
328
do_reset_output:
329
for (i = 0; i < nb_oargs; i++) {
71
--
2.20.1

--
2.25.1
diff view generated by jsdifflib
Old series:
Code movement in an upcoming patch will show that this file
was implicitly depending on trace/mem.h being included beforehand.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reported-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 plugins/api.c | 1 +
 1 file changed, 1 insertion(+)

New series:
Break the final cleanup clause out of the main switch
statement. When fully folding an opcode to mov/movi,
use "continue" to process the next opcode, else break
to fall into the final cleanup.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 190 ++++++++++++++++++++++++-------------------------
 1 file changed, 94 insertions(+), 96 deletions(-)
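(Shape of the control flow after the restructuring described in the
new-series message above; a sketch only, with hypothetical
try_fold_to_movi() and reset_outputs() placeholders. A fully folded
opcode uses "continue" to move to the next op, while "break" falls into
one shared cleanup tail instead of a do_default goto.)

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        switch (op->opc) {
        CASE_OP_32_64(not):
            if (try_fold_to_movi(op)) {   /* hypothetical predicate */
                continue;                 /* op fully folded; next op */
            }
            break;                        /* fall into common cleanup */
        default:
            break;
        }
        /* Common cleanup: reset output temps, record z_mask, etc. */
        reset_outputs(op);                /* hypothetical helper */
    }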
diff --git a/plugins/api.c b/plugins/api.c
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
14
--- a/plugins/api.c
16
--- a/tcg/optimize.c
15
+++ b/plugins/api.c
17
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
17
#include "qemu/plugin-memory.h"
19
switch (opc) {
18
#include "hw/boards.h"
20
CASE_OP_32_64_VEC(mov):
19
#endif
21
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
20
+#include "trace/mem.h"
22
- break;
21
23
+ continue;
22
/* Uninstall and Reset handlers */
24
23
25
case INDEX_op_dup_vec:
26
if (arg_is_const(op->args[1])) {
27
tmp = arg_info(op->args[1])->val;
28
tmp = dup_const(TCGOP_VECE(op), tmp);
29
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
30
- break;
31
+ continue;
32
}
33
- goto do_default;
34
+ break;
35
36
case INDEX_op_dup2_vec:
37
assert(TCG_TARGET_REG_BITS == 32);
38
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
tcg_opt_gen_movi(s, &ctx, op, op->args[0],
40
deposit64(arg_info(op->args[1])->val, 32, 32,
41
arg_info(op->args[2])->val));
42
- break;
43
+ continue;
44
} else if (args_are_copies(op->args[1], op->args[2])) {
45
op->opc = INDEX_op_dup_vec;
46
TCGOP_VECE(op) = MO_32;
47
nb_iargs = 1;
48
}
49
- goto do_default;
50
+ break;
51
52
CASE_OP_32_64(not):
53
CASE_OP_32_64(neg):
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
if (arg_is_const(op->args[1])) {
56
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
57
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
58
- break;
59
+ continue;
60
}
61
- goto do_default;
62
+ break;
63
64
CASE_OP_32_64(bswap16):
65
CASE_OP_32_64(bswap32):
66
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
67
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
68
op->args[2]);
69
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
70
- break;
71
+ continue;
72
}
73
- goto do_default;
74
+ break;
75
76
CASE_OP_32_64(add):
77
CASE_OP_32_64(sub):
78
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
79
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
80
arg_info(op->args[2])->val);
81
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
82
- break;
83
+ continue;
84
}
85
- goto do_default;
86
+ break;
87
88
CASE_OP_32_64(clz):
89
CASE_OP_32_64(ctz):
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
} else {
92
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
93
}
94
- break;
95
+ continue;
96
}
97
- goto do_default;
98
+ break;
99
100
CASE_OP_32_64(deposit):
101
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
102
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
103
op->args[3], op->args[4],
104
arg_info(op->args[2])->val);
105
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
106
- break;
107
+ continue;
108
}
109
- goto do_default;
110
+ break;
111
112
CASE_OP_32_64(extract):
113
if (arg_is_const(op->args[1])) {
114
tmp = extract64(arg_info(op->args[1])->val,
115
op->args[2], op->args[3]);
116
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
117
- break;
118
+ continue;
119
}
120
- goto do_default;
121
+ break;
122
123
CASE_OP_32_64(sextract):
124
if (arg_is_const(op->args[1])) {
125
tmp = sextract64(arg_info(op->args[1])->val,
126
op->args[2], op->args[3]);
127
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
128
- break;
129
+ continue;
130
}
131
- goto do_default;
132
+ break;
133
134
CASE_OP_32_64(extract2):
135
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
136
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
137
((uint32_t)v2 << (32 - shr)));
138
}
139
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
140
- break;
141
+ continue;
142
}
143
- goto do_default;
144
+ break;
145
146
CASE_OP_32_64(setcond):
147
tmp = do_constant_folding_cond(opc, op->args[1],
148
op->args[2], op->args[3]);
149
if (tmp != 2) {
150
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
151
- break;
152
+ continue;
153
}
154
- goto do_default;
155
+ break;
156
157
CASE_OP_32_64(brcond):
158
tmp = do_constant_folding_cond(opc, op->args[0],
159
op->args[1], op->args[2]);
160
- if (tmp != 2) {
161
- if (tmp) {
162
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
163
- op->opc = INDEX_op_br;
164
- op->args[0] = op->args[3];
165
- } else {
166
- tcg_op_remove(s, op);
167
- }
168
+ switch (tmp) {
169
+ case 0:
170
+ tcg_op_remove(s, op);
171
+ continue;
172
+ case 1:
173
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
174
+ op->opc = opc = INDEX_op_br;
175
+ op->args[0] = op->args[3];
176
break;
177
}
178
- goto do_default;
179
+ break;
180
181
CASE_OP_32_64(movcond):
182
tmp = do_constant_folding_cond(opc, op->args[1],
183
op->args[2], op->args[5]);
184
if (tmp != 2) {
185
tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
186
- break;
187
+ continue;
188
}
189
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
190
uint64_t tv = arg_info(op->args[3])->val;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
if (fv == 1 && tv == 0) {
193
cond = tcg_invert_cond(cond);
194
} else if (!(tv == 1 && fv == 0)) {
195
- goto do_default;
196
+ break;
197
}
198
op->args[3] = cond;
199
op->opc = opc = (opc == INDEX_op_movcond_i32
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
: INDEX_op_setcond_i64);
202
nb_iargs = 2;
203
}
204
- goto do_default;
205
+ break;
206
207
case INDEX_op_add2_i32:
208
case INDEX_op_sub2_i32:
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
rh = op->args[1];
211
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
212
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
213
- break;
214
+ continue;
215
}
216
- goto do_default;
217
+ break;
218
219
case INDEX_op_mulu2_i32:
220
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
221
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
222
rh = op->args[1];
223
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
224
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
225
- break;
226
+ continue;
227
}
228
- goto do_default;
229
+ break;
230
231
case INDEX_op_brcond2_i32:
232
tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
233
op->args[4]);
234
- if (tmp != 2) {
235
- if (tmp) {
236
- do_brcond_true:
237
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
238
- op->opc = INDEX_op_br;
239
- op->args[0] = op->args[5];
240
- } else {
241
+ if (tmp == 0) {
242
do_brcond_false:
243
- tcg_op_remove(s, op);
244
- }
245
- } else if ((op->args[4] == TCG_COND_LT
246
- || op->args[4] == TCG_COND_GE)
247
- && arg_is_const(op->args[2])
248
- && arg_info(op->args[2])->val == 0
249
- && arg_is_const(op->args[3])
250
- && arg_info(op->args[3])->val == 0) {
251
+ tcg_op_remove(s, op);
252
+ continue;
253
+ }
254
+ if (tmp == 1) {
255
+ do_brcond_true:
256
+ op->opc = opc = INDEX_op_br;
257
+ op->args[0] = op->args[5];
258
+ break;
259
+ }
260
+ if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
261
+ && arg_is_const(op->args[2])
262
+ && arg_info(op->args[2])->val == 0
263
+ && arg_is_const(op->args[3])
264
+ && arg_info(op->args[3])->val == 0) {
265
/* Simplify LT/GE comparisons vs zero to a single compare
266
vs the high word of the input. */
267
do_brcond_high:
268
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
269
- op->opc = INDEX_op_brcond_i32;
270
+ op->opc = opc = INDEX_op_brcond_i32;
271
op->args[0] = op->args[1];
272
op->args[1] = op->args[3];
273
op->args[2] = op->args[4];
274
op->args[3] = op->args[5];
275
- } else if (op->args[4] == TCG_COND_EQ) {
276
+ break;
277
+ }
278
+ if (op->args[4] == TCG_COND_EQ) {
279
/* Simplify EQ comparisons where one of the pairs
280
can be simplified. */
281
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
282
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
283
if (tmp == 0) {
284
goto do_brcond_false;
285
} else if (tmp != 1) {
286
- goto do_default;
287
+ break;
288
}
289
do_brcond_low:
290
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
292
op->args[1] = op->args[2];
293
op->args[2] = op->args[4];
294
op->args[3] = op->args[5];
295
- } else if (op->args[4] == TCG_COND_NE) {
296
+ break;
297
+ }
298
+ if (op->args[4] == TCG_COND_NE) {
299
/* Simplify NE comparisons where one of the pairs
300
can be simplified. */
301
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
302
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
303
} else if (tmp == 1) {
304
goto do_brcond_true;
305
}
306
- goto do_default;
307
- } else {
308
- goto do_default;
309
}
310
break;
311
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
313
if (tmp != 2) {
314
do_setcond_const:
315
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
316
- } else if ((op->args[5] == TCG_COND_LT
317
- || op->args[5] == TCG_COND_GE)
318
- && arg_is_const(op->args[3])
319
- && arg_info(op->args[3])->val == 0
320
- && arg_is_const(op->args[4])
321
- && arg_info(op->args[4])->val == 0) {
322
+ continue;
323
+ }
324
+ if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
325
+ && arg_is_const(op->args[3])
326
+ && arg_info(op->args[3])->val == 0
327
+ && arg_is_const(op->args[4])
328
+ && arg_info(op->args[4])->val == 0) {
329
/* Simplify LT/GE comparisons vs zero to a single compare
330
vs the high word of the input. */
331
do_setcond_high:
332
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
333
op->args[1] = op->args[2];
334
op->args[2] = op->args[4];
335
op->args[3] = op->args[5];
336
- } else if (op->args[5] == TCG_COND_EQ) {
337
+ break;
338
+ }
339
+ if (op->args[5] == TCG_COND_EQ) {
340
/* Simplify EQ comparisons where one of the pairs
341
can be simplified. */
342
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
343
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
344
if (tmp == 0) {
345
goto do_setcond_high;
346
} else if (tmp != 1) {
347
- goto do_default;
348
+ break;
349
}
350
do_setcond_low:
351
reset_temp(op->args[0]);
352
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
353
op->opc = INDEX_op_setcond_i32;
354
op->args[2] = op->args[3];
355
op->args[3] = op->args[5];
356
- } else if (op->args[5] == TCG_COND_NE) {
357
+ break;
358
+ }
359
+ if (op->args[5] == TCG_COND_NE) {
360
/* Simplify NE comparisons where one of the pairs
361
can be simplified. */
362
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
363
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
364
} else if (tmp == 1) {
365
goto do_setcond_const;
366
}
367
- goto do_default;
368
- } else {
369
- goto do_default;
370
}
371
break;
372
373
- case INDEX_op_call:
374
- if (!(tcg_call_flags(op)
375
+ default:
376
+ break;
377
+ }
378
+
379
+ /* Some of the folding above can change opc. */
380
+ opc = op->opc;
381
+ def = &tcg_op_defs[opc];
382
+ if (def->flags & TCG_OPF_BB_END) {
383
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
384
+ } else {
385
+ if (opc == INDEX_op_call &&
386
+ !(tcg_call_flags(op)
387
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
388
for (i = 0; i < nb_globals; i++) {
389
if (test_bit(i, ctx.temps_used.l)) {
390
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
391
}
392
}
393
}
394
- goto do_reset_output;
395
396
- default:
397
- do_default:
398
- /* Default case: we know nothing about operation (or were unable
399
- to compute the operation result) so no propagation is done.
400
- We trash everything if the operation is the end of a basic
401
- block, otherwise we only trash the output args. "z_mask" is
402
- the non-zero bits mask for the first output arg. */
403
- if (def->flags & TCG_OPF_BB_END) {
404
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
405
- } else {
406
- do_reset_output:
407
- for (i = 0; i < nb_oargs; i++) {
408
- reset_temp(op->args[i]);
409
- /* Save the corresponding known-zero bits mask for the
410
- first output argument (only one supported so far). */
411
- if (i == 0) {
412
- arg_info(op->args[i])->z_mask = z_mask;
413
- }
414
+ for (i = 0; i < nb_oargs; i++) {
415
+ reset_temp(op->args[i]);
416
+ /* Save the corresponding known-zero bits mask for the
417
+ first output argument (only one supported so far). */
418
+ if (i == 0) {
419
+ arg_info(op->args[i])->z_mask = z_mask;
420
}
421
}
422
- break;
423
}
424
425
/* Eliminate duplicate and redundant fence instructions. */
24
--
2.20.1

--
2.25.1
diff view generated by jsdifflib
New patch

Adjust the interface to take the OptContext parameter instead
of TCGContext or both.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 67 +++++++++++++++++++++++++-------------------------
 1 file changed, 34 insertions(+), 33 deletions(-)
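(Before/after of the interface change described above, taken from the
diff that follows:)

    /* before */
    static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op,
                                TCGArg dst, TCGArg src);
    /* after */
    static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op,
                                TCGArg dst, TCGArg src);

with tcg_op_remove() reaching the TCGContext through the new ctx->tcg
member, so callers no longer pass both pointers.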
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
16
} TempOptInfo;
17
18
typedef struct OptContext {
19
+ TCGContext *tcg;
20
TCGTempSet temps_used;
21
} OptContext;
22
23
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
24
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
25
}
26
27
-static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
28
+static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
{
30
TCGTemp *dst_ts = arg_temp(dst);
31
TCGTemp *src_ts = arg_temp(src);
32
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
33
TCGOpcode new_op;
34
35
if (ts_are_copies(dst_ts, src_ts)) {
36
- tcg_op_remove(s, op);
37
+ tcg_op_remove(ctx->tcg, op);
38
return;
39
}
40
41
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
42
}
43
}
44
45
-static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
46
- TCGOp *op, TCGArg dst, uint64_t val)
47
+static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
48
+ TCGArg dst, uint64_t val)
49
{
50
const TCGOpDef *def = &tcg_op_defs[op->opc];
51
TCGType type;
52
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
53
/* Convert movi to mov with constant temp. */
54
tv = tcg_constant_internal(type, val);
55
init_ts_info(ctx, tv);
56
- tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
57
+ tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
58
}
59
60
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
62
{
63
int nb_temps, nb_globals, i;
64
TCGOp *op, *op_next, *prev_mb = NULL;
65
- OptContext ctx = {};
66
+ OptContext ctx = { .tcg = s };
67
68
/* Array VALS has an element for each temp.
69
If this temp holds a constant then its value is kept in VALS' element.
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
CASE_OP_32_64(rotr):
72
if (arg_is_const(op->args[1])
73
&& arg_info(op->args[1])->val == 0) {
74
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
75
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
76
continue;
77
}
78
break;
79
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
80
if (!arg_is_const(op->args[1])
81
&& arg_is_const(op->args[2])
82
&& arg_info(op->args[2])->val == 0) {
83
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
84
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
85
continue;
86
}
87
break;
88
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
89
if (!arg_is_const(op->args[1])
90
&& arg_is_const(op->args[2])
91
&& arg_info(op->args[2])->val == -1) {
92
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
93
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
94
continue;
95
}
96
break;
97
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
98
99
if (partmask == 0) {
100
tcg_debug_assert(nb_oargs == 1);
101
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
102
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
103
continue;
104
}
105
if (affected == 0) {
106
tcg_debug_assert(nb_oargs == 1);
107
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
108
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
109
continue;
110
}
111
112
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
113
CASE_OP_32_64(mulsh):
114
if (arg_is_const(op->args[2])
115
&& arg_info(op->args[2])->val == 0) {
116
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
117
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
118
continue;
119
}
120
break;
121
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
122
CASE_OP_32_64_VEC(or):
123
CASE_OP_32_64_VEC(and):
124
if (args_are_copies(op->args[1], op->args[2])) {
125
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
126
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
127
continue;
128
}
129
break;
130
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
131
CASE_OP_32_64_VEC(sub):
132
CASE_OP_32_64_VEC(xor):
133
if (args_are_copies(op->args[1], op->args[2])) {
134
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
135
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
136
continue;
137
}
138
break;
139
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
140
allocator where needed and possible. Also detect copies. */
141
switch (opc) {
142
CASE_OP_32_64_VEC(mov):
143
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
144
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
145
continue;
146
147
case INDEX_op_dup_vec:
148
if (arg_is_const(op->args[1])) {
149
tmp = arg_info(op->args[1])->val;
150
tmp = dup_const(TCGOP_VECE(op), tmp);
151
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
152
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
153
continue;
154
}
155
break;
156
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
157
case INDEX_op_dup2_vec:
158
assert(TCG_TARGET_REG_BITS == 32);
159
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
160
- tcg_opt_gen_movi(s, &ctx, op, op->args[0],
161
+ tcg_opt_gen_movi(&ctx, op, op->args[0],
162
deposit64(arg_info(op->args[1])->val, 32, 32,
163
arg_info(op->args[2])->val));
164
continue;
165
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
166
case INDEX_op_extrh_i64_i32:
167
if (arg_is_const(op->args[1])) {
168
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
169
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
170
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
171
continue;
172
}
173
break;
174
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
175
if (arg_is_const(op->args[1])) {
176
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
177
op->args[2]);
178
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
179
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
180
continue;
181
}
182
break;
183
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
184
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
185
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
186
arg_info(op->args[2])->val);
187
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
188
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
189
continue;
190
}
191
break;
192
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
193
TCGArg v = arg_info(op->args[1])->val;
194
if (v != 0) {
195
tmp = do_constant_folding(opc, v, 0);
196
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
198
} else {
199
- tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
200
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
201
}
202
continue;
203
}
204
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
205
tmp = deposit64(arg_info(op->args[1])->val,
206
op->args[3], op->args[4],
207
arg_info(op->args[2])->val);
208
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
209
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
210
continue;
211
}
212
break;
213
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
214
if (arg_is_const(op->args[1])) {
215
tmp = extract64(arg_info(op->args[1])->val,
216
op->args[2], op->args[3]);
217
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
218
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
219
continue;
220
}
221
break;
222
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
223
if (arg_is_const(op->args[1])) {
224
tmp = sextract64(arg_info(op->args[1])->val,
225
op->args[2], op->args[3]);
226
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
227
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
228
continue;
229
}
230
break;
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
tmp = (int32_t)(((uint32_t)v1 >> shr) |
233
((uint32_t)v2 << (32 - shr)));
234
}
235
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
236
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
237
continue;
238
}
239
break;
240
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
241
tmp = do_constant_folding_cond(opc, op->args[1],
242
op->args[2], op->args[3]);
243
if (tmp != 2) {
244
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
245
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
246
continue;
247
}
248
break;
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
250
tmp = do_constant_folding_cond(opc, op->args[1],
251
op->args[2], op->args[5]);
252
if (tmp != 2) {
253
- tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
254
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
255
continue;
256
}
257
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
258
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
259
260
rl = op->args[0];
261
rh = op->args[1];
262
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
263
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
264
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
265
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
266
continue;
267
}
268
break;
269
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
270
271
rl = op->args[0];
272
rh = op->args[1];
273
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
274
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
275
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
276
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
277
continue;
278
}
279
break;
280
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
281
op->args[5]);
282
if (tmp != 2) {
283
do_setcond_const:
284
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
285
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
286
continue;
287
}
288
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
289
--
290
2.25.1
291
292
diff view generated by jsdifflib
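The refactoring pattern in the patch above, folding parameters that always travel together into a single context struct initialized once with a designated initializer, is easy to show in isolation. The following is a standalone sketch with hypothetical stand-in types (Backend, Ctx); it is not QEMU code, only the shape of the change:

    #include <stdio.h>

    typedef struct Backend { int ops_removed; } Backend;

    typedef struct Ctx {
        Backend *be;        /* replaces passing Backend* to every helper */
        int temps_used;
    } Ctx;

    /* Before: helper(Backend *be, Ctx *ctx, int n).  After: one pointer. */
    static void helper(Ctx *ctx, int n)
    {
        ctx->be->ops_removed += n;   /* reach the backend via the context */
        ctx->temps_used++;
    }

    int main(void)
    {
        Backend be = { 0 };
        Ctx ctx = { .be = &be };     /* cf. OptContext ctx = { .tcg = s }; */
        helper(&ctx, 1);
        printf("removed=%d used=%d\n", be.ops_removed, ctx.temps_used);
        return 0;
    }

Every later patch in the series that breaks a helper out of tcg_optimize then needs only the one OptContext pointer in its signature.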
This will expose the variable to subroutines that
will be broken out of tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {

 typedef struct OptContext {
     TCGContext *tcg;
+    TCGOp *prev_mb;
     TCGTempSet temps_used;
 } OptContext;

@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
 void tcg_optimize(TCGContext *s)
 {
     int nb_temps, nb_globals, i;
-    TCGOp *op, *op_next, *prev_mb = NULL;
+    TCGOp *op, *op_next;
     OptContext ctx = { .tcg = s };

     /* Array VALS has an element for each temp.
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }

         /* Eliminate duplicate and redundant fence instructions. */
-        if (prev_mb) {
+        if (ctx.prev_mb) {
             switch (opc) {
             case INDEX_op_mb:
                 /* Merge two barriers of the same type into one,
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                  * barrier.  This is stricter than specified but for
                  * the purposes of TCG is better than not optimizing.
                  */
-                prev_mb->args[0] |= op->args[0];
+                ctx.prev_mb->args[0] |= op->args[0];
                 tcg_op_remove(s, op);
                 break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             case INDEX_op_qemu_st_i64:
             case INDEX_op_call:
                 /* Opcodes that touch guest memory stop the optimization.  */
-                prev_mb = NULL;
+                ctx.prev_mb = NULL;
                 break;
             }
         } else if (opc == INDEX_op_mb) {
-            prev_mb = op;
+            ctx.prev_mb = op;
         }
     }
 }
--
2.25.1

There was no real reason for calls to have separate code here.
Unify init for calls vs non-calls using the call path, which
handles TCG_CALL_DUMMY_ARG.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
 }

-static void init_arg_info(OptContext *ctx, TCGArg arg)
-{
-    init_ts_info(ctx, arg_temp(arg));
-}
-
 static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
 {
     TCGTemp *i, *g, *l;
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
     return false;
 }

+static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
+{
+    for (int i = 0; i < nb_args; i++) {
+        TCGTemp *ts = arg_temp(op->args[i]);
+        if (ts) {
+            init_ts_info(ctx, ts);
+        }
+    }
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         if (opc == INDEX_op_call) {
             nb_oargs = TCGOP_CALLO(op);
             nb_iargs = TCGOP_CALLI(op);
-            for (i = 0; i < nb_oargs + nb_iargs; i++) {
-                TCGTemp *ts = arg_temp(op->args[i]);
-                if (ts) {
-                    init_ts_info(&ctx, ts);
-                }
-            }
         } else {
             nb_oargs = def->nb_oargs;
             nb_iargs = def->nb_iargs;
-            for (i = 0; i < nb_oargs + nb_iargs; i++) {
-                init_arg_info(&ctx, op->args[i]);
-            }
         }
+        init_arguments(&ctx, op, nb_oargs + nb_iargs);

         /* Do copy propagation */
         for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
--
2.25.1

Continue splitting tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
     }
 }

+static void copy_propagate(OptContext *ctx, TCGOp *op,
+                           int nb_oargs, int nb_iargs)
+{
+    TCGContext *s = ctx->tcg;
+
+    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+        TCGTemp *ts = arg_temp(op->args[i]);
+        if (ts && ts_is_copy(ts)) {
+            op->args[i] = temp_arg(find_better_copy(s, ts));
+        }
+    }
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             nb_iargs = def->nb_iargs;
         }
         init_arguments(&ctx, op, nb_oargs + nb_iargs);
-
-        /* Do copy propagation */
-        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
-            TCGTemp *ts = arg_temp(op->args[i]);
-            if (ts && ts_is_copy(ts)) {
-                op->args[i] = temp_arg(find_better_copy(s, ts));
-            }
-        }
+        copy_propagate(&ctx, op, nb_oargs, nb_iargs);

         /* For commutative operations make constant second argument */
         switch (opc) {
--
2.25.1

Calls are special in that they have a variable number
of arguments, and need to be able to clobber globals.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 41 insertions(+), 22 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }

+static bool fold_call(OptContext *ctx, TCGOp *op)
+{
+    TCGContext *s = ctx->tcg;
+    int nb_oargs = TCGOP_CALLO(op);
+    int nb_iargs = TCGOP_CALLI(op);
+    int flags, i;
+
+    init_arguments(ctx, op, nb_oargs + nb_iargs);
+    copy_propagate(ctx, op, nb_oargs, nb_iargs);
+
+    /* If the function reads or writes globals, reset temp data. */
+    flags = tcg_call_flags(op);
+    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
+        int nb_globals = s->nb_globals;
+
+        for (i = 0; i < nb_globals; i++) {
+            if (test_bit(i, ctx->temps_used.l)) {
+                reset_ts(&ctx->tcg->temps[i]);
+            }
+        }
+    }
+
+    /* Reset temp data for outputs. */
+    for (i = 0; i < nb_oargs; i++) {
+        reset_temp(op->args[i]);
+    }
+
+    /* Stop optimizing MB across calls. */
+    ctx->prev_mb = NULL;
+    return true;
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
-    int nb_temps, nb_globals, i;
+    int nb_temps, i;
     TCGOp *op, *op_next;
     OptContext ctx = { .tcg = s };

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        available through the doubly linked circular list. */

     nb_temps = s->nb_temps;
-    nb_globals = s->nb_globals;
-
     for (i = 0; i < nb_temps; ++i) {
         s->temps[i].state_ptr = NULL;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         uint64_t z_mask, partmask, affected, tmp;
         int nb_oargs, nb_iargs;
         TCGOpcode opc = op->opc;
-        const TCGOpDef *def = &tcg_op_defs[opc];
+        const TCGOpDef *def;

-        /* Count the arguments, and initialize the temps that are
-           going to be used */
+        /* Calls are special. */
         if (opc == INDEX_op_call) {
-            nb_oargs = TCGOP_CALLO(op);
-            nb_iargs = TCGOP_CALLI(op);
-        } else {
-            nb_oargs = def->nb_oargs;
-            nb_iargs = def->nb_iargs;
+            fold_call(&ctx, op);
+            continue;
         }
+
+        def = &tcg_op_defs[opc];
+        nb_oargs = def->nb_oargs;
+        nb_iargs = def->nb_iargs;
         init_arguments(&ctx, op, nb_oargs + nb_iargs);
         copy_propagate(&ctx, op, nb_oargs, nb_iargs);

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         if (def->flags & TCG_OPF_BB_END) {
             memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
         } else {
-            if (opc == INDEX_op_call &&
-                !(tcg_call_flags(op)
-                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
-                for (i = 0; i < nb_globals; i++) {
-                    if (test_bit(i, ctx.temps_used.l)) {
-                        reset_ts(&s->temps[i]);
-                    }
-                }
-            }
-
             for (i = 0; i < nb_oargs; i++) {
                 reset_temp(op->args[i]);
                 /* Save the corresponding known-zero bits mask for the
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_qemu_st_i32:
         case INDEX_op_qemu_st8_i32:
         case INDEX_op_qemu_st_i64:
-        case INDEX_op_call:
             /* Opcodes that touch guest memory stop the optimization.  */
             ctx.prev_mb = NULL;
             break;
--
2.25.1

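Why a call must reset cached global-temp data, as fold_call does above, can be seen with a toy constant-propagation cache. This sketch is illustrative only (hypothetical names, no TCG types): a call that may write globals invalidates any cached knowledge about them, while a call flagged as not writing globals may keep it:

    #include <stdbool.h>
    #include <stdio.h>

    enum { NO_WRITE_GLOBALS = 1 };   /* cf. TCG_CALL_NO_WRITE_GLOBALS */

    static bool g_known;             /* do we know the global's value? */
    static int  g_value;

    static void fold_call(int flags)
    {
        if (!(flags & NO_WRITE_GLOBALS)) {
            g_known = false;         /* callee may have changed it */
        }
    }

    int main(void)
    {
        g_known = true;
        g_value = 42;
        fold_call(NO_WRITE_GLOBALS); /* pure call: knowledge survives */
        printf("after pure call: known=%d value=%d\n", g_known, g_value);
        fold_call(0);                /* may write globals: must forget */
        printf("after clobbering call: known=%d\n", g_known);
        return 0;
    }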
Rather than try to keep these up-to-date across folding,
re-read nb_oargs at the end, after re-reading the opcode.

A couple of asserts need dropping, but that will take care
of itself as we split the function further.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
         uint64_t z_mask, partmask, affected, tmp;
-        int nb_oargs, nb_iargs;
         TCGOpcode opc = op->opc;
         const TCGOpDef *def;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }

         def = &tcg_op_defs[opc];
-        nb_oargs = def->nb_oargs;
-        nb_iargs = def->nb_iargs;
-        init_arguments(&ctx, op, nb_oargs + nb_iargs);
-        copy_propagate(&ctx, op, nb_oargs, nb_iargs);
+        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
+        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

         /* For commutative operations make constant second argument */
         switch (opc) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

         CASE_OP_32_64(qemu_ld):
             {
-                MemOpIdx oi = op->args[nb_oargs + nb_iargs];
+                MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
                 MemOp mop = get_memop(oi);
                 if (!(mop & MO_SIGN)) {
                     z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }

         if (partmask == 0) {
-            tcg_debug_assert(nb_oargs == 1);
             tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
             continue;
         }
         if (affected == 0) {
-            tcg_debug_assert(nb_oargs == 1);
             tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
             continue;
         }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         } else if (args_are_copies(op->args[1], op->args[2])) {
             op->opc = INDEX_op_dup_vec;
             TCGOP_VECE(op) = MO_32;
-            nb_iargs = 1;
         }
         break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             op->opc = opc = (opc == INDEX_op_movcond_i32
                              ? INDEX_op_setcond_i32
                              : INDEX_op_setcond_i64);
-            nb_iargs = 2;
         }
         break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         if (def->flags & TCG_OPF_BB_END) {
             memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
         } else {
+            int nb_oargs = def->nb_oargs;
             for (i = 0; i < nb_oargs; i++) {
                 reset_temp(op->args[i]);
                 /* Save the corresponding known-zero bits mask for the
--
2.25.1

Return -1 instead of 2 for failure, so that we can
use comparisons against 0 for all cases.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 145 +++++++++++++++++++++++++------------------------
 1 file changed, 74 insertions(+), 71 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
     }
 }

-/* Return 2 if the condition can't be simplified, and the result
-   of the condition (0 or 1) if it can */
-static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
-                                       TCGArg y, TCGCond c)
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
+                                    TCGArg y, TCGCond c)
 {
     uint64_t xv = arg_info(x)->val;
     uint64_t yv = arg_info(y)->val;
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
         case TCG_COND_GEU:
             return 1;
         default:
-            return 2;
+            return -1;
         }
     }
-    return 2;
+    return -1;
 }

-/* Return 2 if the condition can't be simplified, and the result
-   of the condition (0 or 1) if it can */
-static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
 {
     TCGArg al = p1[0], ah = p1[1];
     TCGArg bl = p2[0], bh = p2[1];
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
     if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
         return do_constant_folding_cond_eq(c);
     }
-    return 2;
+    return -1;
 }

 static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;

         CASE_OP_32_64(setcond):
-            tmp = do_constant_folding_cond(opc, op->args[1],
-                                           op->args[2], op->args[3]);
-            if (tmp != 2) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
+            i = do_constant_folding_cond(opc, op->args[1],
+                                         op->args[2], op->args[3]);
+            if (i >= 0) {
+                tcg_opt_gen_movi(&ctx, op, op->args[0], i);
                 continue;
             }
             break;

         CASE_OP_32_64(brcond):
-            tmp = do_constant_folding_cond(opc, op->args[0],
-                                           op->args[1], op->args[2]);
-            switch (tmp) {
-            case 0:
+            i = do_constant_folding_cond(opc, op->args[0],
+                                         op->args[1], op->args[2]);
+            if (i == 0) {
                 tcg_op_remove(s, op);
                 continue;
-            case 1:
+            } else if (i > 0) {
                 memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
                 op->opc = opc = INDEX_op_br;
                 op->args[0] = op->args[3];
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;

         CASE_OP_32_64(movcond):
-            tmp = do_constant_folding_cond(opc, op->args[1],
-                                           op->args[2], op->args[5]);
-            if (tmp != 2) {
-                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
+            i = do_constant_folding_cond(opc, op->args[1],
+                                         op->args[2], op->args[5]);
+            if (i >= 0) {
+                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
                 continue;
             }
             if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;

         case INDEX_op_brcond2_i32:
-            tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
-                                            op->args[4]);
-            if (tmp == 0) {
+            i = do_constant_folding_cond2(&op->args[0], &op->args[2],
+                                          op->args[4]);
+            if (i == 0) {
             do_brcond_false:
                 tcg_op_remove(s, op);
                 continue;
             }
-            if (tmp == 1) {
+            if (i > 0) {
             do_brcond_true:
                 op->opc = opc = INDEX_op_br;
                 op->args[0] = op->args[5];
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (op->args[4] == TCG_COND_EQ) {
                 /* Simplify EQ comparisons where one of the pairs
                    can be simplified. */
-                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                               op->args[0], op->args[2],
-                                               TCG_COND_EQ);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_brcond_i32,
+                                             op->args[0], op->args[2],
+                                             TCG_COND_EQ);
+                if (i == 0) {
                     goto do_brcond_false;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_brcond_high;
                 }
-                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                               op->args[1], op->args[3],
-                                               TCG_COND_EQ);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_brcond_i32,
+                                             op->args[1], op->args[3],
+                                             TCG_COND_EQ);
+                if (i == 0) {
                     goto do_brcond_false;
-                } else if (tmp != 1) {
+                } else if (i < 0) {
                     break;
                 }
             do_brcond_low:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (op->args[4] == TCG_COND_NE) {
                 /* Simplify NE comparisons where one of the pairs
                    can be simplified. */
-                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                               op->args[0], op->args[2],
-                                               TCG_COND_NE);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_brcond_i32,
+                                             op->args[0], op->args[2],
+                                             TCG_COND_NE);
+                if (i == 0) {
                     goto do_brcond_high;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_brcond_true;
                 }
-                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                               op->args[1], op->args[3],
-                                               TCG_COND_NE);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_brcond_i32,
+                                             op->args[1], op->args[3],
+                                             TCG_COND_NE);
+                if (i == 0) {
                     goto do_brcond_low;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_brcond_true;
                 }
             }
             break;

         case INDEX_op_setcond2_i32:
-            tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
-                                            op->args[5]);
-            if (tmp != 2) {
+            i = do_constant_folding_cond2(&op->args[1], &op->args[3],
+                                          op->args[5]);
+            if (i >= 0) {
             do_setcond_const:
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], i);
                 continue;
             }
             if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (op->args[5] == TCG_COND_EQ) {
                 /* Simplify EQ comparisons where one of the pairs
                    can be simplified. */
-                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
-                                               op->args[1], op->args[3],
-                                               TCG_COND_EQ);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_setcond_i32,
+                                             op->args[1], op->args[3],
+                                             TCG_COND_EQ);
+                if (i == 0) {
                     goto do_setcond_const;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_setcond_high;
                 }
-                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
-                                               op->args[2], op->args[4],
-                                               TCG_COND_EQ);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_setcond_i32,
+                                             op->args[2], op->args[4],
+                                             TCG_COND_EQ);
+                if (i == 0) {
                     goto do_setcond_high;
-                } else if (tmp != 1) {
+                } else if (i < 0) {
                     break;
                 }
             do_setcond_low:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (op->args[5] == TCG_COND_NE) {
                 /* Simplify NE comparisons where one of the pairs
                    can be simplified. */
-                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
-                                               op->args[1], op->args[3],
-                                               TCG_COND_NE);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_setcond_i32,
+                                             op->args[1], op->args[3],
+                                             TCG_COND_NE);
+                if (i == 0) {
                     goto do_setcond_high;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_setcond_const;
                 }
-                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
-                                               op->args[2], op->args[4],
-                                               TCG_COND_NE);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_setcond_i32,
+                                             op->args[2], op->args[4],
+                                             TCG_COND_NE);
+                if (i == 0) {
                     goto do_setcond_low;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_setcond_const;
                 }
             }
--
2.25.1

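The payoff of returning -1 rather than 2 is that all three outcomes order naturally around zero, so callers never need an exact equality test against a magic value. A self-contained illustration (fold_cond here is hypothetical, not the QEMU function):

    #include <stdio.h>

    /* 0 or 1 when the comparison folds to a constant, -1 when unknown. */
    static int fold_cond(int known, int value)
    {
        return known ? (value != 0) : -1;
    }

    int main(void)
    {
        int i = fold_cond(1, 42);
        if (i == 0) {
            puts("condition is constant false");
        } else if (i > 0) {
            puts("condition is constant true");
        } else {                    /* i < 0: cannot simplify */
            puts("condition not foldable");
        }
        return 0;
    }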
This will allow callers to tail call to these functions
and return true indicating processing complete.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
     return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
 }

-static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
+static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 {
     TCGTemp *dst_ts = arg_temp(dst);
     TCGTemp *src_ts = arg_temp(src);
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)

     if (ts_are_copies(dst_ts, src_ts)) {
         tcg_op_remove(ctx->tcg, op);
-        return;
+        return true;
     }

     reset_ts(dst_ts);
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
         di->is_const = si->is_const;
         di->val = si->val;
     }
+    return true;
 }

-static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
+static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                              TCGArg dst, uint64_t val)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
     /* Convert movi to mov with constant temp. */
     tv = tcg_constant_internal(type, val);
     init_ts_info(ctx, tv);
-    tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
+    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }

 static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
--
2.25.1

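Returning a bool from the generation helpers lets a caller both perform the rewrite and report "handled" in a single statement, exactly as tcg_opt_gen_movi now tail calls tcg_opt_gen_mov above. A minimal sketch of that calling convention (names hypothetical, not QEMU code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool gen_mov(int *dst, int src)
    {
        *dst = src;
        return true;                /* always completes processing */
    }

    /* The movi variant can now tail call the mov variant. */
    static bool gen_movi(int *dst, int val)
    {
        return gen_mov(dst, val);
    }

    int main(void)
    {
        int r;
        bool done = gen_movi(&r, 7);
        printf("r=%d done=%d\n", r, done);
        return 0;
    }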
1
Recent toolchains support static and pie at the same time.
1
Copy z_mask into OptContext, for writeback to the
2
2
first output within the new function.
3
As with normal dynamic builds, allow --static to default to PIE
4
if supported by the toolchain. Allow --enable/--disable-pie to
5
override the default.
6
3
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
v2: Fix --disable-pie --static
8
tcg/optimize.c | 49 +++++++++++++++++++++++++++++++++----------------
11
---
9
1 file changed, 33 insertions(+), 16 deletions(-)
12
configure | 19 ++++++++++++-------
13
1 file changed, 12 insertions(+), 7 deletions(-)
14
10
15
diff --git a/configure b/configure
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100755
12
index XXXXXXX..XXXXXXX 100644
17
--- a/configure
13
--- a/tcg/optimize.c
18
+++ b/configure
14
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@ for opt do
15
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
20
;;
16
TCGContext *tcg;
21
--static)
17
TCGOp *prev_mb;
22
static="yes"
18
TCGTempSet temps_used;
23
- LDFLAGS="-static $LDFLAGS"
19
+
24
QEMU_PKG_CONFIG_FLAGS="--static $QEMU_PKG_CONFIG_FLAGS"
20
+ /* In flight values from optimization. */
25
;;
21
+ uint64_t z_mask;
26
--mandir=*) mandir="$optarg"
22
} OptContext;
27
@@ -XXX,XX +XXX,XX @@ if test "$static" = "yes" ; then
23
28
if test "$modules" = "yes" ; then
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
29
error_exit "static and modules are mutually incompatible"
25
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
30
fi
26
}
31
- if test "$pie" = "yes" ; then
27
}
32
- error_exit "static and pie are mutually incompatible"
28
33
- else
29
+static void finish_folding(OptContext *ctx, TCGOp *op)
34
- pie="no"
30
+{
35
- fi
31
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
36
fi
32
+ int i, nb_oargs;
37
33
+
38
# Unconditional check for compiler __thread support
34
+ /*
39
@@ -XXX,XX +XXX,XX @@ if compile_prog "-Werror -fno-pie" "-no-pie"; then
35
+ * For an opcode that ends a BB, reset all temp data.
40
LDFLAGS_NOPIE="-no-pie"
36
+ * We do no cross-BB optimization.
41
fi
37
+ */
42
38
+ if (def->flags & TCG_OPF_BB_END) {
43
-if test "$pie" = "no"; then
39
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
44
+if test "$static" = "yes"; then
40
+ ctx->prev_mb = NULL;
45
+ if test "$pie" != "no" && compile_prog "-fPIE -DPIE" "-static-pie"; then
41
+ return;
46
+ QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
42
+ }
47
+ LDFLAGS="-static-pie $LDFLAGS"
43
+
48
+ pie="yes"
44
+ nb_oargs = def->nb_oargs;
49
+ elif test "$pie" = "yes"; then
45
+ for (i = 0; i < nb_oargs; i++) {
50
+ error_exit "-static-pie not available due to missing toolchain support"
46
+ reset_temp(op->args[i]);
51
+ else
47
+ /*
52
+ LDFLAGS="-static $LDFLAGS"
48
+ * Save the corresponding known-zero bits mask for the
53
+ pie="no"
49
+ * first output argument (only one supported so far).
54
+ fi
50
+ */
55
+elif test "$pie" = "no"; then
51
+ if (i == 0) {
56
QEMU_CFLAGS="$CFLAGS_NOPIE $QEMU_CFLAGS"
52
+ arg_info(op->args[i])->z_mask = ctx->z_mask;
57
LDFLAGS="$LDFLAGS_NOPIE $LDFLAGS"
53
+ }
58
elif compile_prog "-fPIE -DPIE" "-pie"; then
54
+ }
55
+}
56
+
57
static bool fold_call(OptContext *ctx, TCGOp *op)
58
{
59
TCGContext *s = ctx->tcg;
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
61
partmask &= 0xffffffffu;
62
affected &= 0xffffffffu;
63
}
64
+ ctx.z_mask = z_mask;
65
66
if (partmask == 0) {
67
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
69
break;
70
}
71
72
- /* Some of the folding above can change opc. */
73
- opc = op->opc;
74
- def = &tcg_op_defs[opc];
75
- if (def->flags & TCG_OPF_BB_END) {
76
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
77
- } else {
78
- int nb_oargs = def->nb_oargs;
79
- for (i = 0; i < nb_oargs; i++) {
80
- reset_temp(op->args[i]);
81
- /* Save the corresponding known-zero bits mask for the
82
- first output argument (only one supported so far). */
83
- if (i == 0) {
84
- arg_info(op->args[i])->z_mask = z_mask;
85
- }
86
- }
87
- }
88
+ finish_folding(&ctx, op);
89
90
/* Eliminate duplicate and redundant fence instructions. */
91
if (ctx.prev_mb) {
59
--
92
--
60
2.20.1
93
2.25.1
61
94
62
95
diff view generated by jsdifflib
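The z_mask being threaded through OptContext is a "known-zero bits" mask: a bit set in the mask means the corresponding result bit may be nonzero, and a clear bit means it is provably zero. A standalone example of how such a mask is derived and used (plain C, nothing TCG-specific):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t z_mask = ~UINT64_C(0);      /* nothing known yet */

        z_mask &= 0xff;                      /* after "and r, x, 0xff" */
        z_mask <<= 4;                        /* after "shl r, r, 4" */

        /* Any bit clear in z_mask is provably zero in the result. */
        printf("result fits in 12 bits: %d\n", (z_mask >> 12) == 0);
        return 0;
    }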
1
There is nothing about these options that is related to PIE.
2
Use them unconditionally.
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Fangrui Song <i@maskray.me>
2
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
v2: Do not split into two tests.
6
tcg/optimize.c | 9 ++++++---
10
---
11
configure | 9 ++++++---
12
1 file changed, 6 insertions(+), 3 deletions(-)
7
1 file changed, 6 insertions(+), 3 deletions(-)
13
8
14
diff --git a/configure b/configure
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100755
10
index XXXXXXX..XXXXXXX 100644
16
--- a/configure
11
--- a/tcg/optimize.c
17
+++ b/configure
12
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ if test "$pie" != "no" ; then
13
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
19
QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
14
uint64_t z_mask, partmask, affected, tmp;
20
LDFLAGS="-pie $LDFLAGS"
15
TCGOpcode opc = op->opc;
21
pie="yes"
16
const TCGOpDef *def;
22
- if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then
17
+ bool done = false;
23
- LDFLAGS="-Wl,-z,relro -Wl,-z,now $LDFLAGS"
18
24
- fi
19
/* Calls are special. */
25
else
20
if (opc == INDEX_op_call) {
26
if test "$pie" = "yes"; then
21
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
27
error_exit "PIE not available due to missing toolchain support"
22
allocator where needed and possible. Also detect copies. */
28
@@ -XXX,XX +XXX,XX @@ if test "$pie" != "no" ; then
23
switch (opc) {
29
fi
24
CASE_OP_32_64_VEC(mov):
30
fi
25
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
31
26
- continue;
32
+# Detect support for PT_GNU_RELRO + DT_BIND_NOW.
27
+ done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
33
+# The combination is known as "full relro", because .got.plt is read-only too.
28
+ break;
34
+if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then
29
35
+ LDFLAGS="-Wl,-z,relro -Wl,-z,now $LDFLAGS"
30
case INDEX_op_dup_vec:
36
+fi
31
if (arg_is_const(op->args[1])) {
37
+
32
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
38
##########################################
33
break;
39
# __sync_fetch_and_and requires at least -march=i486. Many toolchains
34
}
40
# use i686 as default anyway, but for those that don't, an explicit
35
36
- finish_folding(&ctx, op);
37
+ if (!done) {
38
+ finish_folding(&ctx, op);
39
+ }
40
41
/* Eliminate duplicate and redundant fence instructions. */
42
if (ctx.prev_mb) {
41
--
43
--
42
2.20.1
44
2.25.1
43
45
44
46
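The tcg/optimize.c hunks above combine into this loop shape (a sketch;
declarations and the other cases are elided):

    bool done = false;

    switch (opc) {
    CASE_OP_32_64_VEC(mov):
        done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
        break;
    default:
        break;
    }

    /* Generic bookkeeping only for ops that were not fully folded. */
    if (!done) {
        finish_folding(&ctx, op);
    }
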
1
This finishes the new interface begun with the previous patch.
1
This puts the separate mb optimization into the same framework
2
Document the interface and deprecate MMU_MODE<N>_SUFFIX.
2
as the others. While fold_qemu_{ld,st} are currently identical,
3
that won't last as more code gets moved.
3
4
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
include/exec/cpu_ldst.h | 80 +++++++++++++-
9
tcg/optimize.c | 89 +++++++++++++++++++++++++++++---------------------
10
docs/devel/loads-stores.rst | 211 ++++++++++++++++++++++++++----------
10
1 file changed, 51 insertions(+), 38 deletions(-)
11
2 files changed, 230 insertions(+), 61 deletions(-)
12
11
13
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/cpu_ldst.h
14
--- a/tcg/optimize.c
16
+++ b/include/exec/cpu_ldst.h
15
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
18
*
17
return true;
19
* The syntax for the accessors is:
18
}
20
*
19
21
- * load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
20
+static bool fold_mb(OptContext *ctx, TCGOp *op)
22
+ * load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
23
+ * cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)
24
+ * cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
25
*
26
- * store: cpu_st{sign}{size}_{mmusuffix}(env, ptr, val)
27
+ * store: cpu_st{size}_{mmusuffix}(env, ptr, val)
28
+ * cpu_st{size}_{mmusuffix}_ra(env, ptr, val, retaddr)
29
+ * cpu_st{size}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
30
*
31
* sign is:
32
* (empty): for 32 and 64 bit sizes
33
@@ -XXX,XX +XXX,XX @@
34
* l: 32 bits
35
* q: 64 bits
36
*
37
- * mmusuffix is one of the generic suffixes "data" or "code", or
38
- * (for softmmu configs) a target-specific MMU mode suffix as defined
39
- * in target cpu.h.
40
+ * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
41
+ * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
42
+ * the index to use; the "data" and "code" suffixes take the index from
43
+ * cpu_mmu_index().
44
*/
45
#ifndef CPU_LDST_H
46
#define CPU_LDST_H
47
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
48
#undef MEMSUFFIX
49
#undef CODE_ACCESS
50
51
+/*
52
+ * Provide the same *_mmuidx_ra interface as for softmmu.
53
+ * The mmu_idx argument is ignored.
54
+ */
55
+
56
+static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
57
+ int mmu_idx, uintptr_t ra)
58
+{
21
+{
59
+ return cpu_ldub_data_ra(env, addr, ra);
22
+ /* Eliminate duplicate and redundant fence instructions. */
23
+ if (ctx->prev_mb) {
24
+ /*
25
+ * Merge two barriers of the same type into one,
26
+ * or a weaker barrier into a stronger one,
27
+ * or two weaker barriers into a stronger one.
28
+ * mb X; mb Y => mb X|Y
29
+ * mb; strl => mb; st
30
+ * ldaq; mb => ld; mb
31
+ * ldaq; strl => ld; mb; st
32
+ * Other combinations are also merged into a strong
33
+ * barrier. This is stricter than specified but for
34
+ * the purposes of TCG is better than not optimizing.
35
+ */
36
+ ctx->prev_mb->args[0] |= op->args[0];
37
+ tcg_op_remove(ctx->tcg, op);
38
+ } else {
39
+ ctx->prev_mb = op;
40
+ }
41
+ return true;
60
+}
42
+}
61
+
43
+
62
+static inline uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
44
+static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
63
+ int mmu_idx, uintptr_t ra)
64
+{
45
+{
65
+ return cpu_lduw_data_ra(env, addr, ra);
46
+ /* Opcodes that touch guest memory stop the mb optimization. */
47
+ ctx->prev_mb = NULL;
48
+ return false;
66
+}
49
+}
67
+
50
+
68
+static inline uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
51
+static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
69
+ int mmu_idx, uintptr_t ra)
70
+{
52
+{
71
+ return cpu_ldl_data_ra(env, addr, ra);
53
+ /* Opcodes that touch guest memory stop the mb optimization. */
54
+ ctx->prev_mb = NULL;
55
+ return false;
72
+}
56
+}
73
+
57
+
74
+static inline uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
58
/* Propagate constants and copies, fold constant expressions. */
75
+ int mmu_idx, uintptr_t ra)
59
void tcg_optimize(TCGContext *s)
76
+{
60
{
77
+ return cpu_ldq_data_ra(env, addr, ra);
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
78
+}
62
}
63
break;
64
65
+ case INDEX_op_mb:
66
+ done = fold_mb(&ctx, op);
67
+ break;
68
+ case INDEX_op_qemu_ld_i32:
69
+ case INDEX_op_qemu_ld_i64:
70
+ done = fold_qemu_ld(&ctx, op);
71
+ break;
72
+ case INDEX_op_qemu_st_i32:
73
+ case INDEX_op_qemu_st8_i32:
74
+ case INDEX_op_qemu_st_i64:
75
+ done = fold_qemu_st(&ctx, op);
76
+ break;
79
+
77
+
80
+static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
78
default:
81
+ int mmu_idx, uintptr_t ra)
79
break;
82
+{
80
}
83
+ return cpu_ldsb_data_ra(env, addr, ra);
81
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
84
+}
82
if (!done) {
85
+
83
finish_folding(&ctx, op);
86
+static inline int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
84
}
87
+ int mmu_idx, uintptr_t ra)
88
+{
89
+ return cpu_ldsw_data_ra(env, addr, ra);
90
+}
91
+
92
+static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
93
+ uint32_t val, int mmu_idx, uintptr_t ra)
94
+{
95
+ cpu_stb_data_ra(env, addr, val, ra);
96
+}
97
+
98
+static inline void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
99
+ uint32_t val, int mmu_idx, uintptr_t ra)
100
+{
101
+ cpu_stw_data_ra(env, addr, val, ra);
102
+}
103
+
104
+static inline void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
105
+ uint32_t val, int mmu_idx, uintptr_t ra)
106
+{
107
+ cpu_stl_data_ra(env, addr, val, ra);
108
+}
109
+
110
+static inline void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
111
+ uint64_t val, int mmu_idx, uintptr_t ra)
112
+{
113
+ cpu_stq_data_ra(env, addr, val, ra);
114
+}
115
+
116
#else
117
118
/* Needed for TCG_OVERSIZED_GUEST */
119
diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
120
index XXXXXXX..XXXXXXX 100644
121
--- a/docs/devel/loads-stores.rst
122
+++ b/docs/devel/loads-stores.rst
123
@@ -XXX,XX +XXX,XX @@ Regexes for git grep
124
- ``\<ldn_\([hbl]e\)?_p\>``
125
- ``\<stn_\([hbl]e\)?_p\>``
126
127
-``cpu_{ld,st}_*``
128
-~~~~~~~~~~~~~~~~~
129
+``cpu_{ld,st}*_mmuidx_ra``
130
+~~~~~~~~~~~~~~~~~~~~~~~~~~
131
132
-These functions operate on a guest virtual address. Be aware
133
-that these functions may cause a guest CPU exception to be
134
-taken (e.g. for an alignment fault or MMU fault) which will
135
-result in guest CPU state being updated and control longjumping
136
-out of the function call. They should therefore only be used
137
-in code that is implementing emulation of the target CPU.
138
+These functions operate on a guest virtual address plus a context,
139
+known as a "mmu index" or ``mmuidx``, which controls how that virtual
140
+address is translated. The meaning of the indexes is target specific,
141
+but specifying a particular index might be necessary if, for instance,
142
+the helper requires an "always as non-privileged" access rather than
143
+the default access for the current state of the guest CPU.
144
145
-These functions may throw an exception (longjmp() back out
146
-to the top level TCG loop). This means they must only be used
147
-from helper functions where the translator has saved all
148
-necessary CPU state before generating the helper function call.
149
-It's usually better to use the ``_ra`` variants described below
150
-from helper functions, but these functions are the right choice
151
-for calls made from hooks like the CPU do_interrupt hook or
152
-when you know for certain that the translator had to save all
153
-the CPU state that ``cpu_restore_state()`` would restore anyway.
154
+These functions may cause a guest CPU exception to be taken
155
+(e.g. for an alignment fault or MMU fault) which will result in
156
+guest CPU state being updated and control longjmp'ing out of the
157
+function call. They should therefore only be used in code that is
158
+implementing emulation of the guest CPU.
159
+
160
+The ``retaddr`` parameter is used to control unwinding of the
161
+guest CPU state in case of a guest CPU exception. This is passed
162
+to ``cpu_restore_state()``. Therefore the value should either be 0,
163
+to indicate that the guest CPU state is already synchronized, or
164
+the result of ``GETPC()`` from the top level ``HELPER(foo)``
165
+function, which is a return address into the generated code.
166
167
Function names follow the pattern:
168
169
-load: ``cpu_ld{sign}{size}_{mmusuffix}(env, ptr)``
170
+load: ``cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmuidx, retaddr)``
171
172
-store: ``cpu_st{size}_{mmusuffix}(env, ptr, val)``
173
+store: ``cpu_st{size}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)``
174
175
``sign``
176
- (empty) : for 32 or 64 bit sizes
177
@@ -XXX,XX +XXX,XX @@ store: ``cpu_st{size}_{mmusuffix}(env, ptr, val)``
178
- ``l`` : 32 bits
179
- ``q`` : 64 bits
180
181
-``mmusuffix`` is one of the generic suffixes ``data`` or ``code``, or
182
-(for softmmu configs) a target-specific MMU mode suffix as defined
183
-in the target's ``cpu.h``.
184
+Regexes for git grep:
185
+ - ``\<cpu_ld[us]\?[bwlq]_mmuidx_ra\>``
186
+ - ``\<cpu_st[bwlq]_mmuidx_ra\>``
187
188
-Regexes for git grep
189
- - ``\<cpu_ld[us]\?[bwlq]_[a-zA-Z0-9]\+\>``
190
- - ``\<cpu_st[bwlq]_[a-zA-Z0-9]\+\>``
191
+``cpu_{ld,st}*_data_ra``
192
+~~~~~~~~~~~~~~~~~~~~~~~~
193
194
-``cpu_{ld,st}_*_ra``
195
-~~~~~~~~~~~~~~~~~~~~
196
-
85
-
197
-These functions work like the ``cpu_{ld,st}_*`` functions except
86
- /* Eliminate duplicate and redundant fence instructions. */
198
-that they also take a ``retaddr`` argument. This extra argument
87
- if (ctx.prev_mb) {
199
-allows for correct unwinding of any exception that is taken,
88
- switch (opc) {
200
-and should generally be the result of GETPC() called directly
89
- case INDEX_op_mb:
201
-from the top level HELPER(foo) function (i.e. the return address
90
- /* Merge two barriers of the same type into one,
202
-in the generated code).
91
- * or a weaker barrier into a stronger one,
203
+These functions work like the ``cpu_{ld,st}_mmuidx_ra`` functions
92
- * or two weaker barriers into a stronger one.
204
+except that the ``mmuidx`` parameter is taken from the current mode
93
- * mb X; mb Y => mb X|Y
205
+of the guest CPU, as determined by ``cpu_mmu_index(env, false)``.
94
- * mb; strl => mb; st
206
95
- * ldaq; mb => ld; mb
207
These are generally the preferred way to do accesses by guest
96
- * ldaq; strl => ld; mb; st
208
-virtual address from helper functions; see the documentation
97
- * Other combinations are also merged into a strong
209
-of the non-``_ra`` variants for when those would be better.
98
- * barrier. This is stricter than specified but for
99
- * the purposes of TCG is better than not optimizing.
100
- */
101
- ctx.prev_mb->args[0] |= op->args[0];
102
- tcg_op_remove(s, op);
103
- break;
210
-
104
-
211
-Calling these functions with a ``retaddr`` argument of 0 is
105
- default:
212
-equivalent to calling the non-``_ra`` version of the function.
106
- /* Opcodes that end the block stop the optimization. */
213
+virtual address from helper functions, unless the access should
107
- if ((def->flags & TCG_OPF_BB_END) == 0) {
214
+be performed with a context other than the default.
108
- break;
215
109
- }
216
Function names follow the pattern:
110
- /* fallthru */
217
111
- case INDEX_op_qemu_ld_i32:
218
-load: ``cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)``
112
- case INDEX_op_qemu_ld_i64:
219
+load: ``cpu_ld{sign}{size}_data_ra(env, ptr, ra)``
113
- case INDEX_op_qemu_st_i32:
220
114
- case INDEX_op_qemu_st8_i32:
221
-store: ``cpu_st{sign}{size}_{mmusuffix}_ra(env, ptr, val, retaddr)``
115
- case INDEX_op_qemu_st_i64:
222
+store: ``cpu_st{size}_data_ra(env, ptr, val, ra)``
116
- /* Opcodes that touch guest memory stop the optimization. */
223
+
117
- ctx.prev_mb = NULL;
224
+``sign``
118
- break;
225
+ - (empty) : for 32 or 64 bit sizes
119
- }
226
+ - ``u`` : unsigned
120
- } else if (opc == INDEX_op_mb) {
227
+ - ``s`` : signed
121
- ctx.prev_mb = op;
228
+
122
- }
229
+``size``
123
}
230
+ - ``b`` : 8 bits
124
}
231
+ - ``w`` : 16 bits
232
+ - ``l`` : 32 bits
233
+ - ``q`` : 64 bits
234
+
235
+Regexes for git grep:
236
+ - ``\<cpu_ld[us]\?[bwlq]_data_ra\>``
237
+ - ``\<cpu_st[bwlq]_data_ra\>``
238
+
239
+``cpu_{ld,st}*_data``
240
+~~~~~~~~~~~~~~~~~~~~~
241
+
242
+These functions work like the ``cpu_{ld,st}_data_ra`` functions
243
+except that the ``retaddr`` parameter is 0, and thus does not
244
+unwind guest CPU state.
245
+
246
+This means they must only be used from helper functions where the
247
+translator has saved all necessary CPU state. These functions are
248
+the right choice for calls made from hooks like the CPU ``do_interrupt``
249
+hook or when you know for certain that the translator had to save all
250
+the CPU state anyway.
251
+
252
+Function names follow the pattern:
253
+
254
+load: ``cpu_ld{sign}{size}_data(env, ptr)``
255
+
256
+store: ``cpu_st{size}_data(env, ptr, val)``
257
+
258
+``sign``
259
+ - (empty) : for 32 or 64 bit sizes
260
+ - ``u`` : unsigned
261
+ - ``s`` : signed
262
+
263
+``size``
264
+ - ``b`` : 8 bits
265
+ - ``w`` : 16 bits
266
+ - ``l`` : 32 bits
267
+ - ``q`` : 64 bits
268
269
Regexes for git grep
270
- - ``\<cpu_ld[us]\?[bwlq]_[a-zA-Z0-9]\+_ra\>``
271
- - ``\<cpu_st[bwlq]_[a-zA-Z0-9]\+_ra\>``
272
+ - ``\<cpu_ld[us]\?[bwlq]_data\>``
273
+ - ``\<cpu_st[bwlq]_data\>``
274
275
-``helper_*_{ld,st}*mmu``
276
-~~~~~~~~~~~~~~~~~~~~~~~~
277
+``cpu_ld*_code``
278
+~~~~~~~~~~~~~~~~
279
+
280
+These functions perform a read for instruction execution. The ``mmuidx``
281
+parameter is taken from the current mode of the guest CPU, as determined
282
+by ``cpu_mmu_index(env, true)``. The ``retaddr`` parameter is 0, and
283
+thus does not unwind guest CPU state, because CPU state is always
284
+synchronized while translating instructions. Any guest CPU exception
285
+that is raised will indicate an instruction execution fault rather than
286
+a data read fault.
287
+
288
+In general these functions should not be used directly during translation.
289
+There are wrapper functions that should be used instead, which also
290
+take care of tracing plugins.
291
+
292
+Function names follow the pattern:
293
+
294
+load: ``cpu_ld{sign}{size}_code(env, ptr)``
295
+
296
+``sign``
297
+ - (empty) : for 32 or 64 bit sizes
298
+ - ``u`` : unsigned
299
+ - ``s`` : signed
300
+
301
+``size``
302
+ - ``b`` : 8 bits
303
+ - ``w`` : 16 bits
304
+ - ``l`` : 32 bits
305
+ - ``q`` : 64 bits
306
+
307
+Regexes for git grep:
308
+ - ``\<cpu_ld[us]\?[bwlq]_code\>``
309
+
310
+``translator_ld*``
311
+~~~~~~~~~~~~~~~~~~
312
+
313
+These functions are wrappers for ``cpu_ld*_code`` which also perform
314
+any actions required by tracing plugins. They are only to be
315
+called during the translator callback ``translate_insn``.
316
+
317
+There is a set of functions ending in ``_swap`` which, if the ``swap``
318
+parameter is true, return the value in the endianness that is the reverse of
319
+the guest native endianness, as determined by ``TARGET_WORDS_BIGENDIAN``.
320
+
321
+Function names follow the pattern:
322
+
323
+load: ``translator_ld{sign}{size}(env, ptr)``
324
+
325
+swap: ``translator_ld{sign}{size}_swap(env, ptr, swap)``
326
+
327
+``sign``
328
+ - (empty) : for 32 or 64 bit sizes
329
+ - ``u`` : unsigned
330
+ - ``s`` : signed
331
+
332
+``size``
333
+ - ``b`` : 8 bits
334
+ - ``w`` : 16 bits
335
+ - ``l`` : 32 bits
336
+ - ``q`` : 64 bits
337
+
338
+Regexes for git grep
339
+ - ``\<translator_ld[us]\?[bwlq]\(_swap\)\?\>``
340
+
341
+``helper_*_{ld,st}*_mmu``
342
+~~~~~~~~~~~~~~~~~~~~~~~~~
343
344
These functions are intended primarily to be called by the code
345
generated by the TCG backend. They may also be called by target
346
-CPU helper function code. Like the ``cpu_{ld,st}_*_ra`` functions
347
-they perform accesses by guest virtual address; the difference is
348
-that these functions allow you to specify an ``opindex`` parameter
349
-which encodes (among other things) the mmu index to use for the
350
-access. This is necessary if your helper needs to make an access
351
-via a specific mmu index (for instance, an "always as non-privileged"
352
-access) rather than using the default mmu index for the current state
353
-of the guest CPU.
354
+CPU helper function code. Like the ``cpu_{ld,st}_mmuidx_ra`` functions
355
+they perform accesses by guest virtual address, with a given ``mmuidx``.
356
357
-The ``opindex`` parameter should be created by calling ``make_memop_idx()``.
358
+These functions specify an ``opindex`` parameter which encodes
359
+(among other things) the mmu index to use for the access. This parameter
360
+should be created by calling ``make_memop_idx()``.
361
362
The ``retaddr`` parameter should be the result of GETPC() called directly
363
from the top level HELPER(foo) function (or 0 if no guest CPU state
364
@@ -XXX,XX +XXX,XX @@ unwinding is required).
365
366
**TODO** The names of these functions are a bit odd for historical
367
reasons because they were originally expected to be called only from
368
-within generated code. We should rename them to bring them
369
-more in line with the other memory access functions.
370
+within generated code. We should rename them to bring them more in
371
+line with the other memory access functions. The explicit endianness
372
+is the only feature they have beyond ``*_mmuidx_ra``.
373
374
load: ``helper_{endian}_ld{sign}{size}_mmu(env, addr, opindex, retaddr)``
375
376
--
125
--
377
2.20.1
126
2.25.1
378
127
379
128
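The loads-stores documentation above describes the ``_mmuidx_ra`` calling
convention; as an illustration, a hypothetical target helper might read as
follows (``foo`` and the choice of mmu index are invented for the example):

    /* Hypothetical helper; a fault here unwinds correctly via GETPC(). */
    uint32_t HELPER(foo)(CPUArchState *env, target_ulong addr)
    {
        int mmu_idx = cpu_mmu_index(env, false);
        return cpu_ldl_mmuidx_ra(env, addr, mmu_idx, GETPC());
    }
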
1
1
Split out a whole bunch of placeholder functions, which are
2
currently identical. That won't last as more code gets moved.
3
4
Use CASE_OP_32_64_VEC for some logical operators that previously
5
missed the addition of vectors.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
tcg/optimize.c | 271 +++++++++++++++++++++++++++++++++++++++----------
12
1 file changed, 219 insertions(+), 52 deletions(-)
13
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
19
}
20
}
21
22
+/*
23
+ * The fold_* functions return true when processing is complete,
24
+ * usually by folding the operation to a constant or to a copy,
25
+ * and calling tcg_opt_gen_{mov,movi}. They may do other things,
26
+ * like collect information about the value produced, for use in
27
+ * optimizing a subsequent operation.
28
+ *
29
+ * These first fold_* functions are all helpers, used by other
30
+ * folders for more specific operations.
31
+ */
32
+
33
+static bool fold_const1(OptContext *ctx, TCGOp *op)
34
+{
35
+ if (arg_is_const(op->args[1])) {
36
+ uint64_t t;
37
+
38
+ t = arg_info(op->args[1])->val;
39
+ t = do_constant_folding(op->opc, t, 0);
40
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
41
+ }
42
+ return false;
43
+}
44
+
45
+static bool fold_const2(OptContext *ctx, TCGOp *op)
46
+{
47
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
48
+ uint64_t t1 = arg_info(op->args[1])->val;
49
+ uint64_t t2 = arg_info(op->args[2])->val;
50
+
51
+ t1 = do_constant_folding(op->opc, t1, t2);
52
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
53
+ }
54
+ return false;
55
+}
56
+
57
+/*
58
+ * These outermost fold_<op> functions are sorted alphabetically.
59
+ */
60
+
61
+static bool fold_add(OptContext *ctx, TCGOp *op)
62
+{
63
+ return fold_const2(ctx, op);
64
+}
65
+
66
+static bool fold_and(OptContext *ctx, TCGOp *op)
67
+{
68
+ return fold_const2(ctx, op);
69
+}
70
+
71
+static bool fold_andc(OptContext *ctx, TCGOp *op)
72
+{
73
+ return fold_const2(ctx, op);
74
+}
75
+
76
static bool fold_call(OptContext *ctx, TCGOp *op)
77
{
78
TCGContext *s = ctx->tcg;
79
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
80
return true;
81
}
82
83
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
84
+{
85
+ return fold_const1(ctx, op);
86
+}
87
+
88
+static bool fold_divide(OptContext *ctx, TCGOp *op)
89
+{
90
+ return fold_const2(ctx, op);
91
+}
92
+
93
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
94
+{
95
+ return fold_const2(ctx, op);
96
+}
97
+
98
+static bool fold_exts(OptContext *ctx, TCGOp *op)
99
+{
100
+ return fold_const1(ctx, op);
101
+}
102
+
103
+static bool fold_extu(OptContext *ctx, TCGOp *op)
104
+{
105
+ return fold_const1(ctx, op);
106
+}
107
+
108
static bool fold_mb(OptContext *ctx, TCGOp *op)
109
{
110
/* Eliminate duplicate and redundant fence instructions. */
111
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
112
return true;
113
}
114
115
+static bool fold_mul(OptContext *ctx, TCGOp *op)
116
+{
117
+ return fold_const2(ctx, op);
118
+}
119
+
120
+static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
121
+{
122
+ return fold_const2(ctx, op);
123
+}
124
+
125
+static bool fold_nand(OptContext *ctx, TCGOp *op)
126
+{
127
+ return fold_const2(ctx, op);
128
+}
129
+
130
+static bool fold_neg(OptContext *ctx, TCGOp *op)
131
+{
132
+ return fold_const1(ctx, op);
133
+}
134
+
135
+static bool fold_nor(OptContext *ctx, TCGOp *op)
136
+{
137
+ return fold_const2(ctx, op);
138
+}
139
+
140
+static bool fold_not(OptContext *ctx, TCGOp *op)
141
+{
142
+ return fold_const1(ctx, op);
143
+}
144
+
145
+static bool fold_or(OptContext *ctx, TCGOp *op)
146
+{
147
+ return fold_const2(ctx, op);
148
+}
149
+
150
+static bool fold_orc(OptContext *ctx, TCGOp *op)
151
+{
152
+ return fold_const2(ctx, op);
153
+}
154
+
155
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
156
{
157
/* Opcodes that touch guest memory stop the mb optimization. */
158
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
159
return false;
160
}
161
162
+static bool fold_remainder(OptContext *ctx, TCGOp *op)
163
+{
164
+ return fold_const2(ctx, op);
165
+}
166
+
167
+static bool fold_shift(OptContext *ctx, TCGOp *op)
168
+{
169
+ return fold_const2(ctx, op);
170
+}
171
+
172
+static bool fold_sub(OptContext *ctx, TCGOp *op)
173
+{
174
+ return fold_const2(ctx, op);
175
+}
176
+
177
+static bool fold_xor(OptContext *ctx, TCGOp *op)
178
+{
179
+ return fold_const2(ctx, op);
180
+}
181
+
182
/* Propagate constants and copies, fold constant expressions. */
183
void tcg_optimize(TCGContext *s)
184
{
185
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
186
}
187
break;
188
189
- CASE_OP_32_64(not):
190
- CASE_OP_32_64(neg):
191
- CASE_OP_32_64(ext8s):
192
- CASE_OP_32_64(ext8u):
193
- CASE_OP_32_64(ext16s):
194
- CASE_OP_32_64(ext16u):
195
- CASE_OP_32_64(ctpop):
196
- case INDEX_op_ext32s_i64:
197
- case INDEX_op_ext32u_i64:
198
- case INDEX_op_ext_i32_i64:
199
- case INDEX_op_extu_i32_i64:
200
- case INDEX_op_extrl_i64_i32:
201
- case INDEX_op_extrh_i64_i32:
202
- if (arg_is_const(op->args[1])) {
203
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
204
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
205
- continue;
206
- }
207
- break;
208
-
209
CASE_OP_32_64(bswap16):
210
CASE_OP_32_64(bswap32):
211
case INDEX_op_bswap64_i64:
212
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
213
}
214
break;
215
216
- CASE_OP_32_64(add):
217
- CASE_OP_32_64(sub):
218
- CASE_OP_32_64(mul):
219
- CASE_OP_32_64(or):
220
- CASE_OP_32_64(and):
221
- CASE_OP_32_64(xor):
222
- CASE_OP_32_64(shl):
223
- CASE_OP_32_64(shr):
224
- CASE_OP_32_64(sar):
225
- CASE_OP_32_64(rotl):
226
- CASE_OP_32_64(rotr):
227
- CASE_OP_32_64(andc):
228
- CASE_OP_32_64(orc):
229
- CASE_OP_32_64(eqv):
230
- CASE_OP_32_64(nand):
231
- CASE_OP_32_64(nor):
232
- CASE_OP_32_64(muluh):
233
- CASE_OP_32_64(mulsh):
234
- CASE_OP_32_64(div):
235
- CASE_OP_32_64(divu):
236
- CASE_OP_32_64(rem):
237
- CASE_OP_32_64(remu):
238
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
239
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
240
- arg_info(op->args[2])->val);
241
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
242
- continue;
243
- }
244
- break;
245
-
246
CASE_OP_32_64(clz):
247
CASE_OP_32_64(ctz):
248
if (arg_is_const(op->args[1])) {
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
250
}
251
break;
252
253
+ default:
254
+ break;
255
+
256
+ /* ---------------------------------------------------------- */
257
+ /* Sorted alphabetically by opcode as much as possible. */
258
+
259
+ CASE_OP_32_64_VEC(add):
260
+ done = fold_add(&ctx, op);
261
+ break;
262
+ CASE_OP_32_64_VEC(and):
263
+ done = fold_and(&ctx, op);
264
+ break;
265
+ CASE_OP_32_64_VEC(andc):
266
+ done = fold_andc(&ctx, op);
267
+ break;
268
+ CASE_OP_32_64(ctpop):
269
+ done = fold_ctpop(&ctx, op);
270
+ break;
271
+ CASE_OP_32_64(div):
272
+ CASE_OP_32_64(divu):
273
+ done = fold_divide(&ctx, op);
274
+ break;
275
+ CASE_OP_32_64(eqv):
276
+ done = fold_eqv(&ctx, op);
277
+ break;
278
+ CASE_OP_32_64(ext8s):
279
+ CASE_OP_32_64(ext16s):
280
+ case INDEX_op_ext32s_i64:
281
+ case INDEX_op_ext_i32_i64:
282
+ done = fold_exts(&ctx, op);
283
+ break;
284
+ CASE_OP_32_64(ext8u):
285
+ CASE_OP_32_64(ext16u):
286
+ case INDEX_op_ext32u_i64:
287
+ case INDEX_op_extu_i32_i64:
288
+ case INDEX_op_extrl_i64_i32:
289
+ case INDEX_op_extrh_i64_i32:
290
+ done = fold_extu(&ctx, op);
291
+ break;
292
case INDEX_op_mb:
293
done = fold_mb(&ctx, op);
294
break;
295
+ CASE_OP_32_64(mul):
296
+ done = fold_mul(&ctx, op);
297
+ break;
298
+ CASE_OP_32_64(mulsh):
299
+ CASE_OP_32_64(muluh):
300
+ done = fold_mul_highpart(&ctx, op);
301
+ break;
302
+ CASE_OP_32_64(nand):
303
+ done = fold_nand(&ctx, op);
304
+ break;
305
+ CASE_OP_32_64(neg):
306
+ done = fold_neg(&ctx, op);
307
+ break;
308
+ CASE_OP_32_64(nor):
309
+ done = fold_nor(&ctx, op);
310
+ break;
311
+ CASE_OP_32_64_VEC(not):
312
+ done = fold_not(&ctx, op);
313
+ break;
314
+ CASE_OP_32_64_VEC(or):
315
+ done = fold_or(&ctx, op);
316
+ break;
317
+ CASE_OP_32_64_VEC(orc):
318
+ done = fold_orc(&ctx, op);
319
+ break;
320
case INDEX_op_qemu_ld_i32:
321
case INDEX_op_qemu_ld_i64:
322
done = fold_qemu_ld(&ctx, op);
323
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
324
case INDEX_op_qemu_st_i64:
325
done = fold_qemu_st(&ctx, op);
326
break;
327
-
328
- default:
329
+ CASE_OP_32_64(rem):
330
+ CASE_OP_32_64(remu):
331
+ done = fold_remainder(&ctx, op);
332
+ break;
333
+ CASE_OP_32_64(rotl):
334
+ CASE_OP_32_64(rotr):
335
+ CASE_OP_32_64(sar):
336
+ CASE_OP_32_64(shl):
337
+ CASE_OP_32_64(shr):
338
+ done = fold_shift(&ctx, op);
339
+ break;
340
+ CASE_OP_32_64_VEC(sub):
341
+ done = fold_sub(&ctx, op);
342
+ break;
343
+ CASE_OP_32_64_VEC(xor):
344
+ done = fold_xor(&ctx, op);
345
break;
346
}
347
348
--
349
2.25.1
350
351
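To see why the placeholders are worth having, here is a hypothetical later
version of fold_add, layering an identity simplification (x + 0 folds to a
move) on top of fold_const2; this is a sketch, not part of the patch:

    static bool fold_add(OptContext *ctx, TCGOp *op)
    {
        if (fold_const2(ctx, op)) {
            return true;
        }
        /* Hypothetical extra rule: adding constant zero is a plain move. */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0) {
            return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
        }
        return false;
    }
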
1
Some distributions, e.g. Ubuntu 19.10, enable PIE by default.
1
Reduce some code duplication by folding the NE and EQ cases.
2
If for some reason one wishes to build a non-pie binary, we
3
must provide additional options to override.
4
5
At the same time, reorg the code to an elif chain.
6
2
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
9
Reviewed-by: Thomas Huth <thuth@redhat.com>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
6
---
12
configure | 25 ++++++++++++-------------
7
tcg/optimize.c | 145 ++++++++++++++++++++++++-------------------------
13
1 file changed, 12 insertions(+), 13 deletions(-)
8
1 file changed, 72 insertions(+), 73 deletions(-)
14
9
15
diff --git a/configure b/configure
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100755
11
index XXXXXXX..XXXXXXX 100644
17
--- a/configure
12
--- a/tcg/optimize.c
18
+++ b/configure
13
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@ if compile_prog "-Werror -fno-pie" "-no-pie"; then
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
20
LDFLAGS_NOPIE="-no-pie"
15
return fold_const2(ctx, op);
21
fi
16
}
22
17
23
-if test "$pie" != "no" ; then
18
+static bool fold_setcond2(OptContext *ctx, TCGOp *op)
24
- if compile_prog "-fPIE -DPIE" "-pie"; then
19
+{
25
- QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
20
+ TCGCond cond = op->args[5];
26
- LDFLAGS="-pie $LDFLAGS"
21
+ int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
27
- pie="yes"
22
+ int inv = 0;
28
- else
23
+
29
- if test "$pie" = "yes"; then
24
+ if (i >= 0) {
30
- error_exit "PIE not available due to missing toolchain support"
25
+ goto do_setcond_const;
31
- else
26
+ }
32
- echo "Disabling PIE due to missing toolchain support"
27
+
33
- pie="no"
28
+ switch (cond) {
34
- fi
29
+ case TCG_COND_LT:
35
- fi
30
+ case TCG_COND_GE:
36
+if test "$pie" = "no"; then
31
+ /*
37
+ QEMU_CFLAGS="$CFLAGS_NOPIE $QEMU_CFLAGS"
32
+ * Simplify LT/GE comparisons vs zero to a single compare
38
+ LDFLAGS="$LDFLAGS_NOPIE $LDFLAGS"
33
+ * vs the high word of the input.
39
+elif compile_prog "-fPIE -DPIE" "-pie"; then
34
+ */
40
+ QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
35
+ if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
41
+ LDFLAGS="-pie $LDFLAGS"
36
+ arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
42
+ pie="yes"
37
+ goto do_setcond_high;
43
+elif test "$pie" = "yes"; then
38
+ }
44
+ error_exit "PIE not available due to missing toolchain support"
39
+ break;
45
+else
40
+
46
+ echo "Disabling PIE due to missing toolchain support"
41
+ case TCG_COND_NE:
47
+ pie="no"
42
+ inv = 1;
48
fi
43
+ QEMU_FALLTHROUGH;
49
44
+ case TCG_COND_EQ:
50
# Detect support for PT_GNU_RELRO + DT_BIND_NOW.
45
+ /*
46
+ * Simplify EQ/NE comparisons where one of the pairs
47
+ * can be simplified.
48
+ */
49
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
50
+ op->args[3], cond);
51
+ switch (i ^ inv) {
52
+ case 0:
53
+ goto do_setcond_const;
54
+ case 1:
55
+ goto do_setcond_high;
56
+ }
57
+
58
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
59
+ op->args[4], cond);
60
+ switch (i ^ inv) {
61
+ case 0:
62
+ goto do_setcond_const;
63
+ case 1:
64
+ op->args[2] = op->args[3];
65
+ op->args[3] = cond;
66
+ op->opc = INDEX_op_setcond_i32;
67
+ break;
68
+ }
69
+ break;
70
+
71
+ default:
72
+ break;
73
+
74
+ do_setcond_high:
75
+ op->args[1] = op->args[2];
76
+ op->args[2] = op->args[4];
77
+ op->args[3] = cond;
78
+ op->opc = INDEX_op_setcond_i32;
79
+ break;
80
+ }
81
+ return false;
82
+
83
+ do_setcond_const:
84
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
85
+}
86
+
87
static bool fold_shift(OptContext *ctx, TCGOp *op)
88
{
89
return fold_const2(ctx, op);
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
}
92
break;
93
94
- case INDEX_op_setcond2_i32:
95
- i = do_constant_folding_cond2(&op->args[1], &op->args[3],
96
- op->args[5]);
97
- if (i >= 0) {
98
- do_setcond_const:
99
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
100
- continue;
101
- }
102
- if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
103
- && arg_is_const(op->args[3])
104
- && arg_info(op->args[3])->val == 0
105
- && arg_is_const(op->args[4])
106
- && arg_info(op->args[4])->val == 0) {
107
- /* Simplify LT/GE comparisons vs zero to a single compare
108
- vs the high word of the input. */
109
- do_setcond_high:
110
- reset_temp(op->args[0]);
111
- arg_info(op->args[0])->z_mask = 1;
112
- op->opc = INDEX_op_setcond_i32;
113
- op->args[1] = op->args[2];
114
- op->args[2] = op->args[4];
115
- op->args[3] = op->args[5];
116
- break;
117
- }
118
- if (op->args[5] == TCG_COND_EQ) {
119
- /* Simplify EQ comparisons where one of the pairs
120
- can be simplified. */
121
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
122
- op->args[1], op->args[3],
123
- TCG_COND_EQ);
124
- if (i == 0) {
125
- goto do_setcond_const;
126
- } else if (i > 0) {
127
- goto do_setcond_high;
128
- }
129
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
130
- op->args[2], op->args[4],
131
- TCG_COND_EQ);
132
- if (i == 0) {
133
- goto do_setcond_high;
134
- } else if (i < 0) {
135
- break;
136
- }
137
- do_setcond_low:
138
- reset_temp(op->args[0]);
139
- arg_info(op->args[0])->z_mask = 1;
140
- op->opc = INDEX_op_setcond_i32;
141
- op->args[2] = op->args[3];
142
- op->args[3] = op->args[5];
143
- break;
144
- }
145
- if (op->args[5] == TCG_COND_NE) {
146
- /* Simplify NE comparisons where one of the pairs
147
- can be simplified. */
148
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
149
- op->args[1], op->args[3],
150
- TCG_COND_NE);
151
- if (i == 0) {
152
- goto do_setcond_high;
153
- } else if (i > 0) {
154
- goto do_setcond_const;
155
- }
156
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
157
- op->args[2], op->args[4],
158
- TCG_COND_NE);
159
- if (i == 0) {
160
- goto do_setcond_low;
161
- } else if (i > 0) {
162
- goto do_setcond_const;
163
- }
164
- }
165
- break;
166
-
167
default:
168
break;
169
170
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
171
CASE_OP_32_64(shr):
172
done = fold_shift(&ctx, op);
173
break;
174
+ case INDEX_op_setcond2_i32:
175
+ done = fold_setcond2(&ctx, op);
176
+ break;
177
CASE_OP_32_64_VEC(sub):
178
done = fold_sub(&ctx, op);
179
break;
51
--
180
--
52
2.20.1
181
2.25.1
53
182
54
183
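The ``i ^ inv`` construction above carries the weight of the deduplication.
An annotated copy of the dispatch (code as in the patch, comments added):

    /*
     * i is the verdict for one pair of input halves, from
     * do_constant_folding_cond; -1 means nothing is known.
     * For EQ, i == 0 decides the whole comparison (constant false)
     * and i == 1 defers to the other pair.  For NE the roles are
     * exchanged (i == 1 decides, constant true; i == 0 defers), so
     * with inv = 1 for NE, switching on i ^ inv lets both
     * conditions share the EQ-shaped labels:
     */
    switch (i ^ inv) {
    case 0:
        goto do_setcond_const;   /* result is a known constant */
    case 1:
        goto do_setcond_high;    /* compare only the other word */
    }
    /* i < 0 falls through: no simplification from this pair. */
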
1
Reduce some code duplication by folding the NE and EQ cases.
1
2
3
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 159 +++++++++++++++++++++++++------------------------
7
1 file changed, 81 insertions(+), 78 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
14
return fold_const2(ctx, op);
15
}
16
17
+static bool fold_brcond2(OptContext *ctx, TCGOp *op)
18
+{
19
+ TCGCond cond = op->args[4];
20
+ int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
21
+ TCGArg label = op->args[5];
22
+ int inv = 0;
23
+
24
+ if (i >= 0) {
25
+ goto do_brcond_const;
26
+ }
27
+
28
+ switch (cond) {
29
+ case TCG_COND_LT:
30
+ case TCG_COND_GE:
31
+ /*
32
+ * Simplify LT/GE comparisons vs zero to a single compare
33
+ * vs the high word of the input.
34
+ */
35
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
36
+ arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
37
+ goto do_brcond_high;
38
+ }
39
+ break;
40
+
41
+ case TCG_COND_NE:
42
+ inv = 1;
43
+ QEMU_FALLTHROUGH;
44
+ case TCG_COND_EQ:
45
+ /*
46
+ * Simplify EQ/NE comparisons where one of the pairs
47
+ * can be simplified.
48
+ */
49
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
50
+ op->args[2], cond);
51
+ switch (i ^ inv) {
52
+ case 0:
53
+ goto do_brcond_const;
54
+ case 1:
55
+ goto do_brcond_high;
56
+ }
57
+
58
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
59
+ op->args[3], cond);
60
+ switch (i ^ inv) {
61
+ case 0:
62
+ goto do_brcond_const;
63
+ case 1:
64
+ op->opc = INDEX_op_brcond_i32;
65
+ op->args[1] = op->args[2];
66
+ op->args[2] = cond;
67
+ op->args[3] = label;
68
+ break;
69
+ }
70
+ break;
71
+
72
+ default:
73
+ break;
74
+
75
+ do_brcond_high:
76
+ op->opc = INDEX_op_brcond_i32;
77
+ op->args[0] = op->args[1];
78
+ op->args[1] = op->args[3];
79
+ op->args[2] = cond;
80
+ op->args[3] = label;
81
+ break;
82
+
83
+ do_brcond_const:
84
+ if (i == 0) {
85
+ tcg_op_remove(ctx->tcg, op);
86
+ return true;
87
+ }
88
+ op->opc = INDEX_op_br;
89
+ op->args[0] = label;
90
+ break;
91
+ }
92
+ return false;
93
+}
94
+
95
static bool fold_call(OptContext *ctx, TCGOp *op)
96
{
97
TCGContext *s = ctx->tcg;
98
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
99
}
100
break;
101
102
- case INDEX_op_brcond2_i32:
103
- i = do_constant_folding_cond2(&op->args[0], &op->args[2],
104
- op->args[4]);
105
- if (i == 0) {
106
- do_brcond_false:
107
- tcg_op_remove(s, op);
108
- continue;
109
- }
110
- if (i > 0) {
111
- do_brcond_true:
112
- op->opc = opc = INDEX_op_br;
113
- op->args[0] = op->args[5];
114
- break;
115
- }
116
- if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
117
- && arg_is_const(op->args[2])
118
- && arg_info(op->args[2])->val == 0
119
- && arg_is_const(op->args[3])
120
- && arg_info(op->args[3])->val == 0) {
121
- /* Simplify LT/GE comparisons vs zero to a single compare
122
- vs the high word of the input. */
123
- do_brcond_high:
124
- op->opc = opc = INDEX_op_brcond_i32;
125
- op->args[0] = op->args[1];
126
- op->args[1] = op->args[3];
127
- op->args[2] = op->args[4];
128
- op->args[3] = op->args[5];
129
- break;
130
- }
131
- if (op->args[4] == TCG_COND_EQ) {
132
- /* Simplify EQ comparisons where one of the pairs
133
- can be simplified. */
134
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
135
- op->args[0], op->args[2],
136
- TCG_COND_EQ);
137
- if (i == 0) {
138
- goto do_brcond_false;
139
- } else if (i > 0) {
140
- goto do_brcond_high;
141
- }
142
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
143
- op->args[1], op->args[3],
144
- TCG_COND_EQ);
145
- if (i == 0) {
146
- goto do_brcond_false;
147
- } else if (i < 0) {
148
- break;
149
- }
150
- do_brcond_low:
151
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
152
- op->opc = INDEX_op_brcond_i32;
153
- op->args[1] = op->args[2];
154
- op->args[2] = op->args[4];
155
- op->args[3] = op->args[5];
156
- break;
157
- }
158
- if (op->args[4] == TCG_COND_NE) {
159
- /* Simplify NE comparisons where one of the pairs
160
- can be simplified. */
161
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
162
- op->args[0], op->args[2],
163
- TCG_COND_NE);
164
- if (i == 0) {
165
- goto do_brcond_high;
166
- } else if (i > 0) {
167
- goto do_brcond_true;
168
- }
169
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
170
- op->args[1], op->args[3],
171
- TCG_COND_NE);
172
- if (i == 0) {
173
- goto do_brcond_low;
174
- } else if (i > 0) {
175
- goto do_brcond_true;
176
- }
177
- }
178
- break;
179
-
180
default:
181
break;
182
183
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
184
CASE_OP_32_64_VEC(andc):
185
done = fold_andc(&ctx, op);
186
break;
187
+ case INDEX_op_brcond2_i32:
188
+ done = fold_brcond2(&ctx, op);
189
+ break;
190
CASE_OP_32_64(ctpop):
191
done = fold_ctpop(&ctx, op);
192
break;
193
--
194
2.25.1
195
196
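A standalone illustration (not QEMU code) of why the LT/GE-versus-zero case
only needs the high word: the sign of a double-word value lives entirely in
its high half, so one single-word compare suffices.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool lt_zero(uint32_t lo, uint32_t hi)
    {
        uint64_t full = ((uint64_t)hi << 32) | lo;
        bool via_full = (full >> 63) != 0;  /* sign bit of the 64-bit value */
        bool via_high = (hi >> 31) != 0;    /* sign bit of the high word */
        assert(via_full == via_high);
        return via_high;
    }
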
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 33 +++++++++++++++++++--------------
6
1 file changed, 19 insertions(+), 14 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_brcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGCond cond = op->args[2];
19
+ int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
20
+
21
+ if (i == 0) {
22
+ tcg_op_remove(ctx->tcg, op);
23
+ return true;
24
+ }
25
+ if (i > 0) {
26
+ op->opc = INDEX_op_br;
27
+ op->args[0] = op->args[3];
28
+ }
29
+ return false;
30
+}
31
+
32
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
33
{
34
TCGCond cond = op->args[4];
35
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
36
}
37
break;
38
39
- CASE_OP_32_64(brcond):
40
- i = do_constant_folding_cond(opc, op->args[0],
41
- op->args[1], op->args[2]);
42
- if (i == 0) {
43
- tcg_op_remove(s, op);
44
- continue;
45
- } else if (i > 0) {
46
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
47
- op->opc = opc = INDEX_op_br;
48
- op->args[0] = op->args[3];
49
- break;
50
- }
51
- break;
52
-
53
CASE_OP_32_64(movcond):
54
i = do_constant_folding_cond(opc, op->args[1],
55
op->args[2], op->args[5]);
56
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
57
CASE_OP_32_64_VEC(andc):
58
done = fold_andc(&ctx, op);
59
break;
60
+ CASE_OP_32_64(brcond):
61
+ done = fold_brcond(&ctx, op);
62
+ break;
63
case INDEX_op_brcond2_i32:
64
done = fold_brcond2(&ctx, op);
65
break;
66
--
67
2.25.1
68
69
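In summary, the three verdicts fold_brcond can reach (sketch in comments;
the TCG fragment is hypothetical):

    /*
     * Applied to a hypothetical "brcond_i32 t0, t1, eq, $L" whose
     * inputs may be known constants:
     *   i == 0: condition statically false; the branch is dead, the
     *           op is removed and fold_brcond returns true.
     *   i  > 0: condition statically true; rewritten in place to "br $L".
     *   i  < 0: unknown; the op is kept unchanged.
     * In the last two cases an op remains, so returning false routes
     * it on to finish_folding().
     */
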
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 23 ++++++++++++++---------
6
1 file changed, 14 insertions(+), 9 deletions(-)
1
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_setcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGCond cond = op->args[3];
19
+ int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
20
+
21
+ if (i >= 0) {
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
+ }
24
+ return false;
25
+}
26
+
27
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
28
{
29
TCGCond cond = op->args[5];
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
31
}
32
break;
33
34
- CASE_OP_32_64(setcond):
35
- i = do_constant_folding_cond(opc, op->args[1],
36
- op->args[2], op->args[3]);
37
- if (i >= 0) {
38
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
39
- continue;
40
- }
41
- break;
42
-
43
CASE_OP_32_64(movcond):
44
i = do_constant_folding_cond(opc, op->args[1],
45
op->args[2], op->args[5]);
46
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
47
CASE_OP_32_64(shr):
48
done = fold_shift(&ctx, op);
49
break;
50
+ CASE_OP_32_64(setcond):
51
+ done = fold_setcond(&ctx, op);
52
+ break;
53
case INDEX_op_setcond2_i32:
54
done = fold_setcond2(&ctx, op);
55
break;
56
--
57
2.25.1
58
59
1
Code movement in an upcoming patch will show that this file
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
was implicitly depending on tcg.h being included indirectly.
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
4
Cc: Peter Maydell <peter.maydell@linaro.org>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
4
---
9
target/arm/sve_helper.c | 1 +
5
tcg/optimize.c | 37 +++++++++++++++++++++----------------
10
1 file changed, 1 insertion(+)
6
1 file changed, 21 insertions(+), 16 deletions(-)
11
7
12
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/sve_helper.c
10
--- a/tcg/optimize.c
15
+++ b/target/arm/sve_helper.c
11
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@
12
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
17
#include "exec/helper-proto.h"
13
return fold_const2(ctx, op);
18
#include "tcg/tcg-gvec-desc.h"
14
}
19
#include "fpu/softfloat.h"
15
20
+#include "tcg.h"
16
+static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
21
17
+{
22
18
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
23
/* Note that vector data is stored in host-endian 64-bit chunks,
19
+ uint32_t a = arg_info(op->args[2])->val;
20
+ uint32_t b = arg_info(op->args[3])->val;
21
+ uint64_t r = (uint64_t)a * b;
22
+ TCGArg rl, rh;
23
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
24
+
25
+ rl = op->args[0];
26
+ rh = op->args[1];
27
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
28
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
29
+ return true;
30
+ }
31
+ return false;
32
+}
33
+
34
static bool fold_nand(OptContext *ctx, TCGOp *op)
35
{
36
return fold_const2(ctx, op);
37
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
38
}
39
break;
40
41
- case INDEX_op_mulu2_i32:
42
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
43
- uint32_t a = arg_info(op->args[2])->val;
44
- uint32_t b = arg_info(op->args[3])->val;
45
- uint64_t r = (uint64_t)a * b;
46
- TCGArg rl, rh;
47
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
48
-
49
- rl = op->args[0];
50
- rh = op->args[1];
51
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
52
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
53
- continue;
54
- }
55
- break;
56
-
57
default:
58
break;
59
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
61
CASE_OP_32_64(muluh):
62
done = fold_mul_highpart(&ctx, op);
63
break;
64
+ case INDEX_op_mulu2_i32:
65
+ done = fold_mulu2_i32(&ctx, op);
66
+ break;
67
CASE_OP_32_64(nand):
68
done = fold_nand(&ctx, op);
69
break;
24
--
70
--
25
2.20.1
71
2.25.1
26
72
27
73
diff view generated by jsdifflib
1
Reduce the amount of preprocessor obfuscation by expanding
1
Add two additional helpers, fold_add2_i32 and fold_sub2_i32
2
the text of each of the functions generated. The result is
2
which will not be simple wrappers forever.
3
only slightly smaller than the original.
4
3
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
include/exec/cpu_ldst.h | 67 +++++++-----------
8
tcg/optimize.c | 70 +++++++++++++++++++++++++++++++-------------------
11
include/exec/cpu_ldst_template.h | 117 -------------------------------
9
1 file changed, 44 insertions(+), 26 deletions(-)
12
accel/tcg/cputlb.c | 107 +++++++++++++++++++++++++++-
13
3 files changed, 130 insertions(+), 161 deletions(-)
14
delete mode 100644 include/exec/cpu_ldst_template.h
15
10
16
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
18
--- a/include/exec/cpu_ldst.h
13
--- a/tcg/optimize.c
19
+++ b/include/exec/cpu_ldst.h
14
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ typedef target_ulong abi_ptr;
15
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
21
#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
16
return fold_const2(ctx, op);
22
#endif
17
}
23
18
24
-#if defined(CONFIG_USER_ONLY)
19
+static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
25
-
20
+{
26
-extern __thread uintptr_t helper_retaddr;
21
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
27
-
22
+ arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
28
-static inline void set_helper_retaddr(uintptr_t ra)
23
+ uint32_t al = arg_info(op->args[2])->val;
29
-{
24
+ uint32_t ah = arg_info(op->args[3])->val;
30
- helper_retaddr = ra;
25
+ uint32_t bl = arg_info(op->args[4])->val;
31
- /*
26
+ uint32_t bh = arg_info(op->args[5])->val;
32
- * Ensure that this write is visible to the SIGSEGV handler that
27
+ uint64_t a = ((uint64_t)ah << 32) | al;
33
- * may be invoked due to a subsequent invalid memory operation.
28
+ uint64_t b = ((uint64_t)bh << 32) | bl;
34
- */
29
+ TCGArg rl, rh;
35
- signal_barrier();
30
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
36
-}
37
-
38
-static inline void clear_helper_retaddr(void)
39
-{
40
- /*
41
- * Ensure that previous memory operations have succeeded before
42
- * removing the data visible to the signal handler.
43
- */
44
- signal_barrier();
45
- helper_retaddr = 0;
46
-}
47
-
48
-/* In user-only mode we provide only the _code and _data accessors. */
49
-
50
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
51
uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr);
52
uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr);
53
@@ -XXX,XX +XXX,XX @@ void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr,
54
void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr,
55
uint64_t val, uintptr_t retaddr);
56
57
+#if defined(CONFIG_USER_ONLY)
58
+
31
+
59
+extern __thread uintptr_t helper_retaddr;
32
+ if (add) {
33
+ a += b;
34
+ } else {
35
+ a -= b;
36
+ }
60
+
37
+
61
+static inline void set_helper_retaddr(uintptr_t ra)
38
+ rl = op->args[0];
62
+{
39
+ rh = op->args[1];
63
+ helper_retaddr = ra;
40
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
64
+ /*
41
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
65
+ * Ensure that this write is visible to the SIGSEGV handler that
42
+ return true;
66
+ * may be invoked due to a subsequent invalid memory operation.
43
+ }
67
+ */
44
+ return false;
68
+ signal_barrier();
69
+}
45
+}
70
+
46
+
71
+static inline void clear_helper_retaddr(void)
47
+static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
72
+{
48
+{
73
+ /*
49
+ return fold_addsub2_i32(ctx, op, true);
74
+ * Ensure that previous memory operations have succeeded before
75
+ * removing the data visible to the signal handler.
76
+ */
77
+ signal_barrier();
78
+ helper_retaddr = 0;
79
+}
50
+}
80
+
51
+
81
/*
52
static bool fold_and(OptContext *ctx, TCGOp *op)
82
* Provide the same *_mmuidx_ra interface as for softmmu.
53
{
83
* The mmu_idx argument is ignored.
54
return fold_const2(ctx, op);
84
@@ -XXX,XX +XXX,XX @@ void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
55
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
85
void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
56
return fold_const2(ctx, op);
86
int mmu_idx, uintptr_t retaddr);
87
88
-/* these access are slower, they must be as rare as possible */
89
-#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
90
-#define MEMSUFFIX _data
91
-#define DATA_SIZE 1
92
-#include "exec/cpu_ldst_template.h"
93
-
94
-#define DATA_SIZE 2
95
-#include "exec/cpu_ldst_template.h"
96
-
97
-#define DATA_SIZE 4
98
-#include "exec/cpu_ldst_template.h"
99
-
100
-#define DATA_SIZE 8
101
-#include "exec/cpu_ldst_template.h"
102
-#undef CPU_MMU_INDEX
103
-#undef MEMSUFFIX
104
-
105
#endif /* defined(CONFIG_USER_ONLY) */
106
107
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
108
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
109
deleted file mode 100644
110
index XXXXXXX..XXXXXXX
111
--- a/include/exec/cpu_ldst_template.h
112
+++ /dev/null
113
@@ -XXX,XX +XXX,XX @@
114
-/*
115
- * Software MMU support
116
- *
117
- * Generate inline load/store functions for one MMU mode and data
118
- * size.
119
- *
120
- * Generate a store function as well as signed and unsigned loads.
121
- *
122
- * Not used directly but included from cpu_ldst.h.
123
- *
124
- * Copyright (c) 2003 Fabrice Bellard
125
- *
126
- * This library is free software; you can redistribute it and/or
127
- * modify it under the terms of the GNU Lesser General Public
128
- * License as published by the Free Software Foundation; either
129
- * version 2 of the License, or (at your option) any later version.
130
- *
131
- * This library is distributed in the hope that it will be useful,
132
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
133
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
134
- * Lesser General Public License for more details.
135
- *
136
- * You should have received a copy of the GNU Lesser General Public
137
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
138
- */
139
-
140
-#if DATA_SIZE == 8
141
-#define SUFFIX q
142
-#define USUFFIX q
143
-#define DATA_TYPE uint64_t
144
-#define SHIFT 3
145
-#elif DATA_SIZE == 4
146
-#define SUFFIX l
147
-#define USUFFIX l
148
-#define DATA_TYPE uint32_t
149
-#define SHIFT 2
150
-#elif DATA_SIZE == 2
151
-#define SUFFIX w
152
-#define USUFFIX uw
153
-#define DATA_TYPE uint16_t
154
-#define DATA_STYPE int16_t
155
-#define SHIFT 1
156
-#elif DATA_SIZE == 1
157
-#define SUFFIX b
158
-#define USUFFIX ub
159
-#define DATA_TYPE uint8_t
160
-#define DATA_STYPE int8_t
161
-#define SHIFT 0
162
-#else
163
-#error unsupported data size
164
-#endif
165
-
166
-#if DATA_SIZE == 8
167
-#define RES_TYPE uint64_t
168
-#else
169
-#define RES_TYPE uint32_t
170
-#endif
171
-
172
-/* generic load/store macros */
173
-
174
-static inline RES_TYPE
175
-glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
176
- target_ulong ptr,
177
- uintptr_t retaddr)
178
-{
179
- return glue(glue(cpu_ld, USUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX,
180
- retaddr);
181
-}
182
-
183
-static inline RES_TYPE
184
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
185
-{
186
- return glue(glue(cpu_ld, USUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX, 0);
187
-}
188
-
189
-#if DATA_SIZE <= 2
190
-static inline int
191
-glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
192
- target_ulong ptr,
193
- uintptr_t retaddr)
194
-{
195
- return glue(glue(cpu_lds, SUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX,
196
- retaddr);
197
-}
198
-
199
-static inline int
200
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
201
-{
202
- return glue(glue(cpu_lds, SUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX, 0);
203
-}
204
-#endif
205
-
206
-/* generic store macro */
207
-
208
-static inline void
209
-glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
210
- target_ulong ptr,
211
- RES_TYPE v, uintptr_t retaddr)
212
-{
213
- glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX,
214
- retaddr);
215
-}
216
-
217
-static inline void
218
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
219
- RES_TYPE v)
220
-{
221
- glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX, 0);
222
-}
223
-
224
-#undef RES_TYPE
225
-#undef DATA_TYPE
226
-#undef DATA_STYPE
227
-#undef SUFFIX
228
-#undef USUFFIX
229
-#undef DATA_SIZE
230
-#undef SHIFT
231
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
232
index XXXXXXX..XXXXXXX 100644
233
--- a/accel/tcg/cputlb.c
234
+++ b/accel/tcg/cputlb.c
235
@@ -XXX,XX +XXX,XX @@
236
#include "qemu/atomic128.h"
237
#include "translate-all.h"
238
#include "trace-root.h"
239
-#include "qemu/plugin.h"
240
#include "trace/mem.h"
241
#ifdef CONFIG_PLUGIN
242
#include "qemu/plugin-memory.h"
243
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
244
? helper_le_ldq_mmu : helper_be_ldq_mmu);
245
}
57
}
246
58
247
+uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
59
+static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
248
+ uintptr_t retaddr)
249
+{
60
+{
250
+ return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
61
+ return fold_addsub2_i32(ctx, op, false);
251
+}
62
+}
252
+
63
+
253
+int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
64
static bool fold_xor(OptContext *ctx, TCGOp *op)
254
+{
65
{
255
+ return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
66
return fold_const2(ctx, op);
256
+}
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
257
+
68
}
258
+uint32_t cpu_lduw_data_ra(CPUArchState *env, target_ulong ptr,
69
break;
259
+ uintptr_t retaddr)
70
260
+{
71
- case INDEX_op_add2_i32:
261
+ return cpu_lduw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
72
- case INDEX_op_sub2_i32:
262
+}
73
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
263
+
74
- && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
264
+int cpu_ldsw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
75
- uint32_t al = arg_info(op->args[2])->val;
265
+{
76
- uint32_t ah = arg_info(op->args[3])->val;
266
+ return cpu_ldsw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
77
- uint32_t bl = arg_info(op->args[4])->val;
267
+}
78
- uint32_t bh = arg_info(op->args[5])->val;
268
+
79
- uint64_t a = ((uint64_t)ah << 32) | al;
269
+uint32_t cpu_ldl_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
80
- uint64_t b = ((uint64_t)bh << 32) | bl;
270
+{
81
- TCGArg rl, rh;
271
+ return cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
82
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
272
+}
83
-
273
+
84
- if (opc == INDEX_op_add2_i32) {
274
+uint64_t cpu_ldq_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
85
- a += b;
275
+{
86
- } else {
276
+ return cpu_ldq_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
87
- a -= b;
277
+}
88
- }
278
+
89
-
279
+uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
90
- rl = op->args[0];
280
+{
91
- rh = op->args[1];
281
+ return cpu_ldub_data_ra(env, ptr, 0);
92
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
282
+}
93
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
283
+
94
- continue;
284
+int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
95
- }
285
+{
96
- break;
286
+ return cpu_ldsb_data_ra(env, ptr, 0);
97
287
+}
98
default:
288
+
99
break;
289
+uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr)
100
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
290
+{
101
CASE_OP_32_64_VEC(add):
291
+ return cpu_lduw_data_ra(env, ptr, 0);
102
done = fold_add(&ctx, op);
292
+}
103
break;
293
+
104
+ case INDEX_op_add2_i32:
294
+int cpu_ldsw_data(CPUArchState *env, target_ulong ptr)
105
+ done = fold_add2_i32(&ctx, op);
295
+{
106
+ break;
296
+ return cpu_ldsw_data_ra(env, ptr, 0);
107
CASE_OP_32_64_VEC(and):
297
+}
108
done = fold_and(&ctx, op);
298
+
109
break;
299
+uint32_t cpu_ldl_data(CPUArchState *env, target_ulong ptr)
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
300
+{
111
CASE_OP_32_64_VEC(sub):
301
+ return cpu_ldl_data_ra(env, ptr, 0);
112
done = fold_sub(&ctx, op);
302
+}
113
break;
303
+
114
+ case INDEX_op_sub2_i32:
304
+uint64_t cpu_ldq_data(CPUArchState *env, target_ulong ptr)
115
+ done = fold_sub2_i32(&ctx, op);
305
+{
116
+ break;
306
+ return cpu_ldq_data_ra(env, ptr, 0);
117
CASE_OP_32_64_VEC(xor):
307
+}
118
done = fold_xor(&ctx, op);
308
+
119
break;
309
/*
310
* Store Helpers
311
*/
312
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
313
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ);
314
}
315
316
+void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
317
+ uint32_t val, uintptr_t retaddr)
318
+{
319
+ cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
320
+}
321
+
322
+void cpu_stw_data_ra(CPUArchState *env, target_ulong ptr,
323
+ uint32_t val, uintptr_t retaddr)
324
+{
325
+ cpu_stw_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
326
+}
327
+
328
+void cpu_stl_data_ra(CPUArchState *env, target_ulong ptr,
329
+ uint32_t val, uintptr_t retaddr)
330
+{
331
+ cpu_stl_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
332
+}
333
+
334
+void cpu_stq_data_ra(CPUArchState *env, target_ulong ptr,
335
+ uint64_t val, uintptr_t retaddr)
336
+{
337
+ cpu_stq_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
338
+}
339
+
340
+void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
341
+{
342
+ cpu_stb_data_ra(env, ptr, val, 0);
343
+}
344
+
345
+void cpu_stw_data(CPUArchState *env, target_ulong ptr, uint32_t val)
346
+{
347
+ cpu_stw_data_ra(env, ptr, val, 0);
348
+}
349
+
350
+void cpu_stl_data(CPUArchState *env, target_ulong ptr, uint32_t val)
351
+{
352
+ cpu_stl_data_ra(env, ptr, val, 0);
353
+}
354
+
355
+void cpu_stq_data(CPUArchState *env, target_ulong ptr, uint64_t val)
356
+{
357
+ cpu_stq_data_ra(env, ptr, val, 0);
358
+}
359
+
360
/* First set of helpers allows passing in of OI and RETADDR. This makes
361
them callable from other helpers. */
362
363
--
120
--
364
2.20.1
121
2.25.1
365
122
366
123
diff view generated by jsdifflib
1
Claudio's Huawei address has been defunct for quite a while. In
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
https://lists.gnu.org/archive/html/qemu-devel/2019-05/msg06872.html
4
5
he asked for his personal address to be removed as well.
6
7
I will take over officially.
8
9
Cc: Claudio Fontana <claudio.fontana@gmail.com>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
4
---
14
MAINTAINERS | 3 +--
5
tcg/optimize.c | 56 ++++++++++++++++++++++++++++----------------------
15
1 file changed, 1 insertion(+), 2 deletions(-)
6
1 file changed, 31 insertions(+), 25 deletions(-)
16
7
17
diff --git a/MAINTAINERS b/MAINTAINERS
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
19
--- a/MAINTAINERS
10
--- a/tcg/optimize.c
20
+++ b/MAINTAINERS
11
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ F: plugins/
12
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
22
F: tests/plugin
13
return true;
23
14
}
24
AArch64 TCG target
15
25
-M: Claudio Fontana <claudio.fontana@huawei.com>
16
+static bool fold_movcond(OptContext *ctx, TCGOp *op)
26
-M: Claudio Fontana <claudio.fontana@gmail.com>
17
+{
27
+M: Richard Henderson <richard.henderson@linaro.org>
18
+ TCGOpcode opc = op->opc;
28
S: Maintained
19
+ TCGCond cond = op->args[5];
29
L: qemu-arm@nongnu.org
20
+ int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
30
F: tcg/aarch64/
21
+
22
+ if (i >= 0) {
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
+ }
25
+
26
+ if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
27
+ uint64_t tv = arg_info(op->args[3])->val;
28
+ uint64_t fv = arg_info(op->args[4])->val;
29
+
30
+ opc = (opc == INDEX_op_movcond_i32
31
+ ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
32
+
33
+ if (tv == 1 && fv == 0) {
34
+ op->opc = opc;
35
+ op->args[3] = cond;
36
+ } else if (fv == 1 && tv == 0) {
37
+ op->opc = opc;
38
+ op->args[3] = tcg_invert_cond(cond);
39
+ }
40
+ }
41
+ return false;
42
+}
43
+
44
static bool fold_mul(OptContext *ctx, TCGOp *op)
45
{
46
return fold_const2(ctx, op);
47
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
48
}
49
break;
50
51
- CASE_OP_32_64(movcond):
52
- i = do_constant_folding_cond(opc, op->args[1],
53
- op->args[2], op->args[5]);
54
- if (i >= 0) {
55
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
56
- continue;
57
- }
58
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
59
- uint64_t tv = arg_info(op->args[3])->val;
60
- uint64_t fv = arg_info(op->args[4])->val;
61
- TCGCond cond = op->args[5];
62
-
63
- if (fv == 1 && tv == 0) {
64
- cond = tcg_invert_cond(cond);
65
- } else if (!(tv == 1 && fv == 0)) {
66
- break;
67
- }
68
- op->args[3] = cond;
69
- op->opc = opc = (opc == INDEX_op_movcond_i32
70
- ? INDEX_op_setcond_i32
71
- : INDEX_op_setcond_i64);
72
- }
73
- break;
74
-
75
-
76
default:
77
break;
78
79
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
80
case INDEX_op_mb:
81
done = fold_mb(&ctx, op);
82
break;
83
+ CASE_OP_32_64(movcond):
84
+ done = fold_movcond(&ctx, op);
85
+ break;
86
CASE_OP_32_64(mul):
87
done = fold_mul(&ctx, op);
88
break;
31
--
89
--
32
2.20.1
90
2.25.1
33
91
34
92
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
All tcg includes are relative to the repository root directory,
4
we can safely remove the tcg/ directory from the include search
5
path list.
6
7
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Stefan Weil <sw@weilnetz.de>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
12
Message-Id: <20200101112303.20724-5-philmd@redhat.com>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
4
---
15
configure | 1 -
5
tcg/optimize.c | 39 ++++++++++++++++++++++-----------------
16
1 file changed, 1 deletion(-)
6
1 file changed, 22 insertions(+), 17 deletions(-)
17
7
18
diff --git a/configure b/configure
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
19
index XXXXXXX..XXXXXXX 100755
9
index XXXXXXX..XXXXXXX 100644
20
--- a/configure
10
--- a/tcg/optimize.c
21
+++ b/configure
11
+++ b/tcg/optimize.c
22
@@ -XXX,XX +XXX,XX @@ elif test "$ARCH" = "riscv32" || test "$ARCH" = "riscv64" ; then
12
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
23
else
13
return fold_const2(ctx, op);
24
QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
14
}
25
fi
15
26
-QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg $QEMU_INCLUDES"
16
+static bool fold_extract2(OptContext *ctx, TCGOp *op)
27
17
+{
28
echo "TOOLS=$tools" >> $config_host_mak
18
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
29
echo "ROMS=$roms" >> $config_host_mak
19
+ uint64_t v1 = arg_info(op->args[1])->val;
20
+ uint64_t v2 = arg_info(op->args[2])->val;
21
+ int shr = op->args[3];
22
+
23
+ if (op->opc == INDEX_op_extract2_i64) {
24
+ v1 >>= shr;
25
+ v2 <<= 64 - shr;
26
+ } else {
27
+ v1 = (uint32_t)v1 >> shr;
28
+ v2 = (int32_t)v2 << (32 - shr);
29
+ }
30
+ return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
31
+ }
32
+ return false;
33
+}
34
+
35
static bool fold_exts(OptContext *ctx, TCGOp *op)
36
{
37
return fold_const1(ctx, op);
38
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
}
40
break;
41
42
- CASE_OP_32_64(extract2):
43
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
44
- uint64_t v1 = arg_info(op->args[1])->val;
45
- uint64_t v2 = arg_info(op->args[2])->val;
46
- int shr = op->args[3];
47
-
48
- if (opc == INDEX_op_extract2_i64) {
49
- tmp = (v1 >> shr) | (v2 << (64 - shr));
50
- } else {
51
- tmp = (int32_t)(((uint32_t)v1 >> shr) |
52
- ((uint32_t)v2 << (32 - shr)));
53
- }
54
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
55
- continue;
56
- }
57
- break;
58
-
59
default:
60
break;
61
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
63
CASE_OP_32_64(eqv):
64
done = fold_eqv(&ctx, op);
65
break;
66
+ CASE_OP_32_64(extract2):
67
+ done = fold_extract2(&ctx, op);
68
+ break;
69
CASE_OP_32_64(ext8s):
70
CASE_OP_32_64(ext16s):
71
case INDEX_op_ext32s_i64:
30
--
72
--
31
2.20.1
73
2.25.1
32
74
33
75
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Reviewed-by: Stefan Weil <sw@weilnetz.de>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-Id: <20200101112303.20724-4-philmd@redhat.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
4
---
11
{tcg => include/tcg}/tcg-gvec-desc.h | 0
5
tcg/optimize.c | 48 ++++++++++++++++++++++++++++++------------------
12
{tcg => include/tcg}/tcg-mo.h | 0
6
1 file changed, 30 insertions(+), 18 deletions(-)
13
{tcg => include/tcg}/tcg-op-gvec.h | 0
14
{tcg => include/tcg}/tcg-op.h | 0
15
{tcg => include/tcg}/tcg-opc.h | 0
16
{tcg => include/tcg}/tcg.h | 0
17
MAINTAINERS | 1 +
18
7 files changed, 1 insertion(+)
19
rename {tcg => include/tcg}/tcg-gvec-desc.h (100%)
20
rename {tcg => include/tcg}/tcg-mo.h (100%)
21
rename {tcg => include/tcg}/tcg-op-gvec.h (100%)
22
rename {tcg => include/tcg}/tcg-op.h (100%)
23
rename {tcg => include/tcg}/tcg-opc.h (100%)
24
rename {tcg => include/tcg}/tcg.h (100%)
25
7
26
diff --git a/tcg/tcg-gvec-desc.h b/include/tcg/tcg-gvec-desc.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
27
similarity index 100%
28
rename from tcg/tcg-gvec-desc.h
29
rename to include/tcg/tcg-gvec-desc.h
30
diff --git a/tcg/tcg-mo.h b/include/tcg/tcg-mo.h
31
similarity index 100%
32
rename from tcg/tcg-mo.h
33
rename to include/tcg/tcg-mo.h
34
diff --git a/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
35
similarity index 100%
36
rename from tcg/tcg-op-gvec.h
37
rename to include/tcg/tcg-op-gvec.h
38
diff --git a/tcg/tcg-op.h b/include/tcg/tcg-op.h
39
similarity index 100%
40
rename from tcg/tcg-op.h
41
rename to include/tcg/tcg-op.h
42
diff --git a/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
43
similarity index 100%
44
rename from tcg/tcg-opc.h
45
rename to include/tcg/tcg-opc.h
46
diff --git a/tcg/tcg.h b/include/tcg/tcg.h
47
similarity index 100%
48
rename from tcg/tcg.h
49
rename to include/tcg/tcg.h
50
diff --git a/MAINTAINERS b/MAINTAINERS
51
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
52
--- a/MAINTAINERS
10
--- a/tcg/optimize.c
53
+++ b/MAINTAINERS
11
+++ b/tcg/optimize.c
54
@@ -XXX,XX +XXX,XX @@ Common TCG code
12
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
55
M: Richard Henderson <rth@twiddle.net>
13
return fold_const2(ctx, op);
56
S: Maintained
14
}
57
F: tcg/
15
58
+F: include/tcg/
16
+static bool fold_extract(OptContext *ctx, TCGOp *op)
59
17
+{
60
TCG Plugins
18
+ if (arg_is_const(op->args[1])) {
61
M: Alex Bennée <alex.bennee@linaro.org>
19
+ uint64_t t;
20
+
21
+ t = arg_info(op->args[1])->val;
22
+ t = extract64(t, op->args[2], op->args[3]);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return false;
26
+}
27
+
28
static bool fold_extract2(OptContext *ctx, TCGOp *op)
29
{
30
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
32
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
33
}
34
35
+static bool fold_sextract(OptContext *ctx, TCGOp *op)
36
+{
37
+ if (arg_is_const(op->args[1])) {
38
+ uint64_t t;
39
+
40
+ t = arg_info(op->args[1])->val;
41
+ t = sextract64(t, op->args[2], op->args[3]);
42
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
43
+ }
44
+ return false;
45
+}
46
+
47
static bool fold_shift(OptContext *ctx, TCGOp *op)
48
{
49
return fold_const2(ctx, op);
50
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
51
}
52
break;
53
54
- CASE_OP_32_64(extract):
55
- if (arg_is_const(op->args[1])) {
56
- tmp = extract64(arg_info(op->args[1])->val,
57
- op->args[2], op->args[3]);
58
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
59
- continue;
60
- }
61
- break;
62
-
63
- CASE_OP_32_64(sextract):
64
- if (arg_is_const(op->args[1])) {
65
- tmp = sextract64(arg_info(op->args[1])->val,
66
- op->args[2], op->args[3]);
67
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
68
- continue;
69
- }
70
- break;
71
-
72
default:
73
break;
74
75
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
76
CASE_OP_32_64(eqv):
77
done = fold_eqv(&ctx, op);
78
break;
79
+ CASE_OP_32_64(extract):
80
+ done = fold_extract(&ctx, op);
81
+ break;
82
CASE_OP_32_64(extract2):
83
done = fold_extract2(&ctx, op);
84
break;
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
case INDEX_op_setcond2_i32:
87
done = fold_setcond2(&ctx, op);
88
break;
89
+ CASE_OP_32_64(sextract):
90
+ done = fold_sextract(&ctx, op);
91
+ break;
92
CASE_OP_32_64_VEC(sub):
93
done = fold_sub(&ctx, op);
94
break;
62
--
95
--
63
2.20.1
96
2.25.1
64
97
65
98
diff view generated by jsdifflib
1
All users have now been converted to cpu_*_mmuidx_ra.
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
4
---
7
include/exec/cpu_ldst.h | 230 ----------------------------------------
5
tcg/optimize.c | 25 +++++++++++++++----------
8
1 file changed, 230 deletions(-)
6
1 file changed, 15 insertions(+), 10 deletions(-)
9
7
10
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
12
--- a/include/exec/cpu_ldst.h
10
--- a/tcg/optimize.c
13
+++ b/include/exec/cpu_ldst.h
11
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
12
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
13
return fold_const1(ctx, op);
16
int mmu_idx, uintptr_t retaddr);
14
}
17
15
18
-#ifdef MMU_MODE0_SUFFIX
16
+static bool fold_deposit(OptContext *ctx, TCGOp *op)
19
-#define CPU_MMU_INDEX 0
17
+{
20
-#define MEMSUFFIX MMU_MODE0_SUFFIX
18
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
21
-#define DATA_SIZE 1
19
+ uint64_t t1 = arg_info(op->args[1])->val;
22
-#include "exec/cpu_ldst_template.h"
20
+ uint64_t t2 = arg_info(op->args[2])->val;
21
+
22
+ t1 = deposit64(t1, op->args[3], op->args[4], t2);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
24
+ }
25
+ return false;
26
+}
27
+
28
static bool fold_divide(OptContext *ctx, TCGOp *op)
29
{
30
return fold_const2(ctx, op);
31
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
32
}
33
break;
34
35
- CASE_OP_32_64(deposit):
36
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
37
- tmp = deposit64(arg_info(op->args[1])->val,
38
- op->args[3], op->args[4],
39
- arg_info(op->args[2])->val);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
23
-
44
-
24
-#define DATA_SIZE 2
45
default:
25
-#include "exec/cpu_ldst_template.h"
46
break;
26
-
47
27
-#define DATA_SIZE 4
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
28
-#include "exec/cpu_ldst_template.h"
49
CASE_OP_32_64(ctpop):
29
-
50
done = fold_ctpop(&ctx, op);
30
-#define DATA_SIZE 8
51
break;
31
-#include "exec/cpu_ldst_template.h"
52
+ CASE_OP_32_64(deposit):
32
-#undef CPU_MMU_INDEX
53
+ done = fold_deposit(&ctx, op);
33
-#undef MEMSUFFIX
54
+ break;
34
-#endif
55
CASE_OP_32_64(div):
35
-
56
CASE_OP_32_64(divu):
36
-#if (NB_MMU_MODES >= 2) && defined(MMU_MODE1_SUFFIX)
57
done = fold_divide(&ctx, op);
37
-#define CPU_MMU_INDEX 1
38
-#define MEMSUFFIX MMU_MODE1_SUFFIX
39
-#define DATA_SIZE 1
40
-#include "exec/cpu_ldst_template.h"
41
-
42
-#define DATA_SIZE 2
43
-#include "exec/cpu_ldst_template.h"
44
-
45
-#define DATA_SIZE 4
46
-#include "exec/cpu_ldst_template.h"
47
-
48
-#define DATA_SIZE 8
49
-#include "exec/cpu_ldst_template.h"
50
-#undef CPU_MMU_INDEX
51
-#undef MEMSUFFIX
52
-#endif
53
-
54
-#if (NB_MMU_MODES >= 3) && defined(MMU_MODE2_SUFFIX)
55
-
56
-#define CPU_MMU_INDEX 2
57
-#define MEMSUFFIX MMU_MODE2_SUFFIX
58
-#define DATA_SIZE 1
59
-#include "exec/cpu_ldst_template.h"
60
-
61
-#define DATA_SIZE 2
62
-#include "exec/cpu_ldst_template.h"
63
-
64
-#define DATA_SIZE 4
65
-#include "exec/cpu_ldst_template.h"
66
-
67
-#define DATA_SIZE 8
68
-#include "exec/cpu_ldst_template.h"
69
-#undef CPU_MMU_INDEX
70
-#undef MEMSUFFIX
71
-#endif /* (NB_MMU_MODES >= 3) */
72
-
73
-#if (NB_MMU_MODES >= 4) && defined(MMU_MODE3_SUFFIX)
74
-
75
-#define CPU_MMU_INDEX 3
76
-#define MEMSUFFIX MMU_MODE3_SUFFIX
77
-#define DATA_SIZE 1
78
-#include "exec/cpu_ldst_template.h"
79
-
80
-#define DATA_SIZE 2
81
-#include "exec/cpu_ldst_template.h"
82
-
83
-#define DATA_SIZE 4
84
-#include "exec/cpu_ldst_template.h"
85
-
86
-#define DATA_SIZE 8
87
-#include "exec/cpu_ldst_template.h"
88
-#undef CPU_MMU_INDEX
89
-#undef MEMSUFFIX
90
-#endif /* (NB_MMU_MODES >= 4) */
91
-
92
-#if (NB_MMU_MODES >= 5) && defined(MMU_MODE4_SUFFIX)
93
-
94
-#define CPU_MMU_INDEX 4
95
-#define MEMSUFFIX MMU_MODE4_SUFFIX
96
-#define DATA_SIZE 1
97
-#include "exec/cpu_ldst_template.h"
98
-
99
-#define DATA_SIZE 2
100
-#include "exec/cpu_ldst_template.h"
101
-
102
-#define DATA_SIZE 4
103
-#include "exec/cpu_ldst_template.h"
104
-
105
-#define DATA_SIZE 8
106
-#include "exec/cpu_ldst_template.h"
107
-#undef CPU_MMU_INDEX
108
-#undef MEMSUFFIX
109
-#endif /* (NB_MMU_MODES >= 5) */
110
-
111
-#if (NB_MMU_MODES >= 6) && defined(MMU_MODE5_SUFFIX)
112
-
113
-#define CPU_MMU_INDEX 5
114
-#define MEMSUFFIX MMU_MODE5_SUFFIX
115
-#define DATA_SIZE 1
116
-#include "exec/cpu_ldst_template.h"
117
-
118
-#define DATA_SIZE 2
119
-#include "exec/cpu_ldst_template.h"
120
-
121
-#define DATA_SIZE 4
122
-#include "exec/cpu_ldst_template.h"
123
-
124
-#define DATA_SIZE 8
125
-#include "exec/cpu_ldst_template.h"
126
-#undef CPU_MMU_INDEX
127
-#undef MEMSUFFIX
128
-#endif /* (NB_MMU_MODES >= 6) */
129
-
130
-#if (NB_MMU_MODES >= 7) && defined(MMU_MODE6_SUFFIX)
131
-
132
-#define CPU_MMU_INDEX 6
133
-#define MEMSUFFIX MMU_MODE6_SUFFIX
134
-#define DATA_SIZE 1
135
-#include "exec/cpu_ldst_template.h"
136
-
137
-#define DATA_SIZE 2
138
-#include "exec/cpu_ldst_template.h"
139
-
140
-#define DATA_SIZE 4
141
-#include "exec/cpu_ldst_template.h"
142
-
143
-#define DATA_SIZE 8
144
-#include "exec/cpu_ldst_template.h"
145
-#undef CPU_MMU_INDEX
146
-#undef MEMSUFFIX
147
-#endif /* (NB_MMU_MODES >= 7) */
148
-
149
-#if (NB_MMU_MODES >= 8) && defined(MMU_MODE7_SUFFIX)
150
-
151
-#define CPU_MMU_INDEX 7
152
-#define MEMSUFFIX MMU_MODE7_SUFFIX
153
-#define DATA_SIZE 1
154
-#include "exec/cpu_ldst_template.h"
155
-
156
-#define DATA_SIZE 2
157
-#include "exec/cpu_ldst_template.h"
158
-
159
-#define DATA_SIZE 4
160
-#include "exec/cpu_ldst_template.h"
161
-
162
-#define DATA_SIZE 8
163
-#include "exec/cpu_ldst_template.h"
164
-#undef CPU_MMU_INDEX
165
-#undef MEMSUFFIX
166
-#endif /* (NB_MMU_MODES >= 8) */
167
-
168
-#if (NB_MMU_MODES >= 9) && defined(MMU_MODE8_SUFFIX)
169
-
170
-#define CPU_MMU_INDEX 8
171
-#define MEMSUFFIX MMU_MODE8_SUFFIX
172
-#define DATA_SIZE 1
173
-#include "exec/cpu_ldst_template.h"
174
-
175
-#define DATA_SIZE 2
176
-#include "exec/cpu_ldst_template.h"
177
-
178
-#define DATA_SIZE 4
179
-#include "exec/cpu_ldst_template.h"
180
-
181
-#define DATA_SIZE 8
182
-#include "exec/cpu_ldst_template.h"
183
-#undef CPU_MMU_INDEX
184
-#undef MEMSUFFIX
185
-#endif /* (NB_MMU_MODES >= 9) */
186
-
187
-#if (NB_MMU_MODES >= 10) && defined(MMU_MODE9_SUFFIX)
188
-
189
-#define CPU_MMU_INDEX 9
190
-#define MEMSUFFIX MMU_MODE9_SUFFIX
191
-#define DATA_SIZE 1
192
-#include "exec/cpu_ldst_template.h"
193
-
194
-#define DATA_SIZE 2
195
-#include "exec/cpu_ldst_template.h"
196
-
197
-#define DATA_SIZE 4
198
-#include "exec/cpu_ldst_template.h"
199
-
200
-#define DATA_SIZE 8
201
-#include "exec/cpu_ldst_template.h"
202
-#undef CPU_MMU_INDEX
203
-#undef MEMSUFFIX
204
-#endif /* (NB_MMU_MODES >= 10) */
205
-
206
-#if (NB_MMU_MODES >= 11) && defined(MMU_MODE10_SUFFIX)
207
-
208
-#define CPU_MMU_INDEX 10
209
-#define MEMSUFFIX MMU_MODE10_SUFFIX
210
-#define DATA_SIZE 1
211
-#include "exec/cpu_ldst_template.h"
212
-
213
-#define DATA_SIZE 2
214
-#include "exec/cpu_ldst_template.h"
215
-
216
-#define DATA_SIZE 4
217
-#include "exec/cpu_ldst_template.h"
218
-
219
-#define DATA_SIZE 8
220
-#include "exec/cpu_ldst_template.h"
221
-#undef CPU_MMU_INDEX
222
-#undef MEMSUFFIX
223
-#endif /* (NB_MMU_MODES >= 11) */
224
-
225
-#if (NB_MMU_MODES >= 12) && defined(MMU_MODE11_SUFFIX)
226
-
227
-#define CPU_MMU_INDEX 11
228
-#define MEMSUFFIX MMU_MODE11_SUFFIX
229
-#define DATA_SIZE 1
230
-#include "exec/cpu_ldst_template.h"
231
-
232
-#define DATA_SIZE 2
233
-#include "exec/cpu_ldst_template.h"
234
-
235
-#define DATA_SIZE 4
236
-#include "exec/cpu_ldst_template.h"
237
-
238
-#define DATA_SIZE 8
239
-#include "exec/cpu_ldst_template.h"
240
-#undef CPU_MMU_INDEX
241
-#undef MEMSUFFIX
242
-#endif /* (NB_MMU_MODES >= 12) */
243
-
244
-#if (NB_MMU_MODES > 12)
245
-#error "NB_MMU_MODES > 12 is not supported for now"
246
-#endif /* (NB_MMU_MODES > 12) */
247
-
248
/* these access are slower, they must be as rare as possible */
249
#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
250
#define MEMSUFFIX _data
251
--
58
--
252
2.20.1
59
2.25.1
253
60
254
61
diff view generated by jsdifflib
1
The functions generated by these macros are unused.
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
4
---
8
target/xtensa/cpu.h | 4 ----
5
tcg/optimize.c | 32 ++++++++++++++++++--------------
9
1 file changed, 4 deletions(-)
6
1 file changed, 18 insertions(+), 14 deletions(-)
10
7
11
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/target/xtensa/cpu.h
10
--- a/tcg/optimize.c
14
+++ b/target/xtensa/cpu.h
11
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
12
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
13
return true;
16
}
14
}
17
15
18
/* MMU modes definitions */
16
+static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
19
-#define MMU_MODE0_SUFFIX _ring0
17
+{
20
-#define MMU_MODE1_SUFFIX _ring1
18
+ if (arg_is_const(op->args[1])) {
21
-#define MMU_MODE2_SUFFIX _ring2
19
+ uint64_t t = arg_info(op->args[1])->val;
22
-#define MMU_MODE3_SUFFIX _ring3
20
+
23
#define MMU_USER_IDX 3
21
+ if (t != 0) {
24
22
+ t = do_constant_folding(op->opc, t, 0);
25
static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
26
+ }
27
+ return false;
28
+}
29
+
30
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
31
{
32
return fold_const1(ctx, op);
33
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
}
35
break;
36
37
- CASE_OP_32_64(clz):
38
- CASE_OP_32_64(ctz):
39
- if (arg_is_const(op->args[1])) {
40
- TCGArg v = arg_info(op->args[1])->val;
41
- if (v != 0) {
42
- tmp = do_constant_folding(opc, v, 0);
43
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
44
- } else {
45
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
46
- }
47
- continue;
48
- }
49
- break;
50
-
51
default:
52
break;
53
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
case INDEX_op_brcond2_i32:
56
done = fold_brcond2(&ctx, op);
57
break;
58
+ CASE_OP_32_64(clz):
59
+ CASE_OP_32_64(ctz):
60
+ done = fold_count_zeros(&ctx, op);
61
+ break;
62
CASE_OP_32_64(ctpop):
63
done = fold_ctpop(&ctx, op);
64
break;
26
--
65
--
27
2.20.1
66
2.25.1
28
67
29
68
diff view generated by jsdifflib
1
The functions generated by these macros are unused.
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
4
---
8
target/unicore32/cpu.h | 2 --
5
tcg/optimize.c | 27 ++++++++++++++++-----------
9
1 file changed, 2 deletions(-)
6
1 file changed, 16 insertions(+), 11 deletions(-)
10
7
11
diff --git a/target/unicore32/cpu.h b/target/unicore32/cpu.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/target/unicore32/cpu.h
10
--- a/tcg/optimize.c
14
+++ b/target/unicore32/cpu.h
11
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask)
12
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
13
return false;
17
14
}
18
/* MMU modes definitions */
15
19
-#define MMU_MODE0_SUFFIX _kernel
16
+static bool fold_bswap(OptContext *ctx, TCGOp *op)
20
-#define MMU_MODE1_SUFFIX _user
17
+{
21
#define MMU_USER_IDX 1
18
+ if (arg_is_const(op->args[1])) {
22
static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+
21
+ t = do_constant_folding(op->opc, t, op->args[2]);
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
23
+ }
24
+ return false;
25
+}
26
+
27
static bool fold_call(OptContext *ctx, TCGOp *op)
23
{
28
{
29
TCGContext *s = ctx->tcg;
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
31
}
32
break;
33
34
- CASE_OP_32_64(bswap16):
35
- CASE_OP_32_64(bswap32):
36
- case INDEX_op_bswap64_i64:
37
- if (arg_is_const(op->args[1])) {
38
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
39
- op->args[2]);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
44
-
45
default:
46
break;
47
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
case INDEX_op_brcond2_i32:
50
done = fold_brcond2(&ctx, op);
51
break;
52
+ CASE_OP_32_64(bswap16):
53
+ CASE_OP_32_64(bswap32):
54
+ case INDEX_op_bswap64_i64:
55
+ done = fold_bswap(&ctx, op);
56
+ break;
57
CASE_OP_32_64(clz):
58
CASE_OP_32_64(ctz):
59
done = fold_count_zeros(&ctx, op);
24
--
60
--
25
2.20.1
61
2.25.1
26
62
27
63
diff view generated by jsdifflib
1
The functions generated by these macros are unused.
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Cc: Aurelien Jarno <aurelien@aurel32.net>
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
4
---
8
target/sh4/cpu.h | 2 --
5
tcg/optimize.c | 53 +++++++++++++++++++++++++++++---------------------
9
1 file changed, 2 deletions(-)
6
1 file changed, 31 insertions(+), 22 deletions(-)
10
7
11
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sh4/cpu.h
10
--- a/tcg/optimize.c
14
+++ b/target/sh4/cpu.h
11
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ void cpu_load_tlb(CPUSH4State * env);
12
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
16
#define cpu_list sh4_cpu_list
13
return fold_const2(ctx, op);
17
14
}
18
/* MMU modes definitions */
15
19
-#define MMU_MODE0_SUFFIX _kernel
16
+static bool fold_dup(OptContext *ctx, TCGOp *op)
20
-#define MMU_MODE1_SUFFIX _user
17
+{
21
#define MMU_USER_IDX 1
18
+ if (arg_is_const(op->args[1])) {
22
static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+ t = dup_const(TCGOP_VECE(op), t);
21
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
22
+ }
23
+ return false;
24
+}
25
+
26
+static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
+{
28
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
29
+ uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
30
+ arg_info(op->args[2])->val);
31
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
32
+ }
33
+
34
+ if (args_are_copies(op->args[1], op->args[2])) {
35
+ op->opc = INDEX_op_dup_vec;
36
+ TCGOP_VECE(op) = MO_32;
37
+ }
38
+ return false;
39
+}
40
+
41
static bool fold_eqv(OptContext *ctx, TCGOp *op)
23
{
42
{
43
return fold_const2(ctx, op);
44
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
45
done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
46
break;
47
48
- case INDEX_op_dup_vec:
49
- if (arg_is_const(op->args[1])) {
50
- tmp = arg_info(op->args[1])->val;
51
- tmp = dup_const(TCGOP_VECE(op), tmp);
52
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
53
- continue;
54
- }
55
- break;
56
-
57
- case INDEX_op_dup2_vec:
58
- assert(TCG_TARGET_REG_BITS == 32);
59
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
60
- tcg_opt_gen_movi(&ctx, op, op->args[0],
61
- deposit64(arg_info(op->args[1])->val, 32, 32,
62
- arg_info(op->args[2])->val));
63
- continue;
64
- } else if (args_are_copies(op->args[1], op->args[2])) {
65
- op->opc = INDEX_op_dup_vec;
66
- TCGOP_VECE(op) = MO_32;
67
- }
68
- break;
69
-
70
default:
71
break;
72
73
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
74
CASE_OP_32_64(divu):
75
done = fold_divide(&ctx, op);
76
break;
77
+ case INDEX_op_dup_vec:
78
+ done = fold_dup(&ctx, op);
79
+ break;
80
+ case INDEX_op_dup2_vec:
81
+ done = fold_dup2(&ctx, op);
82
+ break;
83
CASE_OP_32_64(eqv):
84
done = fold_eqv(&ctx, op);
85
break;
24
--
86
--
25
2.20.1
87
2.25.1
26
88
27
89
diff view generated by jsdifflib
1
The functions generated by these macros are unused.
1
This is the final entry in the main switch that was in a
2
different form. After this, we have the option to convert
3
the switch into a function dispatch table.
2
4
3
Cc: Edgar E. Iglesias <edgar.iglesias@gmail.com>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
target/microblaze/cpu.h | 3 ---
9
tcg/optimize.c | 27 ++++++++++++++-------------
9
1 file changed, 3 deletions(-)
10
1 file changed, 14 insertions(+), 13 deletions(-)
10
11
11
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/target/microblaze/cpu.h
14
--- a/tcg/optimize.c
14
+++ b/target/microblaze/cpu.h
15
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
16
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
16
#define cpu_signal_handler cpu_mb_signal_handler
17
return true;
17
18
}
18
/* MMU modes definitions */
19
19
-#define MMU_MODE0_SUFFIX _nommu
20
+static bool fold_mov(OptContext *ctx, TCGOp *op)
20
-#define MMU_MODE1_SUFFIX _kernel
21
+{
21
-#define MMU_MODE2_SUFFIX _user
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
22
#define MMU_NOMMU_IDX 0
23
+}
23
#define MMU_KERNEL_IDX 1
24
+
24
#define MMU_USER_IDX 2
25
static bool fold_movcond(OptContext *ctx, TCGOp *op)
26
{
27
TCGOpcode opc = op->opc;
28
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
29
break;
30
}
31
32
- /* Propagate constants through copy operations and do constant
33
- folding. Constants will be substituted to arguments by register
34
- allocator where needed and possible. Also detect copies. */
35
+ /*
36
+ * Process each opcode.
37
+ * Sorted alphabetically by opcode as much as possible.
38
+ */
39
switch (opc) {
40
- CASE_OP_32_64_VEC(mov):
41
- done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
42
- break;
43
-
44
- default:
45
- break;
46
-
47
- /* ---------------------------------------------------------- */
48
- /* Sorted alphabetically by opcode as much as possible. */
49
-
50
CASE_OP_32_64_VEC(add):
51
done = fold_add(&ctx, op);
52
break;
53
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
54
case INDEX_op_mb:
55
done = fold_mb(&ctx, op);
56
break;
57
+ CASE_OP_32_64_VEC(mov):
58
+ done = fold_mov(&ctx, op);
59
+ break;
60
CASE_OP_32_64(movcond):
61
done = fold_movcond(&ctx, op);
62
break;
63
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
64
CASE_OP_32_64_VEC(xor):
65
done = fold_xor(&ctx, op);
66
break;
67
+ default:
68
+ break;
69
}
70
71
if (!done) {
25
--
72
--
26
2.20.1
73
2.25.1
27
74
28
75
diff view generated by jsdifflib
1
Code movement in an upcoming patch will show that this file
1
Pull the "op r, a, a => movi r, 0" optimization into a function,
2
was implicitly depending on trace-root.h being included beforehand.
2
and use it in the outer opcode fold functions.
3
3
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
include/user/syscall-trace.h | 2 ++
8
tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
10
1 file changed, 2 insertions(+)
9
1 file changed, 24 insertions(+), 17 deletions(-)
11
10
12
diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/include/user/syscall-trace.h
13
--- a/tcg/optimize.c
15
+++ b/include/user/syscall-trace.h
14
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
17
#ifndef _SYSCALL_TRACE_H_
16
return false;
18
#define _SYSCALL_TRACE_H_
17
}
19
18
20
+#include "trace-root.h"
19
+/* If the binary operation has both arguments equal, fold to @i. */
20
+static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
21
+{
22
+ if (args_are_copies(op->args[1], op->args[2])) {
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
+ }
25
+ return false;
26
+}
21
+
27
+
22
/*
28
/*
23
* These helpers just provide a common place for the various
29
* These outermost fold_<op> functions are sorted alphabetically.
24
* subsystems that want to track syscalls to put their hooks in. We
30
*/
31
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
32
33
static bool fold_andc(OptContext *ctx, TCGOp *op)
34
{
35
- return fold_const2(ctx, op);
36
+ if (fold_const2(ctx, op) ||
37
+ fold_xx_to_i(ctx, op, 0)) {
38
+ return true;
39
+ }
40
+ return false;
41
}
42
43
static bool fold_brcond(OptContext *ctx, TCGOp *op)
44
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
45
46
static bool fold_sub(OptContext *ctx, TCGOp *op)
47
{
48
- return fold_const2(ctx, op);
49
+ if (fold_const2(ctx, op) ||
50
+ fold_xx_to_i(ctx, op, 0)) {
51
+ return true;
52
+ }
53
+ return false;
54
}
55
56
static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
57
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
58
59
static bool fold_xor(OptContext *ctx, TCGOp *op)
60
{
61
- return fold_const2(ctx, op);
62
+ if (fold_const2(ctx, op) ||
63
+ fold_xx_to_i(ctx, op, 0)) {
64
+ return true;
65
+ }
66
+ return false;
67
}
68
69
/* Propagate constants and copies, fold constant expressions. */
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
break;
72
}
73
74
- /* Simplify expression for "op r, a, a => movi r, 0" cases */
75
- switch (opc) {
76
- CASE_OP_32_64_VEC(andc):
77
- CASE_OP_32_64_VEC(sub):
78
- CASE_OP_32_64_VEC(xor):
79
- if (args_are_copies(op->args[1], op->args[2])) {
80
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
81
- continue;
82
- }
83
- break;
84
- default:
85
- break;
86
- }
87
-
88
/*
89
* Process each opcode.
90
* Sorted alphabetically by opcode as much as possible.
25
--
91
--
26
2.20.1
92
2.25.1
27
93
28
94
diff view generated by jsdifflib
1
With the tracing hooks, the inline functions are no longer
1
Pull the "op r, a, a => mov r, a" optimization into a function,
2
so simple. Once out-of-line, the current tlb_entry lookup
2
and use it in the outer opcode fold functions.
3
is redundant with the one in the main load/store_helper.
4
3
5
This also begins the introduction of a new target facing
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
interface, with suffix *_mmuidx_ra. This is not yet
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
official because the interface is not done for user-only.
8
9
Use abi_ptr instead of target_ulong in preparation for
10
user-only; the two types are identical for softmmu.
11
12
What remains in cpu_ldst_template.h are the expansions
13
for _code, _data, and MMU_MODE<N>_SUFFIX.
14
15
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
16
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
17
---
7
---
18
include/exec/cpu_ldst.h | 25 ++++++-
8
tcg/optimize.c | 39 ++++++++++++++++++++++++---------------
19
include/exec/cpu_ldst_template.h | 125 +++++++------------------------
9
1 file changed, 24 insertions(+), 15 deletions(-)
20
accel/tcg/cputlb.c | 116 ++++++++++++++++++++++++++++
21
3 files changed, 166 insertions(+), 100 deletions(-)
22
10
23
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
24
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
25
--- a/include/exec/cpu_ldst.h
13
--- a/tcg/optimize.c
26
+++ b/include/exec/cpu_ldst.h
14
+++ b/tcg/optimize.c
27
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
28
16
return false;
29
#else
30
31
-/* The memory helpers for tcg-generated code need tcg_target_long etc. */
32
+/* Needed for TCG_OVERSIZED_GUEST */
33
#include "tcg.h"
34
35
static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
36
@@ -XXX,XX +XXX,XX @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
37
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
38
}
17
}
39
18
40
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
19
+/* If the binary operation has both arguments equal, fold to identity. */
41
+ int mmu_idx, uintptr_t ra);
20
+static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
42
+uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
43
+ int mmu_idx, uintptr_t ra);
44
+uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
45
+ int mmu_idx, uintptr_t ra);
46
+uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
47
+ int mmu_idx, uintptr_t ra);
48
+
49
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
50
+ int mmu_idx, uintptr_t ra);
51
+int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
52
+ int mmu_idx, uintptr_t ra);
53
+
54
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
55
+ int mmu_idx, uintptr_t retaddr);
56
+void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
57
+ int mmu_idx, uintptr_t retaddr);
58
+void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
59
+ int mmu_idx, uintptr_t retaddr);
60
+void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
61
+ int mmu_idx, uintptr_t retaddr);
62
+
63
#ifdef MMU_MODE0_SUFFIX
64
#define CPU_MMU_INDEX 0
65
#define MEMSUFFIX MMU_MODE0_SUFFIX
66
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
67
index XXXXXXX..XXXXXXX 100644
68
--- a/include/exec/cpu_ldst_template.h
69
+++ b/include/exec/cpu_ldst_template.h
70
@@ -XXX,XX +XXX,XX @@
71
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
72
*/
73
74
-#if !defined(SOFTMMU_CODE_ACCESS)
75
-#include "trace-root.h"
76
-#endif
77
-
78
-#include "qemu/plugin.h"
79
-#include "trace/mem.h"
80
-
81
#if DATA_SIZE == 8
82
#define SUFFIX q
83
#define USUFFIX q
84
@@ -XXX,XX +XXX,XX @@
85
#define RES_TYPE uint32_t
86
#endif
87
88
+/* generic load/store macros */
89
+
90
#ifdef SOFTMMU_CODE_ACCESS
91
-#define ADDR_READ addr_code
92
-#define MMUSUFFIX _cmmu
93
-#define URETSUFFIX USUFFIX
94
-#define SRETSUFFIX glue(s, SUFFIX)
95
-#else
96
-#define ADDR_READ addr_read
97
-#define MMUSUFFIX _mmu
98
-#define URETSUFFIX USUFFIX
99
-#define SRETSUFFIX glue(s, SUFFIX)
100
+
101
+static inline RES_TYPE
102
+glue(glue(cpu_ld, USUFFIX), _code)(CPUArchState *env, target_ulong ptr)
103
+{
21
+{
104
+ TCGMemOpIdx oi = make_memop_idx(MO_TE | SHIFT, CPU_MMU_INDEX);
22
+ if (args_are_copies(op->args[1], op->args[2])) {
105
+ return glue(glue(helper_ret_ld, USUFFIX), _cmmu)(env, ptr, oi, 0);
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
106
+}
24
+ }
107
+
25
+ return false;
108
+#if DATA_SIZE <= 2
109
+static inline int
110
+glue(glue(cpu_lds, SUFFIX), _code)(CPUArchState *env, target_ulong ptr)
111
+{
112
+ return (DATA_STYPE)glue(glue(cpu_ld, USUFFIX), _code)(env, ptr);
113
+}
114
#endif
115
116
-/* generic load/store macros */
117
+#else
118
119
static inline RES_TYPE
120
glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
121
target_ulong ptr,
122
uintptr_t retaddr)
123
{
124
- CPUTLBEntry *entry;
125
- RES_TYPE res;
126
- target_ulong addr;
127
- int mmu_idx = CPU_MMU_INDEX;
128
- MemOp op = MO_TE | SHIFT;
129
-#if !defined(SOFTMMU_CODE_ACCESS)
130
- uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);
131
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
132
-#endif
133
-
134
- addr = ptr;
135
- entry = tlb_entry(env, mmu_idx, addr);
136
- if (unlikely(entry->ADDR_READ !=
137
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
138
- TCGMemOpIdx oi = make_memop_idx(op, mmu_idx);
139
- res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
140
- oi, retaddr);
141
- } else {
142
- uintptr_t hostaddr = addr + entry->addend;
143
- res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
144
- }
145
-#ifndef SOFTMMU_CODE_ACCESS
146
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
147
-#endif
148
- return res;
149
+ return glue(glue(cpu_ld, USUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX,
150
+ retaddr);
151
}
152
153
static inline RES_TYPE
154
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
155
{
156
- return glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
157
+ return glue(glue(cpu_ld, USUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX, 0);
158
}
159
160
#if DATA_SIZE <= 2
161
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
162
target_ulong ptr,
163
uintptr_t retaddr)
164
{
165
- CPUTLBEntry *entry;
166
- int res;
167
- target_ulong addr;
168
- int mmu_idx = CPU_MMU_INDEX;
169
- MemOp op = MO_TE | MO_SIGN | SHIFT;
170
-#ifndef SOFTMMU_CODE_ACCESS
171
- uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);
172
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
173
-#endif
174
-
175
- addr = ptr;
176
- entry = tlb_entry(env, mmu_idx, addr);
177
- if (unlikely(entry->ADDR_READ !=
178
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
179
- TCGMemOpIdx oi = make_memop_idx(op & ~MO_SIGN, mmu_idx);
180
- res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
181
- MMUSUFFIX)(env, addr, oi, retaddr);
182
- } else {
183
- uintptr_t hostaddr = addr + entry->addend;
184
- res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
185
- }
186
-#ifndef SOFTMMU_CODE_ACCESS
187
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
188
-#endif
189
- return res;
190
+ return glue(glue(cpu_lds, SUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX,
191
+ retaddr);
192
}
193
194
static inline int
195
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
196
{
197
- return glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
198
+ return glue(glue(cpu_lds, SUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX, 0);
199
}
200
#endif
201
202
-#ifndef SOFTMMU_CODE_ACCESS
203
-
204
/* generic store macro */
205
206
static inline void
207
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
208
target_ulong ptr,
209
RES_TYPE v, uintptr_t retaddr)
210
{
211
- CPUTLBEntry *entry;
212
- target_ulong addr;
213
- int mmu_idx = CPU_MMU_INDEX;
214
- MemOp op = MO_TE | SHIFT;
215
-#if !defined(SOFTMMU_CODE_ACCESS)
216
- uint16_t meminfo = trace_mem_get_info(op, mmu_idx, true);
217
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
218
-#endif
219
-
220
- addr = ptr;
221
- entry = tlb_entry(env, mmu_idx, addr);
222
- if (unlikely(tlb_addr_write(entry) !=
223
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
224
- TCGMemOpIdx oi = make_memop_idx(op, mmu_idx);
225
- glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
226
- retaddr);
227
- } else {
228
- uintptr_t hostaddr = addr + entry->addend;
229
- glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
230
- }
231
-#ifndef SOFTMMU_CODE_ACCESS
232
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
233
-#endif
234
+ glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX,
235
+ retaddr);
236
}
237
238
static inline void
239
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
240
RES_TYPE v)
241
{
242
- glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(env, ptr, v, 0);
243
+ glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX, 0);
244
}
245
246
#endif /* !SOFTMMU_CODE_ACCESS */
247
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
248
#undef SUFFIX
249
#undef USUFFIX
250
#undef DATA_SIZE
251
-#undef MMUSUFFIX
252
-#undef ADDR_READ
253
-#undef URETSUFFIX
254
-#undef SRETSUFFIX
255
#undef SHIFT
256
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
257
index XXXXXXX..XXXXXXX 100644
258
--- a/accel/tcg/cputlb.c
259
+++ b/accel/tcg/cputlb.c
260
@@ -XXX,XX +XXX,XX @@
261
#include "qemu/atomic.h"
262
#include "qemu/atomic128.h"
263
#include "translate-all.h"
264
+#include "trace-root.h"
265
+#include "qemu/plugin.h"
266
+#include "trace/mem.h"
267
#ifdef CONFIG_PLUGIN
268
#include "qemu/plugin-memory.h"
269
#endif
270
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
271
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
272
}
273
274
+/*
275
+ * Load helpers for cpu_ldst.h.
276
+ */
277
+
278
+static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
279
+ int mmu_idx, uintptr_t retaddr,
280
+ MemOp op, FullLoadHelper *full_load)
281
+{
282
+ uint16_t meminfo;
283
+ TCGMemOpIdx oi;
284
+ uint64_t ret;
285
+
286
+ meminfo = trace_mem_get_info(op, mmu_idx, false);
287
+ trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
288
+
289
+ op &= ~MO_SIGN;
290
+ oi = make_memop_idx(op, mmu_idx);
291
+ ret = full_load(env, addr, oi, retaddr);
292
+
293
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
294
+
295
+ return ret;
296
+}
297
+
298
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
299
+ int mmu_idx, uintptr_t ra)
300
+{
301
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
302
+}
303
+
304
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
305
+ int mmu_idx, uintptr_t ra)
306
+{
307
+ return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
308
+ full_ldub_mmu);
309
+}
310
+
311
+uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
312
+ int mmu_idx, uintptr_t ra)
313
+{
314
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW,
315
+ MO_TE == MO_LE
316
+ ? full_le_lduw_mmu : full_be_lduw_mmu);
317
+}
318
+
319
+int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
320
+ int mmu_idx, uintptr_t ra)
321
+{
322
+ return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW,
323
+ MO_TE == MO_LE
324
+ ? full_le_lduw_mmu : full_be_lduw_mmu);
325
+}
326
+
327
+uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
328
+ int mmu_idx, uintptr_t ra)
329
+{
330
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL,
331
+ MO_TE == MO_LE
332
+ ? full_le_ldul_mmu : full_be_ldul_mmu);
333
+}
334
+
335
+uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
336
+ int mmu_idx, uintptr_t ra)
337
+{
338
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ,
339
+ MO_TE == MO_LE
340
+ ? helper_le_ldq_mmu : helper_be_ldq_mmu);
341
+}
26
+}
342
+
27
+
343
/*
28
/*
344
* Store Helpers
29
* These outermost fold_<op> functions are sorted alphabetically.
30
+ *
31
+ * The ordering of the transformations should be:
32
+ * 1) those that produce a constant
33
+ * 2) those that produce a copy
34
+ * 3) those that produce information about the result value.
345
*/
35
*/
346
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
36
347
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
37
static bool fold_add(OptContext *ctx, TCGOp *op)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
39
40
static bool fold_and(OptContext *ctx, TCGOp *op)
41
{
42
- return fold_const2(ctx, op);
43
+ if (fold_const2(ctx, op) ||
44
+ fold_xx_to_x(ctx, op)) {
45
+ return true;
46
+ }
47
+ return false;
348
}
48
}
349
49
350
+/*
50
static bool fold_andc(OptContext *ctx, TCGOp *op)
351
+ * Store Helpers for cpu_ldst.h
51
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
352
+ */
52
353
+
53
static bool fold_or(OptContext *ctx, TCGOp *op)
354
+static inline void QEMU_ALWAYS_INLINE
54
{
355
+cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
55
- return fold_const2(ctx, op);
356
+ int mmu_idx, uintptr_t retaddr, MemOp op)
56
+ if (fold_const2(ctx, op) ||
357
+{
57
+ fold_xx_to_x(ctx, op)) {
358
+ TCGMemOpIdx oi;
58
+ return true;
359
+ uint16_t meminfo;
59
+ }
360
+
60
+ return false;
361
+ meminfo = trace_mem_get_info(op, mmu_idx, true);
61
}
362
+ trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
62
363
+
63
static bool fold_orc(OptContext *ctx, TCGOp *op)
364
+ oi = make_memop_idx(op, mmu_idx);
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
365
+ store_helper(env, addr, val, oi, retaddr, op);
65
break;
366
+
66
}
367
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
67
368
+}
68
- /* Simplify expression for "op r, a, a => mov r, a" cases */
369
+
69
- switch (opc) {
370
+void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
70
- CASE_OP_32_64_VEC(or):
371
+ int mmu_idx, uintptr_t retaddr)
71
- CASE_OP_32_64_VEC(and):
372
+{
72
- if (args_are_copies(op->args[1], op->args[2])) {
373
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
73
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
374
+}
74
- continue;
375
+
75
- }
376
+void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
76
- break;
377
+ int mmu_idx, uintptr_t retaddr)
77
- default:
378
+{
78
- break;
379
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW);
79
- }
380
+}
80
-
381
+
81
/*
382
+void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
82
* Process each opcode.
383
+ int mmu_idx, uintptr_t retaddr)
83
* Sorted alphabetically by opcode as much as possible.
384
+{
385
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL);
386
+}
387
+
388
+void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
389
+ int mmu_idx, uintptr_t retaddr)
390
+{
391
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ);
392
+}
393
+
394
/* First set of helpers allows passing in of OI and RETADDR. This makes
395
them callable from other helpers. */
396
397
--
84
--
398
2.20.1
85
2.25.1
399
86
400
87
diff view generated by jsdifflib
The functions generated by these macros are unused.

Cc: Eduardo Habkost <ehabkost@redhat.com>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/cpu.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define cpu_list x86_cpu_list

 /* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _ksmap
-#define MMU_MODE1_SUFFIX _user
-#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
 #define MMU_KSMAP_IDX   0
 #define MMU_USER_IDX    1
 #define MMU_KNOSMAP_IDX 2
--
2.20.1

Pull the "op r, a, 0 => movi r, 0" optimization into a function,
and use it in the outer opcode fold functions.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }

+/* If the binary operation has second argument @i, fold to @i. */
+static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+    }
+    return false;
+}
+
 /* If the binary operation has both arguments equal, fold to @i. */
 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_xi_to_i(ctx, op, 0) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)

 static bool fold_mul(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_i(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_i(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             continue;
         }

-        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
-        switch (opc) {
-        CASE_OP_32_64_VEC(and):
-        CASE_OP_32_64_VEC(mul):
-        CASE_OP_32_64(muluh):
-        CASE_OP_32_64(mulsh):
-            if (arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
-                continue;
-            }
-            break;
-        default:
-            break;
-        }
-
         /*
          * Process each opcode.
          * Sorted alphabetically by opcode as much as possible.
--
2.25.1
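The rewrite above is behavior-preserving because of two arithmetic identities; a minimal standalone check (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0xdeadbeefcafef00dull;
        assert((a & 0) == 0);    /* and r, a, 0  =>  movi r, 0 */
        assert(a * 0 == 0);      /* mul/muluh/mulsh r, a, 0  =>  movi r, 0 */
        return 0;
    }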
New patch

Compute the type of the operation early.

There are at least 4 places that used a def->flags ladder
to determine the type of the operation being optimized.

There were two places that assumed !TCG_OPF_64BIT means
TCG_TYPE_I32, and so could potentially compute incorrect
results for vector operations.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 149 +++++++++++++++++++++++++++++--------------------
 1 file changed, 89 insertions(+), 60 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {

     /* In flight values from optimization. */
     uint64_t z_mask;
+    TCGType type;
 } OptContext;

 static inline TempOptInfo *ts_info(TCGTemp *ts)
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 {
     TCGTemp *dst_ts = arg_temp(dst);
     TCGTemp *src_ts = arg_temp(src);
-    const TCGOpDef *def;
     TempOptInfo *di;
     TempOptInfo *si;
     uint64_t z_mask;
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
     reset_ts(dst_ts);
     di = ts_info(dst_ts);
     si = ts_info(src_ts);
-    def = &tcg_op_defs[op->opc];
-    if (def->flags & TCG_OPF_VECTOR) {
-        new_op = INDEX_op_mov_vec;
-    } else if (def->flags & TCG_OPF_64BIT) {
-        new_op = INDEX_op_mov_i64;
-    } else {
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
         new_op = INDEX_op_mov_i32;
+        break;
+    case TCG_TYPE_I64:
+        new_op = INDEX_op_mov_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
+        new_op = INDEX_op_mov_vec;
+        break;
+    default:
+        g_assert_not_reached();
     }
     op->opc = new_op;
-    /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
     op->args[0] = dst;
     op->args[1] = src;

@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                              TCGArg dst, uint64_t val)
 {
-    const TCGOpDef *def = &tcg_op_defs[op->opc];
-    TCGType type;
-    TCGTemp *tv;
-
-    if (def->flags & TCG_OPF_VECTOR) {
-        type = TCGOP_VECL(op) + TCG_TYPE_V64;
-    } else if (def->flags & TCG_OPF_64BIT) {
-        type = TCG_TYPE_I64;
-    } else {
-        type = TCG_TYPE_I32;
-    }
-
     /* Convert movi to mov with constant temp. */
-    tv = tcg_constant_internal(type, val);
+    TCGTemp *tv = tcg_constant_internal(ctx->type, val);
+
     init_ts_info(ctx, tv);
     return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     }
 }

-static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
+static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
+                                    uint64_t x, uint64_t y)
 {
-    const TCGOpDef *def = &tcg_op_defs[op];
     uint64_t res = do_constant_folding_2(op, x, y);
-    if (!(def->flags & TCG_OPF_64BIT)) {
+    if (type == TCG_TYPE_I32) {
         res = (int32_t)res;
     }
     return res;
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
  * Return -1 if the condition can't be simplified,
  * and the result of the condition (0 or 1) if it can.
  */
-static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
+static int do_constant_folding_cond(TCGType type, TCGArg x,
                                     TCGArg y, TCGCond c)
 {
     uint64_t xv = arg_info(x)->val;
     uint64_t yv = arg_info(y)->val;

     if (arg_is_const(x) && arg_is_const(y)) {
-        const TCGOpDef *def = &tcg_op_defs[op];
-        tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
-        if (def->flags & TCG_OPF_64BIT) {
-            return do_constant_folding_cond_64(xv, yv, c);
-        } else {
+        switch (type) {
+        case TCG_TYPE_I32:
             return do_constant_folding_cond_32(xv, yv, c);
+        case TCG_TYPE_I64:
+            return do_constant_folding_cond_64(xv, yv, c);
+        default:
+            /* Only scalar comparisons are optimizable */
+            return -1;
         }
     } else if (args_are_copies(x, y)) {
         return do_constant_folding_cond_eq(c);
@@ -XXX,XX +XXX,XX @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
         uint64_t t;

         t = arg_info(op->args[1])->val;
-        t = do_constant_folding(op->opc, t, 0);
+        t = do_constant_folding(op->opc, ctx->type, t, 0);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
     return false;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
         uint64_t t1 = arg_info(op->args[1])->val;
         uint64_t t2 = arg_info(op->args[2])->val;

-        t1 = do_constant_folding(op->opc, t1, t2);
+        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
     }
     return false;
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[2];
-    int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
+    int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);

     if (i == 0) {
         tcg_op_remove(ctx->tcg, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
      * Simplify EQ/NE comparisons where one of the pairs
      * can be simplified.
      */
-    i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
+    i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                  op->args[2], cond);
     switch (i ^ inv) {
     case 0:
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
         goto do_brcond_high;
     }

-    i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
+    i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                  op->args[3], cond);
     switch (i ^ inv) {
     case 0:
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     if (arg_is_const(op->args[1])) {
         uint64_t t = arg_info(op->args[1])->val;

-        t = do_constant_folding(op->opc, t, op->args[2]);
+        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
     return false;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         uint64_t t = arg_info(op->args[1])->val;

         if (t != 0) {
-            t = do_constant_folding(op->opc, t, 0);
+            t = do_constant_folding(op->opc, ctx->type, t, 0);
             return tcg_opt_gen_movi(ctx, op, op->args[0], t);
         }
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)

 static bool fold_movcond(OptContext *ctx, TCGOp *op)
 {
-    TCGOpcode opc = op->opc;
     TCGCond cond = op->args[5];
-    int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
+    int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);

     if (i >= 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
     if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
         uint64_t tv = arg_info(op->args[3])->val;
         uint64_t fv = arg_info(op->args[4])->val;
+        TCGOpcode opc;

-        opc = (opc == INDEX_op_movcond_i32
-               ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
+        switch (ctx->type) {
+        case TCG_TYPE_I32:
+            opc = INDEX_op_setcond_i32;
+            break;
+        case TCG_TYPE_I64:
+            opc = INDEX_op_setcond_i64;
+            break;
+        default:
+            g_assert_not_reached();
+        }

         if (tv == 1 && fv == 0) {
             op->opc = opc;
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
 static bool fold_setcond(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[3];
-    int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
+    int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);

     if (i >= 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], i);
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
      * Simplify EQ/NE comparisons where one of the pairs
      * can be simplified.
      */
-    i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
+    i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                  op->args[3], cond);
     switch (i ^ inv) {
     case 0:
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
         goto do_setcond_high;
     }

-    i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
+    i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                  op->args[4], cond);
     switch (i ^ inv) {
     case 0:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
         copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

+        /* Pre-compute the type of the operation. */
+        if (def->flags & TCG_OPF_VECTOR) {
+            ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
+        } else if (def->flags & TCG_OPF_64BIT) {
+            ctx.type = TCG_TYPE_I64;
+        } else {
+            ctx.type = TCG_TYPE_I32;
+        }
+
         /* For commutative operations make constant second argument */
         switch (opc) {
         CASE_OP_32_64_VEC(add):
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                 /* Proceed with possible constant folding. */
                 break;
             }
-            if (opc == INDEX_op_sub_i32) {
+            switch (ctx.type) {
+            case TCG_TYPE_I32:
                 neg_op = INDEX_op_neg_i32;
                 have_neg = TCG_TARGET_HAS_neg_i32;
-            } else if (opc == INDEX_op_sub_i64) {
+                break;
+            case TCG_TYPE_I64:
                 neg_op = INDEX_op_neg_i64;
                 have_neg = TCG_TARGET_HAS_neg_i64;
-            } else if (TCG_TARGET_HAS_neg_vec) {
-                TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
-                unsigned vece = TCGOP_VECE(op);
-                neg_op = INDEX_op_neg_vec;
-                have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
-            } else {
                 break;
+            case TCG_TYPE_V64:
+            case TCG_TYPE_V128:
+            case TCG_TYPE_V256:
+                neg_op = INDEX_op_neg_vec;
+                have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
+                                               TCGOP_VECE(op)) > 0;
+                break;
+            default:
+                g_assert_not_reached();
             }
             if (!have_neg) {
                 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             TCGOpcode not_op;
             bool have_not;

-            if (def->flags & TCG_OPF_VECTOR) {
-                not_op = INDEX_op_not_vec;
-                have_not = TCG_TARGET_HAS_not_vec;
-            } else if (def->flags & TCG_OPF_64BIT) {
-                not_op = INDEX_op_not_i64;
-                have_not = TCG_TARGET_HAS_not_i64;
-            } else {
+            switch (ctx.type) {
+            case TCG_TYPE_I32:
                 not_op = INDEX_op_not_i32;
                 have_not = TCG_TARGET_HAS_not_i32;
+                break;
+            case TCG_TYPE_I64:
+                not_op = INDEX_op_not_i64;
+                have_not = TCG_TARGET_HAS_not_i64;
+                break;
+            case TCG_TYPE_V64:
+            case TCG_TYPE_V128:
+            case TCG_TYPE_V256:
+                not_op = INDEX_op_not_vec;
+                have_not = TCG_TARGET_HAS_not_vec;
+                break;
+            default:
+                g_assert_not_reached();
             }
             if (!have_not) {
                 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
            below, we can ignore high bits, but for further optimizations we
            need to record that the high bits contain garbage.  */
         partmask = z_mask;
-        if (!(def->flags & TCG_OPF_64BIT)) {
+        if (ctx.type == TCG_TYPE_I32) {
             z_mask |= ~(tcg_target_ulong)0xffffffffu;
             partmask &= 0xffffffffu;
             affected &= 0xffffffffu;
--
2.25.1
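A minimal sketch of why the folded value must be re-truncated when the operation type is TCG_TYPE_I32, mirroring the "(int32_t)res" in do_constant_folding() above (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0xffffffffu, y = 1;
        uint64_t res = x + y;      /* folding is done in 64 bits: 0x100000000 */
        assert(res != 0);          /* wrong if kept as an add_i32 result */
        res = (int32_t)res;        /* the truncation the patch centralizes */
        assert(res == 0);          /* correct 32-bit result */
        return 0;
    }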
New patch

Split out the conditional conversion from a more complex logical
operation to a simple NOT.  Create a couple more helpers to make
this easy for the outer-most logical operations.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
 1 file changed, 86 insertions(+), 72 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }

+/*
+ * Convert @op to NOT, if NOT is supported by the host.
+ * Return true if the conversion is successful, which will still
+ * indicate that the processing is complete.
+ */
+static bool fold_not(OptContext *ctx, TCGOp *op);
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
+{
+    TCGOpcode not_op;
+    bool have_not;
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        not_op = INDEX_op_not_i32;
+        have_not = TCG_TARGET_HAS_not_i32;
+        break;
+    case TCG_TYPE_I64:
+        not_op = INDEX_op_not_i64;
+        have_not = TCG_TARGET_HAS_not_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        not_op = INDEX_op_not_vec;
+        have_not = TCG_TARGET_HAS_not_vec;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_not) {
+        op->opc = not_op;
+        op->args[1] = op->args[idx];
+        return fold_not(ctx, op);
+    }
+    return false;
+}
+
+/* If the binary operation has first argument @i, fold to NOT. */
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return fold_to_not(ctx, op, 2);
+    }
+    return false;
+}
+
 /* If the binary operation has second argument @i, fold to @i. */
 static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
     return false;
 }

+/* If the binary operation has second argument @i, fold to NOT. */
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+        return fold_to_not(ctx, op, 1);
+    }
+    return false;
+}
+
 /* If the binary operation has both arguments equal, fold to @i. */
 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)

 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_extract(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)

 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, -1)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_neg(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)

 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_not(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    /* Because of fold_to_not, we want to always return true, via finish. */
+    finish_folding(ctx, op);
+    return true;
 }

 static bool fold_or(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)

 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_ix_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                 }
             }
             break;
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64(nand):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == -1) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64(nor):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(andc):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == -1) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(orc):
-        CASE_OP_32_64(eqv):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == 0) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-    try_not:
-        {
-            TCGOpcode not_op;
-            bool have_not;
-
-            switch (ctx.type) {
-            case TCG_TYPE_I32:
-                not_op = INDEX_op_not_i32;
-                have_not = TCG_TARGET_HAS_not_i32;
-                break;
-            case TCG_TYPE_I64:
-                not_op = INDEX_op_not_i64;
-                have_not = TCG_TARGET_HAS_not_i64;
-                break;
-            case TCG_TYPE_V64:
-            case TCG_TYPE_V128:
-            case TCG_TYPE_V256:
-                not_op = INDEX_op_not_vec;
-                have_not = TCG_TARGET_HAS_not_vec;
-                break;
-            default:
-                g_assert_not_reached();
-            }
-            if (!have_not) {
-                break;
-            }
-            op->opc = not_op;
-            reset_temp(op->args[0]);
-            op->args[1] = op->args[i];
-            continue;
-        }
         default:
             break;
         }
--
2.25.1
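The conversions installed above rest on simple boolean identities; a standalone check (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0x0123456789abcdefull;
        assert((a ^ ~0ull) == ~a);     /* xor  a, -1  =>  not a */
        assert(~(a & ~0ull) == ~a);    /* nand a, -1  =>  not a */
        assert(~(a | 0) == ~a);        /* nor  a,  0  =>  not a */
        assert(~(a ^ 0) == ~a);        /* eqv  a,  0  =>  not a */
        assert((~0ull & ~a) == ~a);    /* andc -1, a  =>  not a */
        assert((0 | ~a) == ~a);        /* orc   0, a  =>  not a */
        return 0;
    }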
The functions generated by these macros are unused.

Cc: Edgar E. Iglesias <edgar.iglesias@gmail.com>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/cris/cpu.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/target/cris/cpu.h b/target/cris/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.h
+++ b/target/cris/cpu.h
@@ -XXX,XX +XXX,XX @@ enum {
 #define cpu_signal_handler cpu_cris_signal_handler

 /* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _kernel
-#define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
 static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
 {
--
2.20.1

Even though there is only one user, place this more complex
conversion into its own helper.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 89 ++++++++++++++++++++++++++------------------------
 1 file changed, 47 insertions(+), 42 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)

 static bool fold_neg(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+    /*
+     * Because of fold_sub_to_neg, we want to always return true,
+     * via finish_folding.
+     */
+    finish_folding(ctx, op);
+    return true;
 }

 static bool fold_nor(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }

+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
+{
+    TCGOpcode neg_op;
+    bool have_neg;
+
+    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
+        return false;
+    }
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        neg_op = INDEX_op_neg_i32;
+        have_neg = TCG_TARGET_HAS_neg_i32;
+        break;
+    case TCG_TYPE_I64:
+        neg_op = INDEX_op_neg_i64;
+        have_neg = TCG_TARGET_HAS_neg_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        neg_op = INDEX_op_neg_vec;
+        have_neg = (TCG_TARGET_HAS_neg_vec &&
+                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_neg) {
+        op->opc = neg_op;
+        op->args[1] = op->args[2];
+        return fold_neg(ctx, op);
+    }
+    return false;
+}
+
 static bool fold_sub(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_sub_to_neg(ctx, op)) {
         return true;
     }
     return false;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                 continue;
             }
             break;
-        CASE_OP_32_64_VEC(sub):
-            {
-                TCGOpcode neg_op;
-                bool have_neg;
-
-                if (arg_is_const(op->args[2])) {
-                    /* Proceed with possible constant folding. */
-                    break;
-                }
-                switch (ctx.type) {
-                case TCG_TYPE_I32:
-                    neg_op = INDEX_op_neg_i32;
-                    have_neg = TCG_TARGET_HAS_neg_i32;
-                    break;
-                case TCG_TYPE_I64:
-                    neg_op = INDEX_op_neg_i64;
-                    have_neg = TCG_TARGET_HAS_neg_i64;
-                    break;
-                case TCG_TYPE_V64:
-                case TCG_TYPE_V128:
-                case TCG_TYPE_V256:
-                    neg_op = INDEX_op_neg_vec;
-                    have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
-                                                   TCGOP_VECE(op)) > 0;
-                    break;
-                default:
-                    g_assert_not_reached();
-                }
-                if (!have_neg) {
-                    break;
-                }
-                if (arg_is_const(op->args[1])
-                    && arg_info(op->args[1])->val == 0) {
-                    op->opc = neg_op;
-                    reset_temp(op->args[0]);
-                    op->args[1] = op->args[2];
-                    continue;
-                }
-            }
-            break;
         default:
             break;
         }
--
2.25.1
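fold_sub_to_neg() relies on the two's-complement identity behind "sub r, 0, b => neg r, b"; a standalone check (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t b = 0x123456789abcdef0ull;
        assert(0 - b == -b);       /* the rewrite itself */
        assert(-b == ~b + 1);      /* why a plain neg opcode suffices */
        return 0;
    }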
From: Philippe Mathieu-Daudé <philmd@redhat.com>

All the *.inc.c files included by tcg/$TARGET/tcg-target.inc.c
are in tcg/, their parent directory. To simplify the preprocessor
search path, include the relative parent path: '..'.

Patch created mechanically by running:

  $ for x in tcg-pool.inc.c tcg-ldst.inc.c; do \
      sed -i "s,#include \"$x\",#include \"../$x\"," \
        $(git grep -l "#include \"$x\""); \
    done

Acked-by: David Gibson <david@gibson.dropbear.id.au> (ppc parts)
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Stefan Weil <sw@weilnetz.de>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20200101112303.20724-3-philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.inc.c | 4 ++--
 tcg/arm/tcg-target.inc.c     | 4 ++--
 tcg/i386/tcg-target.inc.c    | 4 ++--
 tcg/mips/tcg-target.inc.c    | 2 +-
 tcg/ppc/tcg-target.inc.c     | 4 ++--
 tcg/riscv/tcg-target.inc.c   | 4 ++--
 tcg/s390/tcg-target.inc.c    | 4 ++--
 tcg/sparc/tcg-target.inc.c   | 2 +-
 8 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@
  * See the COPYING file in the top-level directory for details.
  */

-#include "tcg-pool.inc.c"
+#include "../tcg-pool.inc.c"
 #include "qemu/bitops.h"

 /* We're going to re-use TCGType in setting of the SF bit, which controls
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
 }

 #ifdef CONFIG_SOFTMMU
-#include "tcg-ldst.inc.c"
+#include "../tcg-ldst.inc.c"

 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     TCGMemOpIdx oi, uintptr_t ra)
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@
  */

 #include "elf.h"
-#include "tcg-pool.inc.c"
+#include "../tcg-pool.inc.c"

 int arm_arch = __ARM_ARCH;

@@ -XXX,XX +XXX,XX @@ static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
 }

 #ifdef CONFIG_SOFTMMU
-#include "tcg-ldst.inc.c"
+#include "../tcg-ldst.inc.c"

 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@
  * THE SOFTWARE.
  */

-#include "tcg-pool.inc.c"
+#include "../tcg-pool.inc.c"

 #ifdef CONFIG_DEBUG_TCG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
 }

 #if defined(CONFIG_SOFTMMU)
-#include "tcg-ldst.inc.c"
+#include "../tcg-ldst.inc.c"

 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
 }

 #if defined(CONFIG_SOFTMMU)
-#include "tcg-ldst.inc.c"
+#include "../tcg-ldst.inc.c"

 static void * const qemu_ld_helpers[16] = {
     [MO_UB]   = helper_ret_ldub_mmu,
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@
  */

 #include "elf.h"
-#include "tcg-pool.inc.c"
+#include "../tcg-pool.inc.c"

 #if defined _CALL_DARWIN || defined __APPLE__
 #define TCG_TARGET_CALL_DARWIN
@@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_exts_opc[4] = {
 };

 #if defined (CONFIG_SOFTMMU)
-#include "tcg-ldst.inc.c"
+#include "../tcg-ldst.inc.c"

 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
  *                                 int mmu_idx, uintptr_t ra)
diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.inc.c
+++ b/tcg/riscv/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@
  * THE SOFTWARE.
  */

-#include "tcg-pool.inc.c"
+#include "../tcg-pool.inc.c"

 #ifdef CONFIG_DEBUG_TCG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
 */

 #if defined(CONFIG_SOFTMMU)
-#include "tcg-ldst.inc.c"
+#include "../tcg-ldst.inc.c"

 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     TCGMemOpIdx oi, uintptr_t ra)
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@
 #error "unsupported code generation mode"
 #endif

-#include "tcg-pool.inc.c"
+#include "../tcg-pool.inc.c"
 #include "elf.h"

 /* ??? The translation blocks produced by TCG are generally small enough to
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
 }

 #if defined(CONFIG_SOFTMMU)
-#include "tcg-ldst.inc.c"
+#include "../tcg-ldst.inc.c"

 /* We're expecting to use a 20-bit negative offset on the tlb memory ops.  */
 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@
  * THE SOFTWARE.
  */

-#include "tcg-pool.inc.c"
+#include "../tcg-pool.inc.c"

 #ifdef CONFIG_DEBUG_TCG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
--
2.20.1

Pull the "op r, a, i => mov r, a" optimization into a function,
and use it in the outer-most logical operations.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 61 +++++++++++++++++++++-----------------------------
 1 file changed, 26 insertions(+), 35 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
     return false;
 }

+/* If the binary operation has second argument @i, fold to identity. */
+static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+    }
+    return false;
+}
+
 /* If the binary operation has second argument @i, fold to NOT. */
 static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)

 static bool fold_add(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, -1) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, 0) ||
         fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, -1) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
 static bool fold_or(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, 0) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, -1) ||
         fold_ix_to_not(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)

 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, 0) ||
         fold_sub_to_neg(ctx, op)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, 0) ||
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;
         }

-        /* Simplify expression for "op r, a, const => mov r, a" cases */
-        switch (opc) {
-        CASE_OP_32_64_VEC(add):
-        CASE_OP_32_64_VEC(sub):
-        CASE_OP_32_64_VEC(or):
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64_VEC(andc):
-        CASE_OP_32_64(shl):
-        CASE_OP_32_64(shr):
-        CASE_OP_32_64(sar):
-        CASE_OP_32_64(rotl):
-        CASE_OP_32_64(rotr):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0) {
-                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
-                continue;
-            }
-            break;
-        CASE_OP_32_64_VEC(and):
-        CASE_OP_32_64_VEC(orc):
-        CASE_OP_32_64(eqv):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == -1) {
-                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
-                continue;
-            }
-            break;
-        default:
-            break;
-        }
-
         /* Simplify using known-zero bits. Currently only ops with a single
            output argument is supported. */
         z_mask = -1;
--
2.25.1
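The identity folds added above can be checked in isolation; a standalone sanity test (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0xfeedface12345678ull;
        assert((a + 0) == a);          /* add/sub r, a, 0  =>  mov r, a */
        assert((a | 0) == a);          /* or   r, a, 0     =>  mov r, a */
        assert((a ^ 0) == a);          /* xor  r, a, 0     =>  mov r, a */
        assert((a << 0) == a);         /* shifts/rotates by 0  =>  mov  */
        assert((a & ~0ull) == a);      /* and r, a, -1 (and andc r, a, 0) */
        assert(~(a ^ ~0ull) == a);     /* eqv  r, a, -1    =>  mov r, a */
        return 0;
    }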
The functions generated by these macros are unused.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/alpha/cpu.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -XXX,XX +XXX,XX @@ enum {
    PALcode cheats and usees the KSEG mapping for its code+data rather than
    physical addresses.  */

-#define MMU_MODE0_SUFFIX _kernel
-#define MMU_MODE1_SUFFIX _user
 #define MMU_KERNEL_IDX 0
 #define MMU_USER_IDX 1
 #define MMU_PHYS_IDX 2
--
2.20.1

Pull the "op r, 0, b => movi r, 0" optimization into a function,
and use it in fold_shift.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 28 ++++++++++------------------
 1 file changed, 10 insertions(+), 18 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
     return false;
 }

+/* If the binary operation has first argument @i, fold to @i. */
+static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+    }
+    return false;
+}
+
 /* If the binary operation has first argument @i, fold to NOT. */
 static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_ix_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;
         }

-        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
-           and "sub r, 0, a => neg r, a" case.  */
-        switch (opc) {
-        CASE_OP_32_64(shl):
-        CASE_OP_32_64(shr):
-        CASE_OP_32_64(sar):
-        CASE_OP_32_64(rotl):
-        CASE_OP_32_64(rotr):
-            if (arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == 0) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
-                continue;
-            }
-            break;
-        default:
-            break;
-        }
-
         /* Simplify using known-zero bits. Currently only ops with a single
            output argument is supported. */
         z_mask = -1;
--
2.25.1
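The new fold_ix_to_i() covers the dual case, where the *first* operand is the known constant; for shifts and rotates of zero the result is zero regardless of the count, which a standalone loop can verify (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (unsigned s = 0; s < 64; s++) {
            assert((UINT64_C(0) << s) == 0);   /* shl 0, s  =>  movi 0 */
            assert((UINT64_C(0) >> s) == 0);   /* shr/sar/rotl/rotr likewise */
        }
        return 0;
    }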
This adjustment was random and unnecessary.  The user mode
startup code in probe_guest_base() will choose a value for
guest_base that allows the host qemu binary to not conflict
with the guest binary.

With modern distributions, this isn't even used, as the default
is PIE, which does the same job in a more portable way.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Remove mention of config-host.ld from make distclean
---
 Makefile  |  2 +-
 configure | 47 -----------------------------------------------
 2 files changed, 1 insertion(+), 48 deletions(-)

diff --git a/Makefile b/Makefile
index XXXXXXX..XXXXXXX 100644
--- a/Makefile
+++ b/Makefile
@@ -XXX,XX +XXX,XX @@ rm -f $(MANUAL_BUILDDIR)/$1/objects.inv $(MANUAL_BUILDDIR)/$1/searchindex.js $(M
 endef

 distclean: clean
-    rm -f config-host.mak config-host.h* config-host.ld $(DOCS) qemu-options.texi qemu-img-cmds.texi qemu-monitor.texi qemu-monitor-info.texi
+    rm -f config-host.mak config-host.h* $(DOCS) qemu-options.texi qemu-img-cmds.texi qemu-monitor.texi qemu-monitor-info.texi
     rm -f tests/tcg/config-*.mak
     rm -f config-all-devices.mak config-all-disas.mak config.status
     rm -f $(SUBDIR_DEVICES_MAK)
diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if test "$cpu" = "s390x" ; then
   fi
 fi

-# Probe for the need for relocating the user-only binary.
-if ( [ "$linux_user" = yes ] || [ "$bsd_user" = yes ] ) && [ "$pie" = no ]; then
-  textseg_addr=
-  case "$cpu" in
-    arm | i386 | ppc* | s390* | sparc* | x86_64 | x32)
-      # ??? Rationale for choosing this address
-      textseg_addr=0x60000000
-      ;;
-    mips)
-      # A 256M aligned address, high in the address space, with enough
-      # room for the code_gen_buffer above it before the stack.
-      textseg_addr=0x60000000
-      ;;
-  esac
-  if [ -n "$textseg_addr" ]; then
-    cat > $TMPC <<EOF
-    int main(void) { return 0; }
-EOF
-    textseg_ldflags="-Wl,-Ttext-segment=$textseg_addr"
-    if ! compile_prog "" "$textseg_ldflags"; then
-      # In case ld does not support -Ttext-segment, edit the default linker
-      # script via sed to set the .text start addr.  This is needed on FreeBSD
-      # at least.
-      if ! $ld --verbose >/dev/null 2>&1; then
-        error_exit \
-            "We need to link the QEMU user mode binaries at a" \
-            "specific text address. Unfortunately your linker" \
-            "doesn't support either the -Ttext-segment option or" \
-            "printing the default linker script with --verbose." \
-            "If you don't want the user mode binaries, pass the" \
-            "--disable-user option to configure."
-      fi
-
-      $ld --verbose | sed \
-        -e '1,/==================================================/d' \
-        -e '/==================================================/,$d' \
-        -e "s/[.] = [0-9a-fx]* [+] SIZEOF_HEADERS/. = $textseg_addr + SIZEOF_HEADERS/" \
-        -e "s/__executable_start = [0-9a-fx]*/__executable_start = $textseg_addr/" > config-host.ld
-      textseg_ldflags="-Wl,-T../config-host.ld"
-    fi
-  fi
-fi
-
 # Check that the C++ compiler exists and works with the C compiler.
 # All the QEMU_CXXFLAGS are based on QEMU_CFLAGS. Keep this at the end to don't miss any other that could be added.
 if has $cxx; then
@@ -XXX,XX +XXX,XX @@ if test "$gprof" = "yes" ; then
   fi
 fi

-if test "$target_linux_user" = "yes" || test "$target_bsd_user" = "yes" ; then
-  ldflags="$ldflags $textseg_ldflags"
-fi
-
 # Newer kernels on s390 check for an S390_PGSTE program header and
 # enable the pgste page table extensions in that case. This makes
 # the vm.allocate_pgste sysctl unnecessary. We enable this program
--
2.20.1

Move all of the known-zero optimizations into the per-opcode
functions.  Use fold_masks when there is a possibility of the
result being determined, and simply set ctx->z_mask otherwise.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 545 ++++++++++++++++++++++++++-----------------------
 1 file changed, 294 insertions(+), 251 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     TCGTempSet temps_used;

     /* In flight values from optimization. */
-    uint64_t z_mask;
+    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
+    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     TCGType type;
 } OptContext;

@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }

+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    uint64_t a_mask = ctx->a_mask;
+    uint64_t z_mask = ctx->z_mask;
+
+    /*
+     * 32-bit ops generate 32-bit results.  For the result is zero test
+     * below, we can ignore high bits, but for further optimizations we
+     * need to record that the high bits contain garbage.
+     */
+    if (ctx->type == TCG_TYPE_I32) {
+        ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
+        a_mask &= MAKE_64BIT_MASK(0, 32);
+        z_mask &= MAKE_64BIT_MASK(0, 32);
+    }
+
+    if (z_mask == 0) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
+    }
+    if (a_mask == 0) {
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+    }
+    return false;
+}
+
 /*
  * Convert @op to NOT, if NOT is supported by the host.
  * Return true if the conversion is successful, which will still
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)

 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z1, z2;
+
     if (fold_const2(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
-    return false;
+
+    z1 = arg_info(op->args[1])->z_mask;
+    z2 = arg_info(op->args[2])->z_mask;
+    ctx->z_mask = z1 & z2;
+
+    /*
+     * Known-zeros does not imply known-ones.  Therefore unless
+     * arg2 is constant, we can't infer affected bits from it.
+     */
+    if (arg_is_const(op->args[2])) {
+        ctx->a_mask = z1 & ~z2;
+    }
+
+    return fold_masks(ctx, op);
 }

 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z1;
+
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
-    return false;
+
+    z1 = arg_info(op->args[1])->z_mask;
+
+    /*
+     * Known-zeros does not imply known-ones.  Therefore unless
+     * arg2 is constant, we can't infer anything from it.
+     */
+    if (arg_is_const(op->args[2])) {
+        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
+        ctx->a_mask = z1 & ~z2;
+        z1 &= z2;
+    }
+    ctx->z_mask = z1;
+
+    return fold_masks(ctx, op);
 }

 static bool fold_brcond(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)

 static bool fold_bswap(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, sign;
+
     if (arg_is_const(op->args[1])) {
         uint64_t t = arg_info(op->args[1])->val;

         t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+
+    z_mask = arg_info(op->args[1])->z_mask;
+    switch (op->opc) {
+    case INDEX_op_bswap16_i32:
+    case INDEX_op_bswap16_i64:
+        z_mask = bswap16(z_mask);
+        sign = INT16_MIN;
+        break;
+    case INDEX_op_bswap32_i32:
+    case INDEX_op_bswap32_i64:
+        z_mask = bswap32(z_mask);
+        sign = INT32_MIN;
+        break;
+    case INDEX_op_bswap64_i64:
+        z_mask = bswap64(z_mask);
+        sign = INT64_MIN;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
+    case TCG_BSWAP_OZ:
+        break;
+    case TCG_BSWAP_OS:
+        /* If the sign bit may be 1, force all the bits above to 1. */
+        if (z_mask & sign) {
+            z_mask |= sign;
+        }
+        break;
+    default:
+        /* The high bits are undefined: force all bits above the sign to 1. */
+        z_mask |= sign << 1;
+        break;
+    }
+    ctx->z_mask = z_mask;
+
+    return fold_masks(ctx, op);
 }

 static bool fold_call(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)

 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask;
+
     if (arg_is_const(op->args[1])) {
         uint64_t t = arg_info(op->args[1])->val;

@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         }
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
     }
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        z_mask = 31;
+        break;
+    case TCG_TYPE_I64:
+        z_mask = 63;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
+
     return false;
 }

 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        ctx->z_mask = 32 | 31;
+        break;
+    case TCG_TYPE_I64:
+        ctx->z_mask = 64 | 63;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return false;
 }

 static bool fold_deposit(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
         t1 = deposit64(t1, op->args[3], op->args[4], t2);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
     }
+
+    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
+                            op->args[3], op->args[4],
+                            arg_info(op->args[2])->z_mask);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)

 static bool fold_extract(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask_old, z_mask;
+
     if (arg_is_const(op->args[1])) {
         uint64_t t;

@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         t = extract64(t, op->args[2], op->args[3]);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+
+    z_mask_old = arg_info(op->args[1])->z_mask;
+    z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
+    if (op->args[2] == 0) {
+        ctx->a_mask = z_mask_old ^ z_mask;
+    }
+    ctx->z_mask = z_mask;
+
+    return fold_masks(ctx, op);
 }

 static bool fold_extract2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)

 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    uint64_t z_mask_old, z_mask, sign;
+    bool type_change = false;
+
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+
+    switch (op->opc) {
+    CASE_OP_32_64(ext8s):
+        sign = INT8_MIN;
+        z_mask = (uint8_t)z_mask;
+        break;
+    CASE_OP_32_64(ext16s):
+        sign = INT16_MIN;
+        z_mask = (uint16_t)z_mask;
+        break;
+    case INDEX_op_ext_i32_i64:
+        type_change = true;
+        QEMU_FALLTHROUGH;
+    case INDEX_op_ext32s_i64:
+        sign = INT32_MIN;
+        z_mask = (uint32_t)z_mask;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    if (z_mask & sign) {
+        z_mask |= sign;
+    } else if (!type_change) {
+        ctx->a_mask = z_mask_old ^ z_mask;
+    }
+    ctx->z_mask = z_mask;
+
+    return fold_masks(ctx, op);
 }

 static bool fold_extu(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    uint64_t z_mask_old, z_mask;
+    bool type_change = false;
+
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+
+    switch (op->opc) {
+    CASE_OP_32_64(ext8u):
+        z_mask = (uint8_t)z_mask;
+        break;
+    CASE_OP_32_64(ext16u):
+        z_mask = (uint16_t)z_mask;
+        break;
+    case INDEX_op_extrl_i64_i32:
+    case INDEX_op_extu_i32_i64:
+        type_change = true;
+        QEMU_FALLTHROUGH;
+    case INDEX_op_ext32u_i64:
+        z_mask = (uint32_t)z_mask;
+        break;
+    case INDEX_op_extrh_i64_i32:
+        type_change = true;
+        z_mask >>= 32;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    ctx->z_mask = z_mask;
+    if (!type_change) {
+        ctx->a_mask = z_mask_old ^ z_mask;
+    }
+    return fold_masks(ctx, op);
 }

 static bool fold_mb(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
     }

+    ctx->z_mask = arg_info(op->args[3])->z_mask
+                | arg_info(op->args[4])->z_mask;
+
     if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
         uint64_t tv = arg_info(op->args[3])->val;
         uint64_t fv = arg_info(op->args[4])->val;
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)

 static bool fold_neg(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask;
+
     if (fold_const1(ctx, op)) {
         return true;
     }
+
+    /* Set to 1 all bits to the left of the rightmost.  */
+    z_mask = arg_info(op->args[1])->z_mask;
+    ctx->z_mask = -(z_mask & -z_mask);
+
     /*
      * Because of fold_sub_to_neg, we want to always return true,
      * via finish_folding.
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
         fold_xx_to_x(ctx, op)) {
         return true;
     }
-    return false;
+
+    ctx->z_mask = arg_info(op->args[1])->z_mask
+                | arg_info(op->args[2])->z_mask;
+    return fold_masks(ctx, op);
 }

 static bool fold_orc(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)

 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
 {
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
+    MemOp mop = get_memop(oi);
+    int width = 8 * memop_size(mop);
+
+    if (!(mop & MO_SIGN) && width < 64) {
+        ctx->z_mask = MAKE_64BIT_MASK(0, width);
+    }
+
     /* Opcodes that touch guest memory stop the mb optimization. */
     ctx->prev_mb = NULL;
     return false;
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     if (i >= 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], i);
     }
+
+    ctx->z_mask = 1;
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
         op->opc = INDEX_op_setcond_i32;
         break;
     }
+
+    ctx->z_mask = 1;
     return false;

 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)

 static bool fold_sextract(OptContext *ctx, TCGOp *op)
 {
+    int64_t z_mask_old, z_mask;
+
     if (arg_is_const(op->args[1])) {
         uint64_t t;

@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
         t = sextract64(t, op->args[2], op->args[3]);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+
+    z_mask_old = arg_info(op->args[1])->z_mask;
+    z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
+    if (op->args[2] == 0 && z_mask >= 0) {
+        ctx->a_mask = z_mask_old ^ z_mask;
+    }
+    ctx->z_mask = z_mask;
+
+    return fold_masks(ctx, op);
 }

 static bool fold_shift(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
+
+    if (arg_is_const(op->args[2])) {
+        ctx->z_mask = do_constant_folding(op->opc, ctx->type,
+                                          arg_info(op->args[1])->z_mask,
+                                          arg_info(op->args[2])->val);
+        return fold_masks(ctx, op);
+    }
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
     return fold_addsub2_i32(ctx, op, false);
 }

+static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
+{
+    /* We can't do any folding with a load, but we can record bits. */
+    switch (op->opc) {
+    CASE_OP_32_64(ld8u):
+        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
+        break;
+    CASE_OP_32_64(ld16u):
+        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
+        break;
+    case INDEX_op_ld32u_i64:
+        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return false;
+}
+
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
-    return false;
+
+    ctx->z_mask = arg_info(op->args[1])->z_mask
+                | arg_info(op->args[2])->z_mask;
+    return fold_masks(ctx, op);
 }

 /* Propagate constants and copies, fold constant expressions. */
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }

     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
-        uint64_t z_mask, partmask, affected, tmp;
         TCGOpcode opc = op->opc;
         const TCGOpDef *def;
         bool done = false;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;
         }

-        /* Simplify using known-zero bits. Currently only ops with a single
-           output argument is supported. */
-        z_mask = -1;
-        affected = -1;
-        switch (opc) {
-        CASE_OP_32_64(ext8s):
-            if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        CASE_OP_32_64(ext8u):
-            z_mask = 0xff;
-            goto and_const;
-        CASE_OP_32_64(ext16s):
-            if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        CASE_OP_32_64(ext16u):
-            z_mask = 0xffff;
-            goto and_const;
-        case INDEX_op_ext32s_i64:
-            if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        case INDEX_op_ext32u_i64:
-            z_mask = 0xffffffffU;
-            goto and_const;
-
-        CASE_OP_32_64(and):
-            z_mask = arg_info(op->args[2])->z_mask;
-            if (arg_is_const(op->args[2])) {
-        and_const:
-                affected = arg_info(op->args[1])->z_mask & ~z_mask;
-            }
-            z_mask = arg_info(op->args[1])->z_mask & z_mask;
-            break;
-
-        case INDEX_op_ext_i32_i64:
-            if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        case INDEX_op_extu_i32_i64:
-            /* We do not compute affected as it is a size changing op. */
-            z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
-            break;
-
-        CASE_OP_32_64(andc):
-            /* Known-zeros does not imply known-ones.  Therefore unless
-               op->args[2] is constant, we can't infer anything from it. */
-            if (arg_is_const(op->args[2])) {
-                z_mask = ~arg_info(op->args[2])->z_mask;
-                goto and_const;
-            }
-            /* But we certainly know nothing outside args[1] may be set. */
-            z_mask = arg_info(op->args[1])->z_mask;
-            break;
-
-        case INDEX_op_sar_i32:
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & 31;
-                z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
-            }
-            break;
-        case INDEX_op_sar_i64:
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & 63;
-                z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
-            }
-            break;
-
-        case INDEX_op_shr_i32:
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & 31;
-                z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
-            }
-            break;
-        case INDEX_op_shr_i64:
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & 63;
-                z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
-            }
-            break;
-
-        case INDEX_op_extrl_i64_i32:
-            z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
-            break;
-        case INDEX_op_extrh_i64_i32:
-            z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
-            break;
-
-        CASE_OP_32_64(shl):
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
-                z_mask = arg_info(op->args[1])->z_mask << tmp;
-            }
-            break;
-
-        CASE_OP_32_64(neg):
-            /* Set to 1 all bits to the left of the rightmost. */
-            z_mask = -(arg_info(op->args[1])->z_mask
-                       & -arg_info(op->args[1])->z_mask);
-            break;
-
-        CASE_OP_32_64(deposit):
-            z_mask = deposit64(arg_info(op->args[1])->z_mask,
-                               op->args[3], op->args[4],
-                               arg_info(op->args[2])->z_mask);
-            break;
-
-        CASE_OP_32_64(extract):
-            z_mask = extract64(arg_info(op->args[1])->z_mask,
-                               op->args[2], op->args[3]);
-            if (op->args[2] == 0) {
-                affected = arg_info(op->args[1])->z_mask & ~z_mask;
-            }
-            break;
-        CASE_OP_32_64(sextract):
-            z_mask = sextract64(arg_info(op->args[1])->z_mask,
-                                op->args[2], op->args[3]);
-            if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
-                affected = arg_info(op->args[1])->z_mask & ~z_mask;
630
- }
631
- break;
632
-
633
- CASE_OP_32_64(or):
634
- CASE_OP_32_64(xor):
635
- z_mask = arg_info(op->args[1])->z_mask
636
- | arg_info(op->args[2])->z_mask;
637
- break;
638
-
639
- case INDEX_op_clz_i32:
640
- case INDEX_op_ctz_i32:
641
- z_mask = arg_info(op->args[2])->z_mask | 31;
642
- break;
643
-
644
- case INDEX_op_clz_i64:
645
- case INDEX_op_ctz_i64:
646
- z_mask = arg_info(op->args[2])->z_mask | 63;
647
- break;
648
-
649
- case INDEX_op_ctpop_i32:
650
- z_mask = 32 | 31;
651
- break;
652
- case INDEX_op_ctpop_i64:
653
- z_mask = 64 | 63;
654
- break;
655
-
656
- CASE_OP_32_64(setcond):
657
- case INDEX_op_setcond2_i32:
658
- z_mask = 1;
659
- break;
660
-
661
- CASE_OP_32_64(movcond):
662
- z_mask = arg_info(op->args[3])->z_mask
663
- | arg_info(op->args[4])->z_mask;
664
- break;
665
-
666
- CASE_OP_32_64(ld8u):
667
- z_mask = 0xff;
668
- break;
669
- CASE_OP_32_64(ld16u):
670
- z_mask = 0xffff;
671
- break;
672
- case INDEX_op_ld32u_i64:
673
- z_mask = 0xffffffffu;
674
- break;
675
-
676
- CASE_OP_32_64(qemu_ld):
677
- {
678
- MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
679
- MemOp mop = get_memop(oi);
680
- if (!(mop & MO_SIGN)) {
681
- z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
682
- }
683
- }
684
- break;
685
-
686
- CASE_OP_32_64(bswap16):
687
- z_mask = arg_info(op->args[1])->z_mask;
688
- if (z_mask <= 0xffff) {
689
- op->args[2] |= TCG_BSWAP_IZ;
690
- }
691
- z_mask = bswap16(z_mask);
692
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
693
- case TCG_BSWAP_OZ:
694
- break;
695
- case TCG_BSWAP_OS:
696
- z_mask = (int16_t)z_mask;
697
- break;
698
- default: /* undefined high bits */
699
- z_mask |= MAKE_64BIT_MASK(16, 48);
700
- break;
701
- }
702
- break;
703
-
704
- case INDEX_op_bswap32_i64:
705
- z_mask = arg_info(op->args[1])->z_mask;
706
- if (z_mask <= 0xffffffffu) {
707
- op->args[2] |= TCG_BSWAP_IZ;
708
- }
709
- z_mask = bswap32(z_mask);
710
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
711
- case TCG_BSWAP_OZ:
712
- break;
713
- case TCG_BSWAP_OS:
714
- z_mask = (int32_t)z_mask;
715
- break;
716
- default: /* undefined high bits */
717
- z_mask |= MAKE_64BIT_MASK(32, 32);
718
- break;
719
- }
720
- break;
721
-
722
- default:
723
- break;
724
- }
725
-
726
- /* 32-bit ops generate 32-bit results. For the result is zero test
727
- below, we can ignore high bits, but for further optimizations we
728
- need to record that the high bits contain garbage. */
729
- partmask = z_mask;
730
- if (ctx.type == TCG_TYPE_I32) {
731
- z_mask |= ~(tcg_target_ulong)0xffffffffu;
732
- partmask &= 0xffffffffu;
733
- affected &= 0xffffffffu;
734
- }
735
- ctx.z_mask = z_mask;
736
-
737
- if (partmask == 0) {
738
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
739
- continue;
740
- }
741
- if (affected == 0) {
742
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
743
- continue;
744
- }
745
+ /* Assume all bits affected, and no bits known zero. */
746
+ ctx.a_mask = -1;
747
+ ctx.z_mask = -1;
748
749
/*
750
* Process each opcode.
751
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
752
case INDEX_op_extrh_i64_i32:
753
done = fold_extu(&ctx, op);
754
break;
755
+ CASE_OP_32_64(ld8u):
756
+ CASE_OP_32_64(ld16u):
757
+ case INDEX_op_ld32u_i64:
758
+ done = fold_tcg_ld(&ctx, op);
759
+ break;
760
case INDEX_op_mb:
761
done = fold_mb(&ctx, op);
762
break;
97
--
763
--
98
2.20.1
764
2.25.1
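
For reference, the a_mask/z_mask pair threaded through the fold functions above can be demonstrated with a tiny standalone C program; fold_masks() and OptContext are only modeled here, not QEMU's actual API. z_mask has a 1 for every result bit that may still be nonzero, and a_mask has a 1 for every bit the operation may actually change, so z_mask == 0 folds to the constant 0 and a_mask == 0 folds to a plain copy:

    /* Standalone sketch of the fold_masks() decision, not QEMU code.
     * Example op: dst = src & 0x00ff, where src is already known to
     * fit in 8 bits. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t src_z_mask = 0x00ff;      /* possibly-nonzero bits of src */
        uint64_t and_const  = 0x00ff;

        uint64_t z_mask = src_z_mask & and_const;   /* result's nonzero bits */
        uint64_t a_mask = src_z_mask & ~and_const;  /* bits the AND can clear */

        if (z_mask == 0) {
            puts("fold to constant 0");
        } else if (a_mask == 0) {
            puts("fold to a copy of src");  /* taken for these inputs */
        } else {
            puts("keep the operation");
        }
        return 0;
    }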
99
765
100
766
1
Do not use exec/cpu_ldst_{,useronly_}template.h directly,
1
Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
2
but instead use the functional interface.
2
and muls2_i64.
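
As a quick illustration of the constant case handled here (a standalone C demo, not the OptContext plumbing), the four flavours differ only in the width and signedness of the double-width product:

    /* Demo of folding mulu2_i32/muls2_i32 when both inputs are constant:
     * compute the 64-bit product, then split it into low/high halves. */
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t a = 0x89abcdef, b = 0x12345678;

        uint64_t u = (uint64_t)a * b;                   /* mulu2_i32 */
        int64_t  s = (int64_t)(int32_t)a * (int32_t)b;  /* muls2_i32 */

        printf("mulu2_i32: lo=%08" PRIx32 " hi=%08" PRIx32 "\n",
               (uint32_t)u, (uint32_t)(u >> 32));
        printf("muls2_i32: lo=%08" PRIx32 " hi=%08" PRIx32 "\n",
               (uint32_t)s, (uint32_t)(s >> 32));
        return 0;
    }

The _i64 variants need a 128-bit product, which the patch obtains from mulu64()/muls64().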
3
3
4
Cc: Eduardo Habkost <ehabkost@redhat.com>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
target/i386/seg_helper.c | 56 ++++++++++++++++++++--------------------
8
tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
11
1 file changed, 28 insertions(+), 28 deletions(-)
9
1 file changed, 35 insertions(+), 9 deletions(-)
12
10
13
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/target/i386/seg_helper.c
13
--- a/tcg/optimize.c
16
+++ b/target/i386/seg_helper.c
14
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
18
# define LOG_PCALL_STATE(cpu) do { } while (0)
16
return false;
19
#endif
17
}
20
18
21
-#ifdef CONFIG_USER_ONLY
19
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
22
-#define MEMSUFFIX _kernel
20
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
23
-#define DATA_SIZE 1
21
{
24
-#include "exec/cpu_ldst_useronly_template.h"
22
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
25
+/*
23
- uint32_t a = arg_info(op->args[2])->val;
26
+ * TODO: Convert callers to compute cpu_mmu_index_kernel once
24
- uint32_t b = arg_info(op->args[3])->val;
27
+ * and use *_mmuidx_ra directly.
25
- uint64_t r = (uint64_t)a * b;
28
+ */
26
+ uint64_t a = arg_info(op->args[2])->val;
29
+#define cpu_ldub_kernel_ra(e, p, r) \
27
+ uint64_t b = arg_info(op->args[3])->val;
30
+ cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
28
+ uint64_t h, l;
31
+#define cpu_lduw_kernel_ra(e, p, r) \
29
TCGArg rl, rh;
32
+ cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
30
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
33
+#define cpu_ldl_kernel_ra(e, p, r) \
31
+ TCGOp *op2;
34
+ cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
32
+
35
+#define cpu_ldq_kernel_ra(e, p, r) \
33
+ switch (op->opc) {
36
+ cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
34
+ case INDEX_op_mulu2_i32:
37
35
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
38
-#define DATA_SIZE 2
36
+ h = (int32_t)(l >> 32);
39
-#include "exec/cpu_ldst_useronly_template.h"
37
+ l = (int32_t)l;
40
+#define cpu_stb_kernel_ra(e, p, v, r) \
38
+ break;
41
+ cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
39
+ case INDEX_op_muls2_i32:
42
+#define cpu_stw_kernel_ra(e, p, v, r) \
40
+ l = (int64_t)(int32_t)a * (int32_t)b;
43
+ cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
41
+ h = l >> 32;
44
+#define cpu_stl_kernel_ra(e, p, v, r) \
42
+ l = (int32_t)l;
45
+ cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
43
+ break;
46
+#define cpu_stq_kernel_ra(e, p, v, r) \
44
+ case INDEX_op_mulu2_i64:
47
+ cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
45
+ mulu64(&l, &h, a, b);
48
46
+ break;
49
-#define DATA_SIZE 4
47
+ case INDEX_op_muls2_i64:
50
-#include "exec/cpu_ldst_useronly_template.h"
48
+ muls64(&l, &h, a, b);
51
+#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
49
+ break;
52
+#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
50
+ default:
53
+#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
51
+ g_assert_not_reached();
54
+#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)
52
+ }
55
53
56
-#define DATA_SIZE 8
54
rl = op->args[0];
57
-#include "exec/cpu_ldst_useronly_template.h"
55
rh = op->args[1];
58
-#undef MEMSUFFIX
56
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
59
-#else
57
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
60
-#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
58
+
61
-#define MEMSUFFIX _kernel
59
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
62
-#define DATA_SIZE 1
60
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
63
-#include "exec/cpu_ldst_template.h"
61
+
64
-
62
+ tcg_opt_gen_movi(ctx, op, rl, l);
65
-#define DATA_SIZE 2
63
+ tcg_opt_gen_movi(ctx, op2, rh, h);
66
-#include "exec/cpu_ldst_template.h"
64
return true;
67
-
65
}
68
-#define DATA_SIZE 4
66
return false;
69
-#include "exec/cpu_ldst_template.h"
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
70
-
68
CASE_OP_32_64(muluh):
71
-#define DATA_SIZE 8
69
done = fold_mul_highpart(&ctx, op);
72
-#include "exec/cpu_ldst_template.h"
70
break;
73
-#undef CPU_MMU_INDEX
71
- case INDEX_op_mulu2_i32:
74
-#undef MEMSUFFIX
72
- done = fold_mulu2_i32(&ctx, op);
75
-#endif
73
+ CASE_OP_32_64(muls2):
76
+#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
74
+ CASE_OP_32_64(mulu2):
77
+#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
75
+ done = fold_multiply2(&ctx, op);
78
+#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
76
break;
79
+#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)
77
CASE_OP_32_64(nand):
80
78
done = fold_nand(&ctx, op);
81
/* return non zero if error */
82
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
83
--
79
--
84
2.20.1
80
2.25.1
85
81
86
82
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
Rename to fold_addsub2.
2
Use Int128 to implement the wider operation.
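
Roughly, the carry or borrow between the halves falls out automatically once the arithmetic is done at 128 bits. A standalone sketch using the GCC/Clang unsigned __int128 extension in place of QEMU's Int128 helpers (that substitution is an assumption of the demo):

    /* Fold a constant add2_i64: build 128-bit values from the (low, high)
     * pairs, add, split back.  Requires a compiler with __int128. */
    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t al = UINT64_MAX, ah = 1;   /* a = 1*2^64 + (2^64 - 1) */
        uint64_t bl = 1,          bh = 2;   /* b = 2*2^64 + 1 */

        unsigned __int128 a = ((unsigned __int128)ah << 64) | al;
        unsigned __int128 b = ((unsigned __int128)bh << 64) | bl;
        unsigned __int128 r = a + b;        /* carry into the high half */

        printf("lo=%016" PRIx64 " hi=%016" PRIx64 "\n",
               (uint64_t)r, (uint64_t)(r >> 64));   /* lo=0, hi=4 */
        return 0;
    }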
2
3
3
We currently search both the root and the tcg/ directories for tcg
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
files:
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
$ git grep '#include "tcg/' | wc -l
7
28
8
9
$ git grep '#include "tcg[^/]' | wc -l
10
94
11
12
To simplify the preprocessor search path, unify by spelling out the
13
tcg/ directory.
14
15
Patch created mechanically by running:
16
17
$ for x in \
18
tcg.h tcg-mo.h tcg-op.h tcg-opc.h \
19
tcg-op-gvec.h tcg-gvec-desc.h; do \
20
sed -i "s,#include \"$x\",#include \"tcg/$x\"," \
21
$(git grep -l "#include \"$x\""); \
22
done
23
24
Acked-by: David Gibson <david@gibson.dropbear.id.au> (ppc parts)
25
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
26
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
27
Reviewed-by: Stefan Weil <sw@weilnetz.de>
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
30
Message-Id: <20200101112303.20724-2-philmd@redhat.com>
31
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
32
---
8
---
33
include/exec/cpu_ldst.h | 2 +-
9
tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++----------------
34
tcg/i386/tcg-target.h | 2 +-
10
1 file changed, 44 insertions(+), 21 deletions(-)
35
tcg/tcg-op.h | 2 +-
36
tcg/tcg.h | 4 ++--
37
accel/tcg/cpu-exec.c | 2 +-
38
accel/tcg/tcg-runtime-gvec.c | 2 +-
39
accel/tcg/tcg-runtime.c | 2 +-
40
accel/tcg/translate-all.c | 2 +-
41
accel/tcg/user-exec.c | 2 +-
42
bsd-user/main.c | 2 +-
43
cpus.c | 2 +-
44
exec.c | 2 +-
45
linux-user/main.c | 2 +-
46
linux-user/syscall.c | 2 +-
47
target/alpha/translate.c | 2 +-
48
target/arm/helper-a64.c | 2 +-
49
target/arm/sve_helper.c | 2 +-
50
target/arm/translate-a64.c | 4 ++--
51
target/arm/translate-sve.c | 6 +++---
52
target/arm/translate.c | 4 ++--
53
target/cris/translate.c | 2 +-
54
target/hppa/translate.c | 2 +-
55
target/i386/mem_helper.c | 2 +-
56
target/i386/translate.c | 2 +-
57
target/lm32/translate.c | 2 +-
58
target/m68k/translate.c | 2 +-
59
target/microblaze/translate.c | 2 +-
60
target/mips/translate.c | 2 +-
61
target/moxie/translate.c | 2 +-
62
target/nios2/translate.c | 2 +-
63
target/openrisc/translate.c | 2 +-
64
target/ppc/mem_helper.c | 2 +-
65
target/ppc/translate.c | 4 ++--
66
target/riscv/cpu_helper.c | 2 +-
67
target/riscv/translate.c | 2 +-
68
target/s390x/mem_helper.c | 2 +-
69
target/s390x/translate.c | 4 ++--
70
target/sh4/translate.c | 2 +-
71
target/sparc/ldst_helper.c | 2 +-
72
target/sparc/translate.c | 2 +-
73
target/tilegx/translate.c | 2 +-
74
target/tricore/translate.c | 2 +-
75
target/unicore32/translate.c | 2 +-
76
target/xtensa/translate.c | 2 +-
77
tcg/optimize.c | 2 +-
78
tcg/tcg-common.c | 2 +-
79
tcg/tcg-op-gvec.c | 8 ++++----
80
tcg/tcg-op-vec.c | 6 +++---
81
tcg/tcg-op.c | 6 +++---
82
tcg/tcg.c | 2 +-
83
tcg/tci.c | 2 +-
84
51 files changed, 65 insertions(+), 65 deletions(-)
85
11
86
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
87
index XXXXXXX..XXXXXXX 100644
88
--- a/include/exec/cpu_ldst.h
89
+++ b/include/exec/cpu_ldst.h
90
@@ -XXX,XX +XXX,XX @@ static inline void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
91
#else
92
93
/* Needed for TCG_OVERSIZED_GUEST */
94
-#include "tcg.h"
95
+#include "tcg/tcg.h"
96
97
static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
98
{
99
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
100
index XXXXXXX..XXXXXXX 100644
101
--- a/tcg/i386/tcg-target.h
102
+++ b/tcg/i386/tcg-target.h
103
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
104
* The x86 has a pretty strong memory ordering which only really
105
* allows for some stores to be re-ordered after loads.
106
*/
107
-#include "tcg-mo.h"
108
+#include "tcg/tcg-mo.h"
109
110
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
111
112
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
113
index XXXXXXX..XXXXXXX 100644
114
--- a/tcg/tcg-op.h
115
+++ b/tcg/tcg-op.h
116
@@ -XXX,XX +XXX,XX @@
117
#ifndef TCG_TCG_OP_H
118
#define TCG_TCG_OP_H
119
120
-#include "tcg.h"
121
+#include "tcg/tcg.h"
122
#include "exec/helper-proto.h"
123
#include "exec/helper-gen.h"
124
125
diff --git a/tcg/tcg.h b/tcg/tcg.h
126
index XXXXXXX..XXXXXXX 100644
127
--- a/tcg/tcg.h
128
+++ b/tcg/tcg.h
129
@@ -XXX,XX +XXX,XX @@
130
#include "qemu/bitops.h"
131
#include "qemu/plugin.h"
132
#include "qemu/queue.h"
133
-#include "tcg-mo.h"
134
+#include "tcg/tcg-mo.h"
135
#include "tcg-target.h"
136
#include "qemu/int128.h"
137
138
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
139
140
typedef enum TCGOpcode {
141
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
142
-#include "tcg-opc.h"
143
+#include "tcg/tcg-opc.h"
144
#undef DEF
145
NB_OPS,
146
} TCGOpcode;
147
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
148
index XXXXXXX..XXXXXXX 100644
149
--- a/accel/tcg/cpu-exec.c
150
+++ b/accel/tcg/cpu-exec.c
151
@@ -XXX,XX +XXX,XX @@
152
#include "trace.h"
153
#include "disas/disas.h"
154
#include "exec/exec-all.h"
155
-#include "tcg.h"
156
+#include "tcg/tcg.h"
157
#include "qemu/atomic.h"
158
#include "sysemu/qtest.h"
159
#include "qemu/timer.h"
160
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/accel/tcg/tcg-runtime-gvec.c
163
+++ b/accel/tcg/tcg-runtime-gvec.c
164
@@ -XXX,XX +XXX,XX @@
165
#include "qemu/host-utils.h"
166
#include "cpu.h"
167
#include "exec/helper-proto.h"
168
-#include "tcg-gvec-desc.h"
169
+#include "tcg/tcg-gvec-desc.h"
170
171
172
/* Virtually all hosts support 16-byte vectors. Those that don't can emulate
173
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
174
index XXXXXXX..XXXXXXX 100644
175
--- a/accel/tcg/tcg-runtime.c
176
+++ b/accel/tcg/tcg-runtime.c
177
@@ -XXX,XX +XXX,XX @@
178
#include "exec/tb-lookup.h"
179
#include "disas/disas.h"
180
#include "exec/log.h"
181
-#include "tcg.h"
182
+#include "tcg/tcg.h"
183
184
/* 32-bit helpers */
185
186
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
187
index XXXXXXX..XXXXXXX 100644
188
--- a/accel/tcg/translate-all.c
189
+++ b/accel/tcg/translate-all.c
190
@@ -XXX,XX +XXX,XX @@
191
#include "trace.h"
192
#include "disas/disas.h"
193
#include "exec/exec-all.h"
194
-#include "tcg.h"
195
+#include "tcg/tcg.h"
196
#if defined(CONFIG_USER_ONLY)
197
#include "qemu.h"
198
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
199
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
200
index XXXXXXX..XXXXXXX 100644
201
--- a/accel/tcg/user-exec.c
202
+++ b/accel/tcg/user-exec.c
203
@@ -XXX,XX +XXX,XX @@
204
#include "cpu.h"
205
#include "disas/disas.h"
206
#include "exec/exec-all.h"
207
-#include "tcg.h"
208
+#include "tcg/tcg.h"
209
#include "qemu/bitops.h"
210
#include "exec/cpu_ldst.h"
211
#include "translate-all.h"
212
diff --git a/bsd-user/main.c b/bsd-user/main.c
213
index XXXXXXX..XXXXXXX 100644
214
--- a/bsd-user/main.c
215
+++ b/bsd-user/main.c
216
@@ -XXX,XX +XXX,XX @@
217
#include "qemu/module.h"
218
#include "cpu.h"
219
#include "exec/exec-all.h"
220
-#include "tcg.h"
221
+#include "tcg/tcg.h"
222
#include "qemu/timer.h"
223
#include "qemu/envlist.h"
224
#include "exec/log.h"
225
diff --git a/cpus.c b/cpus.c
226
index XXXXXXX..XXXXXXX 100644
227
--- a/cpus.c
228
+++ b/cpus.c
229
@@ -XXX,XX +XXX,XX @@
230
#include "qemu/bitmap.h"
231
#include "qemu/seqlock.h"
232
#include "qemu/guest-random.h"
233
-#include "tcg.h"
234
+#include "tcg/tcg.h"
235
#include "hw/nmi.h"
236
#include "sysemu/replay.h"
237
#include "sysemu/runstate.h"
238
diff --git a/exec.c b/exec.c
239
index XXXXXXX..XXXXXXX 100644
240
--- a/exec.c
241
+++ b/exec.c
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
#include "exec/exec-all.h"
245
#include "exec/target_page.h"
246
-#include "tcg.h"
247
+#include "tcg/tcg.h"
248
#include "hw/qdev-core.h"
249
#include "hw/qdev-properties.h"
250
#if !defined(CONFIG_USER_ONLY)
251
diff --git a/linux-user/main.c b/linux-user/main.c
252
index XXXXXXX..XXXXXXX 100644
253
--- a/linux-user/main.c
254
+++ b/linux-user/main.c
255
@@ -XXX,XX +XXX,XX @@
256
#include "qemu/plugin.h"
257
#include "cpu.h"
258
#include "exec/exec-all.h"
259
-#include "tcg.h"
260
+#include "tcg/tcg.h"
261
#include "qemu/timer.h"
262
#include "qemu/envlist.h"
263
#include "qemu/guest-random.h"
264
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
265
index XXXXXXX..XXXXXXX 100644
266
--- a/linux-user/syscall.c
267
+++ b/linux-user/syscall.c
268
@@ -XXX,XX +XXX,XX @@
269
#include "user/syscall-trace.h"
270
#include "qapi/error.h"
271
#include "fd-trans.h"
272
-#include "tcg.h"
273
+#include "tcg/tcg.h"
274
275
#ifndef CLONE_IO
276
#define CLONE_IO 0x80000000 /* Clone io context */
277
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/alpha/translate.c
280
+++ b/target/alpha/translate.c
281
@@ -XXX,XX +XXX,XX @@
282
#include "disas/disas.h"
283
#include "qemu/host-utils.h"
284
#include "exec/exec-all.h"
285
-#include "tcg-op.h"
286
+#include "tcg/tcg-op.h"
287
#include "exec/cpu_ldst.h"
288
#include "exec/helper-proto.h"
289
#include "exec/helper-gen.h"
290
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
291
index XXXXXXX..XXXXXXX 100644
292
--- a/target/arm/helper-a64.c
293
+++ b/target/arm/helper-a64.c
294
@@ -XXX,XX +XXX,XX @@
295
#include "exec/cpu_ldst.h"
296
#include "qemu/int128.h"
297
#include "qemu/atomic128.h"
298
-#include "tcg.h"
299
+#include "tcg/tcg.h"
300
#include "fpu/softfloat.h"
301
#include <zlib.h> /* For crc32 */
302
303
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
304
index XXXXXXX..XXXXXXX 100644
305
--- a/target/arm/sve_helper.c
306
+++ b/target/arm/sve_helper.c
307
@@ -XXX,XX +XXX,XX @@
308
#include "exec/helper-proto.h"
309
#include "tcg/tcg-gvec-desc.h"
310
#include "fpu/softfloat.h"
311
-#include "tcg.h"
312
+#include "tcg/tcg.h"
313
314
315
/* Note that vector data is stored in host-endian 64-bit chunks,
316
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
317
index XXXXXXX..XXXXXXX 100644
318
--- a/target/arm/translate-a64.c
319
+++ b/target/arm/translate-a64.c
320
@@ -XXX,XX +XXX,XX @@
321
322
#include "cpu.h"
323
#include "exec/exec-all.h"
324
-#include "tcg-op.h"
325
-#include "tcg-op-gvec.h"
326
+#include "tcg/tcg-op.h"
327
+#include "tcg/tcg-op-gvec.h"
328
#include "qemu/log.h"
329
#include "arm_ldst.h"
330
#include "translate.h"
331
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
332
index XXXXXXX..XXXXXXX 100644
333
--- a/target/arm/translate-sve.c
334
+++ b/target/arm/translate-sve.c
335
@@ -XXX,XX +XXX,XX @@
336
#include "qemu/osdep.h"
337
#include "cpu.h"
338
#include "exec/exec-all.h"
339
-#include "tcg-op.h"
340
-#include "tcg-op-gvec.h"
341
-#include "tcg-gvec-desc.h"
342
+#include "tcg/tcg-op.h"
343
+#include "tcg/tcg-op-gvec.h"
344
+#include "tcg/tcg-gvec-desc.h"
345
#include "qemu/log.h"
346
#include "arm_ldst.h"
347
#include "translate.h"
348
diff --git a/target/arm/translate.c b/target/arm/translate.c
349
index XXXXXXX..XXXXXXX 100644
350
--- a/target/arm/translate.c
351
+++ b/target/arm/translate.c
352
@@ -XXX,XX +XXX,XX @@
353
#include "internals.h"
354
#include "disas/disas.h"
355
#include "exec/exec-all.h"
356
-#include "tcg-op.h"
357
-#include "tcg-op-gvec.h"
358
+#include "tcg/tcg-op.h"
359
+#include "tcg/tcg-op-gvec.h"
360
#include "qemu/log.h"
361
#include "qemu/bitops.h"
362
#include "arm_ldst.h"
363
diff --git a/target/cris/translate.c b/target/cris/translate.c
364
index XXXXXXX..XXXXXXX 100644
365
--- a/target/cris/translate.c
366
+++ b/target/cris/translate.c
367
@@ -XXX,XX +XXX,XX @@
368
#include "cpu.h"
369
#include "disas/disas.h"
370
#include "exec/exec-all.h"
371
-#include "tcg-op.h"
372
+#include "tcg/tcg-op.h"
373
#include "exec/helper-proto.h"
374
#include "mmu.h"
375
#include "exec/cpu_ldst.h"
376
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
377
index XXXXXXX..XXXXXXX 100644
378
--- a/target/hppa/translate.c
379
+++ b/target/hppa/translate.c
380
@@ -XXX,XX +XXX,XX @@
381
#include "disas/disas.h"
382
#include "qemu/host-utils.h"
383
#include "exec/exec-all.h"
384
-#include "tcg-op.h"
385
+#include "tcg/tcg-op.h"
386
#include "exec/cpu_ldst.h"
387
#include "exec/helper-proto.h"
388
#include "exec/helper-gen.h"
389
diff --git a/target/i386/mem_helper.c b/target/i386/mem_helper.c
390
index XXXXXXX..XXXXXXX 100644
391
--- a/target/i386/mem_helper.c
392
+++ b/target/i386/mem_helper.c
393
@@ -XXX,XX +XXX,XX @@
394
#include "exec/cpu_ldst.h"
395
#include "qemu/int128.h"
396
#include "qemu/atomic128.h"
397
-#include "tcg.h"
398
+#include "tcg/tcg.h"
399
400
void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
401
{
402
diff --git a/target/i386/translate.c b/target/i386/translate.c
403
index XXXXXXX..XXXXXXX 100644
404
--- a/target/i386/translate.c
405
+++ b/target/i386/translate.c
406
@@ -XXX,XX +XXX,XX @@
407
#include "cpu.h"
408
#include "disas/disas.h"
409
#include "exec/exec-all.h"
410
-#include "tcg-op.h"
411
+#include "tcg/tcg-op.h"
412
#include "exec/cpu_ldst.h"
413
#include "exec/translator.h"
414
415
diff --git a/target/lm32/translate.c b/target/lm32/translate.c
416
index XXXXXXX..XXXXXXX 100644
417
--- a/target/lm32/translate.c
418
+++ b/target/lm32/translate.c
419
@@ -XXX,XX +XXX,XX @@
420
#include "exec/helper-proto.h"
421
#include "exec/exec-all.h"
422
#include "exec/translator.h"
423
-#include "tcg-op.h"
424
+#include "tcg/tcg-op.h"
425
#include "qemu/qemu-print.h"
426
427
#include "exec/cpu_ldst.h"
428
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
429
index XXXXXXX..XXXXXXX 100644
430
--- a/target/m68k/translate.c
431
+++ b/target/m68k/translate.c
432
@@ -XXX,XX +XXX,XX @@
433
#include "cpu.h"
434
#include "disas/disas.h"
435
#include "exec/exec-all.h"
436
-#include "tcg-op.h"
437
+#include "tcg/tcg-op.h"
438
#include "qemu/log.h"
439
#include "qemu/qemu-print.h"
440
#include "exec/cpu_ldst.h"
441
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
442
index XXXXXXX..XXXXXXX 100644
443
--- a/target/microblaze/translate.c
444
+++ b/target/microblaze/translate.c
445
@@ -XXX,XX +XXX,XX @@
446
#include "cpu.h"
447
#include "disas/disas.h"
448
#include "exec/exec-all.h"
449
-#include "tcg-op.h"
450
+#include "tcg/tcg-op.h"
451
#include "exec/helper-proto.h"
452
#include "microblaze-decode.h"
453
#include "exec/cpu_ldst.h"
454
diff --git a/target/mips/translate.c b/target/mips/translate.c
455
index XXXXXXX..XXXXXXX 100644
456
--- a/target/mips/translate.c
457
+++ b/target/mips/translate.c
458
@@ -XXX,XX +XXX,XX @@
459
#include "internal.h"
460
#include "disas/disas.h"
461
#include "exec/exec-all.h"
462
-#include "tcg-op.h"
463
+#include "tcg/tcg-op.h"
464
#include "exec/cpu_ldst.h"
465
#include "hw/mips/cpudevs.h"
466
467
diff --git a/target/moxie/translate.c b/target/moxie/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/moxie/translate.c
470
+++ b/target/moxie/translate.c
471
@@ -XXX,XX +XXX,XX @@
472
#include "cpu.h"
473
#include "exec/exec-all.h"
474
#include "disas/disas.h"
475
-#include "tcg-op.h"
476
+#include "tcg/tcg-op.h"
477
#include "exec/cpu_ldst.h"
478
#include "qemu/qemu-print.h"
479
480
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
481
index XXXXXXX..XXXXXXX 100644
482
--- a/target/nios2/translate.c
483
+++ b/target/nios2/translate.c
484
@@ -XXX,XX +XXX,XX @@
485
486
#include "qemu/osdep.h"
487
#include "cpu.h"
488
-#include "tcg-op.h"
489
+#include "tcg/tcg-op.h"
490
#include "exec/exec-all.h"
491
#include "disas/disas.h"
492
#include "exec/helper-proto.h"
493
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
494
index XXXXXXX..XXXXXXX 100644
495
--- a/target/openrisc/translate.c
496
+++ b/target/openrisc/translate.c
497
@@ -XXX,XX +XXX,XX @@
498
#include "cpu.h"
499
#include "exec/exec-all.h"
500
#include "disas/disas.h"
501
-#include "tcg-op.h"
502
+#include "tcg/tcg-op.h"
503
#include "qemu/log.h"
504
#include "qemu/bitops.h"
505
#include "qemu/qemu-print.h"
506
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
507
index XXXXXXX..XXXXXXX 100644
508
--- a/target/ppc/mem_helper.c
509
+++ b/target/ppc/mem_helper.c
510
@@ -XXX,XX +XXX,XX @@
511
#include "exec/helper-proto.h"
512
#include "helper_regs.h"
513
#include "exec/cpu_ldst.h"
514
-#include "tcg.h"
515
+#include "tcg/tcg.h"
516
#include "internal.h"
517
#include "qemu/atomic128.h"
518
519
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
520
index XXXXXXX..XXXXXXX 100644
521
--- a/target/ppc/translate.c
522
+++ b/target/ppc/translate.c
523
@@ -XXX,XX +XXX,XX @@
524
#include "internal.h"
525
#include "disas/disas.h"
526
#include "exec/exec-all.h"
527
-#include "tcg-op.h"
528
-#include "tcg-op-gvec.h"
529
+#include "tcg/tcg-op.h"
530
+#include "tcg/tcg-op-gvec.h"
531
#include "qemu/host-utils.h"
532
#include "qemu/main-loop.h"
533
#include "exec/cpu_ldst.h"
534
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
535
index XXXXXXX..XXXXXXX 100644
536
--- a/target/riscv/cpu_helper.c
537
+++ b/target/riscv/cpu_helper.c
538
@@ -XXX,XX +XXX,XX @@
539
#include "qemu/main-loop.h"
540
#include "cpu.h"
541
#include "exec/exec-all.h"
542
-#include "tcg-op.h"
543
+#include "tcg/tcg-op.h"
544
#include "trace.h"
545
546
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
547
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
548
index XXXXXXX..XXXXXXX 100644
549
--- a/target/riscv/translate.c
550
+++ b/target/riscv/translate.c
551
@@ -XXX,XX +XXX,XX @@
552
#include "qemu/osdep.h"
553
#include "qemu/log.h"
554
#include "cpu.h"
555
-#include "tcg-op.h"
556
+#include "tcg/tcg-op.h"
557
#include "disas/disas.h"
558
#include "exec/cpu_ldst.h"
559
#include "exec/exec-all.h"
560
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
561
index XXXXXXX..XXXXXXX 100644
562
--- a/target/s390x/mem_helper.c
563
+++ b/target/s390x/mem_helper.c
564
@@ -XXX,XX +XXX,XX @@
565
#include "exec/cpu_ldst.h"
566
#include "qemu/int128.h"
567
#include "qemu/atomic128.h"
568
-#include "tcg.h"
569
+#include "tcg/tcg.h"
570
571
#if !defined(CONFIG_USER_ONLY)
572
#include "hw/s390x/storage-keys.h"
573
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/s390x/translate.c
576
+++ b/target/s390x/translate.c
577
@@ -XXX,XX +XXX,XX @@
578
#include "internal.h"
579
#include "disas/disas.h"
580
#include "exec/exec-all.h"
581
-#include "tcg-op.h"
582
-#include "tcg-op-gvec.h"
583
+#include "tcg/tcg-op.h"
584
+#include "tcg/tcg-op-gvec.h"
585
#include "qemu/log.h"
586
#include "qemu/host-utils.h"
587
#include "exec/cpu_ldst.h"
588
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
589
index XXXXXXX..XXXXXXX 100644
590
--- a/target/sh4/translate.c
591
+++ b/target/sh4/translate.c
592
@@ -XXX,XX +XXX,XX @@
593
#include "cpu.h"
594
#include "disas/disas.h"
595
#include "exec/exec-all.h"
596
-#include "tcg-op.h"
597
+#include "tcg/tcg-op.h"
598
#include "exec/cpu_ldst.h"
599
#include "exec/helper-proto.h"
600
#include "exec/helper-gen.h"
601
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/sparc/ldst_helper.c
604
+++ b/target/sparc/ldst_helper.c
605
@@ -XXX,XX +XXX,XX @@
606
607
#include "qemu/osdep.h"
608
#include "cpu.h"
609
-#include "tcg.h"
610
+#include "tcg/tcg.h"
611
#include "exec/helper-proto.h"
612
#include "exec/exec-all.h"
613
#include "exec/cpu_ldst.h"
614
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
615
index XXXXXXX..XXXXXXX 100644
616
--- a/target/sparc/translate.c
617
+++ b/target/sparc/translate.c
618
@@ -XXX,XX +XXX,XX @@
619
#include "disas/disas.h"
620
#include "exec/helper-proto.h"
621
#include "exec/exec-all.h"
622
-#include "tcg-op.h"
623
+#include "tcg/tcg-op.h"
624
#include "exec/cpu_ldst.h"
625
626
#include "exec/helper-gen.h"
627
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
628
index XXXXXXX..XXXXXXX 100644
629
--- a/target/tilegx/translate.c
630
+++ b/target/tilegx/translate.c
631
@@ -XXX,XX +XXX,XX @@
632
#include "exec/log.h"
633
#include "disas/disas.h"
634
#include "exec/exec-all.h"
635
-#include "tcg-op.h"
636
+#include "tcg/tcg-op.h"
637
#include "exec/cpu_ldst.h"
638
#include "linux-user/syscall_defs.h"
639
640
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
641
index XXXXXXX..XXXXXXX 100644
642
--- a/target/tricore/translate.c
643
+++ b/target/tricore/translate.c
644
@@ -XXX,XX +XXX,XX @@
645
#include "cpu.h"
646
#include "disas/disas.h"
647
#include "exec/exec-all.h"
648
-#include "tcg-op.h"
649
+#include "tcg/tcg-op.h"
650
#include "exec/cpu_ldst.h"
651
#include "qemu/qemu-print.h"
652
653
diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c
654
index XXXXXXX..XXXXXXX 100644
655
--- a/target/unicore32/translate.c
656
+++ b/target/unicore32/translate.c
657
@@ -XXX,XX +XXX,XX @@
658
#include "cpu.h"
659
#include "disas/disas.h"
660
#include "exec/exec-all.h"
661
-#include "tcg-op.h"
662
+#include "tcg/tcg-op.h"
663
#include "qemu/log.h"
664
#include "exec/cpu_ldst.h"
665
#include "exec/translator.h"
666
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
667
index XXXXXXX..XXXXXXX 100644
668
--- a/target/xtensa/translate.c
669
+++ b/target/xtensa/translate.c
670
@@ -XXX,XX +XXX,XX @@
671
#include "cpu.h"
672
#include "exec/exec-all.h"
673
#include "disas/disas.h"
674
-#include "tcg-op.h"
675
+#include "tcg/tcg-op.h"
676
#include "qemu/log.h"
677
#include "qemu/qemu-print.h"
678
#include "exec/cpu_ldst.h"
679
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
680
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
681
--- a/tcg/optimize.c
14
--- a/tcg/optimize.c
682
+++ b/tcg/optimize.c
15
+++ b/tcg/optimize.c
683
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@
684
*/
17
*/
685
18
686
#include "qemu/osdep.h"
19
#include "qemu/osdep.h"
687
-#include "tcg-op.h"
20
+#include "qemu/int128.h"
688
+#include "tcg/tcg-op.h"
21
#include "tcg/tcg-op.h"
689
22
#include "tcg-internal.h"
690
#define CASE_OP_32_64(x) \
23
691
glue(glue(case INDEX_op_, x), _i32): \
24
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
692
diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c
25
return false;
693
index XXXXXXX..XXXXXXX 100644
26
}
694
--- a/tcg/tcg-common.c
27
695
+++ b/tcg/tcg-common.c
28
-static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
696
@@ -XXX,XX +XXX,XX @@ uintptr_t tci_tb_ptr;
29
+static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
697
TCGOpDef tcg_op_defs[] = {
30
{
698
#define DEF(s, oargs, iargs, cargs, flags) \
31
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
699
{ #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
32
arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
700
-#include "tcg-opc.h"
33
- uint32_t al = arg_info(op->args[2])->val;
701
+#include "tcg/tcg-opc.h"
34
- uint32_t ah = arg_info(op->args[3])->val;
702
#undef DEF
35
- uint32_t bl = arg_info(op->args[4])->val;
703
};
36
- uint32_t bh = arg_info(op->args[5])->val;
704
const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);
37
- uint64_t a = ((uint64_t)ah << 32) | al;
705
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
38
- uint64_t b = ((uint64_t)bh << 32) | bl;
706
index XXXXXXX..XXXXXXX 100644
39
+ uint64_t al = arg_info(op->args[2])->val;
707
--- a/tcg/tcg-op-gvec.c
40
+ uint64_t ah = arg_info(op->args[3])->val;
708
+++ b/tcg/tcg-op-gvec.c
41
+ uint64_t bl = arg_info(op->args[4])->val;
709
@@ -XXX,XX +XXX,XX @@
42
+ uint64_t bh = arg_info(op->args[5])->val;
710
*/
43
TCGArg rl, rh;
711
44
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
712
#include "qemu/osdep.h"
45
+ TCGOp *op2;
713
-#include "tcg.h"
46
714
-#include "tcg-op.h"
47
- if (add) {
715
-#include "tcg-op-gvec.h"
48
- a += b;
716
+#include "tcg/tcg.h"
49
+ if (ctx->type == TCG_TYPE_I32) {
717
+#include "tcg/tcg-op.h"
50
+ uint64_t a = deposit64(al, 32, 32, ah);
718
+#include "tcg/tcg-op-gvec.h"
51
+ uint64_t b = deposit64(bl, 32, 32, bh);
719
#include "qemu/main-loop.h"
52
+
720
-#include "tcg-gvec-desc.h"
53
+ if (add) {
721
+#include "tcg/tcg-gvec-desc.h"
54
+ a += b;
722
55
+ } else {
723
#define MAX_UNROLL 4
56
+ a -= b;
724
57
+ }
725
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
58
+
726
index XXXXXXX..XXXXXXX 100644
59
+ al = sextract64(a, 0, 32);
727
--- a/tcg/tcg-op-vec.c
60
+ ah = sextract64(a, 32, 32);
728
+++ b/tcg/tcg-op-vec.c
61
} else {
729
@@ -XXX,XX +XXX,XX @@
62
- a -= b;
730
63
+ Int128 a = int128_make128(al, ah);
731
#include "qemu/osdep.h"
64
+ Int128 b = int128_make128(bl, bh);
732
#include "cpu.h"
65
+
733
-#include "tcg.h"
66
+ if (add) {
734
-#include "tcg-op.h"
67
+ a = int128_add(a, b);
735
-#include "tcg-mo.h"
68
+ } else {
736
+#include "tcg/tcg.h"
69
+ a = int128_sub(a, b);
737
+#include "tcg/tcg-op.h"
70
+ }
738
+#include "tcg/tcg-mo.h"
71
+
739
72
+ al = int128_getlo(a);
740
/* Reduce the number of ifdefs below. This assumes that all uses of
73
+ ah = int128_gethi(a);
741
TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
74
}
742
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
75
743
index XXXXXXX..XXXXXXX 100644
76
rl = op->args[0];
744
--- a/tcg/tcg-op.c
77
rh = op->args[1];
745
+++ b/tcg/tcg-op.c
78
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
746
@@ -XXX,XX +XXX,XX @@
79
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
747
#include "qemu/osdep.h"
80
+
748
#include "cpu.h"
81
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
749
#include "exec/exec-all.h"
82
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
750
-#include "tcg.h"
83
+
751
-#include "tcg-op.h"
84
+ tcg_opt_gen_movi(ctx, op, rl, al);
752
-#include "tcg-mo.h"
85
+ tcg_opt_gen_movi(ctx, op2, rh, ah);
753
+#include "tcg/tcg.h"
86
return true;
754
+#include "tcg/tcg-op.h"
87
}
755
+#include "tcg/tcg-mo.h"
88
return false;
756
#include "trace-tcg.h"
89
}
757
#include "trace/mem.h"
90
758
#include "exec/plugin-gen.h"
91
-static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
759
diff --git a/tcg/tcg.c b/tcg/tcg.c
92
+static bool fold_add2(OptContext *ctx, TCGOp *op)
760
index XXXXXXX..XXXXXXX 100644
93
{
761
--- a/tcg/tcg.c
94
- return fold_addsub2_i32(ctx, op, true);
762
+++ b/tcg/tcg.c
95
+ return fold_addsub2(ctx, op, true);
763
@@ -XXX,XX +XXX,XX @@
96
}
764
#include "hw/boards.h"
97
765
#endif
98
static bool fold_and(OptContext *ctx, TCGOp *op)
766
99
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
767
-#include "tcg-op.h"
100
return false;
768
+#include "tcg/tcg-op.h"
101
}
769
102
770
#if UINTPTR_MAX == UINT32_MAX
103
-static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
771
# define ELF_CLASS ELFCLASS32
104
+static bool fold_sub2(OptContext *ctx, TCGOp *op)
772
diff --git a/tcg/tci.c b/tcg/tci.c
105
{
773
index XXXXXXX..XXXXXXX 100644
106
- return fold_addsub2_i32(ctx, op, false);
774
--- a/tcg/tci.c
107
+ return fold_addsub2(ctx, op, false);
775
+++ b/tcg/tci.c
108
}
776
@@ -XXX,XX +XXX,XX @@
109
777
#include "qemu-common.h"
110
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
778
#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
111
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
779
#include "exec/cpu_ldst.h"
112
CASE_OP_32_64_VEC(add):
780
-#include "tcg-op.h"
113
done = fold_add(&ctx, op);
781
+#include "tcg/tcg-op.h"
114
break;
782
115
- case INDEX_op_add2_i32:
783
/* Marker for missing code. */
116
- done = fold_add2_i32(&ctx, op);
784
#define TODO() \
117
+ CASE_OP_32_64(add2):
118
+ done = fold_add2(&ctx, op);
119
break;
120
CASE_OP_32_64_VEC(and):
121
done = fold_and(&ctx, op);
122
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
123
CASE_OP_32_64_VEC(sub):
124
done = fold_sub(&ctx, op);
125
break;
126
- case INDEX_op_sub2_i32:
127
- done = fold_sub2_i32(&ctx, op);
128
+ CASE_OP_32_64(sub2):
129
+ done = fold_sub2(&ctx, op);
130
break;
131
CASE_OP_32_64_VEC(xor):
132
done = fold_xor(&ctx, op);
785
--
133
--
786
2.20.1
134
2.25.1
787
135
788
136
New patch
1
1
Most of these are handled by creating a fold_const2_commutative
2
to cover all of the binary operators.
3
handled on a case-by-case basis in the switch, and have their
4
own fold function in which to place the call.
5
6
We now have only one major switch on TCGOpcode.
7
8
Introduce NO_DEST and a block comment for swap_commutative in
9
order to make the handling of brcond and movcond opcodes cleaner.
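
For orientation, the canonicalization these helpers share can be modeled in a few lines of standalone C (the Arg type and NO_DEST value here are stand-ins, not QEMU's TCGArg machinery); note that whenever the operands of a comparison are swapped, the condition must be swapped with them:

    /* Simplified model of swap_commutative(): move a constant into the
     * second slot, and prefer the destination register as first source
     * so two-operand hosts can reuse it. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { int reg; bool is_const; long val; } Arg;

    #define NO_DEST (-1)

    static bool swap_commutative(int dest_reg, Arg *p1, Arg *p2)
    {
        if ((p1->is_const && !p2->is_const) ||
            (!p2->is_const && p2->reg == dest_reg)) {
            Arg tmp = *p1;
            *p1 = *p2;
            *p2 = tmp;
            return true;    /* caller must also swap a TCGCond, if any */
        }
        return false;
    }

    int main(void)
    {
        Arg a = { .reg = 0, .is_const = true,  .val = 5 };
        Arg b = { .reg = 3, .is_const = false, .val = 0 };

        if (swap_commutative(NO_DEST, &a, &b)) {
            printf("canonicalized: reg%d op const %ld\n", a.reg, b.val);
        }
        return 0;
    }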
10
11
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
14
tcg/optimize.c | 142 ++++++++++++++++++++++++-------------------------
15
1 file changed, 70 insertions(+), 72 deletions(-)
16
17
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/optimize.c
20
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
22
return -1;
23
}
24
25
+/**
26
+ * swap_commutative:
27
+ * @dest: TCGArg of the destination argument, or NO_DEST.
28
+ * @p1: first paired argument
29
+ * @p2: second paired argument
30
+ *
31
+ * If *@p1 is a constant and *@p2 is not, swap.
32
+ * If *@p2 matches @dest, swap.
33
+ * Return true if a swap was performed.
34
+ */
35
+
36
+#define NO_DEST temp_arg(NULL)
37
+
38
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
39
{
40
TCGArg a1 = *p1, a2 = *p2;
41
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
42
return false;
43
}
44
45
+static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
46
+{
47
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
48
+ return fold_const2(ctx, op);
49
+}
50
+
51
static bool fold_masks(OptContext *ctx, TCGOp *op)
52
{
53
uint64_t a_mask = ctx->a_mask;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
55
56
static bool fold_add(OptContext *ctx, TCGOp *op)
57
{
58
- if (fold_const2(ctx, op) ||
59
+ if (fold_const2_commutative(ctx, op) ||
60
fold_xi_to_x(ctx, op, 0)) {
61
return true;
62
}
63
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
64
65
static bool fold_add2(OptContext *ctx, TCGOp *op)
66
{
67
+ /* Note that the high and low parts may be independently swapped. */
68
+ swap_commutative(op->args[0], &op->args[2], &op->args[4]);
69
+ swap_commutative(op->args[1], &op->args[3], &op->args[5]);
70
+
71
return fold_addsub2(ctx, op, true);
72
}
73
74
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
75
{
76
uint64_t z1, z2;
77
78
- if (fold_const2(ctx, op) ||
79
+ if (fold_const2_commutative(ctx, op) ||
80
fold_xi_to_i(ctx, op, 0) ||
81
fold_xi_to_x(ctx, op, -1) ||
82
fold_xx_to_x(ctx, op)) {
83
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
TCGCond cond = op->args[2];
87
- int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
88
+ int i;
89
90
+ if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
91
+ op->args[2] = cond = tcg_swap_cond(cond);
92
+ }
93
+
94
+ i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
95
if (i == 0) {
96
tcg_op_remove(ctx->tcg, op);
97
return true;
98
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
99
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
100
{
101
TCGCond cond = op->args[4];
102
- int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
103
TCGArg label = op->args[5];
104
- int inv = 0;
105
+ int i, inv = 0;
106
107
+ if (swap_commutative2(&op->args[0], &op->args[2])) {
108
+ op->args[4] = cond = tcg_swap_cond(cond);
109
+ }
110
+
111
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
112
if (i >= 0) {
113
goto do_brcond_const;
114
}
115
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
116
117
static bool fold_eqv(OptContext *ctx, TCGOp *op)
118
{
119
- if (fold_const2(ctx, op) ||
120
+ if (fold_const2_commutative(ctx, op) ||
121
fold_xi_to_x(ctx, op, -1) ||
122
fold_xi_to_not(ctx, op, 0)) {
123
return true;
124
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
125
static bool fold_movcond(OptContext *ctx, TCGOp *op)
126
{
127
TCGCond cond = op->args[5];
128
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
129
+ int i;
130
131
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
132
+ op->args[5] = cond = tcg_swap_cond(cond);
133
+ }
134
+ /*
135
+ * Canonicalize the "false" input reg to match the destination reg so
136
+ * that the tcg backend can implement a "move if true" operation.
137
+ */
138
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
139
+ op->args[5] = cond = tcg_invert_cond(cond);
140
+ }
141
+
142
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
143
if (i >= 0) {
144
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
145
}
146
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
147
148
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
149
{
150
- if (fold_const2(ctx, op) ||
151
+ if (fold_const2_commutative(ctx, op) ||
152
fold_xi_to_i(ctx, op, 0)) {
153
return true;
154
}
155
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
156
157
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
158
{
159
+ swap_commutative(op->args[0], &op->args[2], &op->args[3]);
160
+
161
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
162
uint64_t a = arg_info(op->args[2])->val;
163
uint64_t b = arg_info(op->args[3])->val;
164
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
165
166
static bool fold_nand(OptContext *ctx, TCGOp *op)
167
{
168
- if (fold_const2(ctx, op) ||
169
+ if (fold_const2_commutative(ctx, op) ||
170
fold_xi_to_not(ctx, op, -1)) {
171
return true;
172
}
173
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
174
175
static bool fold_nor(OptContext *ctx, TCGOp *op)
176
{
177
- if (fold_const2(ctx, op) ||
178
+ if (fold_const2_commutative(ctx, op) ||
179
fold_xi_to_not(ctx, op, 0)) {
180
return true;
181
}
182
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
183
184
static bool fold_or(OptContext *ctx, TCGOp *op)
185
{
186
- if (fold_const2(ctx, op) ||
187
+ if (fold_const2_commutative(ctx, op) ||
188
fold_xi_to_x(ctx, op, 0) ||
189
fold_xx_to_x(ctx, op)) {
190
return true;
191
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
192
static bool fold_setcond(OptContext *ctx, TCGOp *op)
193
{
194
TCGCond cond = op->args[3];
195
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
196
+ int i;
197
198
+ if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
199
+ op->args[3] = cond = tcg_swap_cond(cond);
200
+ }
201
+
202
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
203
if (i >= 0) {
204
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
205
}
206
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
207
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
208
{
209
TCGCond cond = op->args[5];
210
- int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
211
- int inv = 0;
212
+ int i, inv = 0;
213
214
+ if (swap_commutative2(&op->args[1], &op->args[3])) {
215
+ op->args[5] = cond = tcg_swap_cond(cond);
216
+ }
217
+
218
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
219
if (i >= 0) {
220
goto do_setcond_const;
221
}
222
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
223
224
static bool fold_xor(OptContext *ctx, TCGOp *op)
225
{
226
- if (fold_const2(ctx, op) ||
227
+ if (fold_const2_commutative(ctx, op) ||
228
fold_xx_to_i(ctx, op, 0) ||
229
fold_xi_to_x(ctx, op, 0) ||
230
fold_xi_to_not(ctx, op, -1)) {
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
ctx.type = TCG_TYPE_I32;
233
}
234
235
- /* For commutative operations make constant second argument */
236
- switch (opc) {
237
- CASE_OP_32_64_VEC(add):
238
- CASE_OP_32_64_VEC(mul):
239
- CASE_OP_32_64_VEC(and):
240
- CASE_OP_32_64_VEC(or):
241
- CASE_OP_32_64_VEC(xor):
242
- CASE_OP_32_64(eqv):
243
- CASE_OP_32_64(nand):
244
- CASE_OP_32_64(nor):
245
- CASE_OP_32_64(muluh):
246
- CASE_OP_32_64(mulsh):
247
- swap_commutative(op->args[0], &op->args[1], &op->args[2]);
248
- break;
249
- CASE_OP_32_64(brcond):
250
- if (swap_commutative(-1, &op->args[0], &op->args[1])) {
251
- op->args[2] = tcg_swap_cond(op->args[2]);
252
- }
253
- break;
254
- CASE_OP_32_64(setcond):
255
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
256
- op->args[3] = tcg_swap_cond(op->args[3]);
257
- }
258
- break;
259
- CASE_OP_32_64(movcond):
260
- if (swap_commutative(-1, &op->args[1], &op->args[2])) {
261
- op->args[5] = tcg_swap_cond(op->args[5]);
262
- }
263
- /* For movcond, we canonicalize the "false" input reg to match
264
- the destination reg so that the tcg backend can implement
265
- a "move if true" operation. */
266
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
267
- op->args[5] = tcg_invert_cond(op->args[5]);
268
- }
269
- break;
270
- CASE_OP_32_64(add2):
271
- swap_commutative(op->args[0], &op->args[2], &op->args[4]);
272
- swap_commutative(op->args[1], &op->args[3], &op->args[5]);
273
- break;
274
- CASE_OP_32_64(mulu2):
275
- CASE_OP_32_64(muls2):
276
- swap_commutative(op->args[0], &op->args[2], &op->args[3]);
277
- break;
278
- case INDEX_op_brcond2_i32:
279
- if (swap_commutative2(&op->args[0], &op->args[2])) {
280
- op->args[4] = tcg_swap_cond(op->args[4]);
281
- }
282
- break;
283
- case INDEX_op_setcond2_i32:
284
- if (swap_commutative2(&op->args[1], &op->args[3])) {
285
- op->args[5] = tcg_swap_cond(op->args[5]);
286
- }
287
- break;
288
- default:
289
- break;
290
- }
291
-
292
/* Assume all bits affected, and no bits known zero. */
293
ctx.a_mask = -1;
294
ctx.z_mask = -1;
295
--
296
2.25.1
297
298
1
The DO_LOAD macros replicate the distinction already performed
1
This "garbage" setting pre-dates the addition of the type
2
by the cpu_ldst.h functions. Use them.
2
changing opcodes INDEX_op_ext_i32_i64, INDEX_op_extu_i32_i64,
3
and INDEX_op_extr{l,h}_i64_i32.
3
4
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
So now we have a definitive point at which to adjust z_mask
6
to eliminate such bits from the 32-bit operands.
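
The effect on the mask itself is just a sign extension from bit 31, mirroring how 32-bit constants are represented; a standalone demo with made-up values (not QEMU code):

    /* For a TCG_TYPE_I32 op, discard any claim about bits 32..63 of
     * z_mask by sign-extending from bit 31. */
    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t z_mask = 0x1234000012340000ull;

        z_mask = (uint64_t)(int32_t)z_mask;   /* bit 31 is 0: high half -> 0 */
        printf("z_mask = %016" PRIx64 "\n", z_mask);  /* 0000000012340000 */
        return 0;
    }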
7
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
11
---
8
include/exec/cpu_ldst.h | 11 ---------
12
tcg/optimize.c | 35 ++++++++++++++++-------------------
9
include/exec/translator.h | 48 +++++++++++----------------------------
13
1 file changed, 16 insertions(+), 19 deletions(-)
10
2 files changed, 13 insertions(+), 46 deletions(-)
11
14
12
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
14
--- a/include/exec/cpu_ldst.h
17
--- a/tcg/optimize.c
15
+++ b/include/exec/cpu_ldst.h
18
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
19
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
17
#include "exec/cpu_ldst_useronly_template.h"
20
ti->is_const = true;
18
#undef MEMSUFFIX
21
ti->val = ts->val;
19
22
ti->z_mask = ts->val;
20
-/*
23
- if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
21
- * Code access is deprecated in favour of translator_ld* functions
24
- /* High bits of a 32-bit quantity are garbage. */
22
- * (see translator.h). However there are still users that need to
25
- ti->z_mask |= ~0xffffffffull;
23
- * converted so for now these stay.
26
- }
24
- */
27
} else {
25
#define MEMSUFFIX _code
28
ti->is_const = false;
26
#define CODE_ACCESS
29
ti->z_mask = -1;
27
#define DATA_SIZE 1
30
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
 #undef CPU_MMU_INDEX
 #undef MEMSUFFIX

-/*
- * Code access is deprecated in favour of translator_ld* functions
- * (see translator.h). However there are still users that need to
- * converted so for now these stay.
- */
-
 #define CPU_MMU_INDEX (cpu_mmu_index(env, true))
 #define MEMSUFFIX _code
 #define SOFTMMU_CODE_ACCESS
diff --git a/include/exec/translator.h b/include/exec/translator.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -XXX,XX +XXX,XX @@ void translator_loop_temp_check(DisasContextBase *db);
 /*
  * Translator Load Functions
  *
- * These are intended to replace the old cpu_ld*_code functions and
- * are mandatory for front-ends that have been migrated to the common
- * translator_loop. These functions are only intended to be called
- * from the translation stage and should not be called from helper
- * functions. Those functions should be converted to encode the
- * relevant information at translation time.
+ * These are intended to replace the direct usage of the cpu_ld*_code
+ * functions and are mandatory for front-ends that have been migrated
+ * to the common translator_loop. These functions are only intended
+ * to be called from the translation stage and should not be called
+ * from helper functions. Those functions should be converted to encode
+ * the relevant information at translation time.
  */

-#ifdef CONFIG_USER_ONLY
-
-#define DO_LOAD(type, name, shift) \
-    do { \
-        set_helper_retaddr(1); \
-        ret = name ## _p(g2h(pc)); \
-        clear_helper_retaddr(); \
-    } while (0)
-
-#else
-
-#define DO_LOAD(type, name, shift) \
-    do { \
-        int mmu_idx = cpu_mmu_index(env, true); \
-        TCGMemOpIdx oi = make_memop_idx(shift, mmu_idx); \
-        ret = helper_ret_ ## name ## _cmmu(env, pc, oi, 0); \
-    } while (0)
-
-#endif
-
-#define GEN_TRANSLATOR_LD(fullname, name, type, shift, swap_fn) \
+#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
 static inline type \
 fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \
 { \
-    type ret; \
-    DO_LOAD(type, name, shift); \
-    \
+    type ret = load_fn(env, pc); \
     if (do_swap) { \
         ret = swap_fn(ret); \
     } \
@@ -XXX,XX +XXX,XX @@ void translator_loop_temp_check(DisasContextBase *db);
     return fullname ## _swap(env, pc, false); \
 }

-GEN_TRANSLATOR_LD(translator_ldub, ldub, uint8_t, 0, /* no swap */ )
-GEN_TRANSLATOR_LD(translator_ldsw, ldsw, int16_t, 1, bswap16)
-GEN_TRANSLATOR_LD(translator_lduw, lduw, uint16_t, 1, bswap16)
-GEN_TRANSLATOR_LD(translator_ldl, ldl, uint32_t, 2, bswap32)
-GEN_TRANSLATOR_LD(translator_ldq, ldq, uint64_t, 3, bswap64)
+GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
+GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
+GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
+GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
+GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
 #undef GEN_TRANSLATOR_LD

 #endif /* EXEC__TRANSLATOR_H */
--
2.20.1

@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
     TCGTemp *src_ts = arg_temp(src);
     TempOptInfo *di;
     TempOptInfo *si;
-    uint64_t z_mask;
     TCGOpcode new_op;

     if (ts_are_copies(dst_ts, src_ts)) {
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
     op->args[0] = dst;
     op->args[1] = src;

-    z_mask = si->z_mask;
-    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
-        /* High bits of the destination are now garbage. */
-        z_mask |= ~0xffffffffull;
-    }
-    di->z_mask = z_mask;
+    di->z_mask = si->z_mask;

     if (src_ts->type == dst_ts->type) {
         TempOptInfo *ni = ts_info(si->next_copy);
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                              TCGArg dst, uint64_t val)
 {
-    /* Convert movi to mov with constant temp. */
-    TCGTemp *tv = tcg_constant_internal(ctx->type, val);
+    TCGTemp *tv;

+    if (ctx->type == TCG_TYPE_I32) {
+        val = (int32_t)val;
+    }
+
+    /* Convert movi to mov with constant temp. */
+    tv = tcg_constant_internal(ctx->type, val);
     init_ts_info(ctx, tv);
     return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     uint64_t z_mask = ctx->z_mask;

     /*
-     * 32-bit ops generate 32-bit results. For the result is zero test
-     * below, we can ignore high bits, but for further optimizations we
-     * need to record that the high bits contain garbage.
+     * 32-bit ops generate 32-bit results, which for the purpose of
+     * simplifying tcg are sign-extended. Certainly that's how we
+     * represent our constants elsewhere. Note that the bits will
+     * be reset properly for a 64-bit value when encountering the
+     * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
-        a_mask &= MAKE_64BIT_MASK(0, 32);
-        z_mask &= MAKE_64BIT_MASK(0, 32);
+        a_mask = (int32_t)a_mask;
+        z_mask = (int32_t)z_mask;
+        ctx->z_mask = z_mask;
     }

     if (z_mask == 0) {
--
2.25.1

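The fold_masks change above swaps explicit MAKE_64BIT_MASK bookkeeping for plain (int32_t) casts. A standalone check, not part of either series, that the two formulations agree on the high 32 bits; the sample values, the macro copy, and the harness are mine, and the behaviour of the narrowing cast assumes the two's-complement hosts QEMU supports:

    /* Check (mine): casting a 64-bit mask through int32_t keeps the low
     * 32 bits and fills the high 32 bits from bit 31, which is exactly
     * the MAKE_64BIT_MASK manipulation being replaced. */
    #include <assert.h>
    #include <stdint.h>

    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    int main(void)
    {
        uint64_t samples[] = { 0, 1, 0x7fffffff, 0x80000000,
                               0xdeadbeefcafe1234ull };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            uint64_t z = samples[i];
            uint64_t lo = z & MAKE_64BIT_MASK(0, 32);
            /* If bit 31 is set, the high 32 bits become all ones. */
            uint64_t expect = (lo & 0x80000000ull)
                            ? lo | MAKE_64BIT_MASK(32, 32) : lo;
            assert((uint64_t)(int32_t)z == expect);
        }
        return 0;
    }
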
Code movement in an upcoming patch will show that this file
was implicitly depending on tcg.h being included indirectly.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/mem_helper.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/cpu_ldst.h"
 #include "qemu/int128.h"
 #include "qemu/atomic128.h"
+#include "tcg.h"

 #if !defined(CONFIG_USER_ONLY)
 #include "hw/s390x/storage-keys.h"
--
2.20.1

Recognize the constant function for or-complement.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_xx_to_i(ctx, op, -1) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_ix_to_not(ctx, op, 0)) {
         return true;
--
2.25.1

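The identity behind the new fold_xx_to_i(ctx, op, -1) line is that or-complement of a value with itself is always all-ones: x | ~x == -1. A toy check, not part of the series; the helper and harness are mine:

    /* Toy check (mine) of the identity exploited above:
     * orc(x, x) = x | ~x is all-ones for every x. */
    #include <assert.h>
    #include <stdint.h>

    static uint64_t orc(uint64_t a, uint64_t b)
    {
        return a | ~b;
    }

    int main(void)
    {
        uint64_t samples[] = { 0, 1, 0x80000000ull, UINT64_MAX,
                               0x123456789abcdef0ull };
        for (unsigned i = 0; i < 5; i++) {
            assert(orc(samples[i], samples[i]) == UINT64_MAX);
        }
        return 0;
    }
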
In the cpu_ldst templates, we already require a MemOp, and it
is cleaner and clearer to pass that instead of 3 separate
arguments describing the memory operation.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst_template.h          | 22 +++++++++++-----------
 include/exec/cpu_ldst_useronly_template.h | 12 ++++++------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_template.h
+++ b/include/exec/cpu_ldst_template.h
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     RES_TYPE res;
     target_ulong addr;
     int mmu_idx = CPU_MMU_INDEX;
-    TCGMemOpIdx oi;
+    MemOp op = MO_TE | SHIFT;
 #if !defined(SOFTMMU_CODE_ACCESS)
-    uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx);
+    uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);
     trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 #endif

@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     entry = tlb_entry(env, mmu_idx, addr);
     if (unlikely(entry->ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        oi = make_memop_idx(SHIFT, mmu_idx);
+        TCGMemOpIdx oi = make_memop_idx(op, mmu_idx);
         res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
-                                                               oi, retaddr);
+                                                              oi, retaddr);
     } else {
         uintptr_t hostaddr = addr + entry->addend;
         res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     int res;
     target_ulong addr;
     int mmu_idx = CPU_MMU_INDEX;
-    TCGMemOpIdx oi;
-#if !defined(SOFTMMU_CODE_ACCESS)
-    uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx);
+    MemOp op = MO_TE | MO_SIGN | SHIFT;
+#ifndef SOFTMMU_CODE_ACCESS
+    uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);
     trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 #endif

@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     entry = tlb_entry(env, mmu_idx, addr);
     if (unlikely(entry->ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        oi = make_memop_idx(SHIFT, mmu_idx);
+        TCGMemOpIdx oi = make_memop_idx(op & ~MO_SIGN, mmu_idx);
         res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
                                MMUSUFFIX)(env, addr, oi, retaddr);
     } else {
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     CPUTLBEntry *entry;
     target_ulong addr;
     int mmu_idx = CPU_MMU_INDEX;
-    TCGMemOpIdx oi;
+    MemOp op = MO_TE | SHIFT;
 #if !defined(SOFTMMU_CODE_ACCESS)
-    uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx);
+    uint16_t meminfo = trace_mem_get_info(op, mmu_idx, true);
     trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 #endif

@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     entry = tlb_entry(env, mmu_idx, addr);
     if (unlikely(tlb_addr_write(entry) !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        oi = make_memop_idx(SHIFT, mmu_idx);
+        TCGMemOpIdx oi = make_memop_idx(op, mmu_idx);
         glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
                                                      retaddr);
     } else {
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
     ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
     clear_helper_retaddr();
 #else
-    uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false,
-                                            MMU_USER_IDX);
+    MemOp op = MO_TE | SHIFT;
+    uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, false);
     trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
     ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
 #endif
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
     ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
     clear_helper_retaddr();
 #else
-    uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false,
-                                            MMU_USER_IDX);
+    MemOp op = MO_TE | MO_SIGN | SHIFT;
+    uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, false);
     trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
     ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
     qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
@@ -XXX,XX +XXX,XX @@ static inline void
 glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
                                       RES_TYPE v)
 {
-    uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true,
-                                            MMU_USER_IDX);
+    MemOp op = MO_TE | SHIFT;
+    uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, true);
     trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
     glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
     qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
--
2.20.1

Recognize the identity function for low-part multiply.

Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
 static bool fold_mul(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xi_to_i(ctx, op, 0)) {
+        fold_xi_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
     return false;
--
2.25.1

The CFLAGS_NOPIE and LDFLAGS_NOPIE variables are used
in pc-bios/optionrom/Makefile, which has nothing to do
with the PIE setting of the main qemu executables.

This overrides any operating system default to build
all executables as PIE, which is important for ROMs.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configure | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if ! compile_prog "-Werror" "" ; then
     "Thread-Local Storage (TLS). Please upgrade to a version that does."
 fi

-if test "$pie" != "no" ; then
-  cat > $TMPC << EOF
+cat > $TMPC << EOF

 #ifdef __linux__
 #  define THREAD __thread
 #else
 #  define THREAD
 #endif
-
 static THREAD int tls_var;
-
 int main(void) { return tls_var; }
-
 EOF
-  # check we support --no-pie first...
-  if compile_prog "-Werror -fno-pie" "-no-pie"; then
-    CFLAGS_NOPIE="-fno-pie"
-    LDFLAGS_NOPIE="-nopie"
-  fi

+# Check we support --no-pie first; we will need this for building ROMs.
+if compile_prog "-Werror -fno-pie" "-no-pie"; then
+  CFLAGS_NOPIE="-fno-pie"
+  LDFLAGS_NOPIE="-no-pie"
+fi
+
+if test "$pie" != "no" ; then
   if compile_prog "-fPIE -DPIE" "-pie"; then
     QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
     LDFLAGS="-pie $LDFLAGS"
--
2.20.1

Recognize the identity function for division.

Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)

 static bool fold_divide(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, 1)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_dup(OptContext *ctx, TCGOp *op)
--
2.25.1

We don't actually need the result of the read, only to probe that the
memory mapping exists. This is exactly what probe_access does.

This is also the only user of any cpu_ld*_code_ra function.
Removing this allows the interface to be removed shortly.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/xtensa/mmu_helper.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/mmu_helper.c
+++ b/target/xtensa/mmu_helper.c
@@ -XXX,XX +XXX,XX @@
 void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
 {
     /*
-     * Attempt the memory load; we don't care about the result but
+     * Probe the memory; we don't care about the result but
      * only the side-effects (ie any MMU or other exception)
      */
-    cpu_ldub_code_ra(env, vaddr, GETPC());
+    probe_access(env, vaddr, 1, MMU_INST_FETCH,
+                 cpu_mmu_index(env, true), GETPC());
 }

 void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
--
2.20.1

Recognize the constant function for remainder.

Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)

 static bool fold_remainder(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xx_to_i(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }

 static bool fold_setcond(OptContext *ctx, TCGOp *op)
--
2.25.1

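The last few tcg/optimize patches all encode small algebraic identities: multiply by 1 or 0, divide by 1, remainder of a value by itself. A toy program, not from the series, spelling those identities out on unsigned host arithmetic; the sample values are mine:

    /* Toy demonstration (mine) of the identities folded above:
     * mul(x, 1) = x and mul(x, 0) = 0 (fold_xi_to_x / fold_xi_to_i),
     * div(x, 1) = x (fold_xi_to_x), rem(x, x) = 0 (fold_xx_to_i). */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t samples[] = { 1, 2, 0x80000000ull, UINT64_MAX };
        for (unsigned i = 0; i < 4; i++) {
            uint64_t x = samples[i];
            assert(x * 1 == x);
            assert(x * 0 == 0);
            assert(x / 1 == x);
            assert(x % x == 0);    /* x != 0 for all of these samples */
        }
        return 0;
    }
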
New patch

Certain targets, like riscv, produce signed 32-bit results.
This can lead to lots of redundant extensions as values are
manipulated.

Begin by tracking only the obvious sign-extensions, and
converting them to simple copies when possible.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 123 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 102 insertions(+), 21 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     TCGTemp *next_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
+    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
 } TempOptInfo;

 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     /* In flight values from optimization. */
     uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
+    uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
 } OptContext;

+/* Calculate the smask for a specific value. */
+static uint64_t smask_from_value(uint64_t value)
+{
+    int rep = clrsb64(value);
+    return ~(~0ull >> rep);
+}
+
+/*
+ * Calculate the smask for a given set of known-zeros.
+ * If there are lots of zeros on the left, we can consider the remainder
+ * an unsigned field, and thus the corresponding signed field is one bit
+ * larger.
+ */
+static uint64_t smask_from_zmask(uint64_t zmask)
+{
+    /*
+     * Only the 0 bits are significant for zmask, thus the msb itself
+     * must be zero, else we have no sign information.
+     */
+    int rep = clz64(zmask);
+    if (rep == 0) {
+        return 0;
+    }
+    rep -= 1;
+    return ~(~0ull >> rep);
+}
+
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
     ti->prev_copy = ts;
     ti->is_const = false;
     ti->z_mask = -1;
+    ti->s_mask = 0;
 }

 static void reset_temp(TCGArg arg)
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
+        ti->s_mask = smask_from_value(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
+        ti->s_mask = 0;
     }
 }

@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
     op->args[1] = src;

     di->z_mask = si->z_mask;
+    di->s_mask = si->s_mask;

     if (src_ts->type == dst_ts->type) {
         TempOptInfo *ni = ts_info(si->next_copy);
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)

     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
-        reset_temp(op->args[i]);
+        TCGTemp *ts = arg_temp(op->args[i]);
+        reset_ts(ts);
         /*
-         * Save the corresponding known-zero bits mask for the
+         * Save the corresponding known-zero/sign bits mask for the
          * first output argument (only one supported so far).
          */
         if (i == 0) {
-            arg_info(op->args[i])->z_mask = ctx->z_mask;
+            ts_info(ts)->z_mask = ctx->z_mask;
+            ts_info(ts)->s_mask = ctx->s_mask;
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
+    uint64_t s_mask = ctx->s_mask;

     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
+        s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
+        ctx->s_mask = s_mask;
     }

     if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)

 static bool fold_bswap(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask, sign;
+    uint64_t z_mask, s_mask, sign;

     if (arg_is_const(op->args[1])) {
         uint64_t t = arg_info(op->args[1])->val;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     }

     z_mask = arg_info(op->args[1])->z_mask;
+
     switch (op->opc) {
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap16_i64:
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
+    s_mask = smask_from_zmask(z_mask);

     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
         /* If the sign bit may be 1, force all the bits above to 1. */
         if (z_mask & sign) {
             z_mask |= sign;
+            s_mask = sign << 1;
         }
         break;
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
+        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
+    ctx->s_mask = s_mask;

     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
 static bool fold_extract(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask_old, z_mask;
+    int pos = op->args[2];
+    int len = op->args[3];

     if (arg_is_const(op->args[1])) {
         uint64_t t;

         t = arg_info(op->args[1])->val;
-        t = extract64(t, op->args[2], op->args[3]);
+        t = extract64(t, pos, len);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }

     z_mask_old = arg_info(op->args[1])->z_mask;
-    z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
-    if (op->args[2] == 0) {
+    z_mask = extract64(z_mask_old, pos, len);
+    if (pos == 0) {
         ctx->a_mask = z_mask_old ^ z_mask;
     }
     ctx->z_mask = z_mask;
+    ctx->s_mask = smask_from_zmask(z_mask);

     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)

 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask_old, z_mask, sign;
+    uint64_t s_mask_old, s_mask, z_mask, sign;
     bool type_change = false;

     if (fold_const1(ctx, op)) {
         return true;
     }

-    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+    z_mask = arg_info(op->args[1])->z_mask;
+    s_mask = arg_info(op->args[1])->s_mask;
+    s_mask_old = s_mask;

     switch (op->opc) {
     CASE_OP_32_64(ext8s):
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

     if (z_mask & sign) {
         z_mask |= sign;
-    } else if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
     }
+    s_mask |= sign << 1;
+
     ctx->z_mask = z_mask;
+    ctx->s_mask = s_mask;
+    if (!type_change) {
+        ctx->a_mask = s_mask & ~s_mask_old;
+    }

     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }

     ctx->z_mask = z_mask;
+    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change) {
         ctx->a_mask = z_mask_old ^ z_mask;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     MemOp mop = get_memop(oi);
     int width = 8 * memop_size(mop);

-    if (!(mop & MO_SIGN) && width < 64) {
-        ctx->z_mask = MAKE_64BIT_MASK(0, width);
+    if (width < 64) {
+        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        if (!(mop & MO_SIGN)) {
+            ctx->z_mask = MAKE_64BIT_MASK(0, width);
+            ctx->s_mask <<= 1;
+        }
     }

     /* Opcodes that touch guest memory stop the mb optimization. */
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)

 static bool fold_sextract(OptContext *ctx, TCGOp *op)
 {
-    int64_t z_mask_old, z_mask;
+    uint64_t z_mask, s_mask, s_mask_old;
+    int pos = op->args[2];
+    int len = op->args[3];

     if (arg_is_const(op->args[1])) {
         uint64_t t;

         t = arg_info(op->args[1])->val;
-        t = sextract64(t, op->args[2], op->args[3]);
+        t = sextract64(t, pos, len);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }

-    z_mask_old = arg_info(op->args[1])->z_mask;
-    z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
-    if (op->args[2] == 0 && z_mask >= 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
-    }
+    z_mask = arg_info(op->args[1])->z_mask;
+    z_mask = sextract64(z_mask, pos, len);
     ctx->z_mask = z_mask;

+    s_mask_old = arg_info(op->args[1])->s_mask;
+    s_mask = sextract64(s_mask_old, pos, len);
+    s_mask |= MAKE_64BIT_MASK(len, 64 - len);
+    ctx->s_mask = s_mask;
+
+    if (pos == 0) {
+        ctx->a_mask = s_mask & ~s_mask_old;
+    }
+
     return fold_masks(ctx, op);
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 {
     /* We can't do any folding with a load, but we can record bits. */
     switch (op->opc) {
+    CASE_OP_32_64(ld8s):
+        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
+        break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
+        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
+        break;
+    CASE_OP_32_64(ld16s):
+        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
+        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
+        break;
+    case INDEX_op_ld32s_i64:
+        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
+        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             ctx.type = TCG_TYPE_I32;
         }

-        /* Assume all bits affected, and no bits known zero. */
+        /* Assume all bits affected, no bits known zero, no sign reps. */
        ctx.a_mask = -1;
        ctx.z_mask = -1;
+        ctx.s_mask = 0;

         /*
          * Process each opcode.
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_extrh_i64_i32:
             done = fold_extu(&ctx, op);
             break;
+        CASE_OP_32_64(ld8s):
         CASE_OP_32_64(ld8u):
+        CASE_OP_32_64(ld16s):
         CASE_OP_32_64(ld16u):
+        case INDEX_op_ld32s_i64:
         case INDEX_op_ld32u_i64:
             done = fold_tcg_ld(&ctx, op);
             break;
--
2.25.1

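For reference, the two mask constructors introduced above can be exercised stand-alone. In this sketch, which is mine and not part of the patch, clrsb64() and clz64() are portable reimplementations of the helpers QEMU gets from qemu/host-utils.h:

    /* Standalone sketch (mine) of smask_from_value/smask_from_zmask. */
    #include <assert.h>
    #include <stdint.h>

    /* Number of bits below the msb that merely repeat the sign bit. */
    static int clrsb64(uint64_t v)
    {
        uint64_t sign = v >> 63;
        int rep = 0;
        for (int i = 62; i >= 0 && ((v >> i) & 1) == sign; i--) {
            rep++;
        }
        return rep;
    }

    static int clz64(uint64_t v)
    {
        int n = 0;
        for (int i = 63; i >= 0 && !((v >> i) & 1); i--) {
            n++;
        }
        return n;
    }

    static uint64_t smask_from_value(uint64_t value)
    {
        int rep = clrsb64(value);
        return ~(~0ull >> rep);
    }

    static uint64_t smask_from_zmask(uint64_t zmask)
    {
        int rep = clz64(zmask);
        if (rep == 0) {
            return 0;
        }
        rep -= 1;
        return ~(~0ull >> rep);
    }

    int main(void)
    {
        /* -1 has 63 redundant sign bits. */
        assert(smask_from_value(-1ull) == 0xfffffffffffffffeull);
        /* A sign-extended int8_t keeps at least 56 repetitions. */
        assert(smask_from_value((uint64_t)(int64_t)INT8_MIN)
               == 0xffffffffffffff00ull);
        /* An 8-bit unsigned field is a 9-bit signed field: 55 reps. */
        assert(smask_from_zmask(0xff) == 0xfffffffffffffe00ull);
        return 0;
    }
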
There are no uses of the *_cmmu names other than the bare wrapping
within the *_code inlines. Therefore rename the functions so we
can drop the inlines.

Use abi_ptr instead of target_ulong in preparation for user-only;
the two types are identical for softmmu.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst.h          | 29 ++++------
 include/exec/cpu_ldst_template.h | 21 -------
 tcg/tcg.h                        | 29 ----------
 accel/tcg/cputlb.c               | 94 ++++++++------------------------
 docs/devel/loads-stores.rst      |  4 +-
 5 files changed, 36 insertions(+), 141 deletions(-)

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
 #undef CPU_MMU_INDEX
 #undef MEMSUFFIX

-#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
-#define MEMSUFFIX _code
-#define SOFTMMU_CODE_ACCESS
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

-#define DATA_SIZE 1
-#include "exec/cpu_ldst_template.h"
+static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
+{
+    return (int8_t)cpu_ldub_code(env, addr);
+}

-#define DATA_SIZE 2
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 4
-#include "exec/cpu_ldst_template.h"
-
-#define DATA_SIZE 8
-#include "exec/cpu_ldst_template.h"
-
-#undef CPU_MMU_INDEX
-#undef MEMSUFFIX
-#undef SOFTMMU_CODE_ACCESS
+static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
+{
+    return (int16_t)cpu_lduw_code(env, addr);
+}

 #endif /* defined(CONFIG_USER_ONLY) */

diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_template.h
+++ b/include/exec/cpu_ldst_template.h
@@ -XXX,XX +XXX,XX @@

 /* generic load/store macros */

-#ifdef SOFTMMU_CODE_ACCESS
-
-static inline RES_TYPE
-glue(glue(cpu_ld, USUFFIX), _code)(CPUArchState *env, target_ulong ptr)
-{
-    TCGMemOpIdx oi = make_memop_idx(MO_TE | SHIFT, CPU_MMU_INDEX);
-    return glue(glue(helper_ret_ld, USUFFIX), _cmmu)(env, ptr, oi, 0);
-}
-
-#if DATA_SIZE <= 2
-static inline int
-glue(glue(cpu_lds, SUFFIX), _code)(CPUArchState *env, target_ulong ptr)
-{
-    return (DATA_STYPE)glue(glue(cpu_ld, USUFFIX), _code)(env, ptr);
-}
-#endif
-
-#else
-
 static inline RES_TYPE
 glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   target_ulong ptr,
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
     glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX, 0);
 }

-#endif /* !SOFTMMU_CODE_ACCESS */
-
 #undef RES_TYPE
 #undef DATA_TYPE
 #undef DATA_STYPE
diff --git a/tcg/tcg.h b/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);

-uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
-                             TCGMemOpIdx oi, uintptr_t retaddr);
-int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr);
-uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
-                             TCGMemOpIdx oi, uintptr_t retaddr);
-int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr);
-uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
-                             TCGMemOpIdx oi, uintptr_t retaddr);
-int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr);
-
 /* Temporary aliases until backends are converted. */
 #ifdef TARGET_WORDS_BIGENDIAN
 # define helper_ret_ldsw_mmu helper_be_ldsw_mmu
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
 # define helper_ret_stw_mmu helper_be_stw_mmu
 # define helper_ret_stl_mmu helper_be_stl_mmu
 # define helper_ret_stq_mmu helper_be_stq_mmu
-# define helper_ret_lduw_cmmu helper_be_lduw_cmmu
-# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu
-# define helper_ret_ldl_cmmu helper_be_ldl_cmmu
-# define helper_ret_ldq_cmmu helper_be_ldq_cmmu
 #else
 # define helper_ret_ldsw_mmu helper_le_ldsw_mmu
 # define helper_ret_lduw_mmu helper_le_lduw_mmu
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
 # define helper_ret_stw_mmu helper_le_stw_mmu
 # define helper_ret_stl_mmu helper_le_stl_mmu
 # define helper_ret_stq_mmu helper_le_stq_mmu
-# define helper_ret_lduw_cmmu helper_le_lduw_cmmu
-# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu
-# define helper_ret_ldl_cmmu helper_le_ldl_cmmu
-# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
 #endif

 uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,

 /* Code access functions. */

-static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
+static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
+    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
 }

-uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
-                             TCGMemOpIdx oi, uintptr_t retaddr)
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
 {
-    return full_ldub_cmmu(env, addr, oi, retaddr);
+    TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
+    return full_ldub_code(env, addr, oi, 0);
 }

-int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr)
+static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
+                               TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
+    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
 }

-static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
-                                  TCGMemOpIdx oi, uintptr_t retaddr)
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
 {
-    return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
-                       full_le_lduw_cmmu);
+    TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
+    return full_lduw_code(env, addr, oi, 0);
 }

-uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
-                             TCGMemOpIdx oi, uintptr_t retaddr)
+static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
+                              TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return full_le_lduw_cmmu(env, addr, oi, retaddr);
+    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
 }

-int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr)
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
 {
-    return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
+    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
+    return full_ldl_code(env, addr, oi, 0);
 }

-static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
-                                  TCGMemOpIdx oi, uintptr_t retaddr)
+static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
+                              TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
-                       full_be_lduw_cmmu);
+    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
 }

-uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
-                             TCGMemOpIdx oi, uintptr_t retaddr)
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
 {
-    return full_be_lduw_cmmu(env, addr, oi, retaddr);
-}
-
-int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr)
-{
-    return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
-}
-
-static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
-                                  TCGMemOpIdx oi, uintptr_t retaddr)
-{
-    return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
-                       full_le_ldul_cmmu);
-}
-
-uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr)
-{
-    return full_le_ldul_cmmu(env, addr, oi, retaddr);
-}
-
-static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
-                                  TCGMemOpIdx oi, uintptr_t retaddr)
-{
-    return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
-                       full_be_ldul_cmmu);
-}
-
-uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr)
-{
-    return full_be_ldul_cmmu(env, addr, oi, retaddr);
-}
-
-uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr)
-{
-    return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
-                       helper_le_ldq_cmmu);
-}
-
-uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
-                            TCGMemOpIdx oi, uintptr_t retaddr)
-{
-    return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
-                       helper_be_ldq_cmmu);
+    TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
+    return full_ldq_code(env, addr, oi, 0);
 }
diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/loads-stores.rst
+++ b/docs/devel/loads-stores.rst
@@ -XXX,XX +XXX,XX @@ more in line with the other memory access functions.

 load: ``helper_{endian}_ld{sign}{size}_mmu(env, addr, opindex, retaddr)``

-load (code): ``helper_{endian}_ld{sign}{size}_cmmu(env, addr, opindex, retaddr)``
-
 store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)``

 ``sign``
@@ -XXX,XX +XXX,XX @@ store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)``
 - ``ret`` : target endianness

 Regexes for git grep
- - ``\<helper_\(le\|be\|ret\)_ld[us]\?[bwlq]_c\?mmu\>``
+ - ``\<helper_\(le\|be\|ret\)_ld[us]\?[bwlq]_mmu\>``
 - ``\<helper_\(le\|be\|ret\)_st[bwlq]_mmu\>``
--
2.20.1

Sign repetitions are perforce all identical, whether they are 1 or 0.
Bitwise operations preserve the relative quantity of the repetitions.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
     z2 = arg_info(op->args[2])->z_mask;
     ctx->z_mask = z1 & z2;

+    /*
+     * Sign repetitions are perforce all identical, whether they are 1 or 0.
+     * Bitwise operations preserve the relative quantity of the repetitions.
+     */
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
+
     /*
      * Known-zeros does not imply known-ones. Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
     }
     ctx->z_mask = z1;

+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return fold_masks(ctx, op);
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
+
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)

     ctx->z_mask = arg_info(op->args[3])->z_mask
                 | arg_info(op->args[4])->z_mask;
+    ctx->s_mask = arg_info(op->args[3])->s_mask
+                & arg_info(op->args[4])->s_mask;

     if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
         uint64_t tv = arg_info(op->args[3])->val;
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
+
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_nor(OptContext *ctx, TCGOp *op)
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
+
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
         return true;
     }

+    ctx->s_mask = arg_info(op->args[1])->s_mask;
+
     /* Because of fold_to_not, we want to always return true, via finish. */
     finish_folding(ctx, op);
     return true;
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)

     ctx->z_mask = arg_info(op->args[1])->z_mask
                 | arg_info(op->args[2])->z_mask;
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return fold_masks(ctx, op);
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
         fold_ix_to_not(ctx, op, 0)) {
         return true;
     }
+
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)

     ctx->z_mask = arg_info(op->args[1])->z_mask
                 | arg_info(op->args[2])->z_mask;
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return fold_masks(ctx, op);
 }
--
2.25.1

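The claim in that message can be checked mechanically: however two values are combined bitwise, the result keeps at least as many redundant sign bits as the weaker input, so ANDing the two s_masks is conservative. A brute-force spot check; the harness and sample values are mine:

    /* Spot check (mine): clrsb of any bitwise combination is at least
     * the minimum clrsb of the inputs, and complement preserves it. */
    #include <assert.h>
    #include <stdint.h>

    static int clrsb64(uint64_t v)
    {
        uint64_t sign = v >> 63;
        int rep = 0;
        for (int i = 62; i >= 0 && ((v >> i) & 1) == sign; i--) {
            rep++;
        }
        return rep;
    }

    static int min(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        uint64_t samples[] = { 0, 1, 0xffffffffffffff80ull,
                               0x00000000000000ffull,
                               0xffffffff00000000ull, UINT64_MAX };
        for (unsigned i = 0; i < 6; i++) {
            for (unsigned j = 0; j < 6; j++) {
                uint64_t x = samples[i], y = samples[j];
                int lo = min(clrsb64(x), clrsb64(y));
                assert(clrsb64(x & y) >= lo);
                assert(clrsb64(x | y) >= lo);
                assert(clrsb64(x ^ y) >= lo);
                assert(clrsb64(~x) == clrsb64(x));
            }
        }
        return 0;
    }
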
The commentary talks about "in concert with the addresses
assigned in the relevant linker script", except there is no
linker script for softmmu, nor has there been for some time.

(Do not confuse the user-only linker script editing that was
removed in the previous patch, because user-only does not
use this code_gen_buffer allocation method.)

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translate-all.c | 37 +++++--------------------------------
 1 file changed, 5 insertions(+), 32 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static inline void *alloc_code_gen_buffer(void)
 {
     int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
-    uintptr_t start = 0;
     size_t size = tcg_ctx->code_gen_buffer_size;
     void *buf;

-    /* Constrain the position of the buffer based on the host cpu.
-       Note that these addresses are chosen in concert with the
-       addresses assigned in the relevant linker script file. */
-# if defined(__PIE__) || defined(__PIC__)
-    /* Don't bother setting a preferred location if we're building
-       a position-independent executable. We're more likely to get
-       an address near the main executable if we let the kernel
-       choose the address. */
-# elif defined(__x86_64__) && defined(MAP_32BIT)
-    /* Force the memory down into low memory with the executable.
-       Leave the choice of exact location with the kernel. */
-    flags |= MAP_32BIT;
-    /* Cannot expect to map more than 800MB in low memory. */
-    if (size > 800u * 1024 * 1024) {
-        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
-    }
-# elif defined(__sparc__)
-    start = 0x40000000ul;
-# elif defined(__s390x__)
-    start = 0x90000000ul;
-# elif defined(__mips__)
-#  if _MIPS_SIM == _ABI64
-    start = 0x128000000ul;
-#  else
-    start = 0x08000000ul;
-#  endif
-# endif
-
-    buf = mmap((void *)start, size, prot, flags, -1, 0);
+    buf = mmap(NULL, size, prot, flags, -1, 0);
     if (buf == MAP_FAILED) {
         return NULL;
     }

 #ifdef __mips__
     if (cross_256mb(buf, size)) {
-        /* Try again, with the original still mapped, to avoid re-acquiring
-           that 256mb crossing. This time don't specify an address. */
+        /*
+         * Try again, with the original still mapped, to avoid re-acquiring
+         * the same 256mb crossing.
+         */
         size_t size2;
         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
         switch ((int)(buf2 != MAP_FAILED)) {
--
2.20.1

The result is either 0 or 1, which means that we have
a 2 bit signed result, and thus 62 bits of sign.
For clarity, use the smask_from_zmask function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     }

     ctx->z_mask = 1;
+    ctx->s_mask = smask_from_zmask(1);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }

     ctx->z_mask = 1;
+    ctx->s_mask = smask_from_zmask(1);
     return false;

 do_setcond_const:
--
2.25.1

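The arithmetic in the message is easy to restate: with z_mask == 1 the value fits in one unsigned bit, hence a 2-bit signed field, hence 62 redundant sign bits. A tiny check of what smask_from_zmask(1) evaluates to, restated by hand rather than taken from the series:

    /* Hand evaluation (mine) of smask_from_zmask(1). */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int rep = 63;               /* clz64(1) == 63 */
        rep -= 1;                   /* unsigned field -> one extra signed bit */
        uint64_t s_mask = ~(~0ull >> rep);
        assert(rep == 62);
        assert(s_mask == ~(uint64_t)3);  /* everything above the low 2 bits */
        return 0;
    }
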
The functions generated by these macros are unused.

Cc: Chris Wulff <crwulff@gmail.com>
Cc: Marek Vasut <marex@denx.de>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/nios2/cpu.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.h
+++ b/target/nios2/cpu.h
@@ -XXX,XX +XXX,XX @@ void do_nios2_semihosting(CPUNios2State *env);
 #define CPU_SAVE_VERSION 1

 /* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _kernel
-#define MMU_MODE1_SUFFIX _user
 #define MMU_SUPERVISOR_IDX 0
 #define MMU_USER_IDX 1
--
2.20.1

The results are generally 6 bit unsigned values, though
the count leading and trailing bits may produce any value
for a zero input.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-
+    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
+    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
--
2.25.1

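As a reminder of why the fallback operand is folded in: tcg's clz/ctz opcodes return their second input when the first is zero, and for any non-zero 64-bit input the count itself fits in 6 bits. A small illustration; the loop and helper are mine:

    /* Illustration (mine): counts of a non-zero 64-bit value are <= 63. */
    #include <assert.h>
    #include <stdint.h>

    static int clz64(uint64_t v)
    {
        int n = 0;
        for (int i = 63; i >= 0 && !((v >> i) & 1); i--) {
            n++;
        }
        return n;
    }

    int main(void)
    {
        for (uint64_t v = 1; v; v <<= 1) {
            assert(clz64(v) <= 63);   /* always representable in 6 bits */
        }
        return 0;
    }
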
PIE is supported on many other hosts besides x86.

The default for non-x86 is now the same as x86: pie is used
if supported, and may be forced via --enable/--disable-pie.

The original commit (40d6444e91c) said:

"Non-x86 are not changed, as they require TCG changes"

but I think that's wrong -- there's nothing about PIE that
affects TCG one way or another.

Tested on aarch64 (bionic) and ppc64le (centos 7) hosts.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configure | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if ! compile_prog "-Werror" "" ; then
     "Thread-Local Storage (TLS). Please upgrade to a version that does."
 fi

-if test "$pie" = ""; then
-  case "$cpu-$targetos" in
-    i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD)
-      ;;
-    *)
-      pie="no"
-      ;;
-  esac
-fi
-
 if test "$pie" != "no" ; then
   cat > $TMPC << EOF
--
2.20.1

For constant shifts, we can simply shift the s_mask.

For variable shifts, we know that sar does not reduce
the s_mask, which helps for sequences like

    ext32s_i64  t, in
    sar_i64     t, t, v
    ext32s_i64  out, t

allowing the final extend to be eliminated.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t smask_from_zmask(uint64_t zmask)
     return ~(~0ull >> rep);
 }

+/*
+ * Recreate a properly left-aligned smask after manipulation.
+ * Some bit-shuffling, particularly shifts and rotates, may
+ * retain sign bits on the left, but may scatter disconnected
+ * sign bits on the right. Retain only what remains to the left.
+ */
+static uint64_t smask_from_smask(int64_t smask)
+{
+    /* Only the 1 bits are significant for smask */
+    return smask_from_zmask(~smask);
+}
+
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)

 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask, z_mask, sign;
+
     if (fold_const2(ctx, op) ||
         fold_ix_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }

+    s_mask = arg_info(op->args[1])->s_mask;
+    z_mask = arg_info(op->args[1])->z_mask;
+
     if (arg_is_const(op->args[2])) {
-        ctx->z_mask = do_constant_folding(op->opc, ctx->type,
-                                          arg_info(op->args[1])->z_mask,
-                                          arg_info(op->args[2])->val);
+        int sh = arg_info(op->args[2])->val;
+
+        ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
+
+        s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
+        ctx->s_mask = smask_from_smask(s_mask);
+
         return fold_masks(ctx, op);
     }
+
+    switch (op->opc) {
+    CASE_OP_32_64(sar):
+        /*
+         * Arithmetic right shift will not reduce the number of
+         * input sign repetitions.
+         */
+        ctx->s_mask = s_mask;
+        break;
+    CASE_OP_32_64(shr):
+        /*
+         * If the sign bit is known zero, then logical right shift
+         * will not reduce the number of input sign repetitions.
+         */
+        sign = (s_mask & -s_mask) >> 1;
+        if (!(z_mask & sign)) {
+            ctx->s_mask = s_mask;
+        }
+        break;
+    default:
+        break;
+    }
+
     return false;
 }
--
2.25.1

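The ext32s/sar/ext32s sequence in the message has a direct host-side analogue. A minimal sketch, mine rather than the series', with the caveat that ">>" being an arithmetic shift on negative int64_t is implementation-defined in ISO C, though true on the hosts QEMU supports:

    /* Host analogue (mine) of the sequence above: once a value is 32-bit
     * sign-extended, an arithmetic right shift keeps the sign repetitions,
     * so the trailing ext32s is a no-op. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (int sh = 0; sh < 64; sh++) {
            int64_t t = INT32_MIN;             /* ext32s_i64  t, in   */
            t >>= sh;                          /* sar_i64     t, t, v */
            assert(t == (int64_t)(int32_t)t);  /* ext32s_i64  out, t  */
        }
        return 0;
    }
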