The following changes since commit c52d69e7dbaaed0ffdef8125e79218672c30161d:

  Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20211027' into staging (2021-10-27 11:45:18 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211027

for you to fetch changes up to 820c025f0dcacf2f3c12735b1f162893fbfa7bc6:

  tcg/optimize: Propagate sign info for shifting (2021-10-27 17:11:23 -0700)

----------------------------------------------------------------
Improvements to qemu/int128
Fixes for 128/64 division
Cleanup tcg/optimize.c
Optimize redundant sign extensions

----------------------------------------------------------------
Frédéric Pétrot (1):
      qemu/int128: Add int128_{not,xor}

Luis Pires (4):
      host-utils: move checks out of divu128/divs128
      host-utils: move udiv_qrnnd() to host-utils
      host-utils: add 128-bit quotient support to divu128/divs128
      host-utils: add unit tests for divu128/divs128

Richard Henderson (51):
      tcg/optimize: Rename "mask" to "z_mask"
      tcg/optimize: Split out OptContext
      tcg/optimize: Remove do_default label
      tcg/optimize: Change tcg_opt_gen_{mov,movi} interface
      tcg/optimize: Move prev_mb into OptContext
      tcg/optimize: Split out init_arguments
      tcg/optimize: Split out copy_propagate
      tcg/optimize: Split out fold_call
      tcg/optimize: Drop nb_oargs, nb_iargs locals
      tcg/optimize: Change fail return for do_constant_folding_cond*
      tcg/optimize: Return true from tcg_opt_gen_{mov,movi}
      tcg/optimize: Split out finish_folding
      tcg/optimize: Use a boolean to avoid a mass of continues
      tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}
      tcg/optimize: Split out fold_const{1,2}
      tcg/optimize: Split out fold_setcond2
      tcg/optimize: Split out fold_brcond2
      tcg/optimize: Split out fold_brcond
      tcg/optimize: Split out fold_setcond
      tcg/optimize: Split out fold_mulu2_i32
      tcg/optimize: Split out fold_addsub2_i32
      tcg/optimize: Split out fold_movcond
      tcg/optimize: Split out fold_extract2
      tcg/optimize: Split out fold_extract, fold_sextract
      tcg/optimize: Split out fold_deposit
      tcg/optimize: Split out fold_count_zeros
      tcg/optimize: Split out fold_bswap
      tcg/optimize: Split out fold_dup, fold_dup2
      tcg/optimize: Split out fold_mov
      tcg/optimize: Split out fold_xx_to_i
      tcg/optimize: Split out fold_xx_to_x
      tcg/optimize: Split out fold_xi_to_i
      tcg/optimize: Add type to OptContext
      tcg/optimize: Split out fold_to_not
      tcg/optimize: Split out fold_sub_to_neg
      tcg/optimize: Split out fold_xi_to_x
      tcg/optimize: Split out fold_ix_to_i
      tcg/optimize: Split out fold_masks
      tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
      tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops
      tcg/optimize: Sink commutative operand swapping into fold functions
      tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values
      tcg/optimize: Use fold_xx_to_i for orc
      tcg/optimize: Use fold_xi_to_x for mul
      tcg/optimize: Use fold_xi_to_x for div
      tcg/optimize: Use fold_xx_to_i for rem
      tcg/optimize: Optimize sign extensions
      tcg/optimize: Propagate sign info for logical operations
      tcg/optimize: Propagate sign info for setcond
      tcg/optimize: Propagate sign info for bit counting
      tcg/optimize: Propagate sign info for shifting

 include/fpu/softfloat-macros.h |   82 --
 include/hw/clock.h             |    5 +-
 include/qemu/host-utils.h      |  121 +-
 include/qemu/int128.h          |   20 +
 target/ppc/int_helper.c        |   23 +-
 tcg/optimize.c                 | 2644 ++++++++++++++++++++++++----------------
 tests/unit/test-div128.c       |  197 +++
 util/host-utils.c              |  147 ++-
 tests/unit/meson.build         |    1 +
 9 files changed, 2053 insertions(+), 1187 deletions(-)
 create mode 100644 tests/unit/test-div128.c
From: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>

Addition of not and xor on 128-bit integers.

Signed-off-by: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
Co-authored-by: Fabien Portas <fabien.portas@grenoble-inp.org>
Message-Id: <20211025122818.168890-3-frederic.petrot@univ-grenoble-alpes.fr>
[rth: Split out logical operations.]
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/int128.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
     return a;
 }

+static inline Int128 int128_not(Int128 a)
+{
+    return ~a;
+}
+
 static inline Int128 int128_and(Int128 a, Int128 b)
 {
     return a & b;
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
     return a | b;
 }

+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+    return a ^ b;
+}
+
 static inline Int128 int128_rshift(Int128 a, int n)
 {
     return a >> n;
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
     return int128_make128(a, (a < 0) ? -1 : 0);
 }

+static inline Int128 int128_not(Int128 a)
+{
+    return int128_make128(~a.lo, ~a.hi);
+}
+
 static inline Int128 int128_and(Int128 a, Int128 b)
 {
     return int128_make128(a.lo & b.lo, a.hi & b.hi);
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
     return int128_make128(a.lo | b.lo, a.hi | b.hi);
 }

+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+    return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
+}
+
 static inline Int128 int128_rshift(Int128 a, int n)
 {
     int64_t h;
--
2.25.1
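As a quick aside for readers, here is a minimal caller-side sketch (not part
of the series) of how the two new helpers compose with the existing Int128
accessors from include/qemu/int128.h; int128_not_xor_demo is a hypothetical
name:

    #include "qemu/int128.h"

    /* x ^ ~x yields all-ones regardless of which Int128
     * representation (native __int128 or struct) is in use. */
    static inline bool int128_not_xor_demo(Int128 x)
    {
        return int128_eq(int128_xor(x, int128_not(x)),
                         int128_make128(-1, -1));
    }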
From: Luis Pires <luis.pires@eldorado.org.br>

In preparation for changing the divu128/divs128 implementations
to allow for quotients larger than 64 bits, move the div-by-zero
and overflow checks to the callers.

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-2-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/clock.h        |  5 +++--
 include/qemu/host-utils.h | 34 ++++++++++++----------------------
 target/ppc/int_helper.c   | 14 +++++++++-----
 util/host-utils.c         | 40 ++++++++++++++++++----------------------
 4 files changed, 42 insertions(+), 51 deletions(-)

diff --git a/include/hw/clock.h b/include/hw/clock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
         return 0;
     }
     /*
-     * Ignore divu128() return value as we've caught div-by-zero and don't
-     * need different behaviour for overflow.
+     * BUG: when CONFIG_INT128 is not defined, the current implementation of
+     * divu128 does not return a valid truncated quotient, so the result will
+     * be wrong.
      */
     divu128(&lo, &hi, clk->period);
     return lo;
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     return (__int128_t)a * b / c;
 }

-static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
-    if (divisor == 0) {
-        return 1;
-    } else {
-        __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
-        __uint128_t result = dividend / divisor;
-        *plow = result;
-        *phigh = dividend % divisor;
-        return result > UINT64_MAX;
-    }
+    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
+    __uint128_t result = dividend / divisor;
+    *plow = result;
+    *phigh = dividend % divisor;
 }

-static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
 {
-    if (divisor == 0) {
-        return 1;
-    } else {
-        __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
-        __int128_t result = dividend / divisor;
-        *plow = result;
-        *phigh = dividend % divisor;
-        return result != *plow;
-    }
+    __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+    __int128_t result = dividend / divisor;
+    *plow = result;
+    *phigh = dividend % divisor;
 }
 #else
 void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
 void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);

 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
     uint64_t rt = 0;
     int overflow = 0;

-    overflow = divu128(&rt, &ra, rb);
-
-    if (unlikely(overflow)) {
+    if (unlikely(rb == 0 || ra >= rb)) {
+        overflow = 1;
         rt = 0; /* Undefined */
+    } else {
+        divu128(&rt, &ra, rb);
     }

     if (oe) {
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
     int64_t rt = 0;
     int64_t ra = (int64_t)rau;
     int64_t rb = (int64_t)rbu;
-    int overflow = divs128(&rt, &ra, rb);
+    int overflow = 0;

-    if (unlikely(overflow)) {
+    if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
+        overflow = 1;
         rt = 0; /* Undefined */
+    } else {
+        divs128(&rt, &ra, rb);
     }

     if (oe) {
diff --git a/util/host-utils.c b/util/host-utils.c
index XXXXXXX..XXXXXXX 100644
--- a/util/host-utils.c
+++ b/util/host-utils.c
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
     *phigh = rh;
 }

-/* Unsigned 128x64 division.  Returns 1 if overflow (divide by zero or */
-/* quotient exceeds 64 bits).  Otherwise returns quotient via plow and */
-/* remainder via phigh. */
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+/*
+ * Unsigned 128-by-64 division. Returns quotient via plow and
+ * remainder via phigh.
+ * The result must fit in 64 bits (plow) - otherwise, the result
+ * is undefined.
+ * This function will cause a division by zero if passed a zero divisor.
+ */
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
     uint64_t dhi = *phigh;
     uint64_t dlo = *plow;
     unsigned i;
     uint64_t carry = 0;

-    if (divisor == 0) {
-        return 1;
-    } else if (dhi == 0) {
+    if (divisor == 0 || dhi == 0) {
         *plow = dlo / divisor;
         *phigh = dlo % divisor;
-        return 0;
-    } else if (dhi >= divisor) {
-        return 1;
     } else {

         for (i = 0; i < 64; i++) {
@@ -XXX,XX +XXX,XX @@ int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)

         *plow = dlo;
         *phigh = dhi;
-        return 0;
     }
 }

-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+/*
+ * Signed 128-by-64 division. Returns quotient via plow and
+ * remainder via phigh.
+ * The result must fit in 64 bits (plow) - otherwise, the result
+ * is undefined.
+ * This function will cause a division by zero if passed a zero divisor.
+ */
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
 {
     int sgn_dvdnd = *phigh < 0;
     int sgn_divsr = divisor < 0;
-    int overflow = 0;

     if (sgn_dvdnd) {
         *plow = ~(*plow);
@@ -XXX,XX +XXX,XX @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
         divisor = 0 - divisor;
     }

-    overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
+    divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);

     if (sgn_dvdnd ^ sgn_divsr) {
         *plow = 0 - *plow;
     }
-
-    if (!overflow) {
-        if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) {
-            overflow = 1;
-        }
-    }
-
-    return overflow;
 }
 #endif
--
2.25.1
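The pattern this establishes for callers can be summarized in a short sketch
(a hypothetical helper, not from the series): screen for division by zero and
for a quotient that cannot fit in 64 bits before calling divu128(), exactly
as the helper_divdeu() hunk above now does:

    /* The quotient of (*phigh:*plow) / divisor fits in 64 bits
     * if and only if *phigh < divisor. */
    static bool div128by64_checked(uint64_t *plow, uint64_t *phigh,
                                   uint64_t divisor)
    {
        if (divisor == 0 || *phigh >= divisor) {
            return false;   /* div-by-zero or 64-bit overflow */
        }
        divu128(plow, phigh, divisor);  /* quotient -> *plow, rem -> *phigh */
        return true;
    }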
From: Luis Pires <luis.pires@eldorado.org.br>

Move udiv_qrnnd() from include/fpu/softfloat-macros.h to host-utils,
so it can be reused by divu128().

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-3-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat-macros.h | 82 ----------------------------------
 include/qemu/host-utils.h      | 81 +++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+), 82 deletions(-)

diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-macros.h
+++ b/include/fpu/softfloat-macros.h
@@ -XXX,XX +XXX,XX @@
  * so some portions are provided under:
  *  the SoftFloat-2a license
  *  the BSD license
- *  GPL-v2-or-later
  *
  * Any future contributions to this file after December 1st 2014 will be
  * taken to be licensed under the Softfloat-2a license unless specifically
@@ -XXX,XX +XXX,XX @@ this code that are retained.
  * THE POSSIBILITY OF SUCH DAMAGE.
  */

-/* Portions of this work are licensed under the terms of the GNU GPL,
- * version 2 or later. See the COPYING file in the top-level directory.
- */
-
 #ifndef FPU_SOFTFLOAT_MACROS_H
 #define FPU_SOFTFLOAT_MACROS_H

@@ -XXX,XX +XXX,XX @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
 }

-/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
- * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
- *
- * Licensed under the GPLv2/LGPLv3
- */
-static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
-                                  uint64_t n0, uint64_t d)
-{
-#if defined(__x86_64__)
-    uint64_t q;
-    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
-    return q;
-#elif defined(__s390x__) && !defined(__clang__)
-    /* Need to use a TImode type to get an even register pair for DLGR. */
-    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
-    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
-    *r = n >> 64;
-    return n;
-#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
-    /* From Power ISA 2.06, programming note for divdeu. */
-    uint64_t q1, q2, Q, r1, r2, R;
-    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
-        : "=&r"(q1), "=r"(q2)
-        : "r"(n1), "r"(n0), "r"(d));
-    r1 = -(q1 * d);       /* low part of (n1<<64) - (q1 * d) */
-    r2 = n0 - (q2 * d);
-    Q = q1 + q2;
-    R = r1 + r2;
-    if (R >= d || R < r2) { /* overflow implies R > d */
-        Q += 1;
-        R -= d;
-    }
-    *r = R;
-    return Q;
-#else
-    uint64_t d0, d1, q0, q1, r1, r0, m;
-
-    d0 = (uint32_t)d;
-    d1 = d >> 32;
-
-    r1 = n1 % d1;
-    q1 = n1 / d1;
-    m = q1 * d0;
-    r1 = (r1 << 32) | (n0 >> 32);
-    if (r1 < m) {
-        q1 -= 1;
-        r1 += d;
-        if (r1 >= d) {
-            if (r1 < m) {
-                q1 -= 1;
-                r1 += d;
-            }
-        }
-    }
-    r1 -= m;
-
-    r0 = r1 % d1;
-    q0 = r1 / d1;
-    m = q0 * d0;
-    r0 = (r0 << 32) | (uint32_t)n0;
-    if (r0 < m) {
-        q0 -= 1;
-        r0 += d;
-        if (r0 >= d) {
-            if (r0 < m) {
-                q0 -= 1;
-                r0 += d;
-            }
-        }
-    }
-    r0 -= m;
-
-    *r = r0;
-    return (q1 << 32) | q0;
-#endif
-}
-
 /*----------------------------------------------------------------------------
 | Returns an approximation to the square root of the 32-bit significand given
 | by `a'.  Considered as an integer, `a' must be at least 2^31.  If bit 0 of
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@
 * THE SOFTWARE.
 */

+/* Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
 #ifndef HOST_UTILS_H
 #define HOST_UTILS_H

@@ -XXX,XX +XXX,XX @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
 */
 void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);

+/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
+ * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
+ *
+ * Licensed under the GPLv2/LGPLv3
+ */
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
+                                  uint64_t n0, uint64_t d)
+{
+#if defined(__x86_64__)
+    uint64_t q;
+    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
+    return q;
+#elif defined(__s390x__) && !defined(__clang__)
+    /* Need to use a TImode type to get an even register pair for DLGR. */
+    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
+    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
+    *r = n >> 64;
+    return n;
+#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
+    /* From Power ISA 2.06, programming note for divdeu. */
+    uint64_t q1, q2, Q, r1, r2, R;
+    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
+        : "=&r"(q1), "=r"(q2)
+        : "r"(n1), "r"(n0), "r"(d));
+    r1 = -(q1 * d);       /* low part of (n1<<64) - (q1 * d) */
+    r2 = n0 - (q2 * d);
+    Q = q1 + q2;
+    R = r1 + r2;
+    if (R >= d || R < r2) { /* overflow implies R > d */
+        Q += 1;
+        R -= d;
+    }
+    *r = R;
+    return Q;
+#else
+    uint64_t d0, d1, q0, q1, r1, r0, m;
+
+    d0 = (uint32_t)d;
+    d1 = d >> 32;
+
+    r1 = n1 % d1;
+    q1 = n1 / d1;
+    m = q1 * d0;
+    r1 = (r1 << 32) | (n0 >> 32);
+    if (r1 < m) {
+        q1 -= 1;
+        r1 += d;
+        if (r1 >= d) {
+            if (r1 < m) {
+                q1 -= 1;
+                r1 += d;
+            }
+        }
+    }
+    r1 -= m;
+
+    r0 = r1 % d1;
+    q0 = r1 / d1;
+    m = q0 * d0;
+    r0 = (r0 << 32) | (uint32_t)n0;
+    if (r0 < m) {
+        q0 -= 1;
+        r0 += d;
+        if (r0 >= d) {
+            if (r0 < m) {
+                q0 -= 1;
+                r0 += d;
+            }
+        }
+    }
+    r0 -= m;
+
+    *r = r0;
+    return (q1 << 32) | q0;
+#endif
+}
+
 #endif
--
2.25.1
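For readers unfamiliar with the primitive being moved: udiv_qrnnd() divides
the 128-bit value n1:n0 by d, returning the 64-bit quotient and storing the
remainder in *r; the caller must guarantee n1 < d so that the quotient fits
in 64 bits. A hedged self-check, assuming a compiler with __int128 support
(the function name udiv_qrnnd_selftest is hypothetical):

    #include <assert.h>

    static void udiv_qrnnd_selftest(uint64_t n1, uint64_t n0, uint64_t d)
    {
        uint64_t r;
        uint64_t q;
        unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;

        assert(d != 0 && n1 < d);       /* precondition: quotient fits */
        q = udiv_qrnnd(&r, n1, n0, d);

        assert(q == (uint64_t)(n / d)); /* quotient matches wide division */
        assert(r == (uint64_t)(n % d)); /* remainder likewise */
    }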
From: Luis Pires <luis.pires@eldorado.org.br>

These will be used to implement new decimal floating point
instructions from Power ISA 3.1.

The remainder is now returned directly by divu128/divs128,
freeing up phigh to receive the high 64 bits of the quotient.

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-4-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/clock.h        |   6 +-
 include/qemu/host-utils.h |  20 ++++--
 target/ppc/int_helper.c   |   9 +--
 util/host-utils.c         | 133 +++++++++++++++++++++++++-------------
 4 files changed, 108 insertions(+), 60 deletions(-)

diff --git a/include/hw/clock.h b/include/hw/clock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
     if (clk->period == 0) {
         return 0;
     }
-    /*
-     * BUG: when CONFIG_INT128 is not defined, the current implementation of
-     * divu128 does not return a valid truncated quotient, so the result will
-     * be wrong.
-     */
+
     divu128(&lo, &hi, clk->period);
     return lo;
 }
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     return (__int128_t)a * b / c;
 }

-static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
+                               uint64_t divisor)
 {
     __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
     __uint128_t result = dividend / divisor;
+
     *plow = result;
-    *phigh = dividend % divisor;
+    *phigh = result >> 64;
+    return dividend % divisor;
 }

-static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
+                              int64_t divisor)
 {
-    __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
     __int128_t result = dividend / divisor;
+
     *plow = result;
-    *phigh = dividend % divisor;
+    *phigh = result >> 64;
+    return dividend % divisor;
 }
 #else
 void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
 void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);

 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)

 uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
 {
-    int64_t rt = 0;
+    uint64_t rt = 0;
     int64_t ra = (int64_t)rau;
     int64_t rb = (int64_t)rbu;
     int overflow = 0;
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
     int cr;
     uint64_t lo_value;
     uint64_t hi_value;
+    uint64_t rem;
     ppc_avr_t ret = { .u64 = { 0, 0 } };

     if (b->VsrSD(0) < 0) {
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
          * In that case, we leave r unchanged.
          */
     } else {
-        divu128(&lo_value, &hi_value, 1000000000000000ULL);
+        rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);

-        for (i = 1; i < 16; hi_value /= 10, i++) {
-            bcd_put_digit(&ret, hi_value % 10, i);
+        for (i = 1; i < 16; rem /= 10, i++) {
+            bcd_put_digit(&ret, rem % 10, i);
         }

         for (; i < 32; lo_value /= 10, i++) {
diff --git a/util/host-utils.c b/util/host-utils.c
index XXXXXXX..XXXXXXX 100644
--- a/util/host-utils.c
+++ b/util/host-utils.c
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
 }

 /*
- * Unsigned 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Unsigned 128-by-64 division.
+ * Returns the remainder.
+ * Returns quotient via plow and phigh.
+ * Also returns the remainder via the function return value.
  */
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
 {
     uint64_t dhi = *phigh;
     uint64_t dlo = *plow;
-    unsigned i;
-    uint64_t carry = 0;
+    uint64_t rem, dhighest;
+    int sh;

     if (divisor == 0 || dhi == 0) {
         *plow = dlo / divisor;
-        *phigh = dlo % divisor;
+        *phigh = 0;
+        return dlo % divisor;
     } else {
+        sh = clz64(divisor);

-        for (i = 0; i < 64; i++) {
-            carry = dhi >> 63;
-            dhi = (dhi << 1) | (dlo >> 63);
-            if (carry || (dhi >= divisor)) {
-                dhi -= divisor;
-                carry = 1;
-            } else {
-                carry = 0;
+        if (dhi < divisor) {
+            if (sh != 0) {
+                /* normalize the divisor, shifting the dividend accordingly */
+                divisor <<= sh;
+                dhi = (dhi << sh) | (dlo >> (64 - sh));
+                dlo <<= sh;
             }
-            dlo = (dlo << 1) | carry;
+
+            *phigh = 0;
+            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
+        } else {
+            if (sh != 0) {
+                /* normalize the divisor, shifting the dividend accordingly */
+                divisor <<= sh;
+                dhighest = dhi >> (64 - sh);
+                dhi = (dhi << sh) | (dlo >> (64 - sh));
+                dlo <<= sh;
+
+                *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
+            } else {
+                /**
+                 * dhi >= divisor
+                 * Since the MSB of divisor is set (sh == 0),
+                 * (dhi - divisor) < divisor
+                 *
+                 * Thus, the high part of the quotient is 1, and we can
+                 * calculate the low part with a single call to udiv_qrnnd
+                 * after subtracting divisor from dhi
+                 */
+                dhi -= divisor;
+                *phigh = 1;
+            }
+
+            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
         }

-        *plow = dlo;
-        *phigh = dhi;
+        /*
+         * since the dividend/divisor might have been normalized,
+         * the remainder might also have to be shifted back
+         */
+        return rem >> sh;
     }
 }

 /*
- * Signed 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Signed 128-by-64 division.
+ * Returns quotient via plow and phigh.
+ * Also returns the remainder via the function return value.
  */
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
 {
-    int sgn_dvdnd = *phigh < 0;
-    int sgn_divsr = divisor < 0;
+    bool neg_quotient = false, neg_remainder = false;
+    uint64_t unsig_hi = *phigh, unsig_lo = *plow;
+    uint64_t rem;

-    if (sgn_dvdnd) {
-        *plow = ~(*plow);
-        *phigh = ~(*phigh);
-        if (*plow == (int64_t)-1) {
+    if (*phigh < 0) {
+        neg_quotient = !neg_quotient;
+        neg_remainder = !neg_remainder;
+
+        if (unsig_lo == 0) {
+            unsig_hi = -unsig_hi;
+        } else {
+            unsig_hi = ~unsig_hi;
+            unsig_lo = -unsig_lo;
+        }
+    }
+
+    if (divisor < 0) {
+        neg_quotient = !neg_quotient;
+
+        divisor = -divisor;
+    }
+
+    rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);
+
+    if (neg_quotient) {
+        if (unsig_lo == 0) {
+            *phigh = -unsig_hi;
             *plow = 0;
-            (*phigh)++;
-        } else {
-            (*plow)++;
-        }
+        } else {
+            *phigh = ~unsig_hi;
+            *plow = -unsig_lo;
+        }
+    } else {
+        *phigh = unsig_hi;
+        *plow = unsig_lo;
     }

-    if (sgn_divsr) {
-        divisor = 0 - divisor;
-    }
-
-    divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
-
-    if (sgn_dvdnd ^ sgn_divsr) {
-        *plow = 0 - *plow;
+    if (neg_remainder) {
+        return -rem;
+    } else {
+        return rem;
     }
 }
 #endif
--
2.25.1
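A small usage sketch of the new calling convention (illustrative values, not
from the patch): the 128-bit quotient now comes back through *plow/*phigh,
and the remainder is the return value:

    uint64_t lo = 1;            /* dividend = 2^64 + 1 */
    uint64_t hi = 1;
    uint64_t rem = divu128(&lo, &hi, 2);

    /* (2^64 + 1) / 2 = 2^63, remainder 1 */
    assert(hi == 0);
    assert(lo == 0x8000000000000000ULL);
    assert(rem == 1);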
From: Luis Pires <luis.pires@eldorado.org.br>

Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-5-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/unit/test-div128.c | 197 +++++++++++++++++++++++++++++++++++++++
 tests/unit/meson.build   |   1 +
 2 files changed, 198 insertions(+)
 create mode 100644 tests/unit/test-div128.c

diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/unit/test-div128.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Test 128-bit division functions
+ *
+ * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+
+typedef struct {
+    uint64_t high;
+    uint64_t low;
+    uint64_t rhigh;
+    uint64_t rlow;
+    uint64_t divisor;
+    uint64_t remainder;
+} test_data_unsigned;
+
+typedef struct {
+    int64_t high;
+    uint64_t low;
+    int64_t rhigh;
+    uint64_t rlow;
+    int64_t divisor;
+    int64_t remainder;
+} test_data_signed;
+
+static const test_data_unsigned test_table_unsigned[] = {
+    /* Dividend fits in 64 bits */
+    { 0x0000000000000000ULL, 0x0000000000000000ULL,
+      0x0000000000000000ULL, 0x0000000000000000ULL,
+      0x0000000000000001ULL, 0x0000000000000000ULL},
+    { 0x0000000000000000ULL, 0x0000000000000001ULL,
+      0x0000000000000000ULL, 0x0000000000000001ULL,
+      0x0000000000000001ULL, 0x0000000000000000ULL},
+    { 0x0000000000000000ULL, 0x0000000000000003ULL,
+      0x0000000000000000ULL, 0x0000000000000001ULL,
+      0x0000000000000002ULL, 0x0000000000000001ULL},
+    { 0x0000000000000000ULL, 0x8000000000000000ULL,
+      0x0000000000000000ULL, 0x8000000000000000ULL,
+      0x0000000000000001ULL, 0x0000000000000000ULL},
+    { 0x0000000000000000ULL, 0xa000000000000000ULL,
+      0x0000000000000000ULL, 0x0000000000000002ULL,
+      0x4000000000000000ULL, 0x2000000000000000ULL},
+    { 0x0000000000000000ULL, 0x8000000000000000ULL,
+      0x0000000000000000ULL, 0x0000000000000001ULL,
+      0x8000000000000000ULL, 0x0000000000000000ULL},
+
+    /* Dividend > 64 bits, with MSB 0 */
+    { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+      0x123456789abcdefeULL, 0xefedcba987654321ULL,
+      0x0000000000000001ULL, 0x0000000000000000ULL},
+    { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+      0x0000000000000001ULL, 0x000000000000000dULL,
+      0x123456789abcdefeULL, 0x03456789abcdf03bULL},
+    { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+      0x0123456789abcdefULL, 0xeefedcba98765432ULL,
+      0x0000000000000010ULL, 0x0000000000000001ULL},
+
+    /* Dividend > 64 bits, with MSB 1 */
+    { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+      0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+      0x0000000000000001ULL, 0x0000000000000000ULL},
+    { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+      0x0000000000000001ULL, 0x0000000000000000ULL,
+      0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
+    { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+      0x0feeddccbbaa9988ULL, 0x7766554433221100ULL,
+      0x0000000000000010ULL, 0x000000000000000fULL},
+    { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+      0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL,
+      0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL},
+
+    /**
+     * Divisor == 64 bits, with MSB 1
+     * and high 64 bits of dividend >= divisor
+     * (for testing normalization)
+     */
+    { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+      0x0000000000000001ULL, 0x0000000000000000ULL,
+      0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
+    { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+      0x0000000000000001ULL, 0xfddbb9977553310aULL,
+      0x8000000000000001ULL, 0x78899aabbccddf05ULL},
+
+    /* Dividend > 64 bits, divisor almost as big */
+    { 0x0000000000000001ULL, 0x23456789abcdef01ULL,
+      0x0000000000000000ULL, 0x000000000000000fULL,
+      0x123456789abcdefeULL, 0x123456789abcde1fULL},
+};
+
+static const test_data_signed test_table_signed[] = {
+    /* Positive dividend, positive/negative divisors */
+    { 0x0000000000000000LL, 0x0000000000bc614eULL,
+      0x0000000000000000LL, 0x0000000000bc614eULL,
+      0x0000000000000001LL, 0x0000000000000000LL},
+    { 0x0000000000000000LL, 0x0000000000bc614eULL,
+      0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0xffffffffffffffffLL, 0x0000000000000000LL},
+    { 0x0000000000000000LL, 0x0000000000bc614eULL,
+      0x0000000000000000LL, 0x00000000005e30a7ULL,
+      0x0000000000000002LL, 0x0000000000000000LL},
+    { 0x0000000000000000LL, 0x0000000000bc614eULL,
+      0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
+      0xfffffffffffffffeLL, 0x0000000000000000LL},
+    { 0x0000000000000000LL, 0x0000000000bc614eULL,
+      0x0000000000000000LL, 0x0000000000178c29ULL,
+      0x0000000000000008LL, 0x0000000000000006LL},
+    { 0x0000000000000000LL, 0x0000000000bc614eULL,
+      0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
+      0xfffffffffffffff8LL, 0x0000000000000006LL},
+    { 0x0000000000000000LL, 0x0000000000bc614eULL,
+      0x0000000000000000LL, 0x000000000000550dULL,
+      0x0000000000000237LL, 0x0000000000000183LL},
+    { 0x0000000000000000LL, 0x0000000000bc614eULL,
+      0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
+      0xfffffffffffffdc9LL, 0x0000000000000183LL},
+
+    /* Negative dividend, positive/negative divisors */
+    { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0x0000000000000001LL, 0x0000000000000000LL},
+    { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0x0000000000000000LL, 0x0000000000bc614eULL,
+      0xffffffffffffffffLL, 0x0000000000000000LL},
+    { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
+      0x0000000000000002LL, 0x0000000000000000LL},
+    { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0x0000000000000000LL, 0x00000000005e30a7ULL,
+      0xfffffffffffffffeLL, 0x0000000000000000LL},
+    { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
+      0x0000000000000008LL, 0xfffffffffffffffaLL},
+    { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0x0000000000000000LL, 0x0000000000178c29ULL,
+      0xfffffffffffffff8LL, 0xfffffffffffffffaLL},
+    { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
+      0x0000000000000237LL, 0xfffffffffffffe7dLL},
+    { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+      0x0000000000000000LL, 0x000000000000550dULL,
+      0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL},
+};
+
+static void test_divu128(void)
+{
+    int i;
+    uint64_t rem;
+    test_data_unsigned tmp;
+
+    for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) {
+        tmp = test_table_unsigned[i];
+
+        rem = divu128(&tmp.low, &tmp.high, tmp.divisor);
+        g_assert_cmpuint(tmp.low, ==, tmp.rlow);
+        g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
+        g_assert_cmpuint(rem, ==, tmp.remainder);
+    }
+}
+
+static void test_divs128(void)
+{
+    int i;
+    int64_t rem;
+    test_data_signed tmp;
+
+    for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) {
+        tmp = test_table_signed[i];
+
+        rem = divs128(&tmp.low, &tmp.high, tmp.divisor);
+        g_assert_cmpuint(tmp.low, ==, tmp.rlow);
+        g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
+        g_assert_cmpuint(rem, ==, tmp.remainder);
+    }
+}
+
+int main(int argc, char **argv)
+{
+    g_test_init(&argc, &argv, NULL);
+    g_test_add_func("/host-utils/test_divu128", test_divu128);
+    g_test_add_func("/host-utils/test_divs128", test_divs128);
+    return g_test_run();
+}
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -XXX,XX +XXX,XX @@ tests = {
   # all code tested by test-x86-cpuid is inside topology.h
   'test-x86-cpuid': [],
   'test-cutils': [],
+  'test-div128': [],
   'test-shift128': [],
   'test-mul64': [],
   # all code tested by test-int128 is inside int128.h
--
2.25.1
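To read the tables above: each row is { high, low, rhigh, rlow, divisor,
remainder }, i.e. dividend = high:low and expected quotient = rhigh:rlow.
A hedged cross-check of one unsigned row, assuming __int128 is available
(check_row is a hypothetical helper, not part of the test):

    static void check_row(const test_data_unsigned *t)
    {
        unsigned __int128 dividend =
            ((unsigned __int128)t->high << 64) | t->low;
        unsigned __int128 quotient =
            ((unsigned __int128)t->rhigh << 64) | t->rlow;

        g_assert(dividend / t->divisor == quotient);
        g_assert(dividend % t->divisor == t->remainder);
    }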
Prepare for tracking different masks by renaming this one.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 142 +++++++++++++++++++++++++------------------------
 1 file changed, 72 insertions(+), 70 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     TCGTemp *prev_copy;
     TCGTemp *next_copy;
     uint64_t val;
-    uint64_t mask;
+    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
 } TempOptInfo;

 static inline TempOptInfo *ts_info(TCGTemp *ts)
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
     ti->next_copy = ts;
     ti->prev_copy = ts;
     ti->is_const = false;
-    ti->mask = -1;
+    ti->z_mask = -1;
 }

 static void reset_temp(TCGArg arg)
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
     if (ts->kind == TEMP_CONST) {
         ti->is_const = true;
         ti->val = ts->val;
-        ti->mask = ts->val;
+        ti->z_mask = ts->val;
         if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
             /* High bits of a 32-bit quantity are garbage. */
-            ti->mask |= ~0xffffffffull;
+            ti->z_mask |= ~0xffffffffull;
         }
@@ -XXX,XX +XXX,XX @@ static bool trans_BRBC(DisasContext *ctx, arg_BRBC *a)
99
gen_goto_tb(ctx, 0, ctx->npc + a->imm);
100
gen_set_label(not_taken);
101
102
- ctx->bstate = DISAS_CHAIN;
103
+ ctx->base.is_jmp = DISAS_CHAIN;
104
return true;
105
}
106
107
@@ -XXX,XX +XXX,XX @@ static bool trans_BRBS(DisasContext *ctx, arg_BRBS *a)
108
gen_goto_tb(ctx, 0, ctx->npc + a->imm);
109
gen_set_label(not_taken);
110
111
- ctx->bstate = DISAS_CHAIN;
112
+ ctx->base.is_jmp = DISAS_CHAIN;
113
return true;
114
}
115
116
@@ -XXX,XX +XXX,XX @@ static TCGv gen_get_zaddr(void)
117
*/
118
static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
119
{
120
- if (ctx->tb->flags & TB_FLAGS_FULL_ACCESS) {
121
+ if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
122
gen_helper_fullwr(cpu_env, data, addr);
123
} else {
44
} else {
124
tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
45
ti->is_const = false;
125
@@ -XXX,XX +XXX,XX @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
46
- ti->mask = -1;
126
47
+ ti->z_mask = -1;
127
static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
128
{
129
- if (ctx->tb->flags & TB_FLAGS_FULL_ACCESS) {
130
+ if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
131
gen_helper_fullrd(data, cpu_env, addr);
132
} else {
133
tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
134
@@ -XXX,XX +XXX,XX @@ static bool trans_BREAK(DisasContext *ctx, arg_BREAK *a)
135
#ifdef BREAKPOINT_ON_BREAK
136
tcg_gen_movi_tl(cpu_pc, ctx->npc - 1);
137
gen_helper_debug(cpu_env);
138
- ctx->bstate = DISAS_EXIT;
139
+ ctx->base.is_jmp = DISAS_EXIT;
140
#else
141
/* NOP */
142
#endif
143
@@ -XXX,XX +XXX,XX @@ static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
144
static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
145
{
146
gen_helper_sleep(cpu_env);
147
- ctx->bstate = DISAS_NORETURN;
148
+ ctx->base.is_jmp = DISAS_NORETURN;
149
return true;
150
}
151
152
@@ -XXX,XX +XXX,XX @@ static void translate(DisasContext *ctx)
153
154
if (!decode_insn(ctx, opcode)) {
155
gen_helper_unsupported(cpu_env);
156
- ctx->bstate = DISAS_NORETURN;
157
+ ctx->base.is_jmp = DISAS_NORETURN;
158
}
48
}
159
}
49
}
160
50
161
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
51
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
162
{
52
const TCGOpDef *def;
163
CPUAVRState *env = cs->env_ptr;
53
TempOptInfo *di;
164
DisasContext ctx = {
54
TempOptInfo *si;
165
- .tb = tb,
55
- uint64_t mask;
166
+ .base.tb = tb,
56
+ uint64_t z_mask;
167
+ .base.is_jmp = DISAS_NEXT,
57
TCGOpcode new_op;
168
+ .base.pc_first = tb->pc,
58
169
+ .base.pc_next = tb->pc,
59
if (ts_are_copies(dst_ts, src_ts)) {
170
+ .base.singlestep_enabled = cs->singlestep_enabled,
60
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
171
.cs = cs,
61
op->args[0] = dst;
172
.env = env,
62
op->args[1] = src;
173
.memidx = 0,
63
174
- .bstate = DISAS_NEXT,
64
- mask = si->mask;
175
.skip_cond = TCG_COND_NEVER,
65
+ z_mask = si->z_mask;
176
- .singlestep = cs->singlestep_enabled,
66
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
177
};
67
/* High bits of the destination are now garbage. */
178
target_ulong pc_start = tb->pc / 2;
68
- mask |= ~0xffffffffull;
179
int num_insns = 0;
69
+ z_mask |= ~0xffffffffull;
180
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
181
*/
182
max_insns = 1;
183
}
70
}
184
- if (ctx.singlestep) {
71
- di->mask = mask;
185
+ if (ctx.base.singlestep_enabled) {
72
+ di->z_mask = z_mask;
186
max_insns = 1;
73
74
if (src_ts->type == dst_ts->type) {
75
TempOptInfo *ni = ts_info(si->next_copy);
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
187
}
77
}
188
78
189
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
79
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
190
* b main - sets breakpoint at address 0x00000100 (code)
80
- uint64_t mask, partmask, affected, tmp;
191
* b *0x100 - sets breakpoint at address 0x00800100 (data)
81
+ uint64_t z_mask, partmask, affected, tmp;
192
*/
82
int nb_oargs, nb_iargs;
193
- if (unlikely(!ctx.singlestep &&
83
TCGOpcode opc = op->opc;
194
+ if (unlikely(!ctx.base.singlestep_enabled &&
84
const TCGOpDef *def = &tcg_op_defs[opc];
195
(cpu_breakpoint_test(cs, OFFSET_CODE + ctx.npc * 2, BP_ANY) ||
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
196
cpu_breakpoint_test(cs, OFFSET_DATA + ctx.npc * 2, BP_ANY)))) {
86
197
canonicalize_skip(&ctx);
87
/* Simplify using known-zero bits. Currently only ops with a single
198
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
88
output argument is supported. */
199
if (skip_label) {
89
- mask = -1;
200
canonicalize_skip(&ctx);
90
+ z_mask = -1;
201
gen_set_label(skip_label);
91
affected = -1;
202
- if (ctx.bstate == DISAS_NORETURN) {
92
switch (opc) {
203
- ctx.bstate = DISAS_CHAIN;
93
CASE_OP_32_64(ext8s):
204
+ if (ctx.base.is_jmp == DISAS_NORETURN) {
94
- if ((arg_info(op->args[1])->mask & 0x80) != 0) {
205
+ ctx.base.is_jmp = DISAS_CHAIN;
95
+ if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
206
}
96
break;
97
}
98
QEMU_FALLTHROUGH;
99
CASE_OP_32_64(ext8u):
100
- mask = 0xff;
101
+ z_mask = 0xff;
102
goto and_const;
103
CASE_OP_32_64(ext16s):
104
- if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
105
+ if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
106
break;
107
}
108
QEMU_FALLTHROUGH;
109
CASE_OP_32_64(ext16u):
110
- mask = 0xffff;
111
+ z_mask = 0xffff;
112
goto and_const;
113
case INDEX_op_ext32s_i64:
114
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
115
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
116
break;
117
}
118
QEMU_FALLTHROUGH;
119
case INDEX_op_ext32u_i64:
120
- mask = 0xffffffffU;
121
+ z_mask = 0xffffffffU;
122
goto and_const;
123
124
CASE_OP_32_64(and):
125
- mask = arg_info(op->args[2])->mask;
126
+ z_mask = arg_info(op->args[2])->z_mask;
127
if (arg_is_const(op->args[2])) {
128
and_const:
129
- affected = arg_info(op->args[1])->mask & ~mask;
130
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
131
}
132
- mask = arg_info(op->args[1])->mask & mask;
133
+ z_mask = arg_info(op->args[1])->z_mask & z_mask;
134
break;
135
136
case INDEX_op_ext_i32_i64:
137
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
138
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
139
break;
140
}
141
QEMU_FALLTHROUGH;
142
case INDEX_op_extu_i32_i64:
143
/* We do not compute affected as it is a size changing op. */
144
- mask = (uint32_t)arg_info(op->args[1])->mask;
145
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
146
break;
147
148
CASE_OP_32_64(andc):
149
/* Known-zeros does not imply known-ones. Therefore unless
150
op->args[2] is constant, we can't infer anything from it. */
151
if (arg_is_const(op->args[2])) {
152
- mask = ~arg_info(op->args[2])->mask;
153
+ z_mask = ~arg_info(op->args[2])->z_mask;
154
goto and_const;
155
}
156
/* But we certainly know nothing outside args[1] may be set. */
157
- mask = arg_info(op->args[1])->mask;
158
+ z_mask = arg_info(op->args[1])->z_mask;
159
break;
160
161
case INDEX_op_sar_i32:
162
if (arg_is_const(op->args[2])) {
163
tmp = arg_info(op->args[2])->val & 31;
164
- mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
165
+ z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
166
}
167
break;
168
case INDEX_op_sar_i64:
169
if (arg_is_const(op->args[2])) {
170
tmp = arg_info(op->args[2])->val & 63;
171
- mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
172
+ z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
173
}
174
break;
175
176
case INDEX_op_shr_i32:
177
if (arg_is_const(op->args[2])) {
178
tmp = arg_info(op->args[2])->val & 31;
179
- mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
180
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
181
}
182
break;
183
case INDEX_op_shr_i64:
184
if (arg_is_const(op->args[2])) {
185
tmp = arg_info(op->args[2])->val & 63;
186
- mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
187
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
188
}
189
break;
190
191
case INDEX_op_extrl_i64_i32:
192
- mask = (uint32_t)arg_info(op->args[1])->mask;
193
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
194
break;
195
case INDEX_op_extrh_i64_i32:
196
- mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
197
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
198
break;
199
200
CASE_OP_32_64(shl):
201
if (arg_is_const(op->args[2])) {
202
tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
203
- mask = arg_info(op->args[1])->mask << tmp;
204
+ z_mask = arg_info(op->args[1])->z_mask << tmp;
205
}
206
break;
207
208
CASE_OP_32_64(neg):
209
/* Set to 1 all bits to the left of the rightmost. */
210
- mask = -(arg_info(op->args[1])->mask
211
- & -arg_info(op->args[1])->mask);
212
+ z_mask = -(arg_info(op->args[1])->z_mask
213
+ & -arg_info(op->args[1])->z_mask);
214
break;
215
216
CASE_OP_32_64(deposit):
217
- mask = deposit64(arg_info(op->args[1])->mask,
218
- op->args[3], op->args[4],
219
- arg_info(op->args[2])->mask);
220
+ z_mask = deposit64(arg_info(op->args[1])->z_mask,
221
+ op->args[3], op->args[4],
222
+ arg_info(op->args[2])->z_mask);
223
break;
224
225
CASE_OP_32_64(extract):
226
- mask = extract64(arg_info(op->args[1])->mask,
227
- op->args[2], op->args[3]);
228
+ z_mask = extract64(arg_info(op->args[1])->z_mask,
229
+ op->args[2], op->args[3]);
230
if (op->args[2] == 0) {
231
- affected = arg_info(op->args[1])->mask & ~mask;
232
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
233
}
234
break;
235
CASE_OP_32_64(sextract):
236
- mask = sextract64(arg_info(op->args[1])->mask,
237
- op->args[2], op->args[3]);
238
- if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
239
- affected = arg_info(op->args[1])->mask & ~mask;
240
+ z_mask = sextract64(arg_info(op->args[1])->z_mask,
241
+ op->args[2], op->args[3]);
242
+ if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
243
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
244
}
245
break;
246
247
CASE_OP_32_64(or):
248
CASE_OP_32_64(xor):
249
- mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
250
+ z_mask = arg_info(op->args[1])->z_mask
251
+ | arg_info(op->args[2])->z_mask;
252
break;
253
254
case INDEX_op_clz_i32:
255
case INDEX_op_ctz_i32:
256
- mask = arg_info(op->args[2])->mask | 31;
257
+ z_mask = arg_info(op->args[2])->z_mask | 31;
258
break;
259
260
case INDEX_op_clz_i64:
261
case INDEX_op_ctz_i64:
262
- mask = arg_info(op->args[2])->mask | 63;
263
+ z_mask = arg_info(op->args[2])->z_mask | 63;
264
break;
265
266
case INDEX_op_ctpop_i32:
267
- mask = 32 | 31;
268
+ z_mask = 32 | 31;
269
break;
270
case INDEX_op_ctpop_i64:
271
- mask = 64 | 63;
272
+ z_mask = 64 | 63;
273
break;
274
275
CASE_OP_32_64(setcond):
276
case INDEX_op_setcond2_i32:
277
- mask = 1;
278
+ z_mask = 1;
279
break;
280
281
CASE_OP_32_64(movcond):
282
- mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
283
+ z_mask = arg_info(op->args[3])->z_mask
284
+ | arg_info(op->args[4])->z_mask;
285
break;
286
287
CASE_OP_32_64(ld8u):
288
- mask = 0xff;
289
+ z_mask = 0xff;
290
break;
291
CASE_OP_32_64(ld16u):
292
- mask = 0xffff;
293
+ z_mask = 0xffff;
294
break;
295
case INDEX_op_ld32u_i64:
296
- mask = 0xffffffffu;
297
+ z_mask = 0xffffffffu;
298
break;
299
300
CASE_OP_32_64(qemu_ld):
301
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
302
MemOpIdx oi = op->args[nb_oargs + nb_iargs];
303
MemOp mop = get_memop(oi);
304
if (!(mop & MO_SIGN)) {
305
- mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
306
+ z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
307
}
308
}
309
break;
310
311
CASE_OP_32_64(bswap16):
312
- mask = arg_info(op->args[1])->mask;
313
- if (mask <= 0xffff) {
314
+ z_mask = arg_info(op->args[1])->z_mask;
315
+ if (z_mask <= 0xffff) {
316
op->args[2] |= TCG_BSWAP_IZ;
317
}
318
- mask = bswap16(mask);
319
+ z_mask = bswap16(z_mask);
320
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
321
case TCG_BSWAP_OZ:
322
break;
323
case TCG_BSWAP_OS:
324
- mask = (int16_t)mask;
325
+ z_mask = (int16_t)z_mask;
326
break;
327
default: /* undefined high bits */
328
- mask |= MAKE_64BIT_MASK(16, 48);
329
+ z_mask |= MAKE_64BIT_MASK(16, 48);
330
break;
331
}
332
break;
333
334
case INDEX_op_bswap32_i64:
335
- mask = arg_info(op->args[1])->mask;
336
- if (mask <= 0xffffffffu) {
337
+ z_mask = arg_info(op->args[1])->z_mask;
338
+ if (z_mask <= 0xffffffffu) {
339
op->args[2] |= TCG_BSWAP_IZ;
340
}
341
- mask = bswap32(mask);
342
+ z_mask = bswap32(z_mask);
343
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
344
case TCG_BSWAP_OZ:
345
break;
346
case TCG_BSWAP_OS:
347
- mask = (int32_t)mask;
348
+ z_mask = (int32_t)z_mask;
349
break;
350
default: /* undefined high bits */
351
- mask |= MAKE_64BIT_MASK(32, 32);
352
+ z_mask |= MAKE_64BIT_MASK(32, 32);
353
break;
354
}
355
break;
356
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
357
/* 32-bit ops generate 32-bit results. For the result is zero test
358
below, we can ignore high bits, but for further optimizations we
359
need to record that the high bits contain garbage. */
360
- partmask = mask;
361
+ partmask = z_mask;
362
if (!(def->flags & TCG_OPF_64BIT)) {
363
- mask |= ~(tcg_target_ulong)0xffffffffu;
364
+ z_mask |= ~(tcg_target_ulong)0xffffffffu;
365
partmask &= 0xffffffffu;
366
affected &= 0xffffffffu;
207
}
367
}
208
- } while (ctx.bstate == DISAS_NEXT
368
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
209
+ } while (ctx.base.is_jmp == DISAS_NEXT
369
vs the high word of the input. */
210
&& num_insns < max_insns
370
do_setcond_high:
211
&& (ctx.npc - pc_start) * 2 < TARGET_PAGE_SIZE - 4
371
reset_temp(op->args[0]);
212
&& !tcg_op_buf_full());
372
- arg_info(op->args[0])->mask = 1;
213
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
373
+ arg_info(op->args[0])->z_mask = 1;
214
374
op->opc = INDEX_op_setcond_i32;
215
bool nonconst_skip = canonicalize_skip(&ctx);
375
op->args[1] = op->args[2];
216
376
op->args[2] = op->args[4];
217
- switch (ctx.bstate) {
377
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
218
+ switch (ctx.base.is_jmp) {
378
}
219
case DISAS_NORETURN:
379
do_setcond_low:
220
assert(!nonconst_skip);
380
reset_temp(op->args[0]);
221
break;
381
- arg_info(op->args[0])->mask = 1;
222
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
382
+ arg_info(op->args[0])->z_mask = 1;
223
tcg_gen_movi_tl(cpu_pc, ctx.npc);
383
op->opc = INDEX_op_setcond_i32;
224
/* fall through */
384
op->args[2] = op->args[3];
225
case DISAS_LOOKUP:
385
op->args[3] = op->args[5];
226
- if (!ctx.singlestep) {
386
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
227
+ if (!ctx.base.singlestep_enabled) {
387
/* Default case: we know nothing about operation (or were unable
228
tcg_gen_lookup_and_goto_ptr();
388
to compute the operation result) so no propagation is done.
229
break;
389
We trash everything if the operation is the end of a basic
230
}
390
- block, otherwise we only trash the output args. "mask" is
231
/* fall through */
391
+ block, otherwise we only trash the output args. "z_mask" is
232
case DISAS_EXIT:
392
the non-zero bits mask for the first output arg. */
233
- if (ctx.singlestep) {
393
if (def->flags & TCG_OPF_BB_END) {
234
+ if (ctx.base.singlestep_enabled) {
394
memset(&temps_used, 0, sizeof(temps_used));
235
gen_helper_debug(cpu_env);
395
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
236
} else {
396
/* Save the corresponding known-zero bits mask for the
237
tcg_gen_exit_tb(NULL, 0);
397
first output argument (only one supported so far). */
398
if (i == 0) {
399
- arg_info(op->args[i])->mask = mask;
400
+ arg_info(op->args[i])->z_mask = z_mask;
401
}
402
}
403
}
238
--
2.25.1

Provide what will become a larger context for splitting
the very large tcg_optimize function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 77 ++++++++++++++++++++++++++------------------------
 1 file changed, 40 insertions(+), 37 deletions(-)

We can eliminate the requirement for a zero-extended output,
because the following store will ignore any garbage high bits.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-a64.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
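A small self-contained illustration of the claim in the translate-a64 patch above (the helper names here are mine, not QEMU's): once a 16-bit store truncates its operand, a byte swap that leaves garbage above bit 15 is indistinguishable from a zero-extending one, so the TCG_BSWAP_OZ requirement can be dropped:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* A 16-bit store consumes only the low 16 bits of the register. */
    static void st16(uint8_t *mem, uint64_t val)
    {
        uint16_t v = (uint16_t)val;   /* high bits discarded here */
        memcpy(mem, &v, 2);
    }

    int main(void)
    {
        uint8_t a[2], b[2];
        uint64_t x = 0x1234;

        uint64_t clean = ((x & 0xff) << 8) | ((x >> 8) & 0xff);  /* 0x3412 */
        uint64_t dirty = clean | 0xdead000000000000ull;  /* garbage above bit 15 */

        st16(a, clean);
        st16(b, dirty);
        assert(memcmp(a, b, 2) == 0);   /* the store cannot tell them apart */
        return 0;
    }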
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-a64.c
14
--- a/tcg/optimize.c
14
+++ b/target/arm/translate-a64.c
15
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void handle_rev(DisasContext *s, int opcode, bool u,
16
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
16
read_vec_element(s, tcg_tmp, rn, i, grp_size);
17
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
17
switch (grp_size) {
18
} TempOptInfo;
18
case MO_16:
19
19
- tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp,
20
+typedef struct OptContext {
20
- TCG_BSWAP_IZ | TCG_BSWAP_OZ);
21
+ TCGTempSet temps_used;
21
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
22
+} OptContext;
22
break;
23
+
23
case MO_32:
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
24
- tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp,
25
{
25
- TCG_BSWAP_IZ | TCG_BSWAP_OZ);
26
return ts->state_ptr;
26
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
27
@@ -XXX,XX +XXX,XX @@ static void reset_temp(TCGArg arg)
27
break;
28
}
28
case MO_64:
29
29
tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
30
/* Initialize and activate a temporary. */
31
-static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
32
+static void init_ts_info(OptContext *ctx, TCGTemp *ts)
33
{
34
size_t idx = temp_idx(ts);
35
TempOptInfo *ti;
36
37
- if (test_bit(idx, temps_used->l)) {
38
+ if (test_bit(idx, ctx->temps_used.l)) {
39
return;
40
}
41
- set_bit(idx, temps_used->l);
42
+ set_bit(idx, ctx->temps_used.l);
43
44
ti = ts->state_ptr;
45
if (ti == NULL) {
46
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
47
}
48
}
49
50
-static void init_arg_info(TCGTempSet *temps_used, TCGArg arg)
51
+static void init_arg_info(OptContext *ctx, TCGArg arg)
52
{
53
- init_ts_info(temps_used, arg_temp(arg));
54
+ init_ts_info(ctx, arg_temp(arg));
55
}
56
57
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
58
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
59
}
60
}
61
62
-static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
63
+static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
64
TCGOp *op, TCGArg dst, uint64_t val)
65
{
66
const TCGOpDef *def = &tcg_op_defs[op->opc];
67
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
68
69
/* Convert movi to mov with constant temp. */
70
tv = tcg_constant_internal(type, val);
71
- init_ts_info(temps_used, tv);
72
+ init_ts_info(ctx, tv);
73
tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
74
}
75
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
77
{
78
int nb_temps, nb_globals, i;
79
TCGOp *op, *op_next, *prev_mb = NULL;
80
- TCGTempSet temps_used;
81
+ OptContext ctx = {};
82
83
/* Array VALS has an element for each temp.
84
If this temp holds a constant then its value is kept in VALS' element.
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
nb_temps = s->nb_temps;
87
nb_globals = s->nb_globals;
88
89
- memset(&temps_used, 0, sizeof(temps_used));
90
for (i = 0; i < nb_temps; ++i) {
91
s->temps[i].state_ptr = NULL;
92
}
93
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
94
for (i = 0; i < nb_oargs + nb_iargs; i++) {
95
TCGTemp *ts = arg_temp(op->args[i]);
96
if (ts) {
97
- init_ts_info(&temps_used, ts);
98
+ init_ts_info(&ctx, ts);
99
}
100
}
101
} else {
102
nb_oargs = def->nb_oargs;
103
nb_iargs = def->nb_iargs;
104
for (i = 0; i < nb_oargs + nb_iargs; i++) {
105
- init_arg_info(&temps_used, op->args[i]);
106
+ init_arg_info(&ctx, op->args[i]);
107
}
108
}
109
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
CASE_OP_32_64(rotr):
112
if (arg_is_const(op->args[1])
113
&& arg_info(op->args[1])->val == 0) {
114
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
115
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
116
continue;
117
}
118
break;
119
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
120
121
if (partmask == 0) {
122
tcg_debug_assert(nb_oargs == 1);
123
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
124
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
125
continue;
126
}
127
if (affected == 0) {
128
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
129
CASE_OP_32_64(mulsh):
130
if (arg_is_const(op->args[2])
131
&& arg_info(op->args[2])->val == 0) {
132
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
133
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
134
continue;
135
}
136
break;
137
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
138
CASE_OP_32_64_VEC(sub):
139
CASE_OP_32_64_VEC(xor):
140
if (args_are_copies(op->args[1], op->args[2])) {
141
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
142
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
143
continue;
144
}
145
break;
146
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
147
if (arg_is_const(op->args[1])) {
148
tmp = arg_info(op->args[1])->val;
149
tmp = dup_const(TCGOP_VECE(op), tmp);
150
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
151
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
152
break;
153
}
154
goto do_default;
155
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
156
case INDEX_op_dup2_vec:
157
assert(TCG_TARGET_REG_BITS == 32);
158
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
159
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0],
160
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0],
161
deposit64(arg_info(op->args[1])->val, 32, 32,
162
arg_info(op->args[2])->val));
163
break;
164
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
165
case INDEX_op_extrh_i64_i32:
166
if (arg_is_const(op->args[1])) {
167
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
168
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
169
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
170
break;
171
}
172
goto do_default;
173
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
174
if (arg_is_const(op->args[1])) {
175
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
176
op->args[2]);
177
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
178
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
179
break;
180
}
181
goto do_default;
182
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
183
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
184
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
185
arg_info(op->args[2])->val);
186
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
187
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
188
break;
189
}
190
goto do_default;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
TCGArg v = arg_info(op->args[1])->val;
193
if (v != 0) {
194
tmp = do_constant_folding(opc, v, 0);
195
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
196
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
} else {
198
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
199
}
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
tmp = deposit64(arg_info(op->args[1])->val,
202
op->args[3], op->args[4],
203
arg_info(op->args[2])->val);
204
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
205
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
206
break;
207
}
208
goto do_default;
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
if (arg_is_const(op->args[1])) {
211
tmp = extract64(arg_info(op->args[1])->val,
212
op->args[2], op->args[3]);
213
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
214
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
215
break;
216
}
217
goto do_default;
218
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
219
if (arg_is_const(op->args[1])) {
220
tmp = sextract64(arg_info(op->args[1])->val,
221
op->args[2], op->args[3]);
222
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
223
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
224
break;
225
}
226
goto do_default;
227
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
228
tmp = (int32_t)(((uint32_t)v1 >> shr) |
229
((uint32_t)v2 << (32 - shr)));
230
}
231
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
232
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
233
break;
234
}
235
goto do_default;
236
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
237
tmp = do_constant_folding_cond(opc, op->args[1],
238
op->args[2], op->args[3]);
239
if (tmp != 2) {
240
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
241
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
242
break;
243
}
244
goto do_default;
245
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
246
op->args[1], op->args[2]);
247
if (tmp != 2) {
248
if (tmp) {
249
- memset(&temps_used, 0, sizeof(temps_used));
250
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
251
op->opc = INDEX_op_br;
252
op->args[0] = op->args[3];
253
} else {
254
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
255
256
rl = op->args[0];
257
rh = op->args[1];
258
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a);
259
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32));
260
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
261
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
262
break;
263
}
264
goto do_default;
265
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
266
267
rl = op->args[0];
268
rh = op->args[1];
269
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r);
270
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32));
271
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
272
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
273
break;
274
}
275
goto do_default;
276
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
277
if (tmp != 2) {
278
if (tmp) {
279
do_brcond_true:
280
- memset(&temps_used, 0, sizeof(temps_used));
281
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
282
op->opc = INDEX_op_br;
283
op->args[0] = op->args[5];
284
} else {
285
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
286
/* Simplify LT/GE comparisons vs zero to a single compare
287
vs the high word of the input. */
288
do_brcond_high:
289
- memset(&temps_used, 0, sizeof(temps_used));
290
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
op->opc = INDEX_op_brcond_i32;
292
op->args[0] = op->args[1];
293
op->args[1] = op->args[3];
294
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
295
goto do_default;
296
}
297
do_brcond_low:
298
- memset(&temps_used, 0, sizeof(temps_used));
299
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
300
op->opc = INDEX_op_brcond_i32;
301
op->args[1] = op->args[2];
302
op->args[2] = op->args[4];
303
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
304
op->args[5]);
305
if (tmp != 2) {
306
do_setcond_const:
307
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
308
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
309
} else if ((op->args[5] == TCG_COND_LT
310
|| op->args[5] == TCG_COND_GE)
311
&& arg_is_const(op->args[3])
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
313
if (!(tcg_call_flags(op)
314
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
315
for (i = 0; i < nb_globals; i++) {
316
- if (test_bit(i, temps_used.l)) {
317
+ if (test_bit(i, ctx.temps_used.l)) {
318
reset_ts(&s->temps[i]);
319
}
320
}
321
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
322
block, otherwise we only trash the output args. "z_mask" is
323
the non-zero bits mask for the first output arg. */
324
if (def->flags & TCG_OPF_BB_END) {
325
- memset(&temps_used, 0, sizeof(temps_used));
326
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
327
} else {
328
do_reset_output:
329
for (i = 0; i < nb_oargs; i++) {
30
--
2.25.1

Break the final cleanup clause out of the main switch
statement. When fully folding an opcode to mov/movi,
use "continue" to process the next opcode, else break
to fall into the final cleanup.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 190 ++++++++++++++++++++++++-------------------------
 1 file changed, 94 insertions(+), 96 deletions(-)

By removing TCG_BSWAP_IZ we indicate that the input is
not zero-extended, and thus can remove an explicit extend.
By removing TCG_BSWAP_OZ, we allow the implementation to
leave high bits set, which will be ignored by the store.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)
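For readers following the tcg/optimize.c restructuring described above, this schematic loop (not the real tcg_optimize(); the opcode values and printfs are placeholders) shows the resulting control-flow shape: fully-folded cases "continue" past the shared tail, everything else "break"s into it:

    #include <stdio.h>

    static void process(const int *ops, int n)
    {
        for (int i = 0; i < n; i++) {
            switch (ops[i]) {
            case 0:                      /* fully folded to a mov/movi */
                printf("fold %d\n", i);
                continue;                /* skip the cleanup tail */
            case 1:                      /* partially simplified */
                printf("simplify %d\n", i);
                break;                   /* fall into the cleanup tail */
            default:
                break;                   /* unknown op: cleanup only */
            }
            /* Shared tail, formerly duplicated behind a "do_default" label. */
            printf("cleanup %d\n", i);
        }
    }

    int main(void)
    {
        const int ops[] = { 0, 1, 2 };
        process(ops, 3);
        return 0;
    }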
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/tcg-op.c
16
--- a/tcg/optimize.c
15
+++ b/tcg/tcg-op.c
17
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
18
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
17
swap = tcg_temp_new_i32();
19
switch (opc) {
18
switch (memop & MO_SIZE) {
20
CASE_OP_32_64_VEC(mov):
19
case MO_16:
21
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
20
- tcg_gen_ext16u_i32(swap, val);
22
- break;
21
- tcg_gen_bswap16_i32(swap, swap, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
23
+ continue;
22
+ tcg_gen_bswap16_i32(swap, val, 0);
24
25
case INDEX_op_dup_vec:
26
if (arg_is_const(op->args[1])) {
27
tmp = arg_info(op->args[1])->val;
28
tmp = dup_const(TCGOP_VECE(op), tmp);
29
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
30
- break;
31
+ continue;
32
}
33
- goto do_default;
34
+ break;
35
36
case INDEX_op_dup2_vec:
37
assert(TCG_TARGET_REG_BITS == 32);
38
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
tcg_opt_gen_movi(s, &ctx, op, op->args[0],
40
deposit64(arg_info(op->args[1])->val, 32, 32,
41
arg_info(op->args[2])->val));
42
- break;
43
+ continue;
44
} else if (args_are_copies(op->args[1], op->args[2])) {
45
op->opc = INDEX_op_dup_vec;
46
TCGOP_VECE(op) = MO_32;
47
nb_iargs = 1;
48
}
49
- goto do_default;
50
+ break;
51
52
CASE_OP_32_64(not):
53
CASE_OP_32_64(neg):
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
if (arg_is_const(op->args[1])) {
56
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
57
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
58
- break;
59
+ continue;
60
}
61
- goto do_default;
62
+ break;
63
64
CASE_OP_32_64(bswap16):
65
CASE_OP_32_64(bswap32):
66
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
67
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
68
op->args[2]);
69
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
70
- break;
71
+ continue;
72
}
73
- goto do_default;
74
+ break;
75
76
CASE_OP_32_64(add):
77
CASE_OP_32_64(sub):
78
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
79
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
80
arg_info(op->args[2])->val);
81
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
82
- break;
83
+ continue;
84
}
85
- goto do_default;
86
+ break;
87
88
CASE_OP_32_64(clz):
89
CASE_OP_32_64(ctz):
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
} else {
92
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
93
}
94
- break;
95
+ continue;
96
}
97
- goto do_default;
98
+ break;
99
100
CASE_OP_32_64(deposit):
101
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
102
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
103
op->args[3], op->args[4],
104
arg_info(op->args[2])->val);
105
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
106
- break;
107
+ continue;
108
}
109
- goto do_default;
110
+ break;
111
112
CASE_OP_32_64(extract):
113
if (arg_is_const(op->args[1])) {
114
tmp = extract64(arg_info(op->args[1])->val,
115
op->args[2], op->args[3]);
116
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
117
- break;
118
+ continue;
119
}
120
- goto do_default;
121
+ break;
122
123
CASE_OP_32_64(sextract):
124
if (arg_is_const(op->args[1])) {
125
tmp = sextract64(arg_info(op->args[1])->val,
126
op->args[2], op->args[3]);
127
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
128
- break;
129
+ continue;
130
}
131
- goto do_default;
132
+ break;
133
134
CASE_OP_32_64(extract2):
135
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
136
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
137
((uint32_t)v2 << (32 - shr)));
138
}
139
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
140
- break;
141
+ continue;
142
}
143
- goto do_default;
144
+ break;
145
146
CASE_OP_32_64(setcond):
147
tmp = do_constant_folding_cond(opc, op->args[1],
148
op->args[2], op->args[3]);
149
if (tmp != 2) {
150
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
151
- break;
152
+ continue;
153
}
154
- goto do_default;
155
+ break;
156
157
CASE_OP_32_64(brcond):
158
tmp = do_constant_folding_cond(opc, op->args[0],
159
op->args[1], op->args[2]);
160
- if (tmp != 2) {
161
- if (tmp) {
162
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
163
- op->opc = INDEX_op_br;
164
- op->args[0] = op->args[3];
165
- } else {
166
- tcg_op_remove(s, op);
167
- }
168
+ switch (tmp) {
169
+ case 0:
170
+ tcg_op_remove(s, op);
171
+ continue;
172
+ case 1:
173
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
174
+ op->opc = opc = INDEX_op_br;
175
+ op->args[0] = op->args[3];
176
break;
177
}
178
- goto do_default;
179
+ break;
180
181
CASE_OP_32_64(movcond):
182
tmp = do_constant_folding_cond(opc, op->args[1],
183
op->args[2], op->args[5]);
184
if (tmp != 2) {
185
tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
186
- break;
187
+ continue;
188
}
189
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
190
uint64_t tv = arg_info(op->args[3])->val;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
if (fv == 1 && tv == 0) {
193
cond = tcg_invert_cond(cond);
194
} else if (!(tv == 1 && fv == 0)) {
195
- goto do_default;
196
+ break;
197
}
198
op->args[3] = cond;
199
op->opc = opc = (opc == INDEX_op_movcond_i32
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
: INDEX_op_setcond_i64);
202
nb_iargs = 2;
203
}
204
- goto do_default;
205
+ break;
206
207
case INDEX_op_add2_i32:
208
case INDEX_op_sub2_i32:
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
rh = op->args[1];
211
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
212
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
213
- break;
214
+ continue;
215
}
216
- goto do_default;
217
+ break;
218
219
case INDEX_op_mulu2_i32:
220
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
221
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
222
rh = op->args[1];
223
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
224
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
225
- break;
226
+ continue;
227
}
228
- goto do_default;
229
+ break;
230
231
case INDEX_op_brcond2_i32:
232
tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
233
op->args[4]);
234
- if (tmp != 2) {
235
- if (tmp) {
236
- do_brcond_true:
237
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
238
- op->opc = INDEX_op_br;
239
- op->args[0] = op->args[5];
240
- } else {
241
+ if (tmp == 0) {
242
do_brcond_false:
243
- tcg_op_remove(s, op);
244
- }
245
- } else if ((op->args[4] == TCG_COND_LT
246
- || op->args[4] == TCG_COND_GE)
247
- && arg_is_const(op->args[2])
248
- && arg_info(op->args[2])->val == 0
249
- && arg_is_const(op->args[3])
250
- && arg_info(op->args[3])->val == 0) {
251
+ tcg_op_remove(s, op);
252
+ continue;
253
+ }
254
+ if (tmp == 1) {
255
+ do_brcond_true:
256
+ op->opc = opc = INDEX_op_br;
257
+ op->args[0] = op->args[5];
258
+ break;
259
+ }
260
+ if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
261
+ && arg_is_const(op->args[2])
262
+ && arg_info(op->args[2])->val == 0
263
+ && arg_is_const(op->args[3])
264
+ && arg_info(op->args[3])->val == 0) {
265
/* Simplify LT/GE comparisons vs zero to a single compare
266
vs the high word of the input. */
267
do_brcond_high:
268
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
269
- op->opc = INDEX_op_brcond_i32;
270
+ op->opc = opc = INDEX_op_brcond_i32;
271
op->args[0] = op->args[1];
272
op->args[1] = op->args[3];
273
op->args[2] = op->args[4];
274
op->args[3] = op->args[5];
275
- } else if (op->args[4] == TCG_COND_EQ) {
276
+ break;
277
+ }
278
+ if (op->args[4] == TCG_COND_EQ) {
279
/* Simplify EQ comparisons where one of the pairs
280
can be simplified. */
281
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
282
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
283
if (tmp == 0) {
284
goto do_brcond_false;
285
} else if (tmp != 1) {
286
- goto do_default;
287
+ break;
288
}
289
do_brcond_low:
290
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
292
op->args[1] = op->args[2];
293
op->args[2] = op->args[4];
294
op->args[3] = op->args[5];
295
- } else if (op->args[4] == TCG_COND_NE) {
296
+ break;
297
+ }
298
+ if (op->args[4] == TCG_COND_NE) {
299
/* Simplify NE comparisons where one of the pairs
300
can be simplified. */
301
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
302
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
303
} else if (tmp == 1) {
304
goto do_brcond_true;
305
}
306
- goto do_default;
307
- } else {
308
- goto do_default;
309
}
23
break;
310
break;
24
case MO_32:
311
25
tcg_gen_bswap32_i32(swap, val);
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
26
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
313
if (tmp != 2) {
27
swap = tcg_temp_new_i64();
314
do_setcond_const:
28
switch (memop & MO_SIZE) {
315
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
29
case MO_16:
316
- } else if ((op->args[5] == TCG_COND_LT
30
- tcg_gen_ext16u_i64(swap, val);
317
- || op->args[5] == TCG_COND_GE)
31
- tcg_gen_bswap16_i64(swap, swap, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
318
- && arg_is_const(op->args[3])
32
+ tcg_gen_bswap16_i64(swap, val, 0);
319
- && arg_info(op->args[3])->val == 0
320
- && arg_is_const(op->args[4])
321
- && arg_info(op->args[4])->val == 0) {
322
+ continue;
323
+ }
324
+ if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
325
+ && arg_is_const(op->args[3])
326
+ && arg_info(op->args[3])->val == 0
327
+ && arg_is_const(op->args[4])
328
+ && arg_info(op->args[4])->val == 0) {
329
/* Simplify LT/GE comparisons vs zero to a single compare
330
vs the high word of the input. */
331
do_setcond_high:
332
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
333
op->args[1] = op->args[2];
334
op->args[2] = op->args[4];
335
op->args[3] = op->args[5];
336
- } else if (op->args[5] == TCG_COND_EQ) {
337
+ break;
338
+ }
339
+ if (op->args[5] == TCG_COND_EQ) {
340
/* Simplify EQ comparisons where one of the pairs
341
can be simplified. */
342
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
343
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
344
if (tmp == 0) {
345
goto do_setcond_high;
346
} else if (tmp != 1) {
347
- goto do_default;
348
+ break;
349
}
350
do_setcond_low:
351
reset_temp(op->args[0]);
352
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
353
op->opc = INDEX_op_setcond_i32;
354
op->args[2] = op->args[3];
355
op->args[3] = op->args[5];
356
- } else if (op->args[5] == TCG_COND_NE) {
357
+ break;
358
+ }
359
+ if (op->args[5] == TCG_COND_NE) {
360
/* Simplify NE comparisons where one of the pairs
361
can be simplified. */
362
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
363
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
364
} else if (tmp == 1) {
365
goto do_setcond_const;
366
}
367
- goto do_default;
368
- } else {
369
- goto do_default;
370
}
33
break;
371
break;
34
case MO_32:
372
35
- tcg_gen_ext32u_i64(swap, val);
373
- case INDEX_op_call:
36
- tcg_gen_bswap32_i64(swap, swap, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
374
- if (!(tcg_call_flags(op)
37
+ tcg_gen_bswap32_i64(swap, val, 0);
375
+ default:
38
break;
376
+ break;
39
case MO_64:
377
+ }
40
tcg_gen_bswap64_i64(swap, val);
378
+
379
+ /* Some of the folding above can change opc. */
380
+ opc = op->opc;
381
+ def = &tcg_op_defs[opc];
382
+ if (def->flags & TCG_OPF_BB_END) {
383
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
384
+ } else {
385
+ if (opc == INDEX_op_call &&
386
+ !(tcg_call_flags(op)
387
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
388
for (i = 0; i < nb_globals; i++) {
389
if (test_bit(i, ctx.temps_used.l)) {
390
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
391
}
392
}
393
}
394
- goto do_reset_output;
395
396
- default:
397
- do_default:
398
- /* Default case: we know nothing about operation (or were unable
399
- to compute the operation result) so no propagation is done.
400
- We trash everything if the operation is the end of a basic
401
- block, otherwise we only trash the output args. "z_mask" is
402
- the non-zero bits mask for the first output arg. */
403
- if (def->flags & TCG_OPF_BB_END) {
404
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
405
- } else {
406
- do_reset_output:
407
- for (i = 0; i < nb_oargs; i++) {
408
- reset_temp(op->args[i]);
409
- /* Save the corresponding known-zero bits mask for the
410
- first output argument (only one supported so far). */
411
- if (i == 0) {
412
- arg_info(op->args[i])->z_mask = z_mask;
413
- }
414
+ for (i = 0; i < nb_oargs; i++) {
415
+ reset_temp(op->args[i]);
416
+ /* Save the corresponding known-zero bits mask for the
417
+ first output argument (only one supported so far). */
418
+ if (i == 0) {
419
+ arg_info(op->args[i])->z_mask = z_mask;
420
}
421
}
422
- break;
423
}
424
425
/* Eliminate duplicate and redundant fence instructions. */
41
--
2.25.1

Adjust the interface to take the OptContext parameter instead
of TCGContext or both.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 67 +++++++++++++++++++++++++-------------------------
 1 file changed, 34 insertions(+), 33 deletions(-)

Combine the three bswap16 routines, and differentiate via the flags.
Use the correct flags combination from the load/store routines, and
pass along the constant parameter from tcg_out_op.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.c.inc | 101 ++++++++++++++++++++++++---------------
 1 file changed, 63 insertions(+), 38 deletions(-)
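Schematically, the OptContext interface change above looks like this sketch (stand-in types; only the parameter-passing pattern is the point): helpers that previously took a TCGContext, or both, now take the OptContext alone and recover the TCGContext from its tcg member where still needed:

    typedef struct TCGContext { int dummy; } TCGContext;   /* stand-in */

    typedef struct OptContext {
        TCGContext *tcg;           /* the enclosing TCGContext */
        /* ... temps_used and other per-pass state ... */
    } OptContext;

    /* Before: static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, ...);
     * After:  one context argument carries everything. */
    static void opt_gen_mov(OptContext *ctx)
    {
        TCGContext *s = ctx->tcg;  /* recovered where still needed */
        (void)s;
    }

    int main(void)
    {
        TCGContext tcg = { 0 };
        OptContext ctx = { &tcg };
        opt_gen_mov(&ctx);
        return 0;
    }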
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/arm/tcg-target.c.inc
13
--- a/tcg/optimize.c
14
+++ b/tcg/arm/tcg-target.c.inc
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_ext16u(TCGContext *s, int cond,
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
16
} TempOptInfo;
17
18
typedef struct OptContext {
19
+ TCGContext *tcg;
20
TCGTempSet temps_used;
21
} OptContext;
22
23
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
24
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
25
}
26
27
-static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
28
+static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
{
30
TCGTemp *dst_ts = arg_temp(dst);
31
TCGTemp *src_ts = arg_temp(src);
32
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
33
TCGOpcode new_op;
34
35
if (ts_are_copies(dst_ts, src_ts)) {
36
- tcg_op_remove(s, op);
37
+ tcg_op_remove(ctx->tcg, op);
38
return;
39
}
40
41
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
16
}
42
}
17
}
43
}
18
44
19
-static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
45
-static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
20
+static void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn, int flags)
46
- TCGOp *op, TCGArg dst, uint64_t val)
47
+static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
48
+ TCGArg dst, uint64_t val)
21
{
49
{
22
if (use_armv6_instructions) {
50
const TCGOpDef *def = &tcg_op_defs[op->opc];
23
- /* revsh */
51
TCGType type;
24
- tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
52
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
25
- } else {
53
/* Convert movi to mov with constant temp. */
26
- tcg_out_dat_reg(s, cond, ARITH_MOV,
54
tv = tcg_constant_internal(type, val);
27
- TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
55
init_ts_info(ctx, tv);
28
- tcg_out_dat_reg(s, cond, ARITH_MOV,
56
- tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
29
- TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
57
+ tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
30
- tcg_out_dat_reg(s, cond, ARITH_ORR,
31
- rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
32
- }
33
-}
34
+ if (flags & TCG_BSWAP_OS) {
35
+ /* revsh */
36
+ tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
37
+ return;
38
+ }
39
40
-static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
41
-{
42
- if (use_armv6_instructions) {
43
/* rev16 */
44
tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
45
- } else {
46
- tcg_out_dat_reg(s, cond, ARITH_MOV,
47
- TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
48
- tcg_out_dat_reg(s, cond, ARITH_MOV,
49
- TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
50
- tcg_out_dat_reg(s, cond, ARITH_ORR,
51
- rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
52
+ if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
53
+ /* uxth */
54
+ tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
55
+ }
56
+ return;
57
}
58
-}
59
60
-/* swap the two low bytes assuming that the two high input bytes and the
61
- two high output bit can hold any value. */
62
-static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
63
-{
64
- if (use_armv6_instructions) {
65
- /* rev16 */
66
- tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
67
- } else {
68
+ if (flags == 0) {
69
+ /*
70
+ * For stores, no input or output extension:
71
+ * rn = xxAB
72
+ * lsr tmp, rn, #8 tmp = 0xxA
73
+ * and tmp, tmp, #0xff tmp = 000A
74
+ * orr rd, tmp, rn, lsl #8 rd = xABA
75
+ */
76
tcg_out_dat_reg(s, cond, ARITH_MOV,
77
TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
78
tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
79
tcg_out_dat_reg(s, cond, ARITH_ORR,
80
rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
81
+ return;
82
}
83
+
84
+ /*
85
+ * Byte swap, leaving the result at the top of the register.
86
+ * We will then shift down, zero or sign-extending.
87
+ */
88
+ if (flags & TCG_BSWAP_IZ) {
89
+ /*
90
+ * rn = 00AB
91
+ * ror tmp, rn, #8 tmp = B00A
92
+ * orr tmp, tmp, tmp, lsl #16 tmp = BA00
93
+ */
94
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
95
+ TCG_REG_TMP, 0, rn, SHIFT_IMM_ROR(8));
96
+ tcg_out_dat_reg(s, cond, ARITH_ORR,
97
+ TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP,
98
+ SHIFT_IMM_LSL(16));
99
+ } else {
100
+ /*
101
+ * rn = xxAB
102
+ * and tmp, rn, #0xff00 tmp = 00A0
103
+ * lsl tmp, tmp, #8 tmp = 0A00
104
+ * orr tmp, tmp, rn, lsl #24 tmp = BA00
105
+ */
106
+ tcg_out_dat_rI(s, cond, ARITH_AND, TCG_REG_TMP, rn, 0xff00, 1);
107
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
108
+ TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSL(8));
109
+ tcg_out_dat_reg(s, cond, ARITH_ORR,
110
+ TCG_REG_TMP, TCG_REG_TMP, rn, SHIFT_IMM_LSL(24));
111
+ }
112
+ tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, TCG_REG_TMP,
113
+ (flags & TCG_BSWAP_OS
114
+ ? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8)));
115
}
58
}
116
59
117
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
60
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
118
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
119
case MO_UW:
62
{
120
tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
63
int nb_temps, nb_globals, i;
121
if (bswap) {
64
TCGOp *op, *op_next, *prev_mb = NULL;
122
- tcg_out_bswap16(s, COND_AL, datalo, datalo);
65
- OptContext ctx = {};
123
+ tcg_out_bswap16(s, COND_AL, datalo, datalo,
66
+ OptContext ctx = { .tcg = s };
124
+ TCG_BSWAP_IZ | TCG_BSWAP_OZ);
67
68
/* Array VALS has an element for each temp.
69
If this temp holds a constant then its value is kept in VALS' element.
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
CASE_OP_32_64(rotr):
72
if (arg_is_const(op->args[1])
73
             && arg_info(op->args[1])->val == 0) {
-            tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+            tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
             continue;
         }
         break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         if (!arg_is_const(op->args[1])
             && arg_is_const(op->args[2])
             && arg_info(op->args[2])->val == 0) {
-            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+            tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
             continue;
         }
         break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         if (!arg_is_const(op->args[1])
             && arg_is_const(op->args[2])
             && arg_info(op->args[2])->val == -1) {
-            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+            tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
             continue;
         }
         break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

         if (partmask == 0) {
             tcg_debug_assert(nb_oargs == 1);
-            tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+            tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
             continue;
         }
         if (affected == 0) {
             tcg_debug_assert(nb_oargs == 1);
-            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+            tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
             continue;
         }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(mulsh):
             if (arg_is_const(op->args[2])
                 && arg_info(op->args[2])->val == 0) {
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(or):
         CASE_OP_32_64_VEC(and):
             if (args_are_copies(op->args[1], op->args[2])) {
-                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(sub):
         CASE_OP_32_64_VEC(xor):
             if (args_are_copies(op->args[1], op->args[2])) {
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        allocator where needed and possible.  Also detect copies. */
         switch (opc) {
         CASE_OP_32_64_VEC(mov):
-            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+            tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
             continue;

         case INDEX_op_dup_vec:
             if (arg_is_const(op->args[1])) {
                 tmp = arg_info(op->args[1])->val;
                 tmp = dup_const(TCGOP_VECE(op), tmp);
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_dup2_vec:
             assert(TCG_TARGET_REG_BITS == 32);
             if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0],
+                tcg_opt_gen_movi(&ctx, op, op->args[0],
                                  deposit64(arg_info(op->args[1])->val, 32, 32,
                                            arg_info(op->args[2])->val));
                 continue;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_extrh_i64_i32:
             if (arg_is_const(op->args[1])) {
                 tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (arg_is_const(op->args[1])) {
                 tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
                                           op->args[2]);
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                 tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
                                           arg_info(op->args[2])->val);
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                 TCGArg v = arg_info(op->args[1])->val;
                 if (v != 0) {
                     tmp = do_constant_folding(opc, v, 0);
-                    tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                    tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 } else {
-                    tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
+                    tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
                 }
                 continue;
             }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                 tmp = deposit64(arg_info(op->args[1])->val,
                                 op->args[3], op->args[4],
                                 arg_info(op->args[2])->val);
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (arg_is_const(op->args[1])) {
                 tmp = extract64(arg_info(op->args[1])->val,
                                 op->args[2], op->args[3]);
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (arg_is_const(op->args[1])) {
                 tmp = sextract64(arg_info(op->args[1])->val,
                                  op->args[2], op->args[3]);
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                     tmp = (int32_t)(((uint32_t)v1 >> shr) |
                                     ((uint32_t)v2 << (32 - shr)));
                 }
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             tmp = do_constant_folding_cond(opc, op->args[1],
                                            op->args[2], op->args[3]);
             if (tmp != 2) {
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             tmp = do_constant_folding_cond(opc, op->args[1],
                                            op->args[2], op->args[5]);
             if (tmp != 2) {
-                tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
+                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
                 continue;
             }
             if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

                 rl = op->args[0];
                 rh = op->args[1];
-                tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
-                tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
+                tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
+                tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

                 rl = op->args[0];
                 rh = op->args[1];
-                tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
-                tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
+                tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
+                tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
                 continue;
             }
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                                             op->args[5]);
             if (tmp != 2) {
             do_setcond_const:
-                tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
                 continue;
             }
             if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
-- 
2.25.1


@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
             }
             break;
         case MO_SW:
             if (bswap) {
                 tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
-                tcg_out_bswap16s(s, COND_AL, datalo, datalo);
+                tcg_out_bswap16(s, COND_AL, datalo, datalo,
+                                TCG_BSWAP_IZ | TCG_BSWAP_OS);
             } else {
                 tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
             }
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
         case MO_UW:
             tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
             if (bswap) {
-                tcg_out_bswap16(s, COND_AL, datalo, datalo);
+                tcg_out_bswap16(s, COND_AL, datalo, datalo,
+                                TCG_BSWAP_IZ | TCG_BSWAP_OZ);
             }
             break;
         case MO_SW:
             if (bswap) {
                 tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
-                tcg_out_bswap16s(s, COND_AL, datalo, datalo);
+                tcg_out_bswap16(s, COND_AL, datalo, datalo,
+                                TCG_BSWAP_IZ | TCG_BSWAP_OS);
             } else {
                 tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
             }
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
         case MO_16:
             if (bswap) {
-                tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
+                tcg_out_bswap16(s, cond, TCG_REG_R0, datalo, 0);
                 tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
             } else {
                 tcg_out_st16_r(s, cond, datalo, addrlo, addend);
             }
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
         case MO_16:
             if (bswap) {
-                tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
+                tcg_out_bswap16(s, COND_AL, TCG_REG_R0, datalo, 0);
                 tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
             } else {
                 tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
             }
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;

     case INDEX_op_bswap16_i32:
-        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
+        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
         break;
     case INDEX_op_bswap32_i32:
         tcg_out_bswap32(s, COND_AL, args[0], args[1]);
         break;
-- 
2.25.1
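As a rough illustration of the interface change folded through the hunks above -- tcg_opt_gen_mov/movi dropping the separate TCGContext argument in favor of a single OptContext -- here is the general shape of the pattern in plain C. The types and names below are simplified stand-ins, not the QEMU definitions:

    #include <stdio.h>

    typedef struct TCGContext { int nb_temps; } TCGContext;

    /* Everything a helper needs is reachable from one pointer. */
    typedef struct OptContext {
        TCGContext *tcg;
    } OptContext;

    /* Before: helper(TCGContext *s, OptContext *ctx, ...);
       after:  helper(OptContext *ctx, ...). */
    static void do_movi(OptContext *ctx, int dst, long val)
    {
        printf("movi t%d <- %ld (temps=%d)\n", dst, val, ctx->tcg->nb_temps);
    }

    int main(void)
    {
        TCGContext s = { .nb_temps = 8 };
        OptContext ctx = { .tcg = &s };
        do_movi(&ctx, 0, 42);
        return 0;
    }

The point of the design is mechanical: once all per-pass state lives behind the context pointer, later patches can break pieces of the big loop out into subroutines without growing their parameter lists.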
Migrate the is_jmp, tb and singlestep_enabled fields
from DisasContext into the base.

Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/cris/translate.c         | 49 +++++++++++++++++----------------
 target/cris/translate_v10.c.inc |  4 +--
 2 files changed, 27 insertions(+), 26 deletions(-)

diff --git a/target/cris/translate.c b/target/cris/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv env_pc;

 /* This is the state at translation time.  */
 typedef struct DisasContext {
+    DisasContextBase base;
+
     CRISCPU *cpu;
     target_ulong pc, ppc;
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     int clear_locked_irq; /* Clear the irq lockout.  */
     int cpustate_changed;
     unsigned int tb_flags; /* tb dependent flags.  */
-    int is_jmp;

 #define JMP_NOJMP     0
 #define JMP_DIRECT    1
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     uint32_t jmp_pc;

     int delayed_branch;
-
-    TranslationBlock *tb;
-    int singlestep_enabled;
 } DisasContext;

 static void gen_BUG(DisasContext *dc, const char *file, int line)
@@ -XXX,XX +XXX,XX @@ static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
 {
 #ifndef CONFIG_USER_ONLY
-    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+    return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
            (dc->ppc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
 #else
     return true;
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
     if (use_goto_tb(dc, dest)) {
         tcg_gen_goto_tb(n);
         tcg_gen_movi_tl(env_pc, dest);
-        tcg_gen_exit_tb(dc->tb, n);
+        tcg_gen_exit_tb(dc->base.tb, n);
     } else {
         tcg_gen_movi_tl(env_pc, dest);
         tcg_gen_exit_tb(NULL, 0);
@@ -XXX,XX +XXX,XX @@ static int dec_setclrf(CPUCRISState *env, DisasContext *dc)
     /* Break the TB if any of the SPI flag changes.  */
     if (flags & (P_FLAG | S_FLAG)) {
         tcg_gen_movi_tl(env_pc, dc->pc + 2);
-        dc->is_jmp = DISAS_UPDATE;
+        dc->base.is_jmp = DISAS_UPDATE;
         dc->cpustate_changed = 1;
     }

     /* For the I flag, only act on posedge.  */
     if ((flags & I_FLAG)) {
         tcg_gen_movi_tl(env_pc, dc->pc + 2);
-        dc->is_jmp = DISAS_UPDATE;
+        dc->base.is_jmp = DISAS_UPDATE;
         dc->cpustate_changed = 1;
     }
@@ -XXX,XX +XXX,XX @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
         LOG_DIS("rfe\n");
         cris_evaluate_flags(dc);
         gen_helper_rfe(cpu_env);
-        dc->is_jmp = DISAS_UPDATE;
+        dc->base.is_jmp = DISAS_UPDATE;
         break;
     case 5:
         /* rfn.  */
         LOG_DIS("rfn\n");
         cris_evaluate_flags(dc);
         gen_helper_rfn(cpu_env);
-        dc->is_jmp = DISAS_UPDATE;
+        dc->base.is_jmp = DISAS_UPDATE;
         break;
     case 6:
         LOG_DIS("break %d\n", dc->op1);
@@ -XXX,XX +XXX,XX @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
         /* Breaks start at 16 in the exception vector.  */
         t_gen_movi_env_TN(trap_vector, dc->op1 + 16);
         t_gen_raise_exception(EXCP_BREAK);
-        dc->is_jmp = DISAS_UPDATE;
+        dc->base.is_jmp = DISAS_UPDATE;
         break;
     default:
         printf("op2=%x\n", dc->op2);
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
      * delayslot, like in real hw.
      */
     pc_start = tb->pc & ~1;
-    dc->cpu = env_archcpu(env);
-    dc->tb = tb;

-    dc->is_jmp = DISAS_NEXT;
+    dc->base.tb = tb;
+    dc->base.pc_first = pc_start;
+    dc->base.pc_next = pc_start;
+    dc->base.is_jmp = DISAS_NEXT;
+    dc->base.singlestep_enabled = cs->singlestep_enabled;
+
+    dc->cpu = env_archcpu(env);
     dc->ppc = pc_start;
     dc->pc = pc_start;
-    dc->singlestep_enabled = cs->singlestep_enabled;
     dc->flags_uptodate = 1;
     dc->flagx_known = 1;
     dc->flags_x = tb->flags & X_FLAG;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
             cris_evaluate_flags(dc);
             tcg_gen_movi_tl(env_pc, dc->pc);
             t_gen_raise_exception(EXCP_DEBUG);
-            dc->is_jmp = DISAS_UPDATE;
+            dc->base.is_jmp = DISAS_UPDATE;
             /* The address covered by the breakpoint must be included in
                [tb->pc, tb->pc + tb->size) in order to for it to be
                properly cleared -- thus we increment the PC here so that
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
                 gen_goto_tb(dc, 1, dc->jmp_pc);
                 gen_set_label(l1);
                 gen_goto_tb(dc, 0, dc->pc);
-                dc->is_jmp = DISAS_TB_JUMP;
+                dc->base.is_jmp = DISAS_TB_JUMP;
                 dc->jmp = JMP_NOJMP;
             } else if (dc->jmp == JMP_DIRECT) {
                 cris_evaluate_flags(dc);
                 gen_goto_tb(dc, 0, dc->jmp_pc);
-                dc->is_jmp = DISAS_TB_JUMP;
+                dc->base.is_jmp = DISAS_TB_JUMP;
                 dc->jmp = JMP_NOJMP;
             } else {
                 TCGv c = tcg_const_tl(dc->pc);
                 t_gen_cc_jmp(env_btarget, c);
                 tcg_temp_free(c);
-                dc->is_jmp = DISAS_JUMP;
+                dc->base.is_jmp = DISAS_JUMP;
             }
             break;
         }
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
         if (!(tb->pc & 1) && cs->singlestep_enabled) {
             break;
         }
-    } while (!dc->is_jmp && !dc->cpustate_changed
+    } while (!dc->base.is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
     npc = dc->pc;

     /* Force an update if the per-tb cpu state has changed.  */
-    if (dc->is_jmp == DISAS_NEXT
+    if (dc->base.is_jmp == DISAS_NEXT
         && (dc->cpustate_changed || !dc->flagx_known
             || (dc->flags_x != (tb->flags & X_FLAG)))) {
-        dc->is_jmp = DISAS_UPDATE;
+        dc->base.is_jmp = DISAS_UPDATE;
         tcg_gen_movi_tl(env_pc, npc);
     }
     /* Broken branch+delayslot sequence.  */
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
     cris_evaluate_flags(dc);

     if (unlikely(cs->singlestep_enabled)) {
-        if (dc->is_jmp == DISAS_NEXT) {
+        if (dc->base.is_jmp == DISAS_NEXT) {
             tcg_gen_movi_tl(env_pc, npc);
         }
         t_gen_raise_exception(EXCP_DEBUG);
     } else {
-        switch (dc->is_jmp) {
+        switch (dc->base.is_jmp) {
         case DISAS_NEXT:
             gen_goto_tb(dc, 1, npc);
             break;
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate_v10.c.inc
+++ b/target/cris/translate_v10.c.inc
@@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
                 t_gen_mov_env_TN(trap_vector, c);
                 tcg_temp_free(c);
                 t_gen_raise_exception(EXCP_BREAK);
-                dc->is_jmp = DISAS_UPDATE;
+                dc->base.is_jmp = DISAS_UPDATE;
                 return insn_len;
             }
             LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size,
@@ -XXX,XX +XXX,XX @@ static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
         if (dc->clear_prefix && dc->tb_flags & PFIX_FLAG) {
             dc->tb_flags &= ~PFIX_FLAG;
             tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~PFIX_FLAG);
-            if (dc->tb_flags != dc->tb->flags) {
+            if (dc->tb_flags != dc->base.tb->flags) {
                 dc->cpustate_changed = 1;
             }
         }
-- 
2.25.1


This will expose the variable to subroutines that
will be broken out of tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {

 typedef struct OptContext {
     TCGContext *tcg;
+    TCGOp *prev_mb;
+
     TCGTempSet temps_used;
 } OptContext;

@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
 void tcg_optimize(TCGContext *s)
 {
     int nb_temps, nb_globals, i;
-    TCGOp *op, *op_next, *prev_mb = NULL;
+    TCGOp *op, *op_next;
     OptContext ctx = { .tcg = s };

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }

         /* Eliminate duplicate and redundant fence instructions.  */
-        if (prev_mb) {
+        if (ctx.prev_mb) {
             switch (opc) {
             case INDEX_op_mb:
                 /* Merge two barriers of the same type into one,
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
                  * barrier.  This is stricter than specified but for
                  * the purposes of TCG is better than not optimizing.
                  */
-                prev_mb->args[0] |= op->args[0];
+                ctx.prev_mb->args[0] |= op->args[0];
                 tcg_op_remove(s, op);
                 break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             case INDEX_op_qemu_st_i64:
             case INDEX_op_call:
                 /* Opcodes that touch guest memory stop the optimization.  */
-                prev_mb = NULL;
+                ctx.prev_mb = NULL;
                 break;
             }
         } else if (opc == INDEX_op_mb) {
-            prev_mb = op;
+            ctx.prev_mb = op;
         }
-- 
2.25.1
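For context, the fence elimination that prev_mb feeds merges adjacent barriers by taking the union of their type masks, and gives up whenever an op that can touch guest memory intervenes. A rough stand-alone sketch of that idea, with a hypothetical op representation rather than real TCGOps:

    #include <stdio.h>

    struct op { int is_mb; int mb_type; };

    static void merge_fences(struct op *ops, int n)
    {
        struct op *prev_mb = NULL;

        for (int i = 0; i < n; i++) {
            if (!ops[i].is_mb) {
                prev_mb = NULL;                      /* ld/st/call: stop merging */
            } else if (prev_mb) {
                prev_mb->mb_type |= ops[i].mb_type;  /* union of barrier types */
                ops[i].is_mb = 0;                    /* drop the redundant mb */
            } else {
                prev_mb = &ops[i];
            }
        }
    }

    int main(void)
    {
        struct op ops[] = { {1, 1}, {1, 2}, {0, 0}, {1, 1} };
        merge_fences(ops, 4);
        printf("first barrier type mask: %d\n", ops[0].mb_type); /* prints 3 */
        return 0;
    }

Hoisting the pointer into OptContext changes none of this logic; it only lets the merging live in a helper once tcg_optimize is split apart.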
Pass in the input and output size.  We currently use 3 of the 5
possible combinations; the others may be used by new tcg opcodes.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 42 ++++++++++++++----------------------
 1 file changed, 16 insertions(+), 26 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
     /* Data-processing (1 source) instructions.  */
     I3507_CLZ       = 0x5ac01000,
     I3507_RBIT      = 0x5ac00000,
-    I3507_REV16     = 0x5ac00400,
-    I3507_REV32     = 0x5ac00800,
-    I3507_REV64     = 0x5ac00c00,
+    I3507_REV       = 0x5ac00000, /* + size << 10 */

     /* Data-processing (2 source) instructions.  */
     I3508_LSLV      = 0x1ac02000,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
     }
 }

-static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn)
+static inline void tcg_out_rev(TCGContext *s, int ext, MemOp s_bits,
+                               TCGReg rd, TCGReg rn)
 {
-    tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn);
-}
-
-static inline void tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn)
-{
-    tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn);
-}
-
-static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn)
-{
-    tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn);
+    /* REV, REV16, REV32 */
+    tcg_out_insn_3507(s, I3507_REV | (s_bits << 10), ext, rd, rn);
 }

 static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
     case MO_UW:
         tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
         if (bswap) {
-            tcg_out_rev16(s, data_r, data_r);
+            tcg_out_rev(s, TCG_TYPE_I32, MO_16, data_r, data_r);
         }
         break;
     case MO_SW:
         if (bswap) {
             tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
-            tcg_out_rev16(s, data_r, data_r);
+            tcg_out_rev(s, TCG_TYPE_I32, MO_16, data_r, data_r);
             tcg_out_sxt(s, ext, MO_16, data_r, data_r);
         } else {
             tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
     case MO_UL:
         tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
         if (bswap) {
-            tcg_out_rev32(s, data_r, data_r);
+            tcg_out_rev(s, TCG_TYPE_I32, MO_32, data_r, data_r);
         }
         break;
     case MO_SL:
         if (bswap) {
             tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
-            tcg_out_rev32(s, data_r, data_r);
+            tcg_out_rev(s, TCG_TYPE_I32, MO_32, data_r, data_r);
             tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r);
         } else {
             tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
     case MO_Q:
         tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
         if (bswap) {
-            tcg_out_rev64(s, data_r, data_r);
+            tcg_out_rev(s, TCG_TYPE_I64, MO_64, data_r, data_r);
         }
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
         break;
     case MO_16:
         if (bswap && data_r != TCG_REG_XZR) {
-            tcg_out_rev16(s, TCG_REG_TMP, data_r);
+            tcg_out_rev(s, TCG_TYPE_I32, MO_16, TCG_REG_TMP, data_r);
             data_r = TCG_REG_TMP;
         }
         tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
         break;
     case MO_32:
         if (bswap && data_r != TCG_REG_XZR) {
-            tcg_out_rev32(s, TCG_REG_TMP, data_r);
+            tcg_out_rev(s, TCG_TYPE_I32, MO_32, TCG_REG_TMP, data_r);
             data_r = TCG_REG_TMP;
         }
         tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
         break;
     case MO_64:
         if (bswap && data_r != TCG_REG_XZR) {
-            tcg_out_rev64(s, TCG_REG_TMP, data_r);
+            tcg_out_rev(s, TCG_TYPE_I64, MO_64, TCG_REG_TMP, data_r);
             data_r = TCG_REG_TMP;
         }
         tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;

     case INDEX_op_bswap64_i64:
-        tcg_out_rev64(s, a0, a1);
+        tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
         break;
     case INDEX_op_bswap32_i64:
     case INDEX_op_bswap32_i32:
-        tcg_out_rev32(s, a0, a1);
+        tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
         break;
     case INDEX_op_bswap16_i64:
     case INDEX_op_bswap16_i32:
-        tcg_out_rev16(s, a0, a1);
+        tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
         break;

     case INDEX_op_ext8s_i64:
-- 
2.25.1


There was no real reason for calls to have separate code here.
Unify init for calls vs non-calls using the call path, which
handles TCG_CALL_DUMMY_ARG.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
     }
 }

-static void init_arg_info(OptContext *ctx, TCGArg arg)
-{
-    init_ts_info(ctx, arg_temp(arg));
-}
-
 static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
 {
     TCGTemp *i, *g, *l;
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
     return false;
 }

+static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
+{
+    for (int i = 0; i < nb_args; i++) {
+        TCGTemp *ts = arg_temp(op->args[i]);
+        if (ts) {
+            init_ts_info(ctx, ts);
+        }
+    }
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         if (opc == INDEX_op_call) {
             nb_oargs = TCGOP_CALLO(op);
             nb_iargs = TCGOP_CALLI(op);
-            for (i = 0; i < nb_oargs + nb_iargs; i++) {
-                TCGTemp *ts = arg_temp(op->args[i]);
-                if (ts) {
-                    init_ts_info(&ctx, ts);
-                }
-            }
         } else {
             nb_oargs = def->nb_oargs;
             nb_iargs = def->nb_iargs;
-            for (i = 0; i < nb_oargs + nb_iargs; i++) {
-                init_arg_info(&ctx, op->args[i]);
-            }
         }
+        init_arguments(&ctx, op, nb_oargs + nb_iargs);

         /* Do copy propagation */
         for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
-- 
2.25.1
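The aarch64 merge above works because the three byte-reverse opcodes differ only in a two-bit size field at bit 10, with the size values matching QEMU's MemOp (MO_16 = 1, MO_32 = 2, MO_64 = 3). A few lines of plain C, using only the constants visible in the hunk, confirm the encoding arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t I3507_REV = 0x5ac00000;
        enum { MO_16 = 1, MO_32 = 2, MO_64 = 3 };

        assert((I3507_REV | (MO_16 << 10)) == 0x5ac00400);  /* REV16 */
        assert((I3507_REV | (MO_32 << 10)) == 0x5ac00800);  /* REV32 */
        assert((I3507_REV | (MO_64 << 10)) == 0x5ac00c00);  /* REV64 */
        return 0;
    }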
For the sf version, we are performing two 32-bit bswaps
in either half of the register.  This is equivalent to
performing one 64-bit bswap followed by a rotate.

For the non-sf version, we can remove TCG_BSWAP_IZ
and the preceding zero-extension.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-a64.c | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void handle_rev32(DisasContext *s, unsigned int sf,
                          unsigned int rn, unsigned int rd)
 {
     TCGv_i64 tcg_rd = cpu_reg(s, rd);
+    TCGv_i64 tcg_rn = cpu_reg(s, rn);

     if (sf) {
-        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
-        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
-
-        /* bswap32_i64 requires zero high word */
-        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
-        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
-        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
-        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
-        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
-
-        tcg_temp_free_i64(tcg_tmp);
+        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
+        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
     } else {
-        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
-        tcg_gen_bswap32_i64(tcg_rd, tcg_rd, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
+        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
     }
 }
-- 
2.25.1


Continue splitting tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
     }
 }

+static void copy_propagate(OptContext *ctx, TCGOp *op,
+                           int nb_oargs, int nb_iargs)
+{
+    TCGContext *s = ctx->tcg;
+
+    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+        TCGTemp *ts = arg_temp(op->args[i]);
+        if (ts && ts_is_copy(ts)) {
+            op->args[i] = temp_arg(find_better_copy(s, ts));
+        }
+    }
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             nb_iargs = def->nb_iargs;
         }
         init_arguments(&ctx, op, nb_oargs + nb_iargs);
-
-        /* Do copy propagation */
-        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
-            TCGTemp *ts = arg_temp(op->args[i]);
-            if (ts && ts_is_copy(ts)) {
-                op->args[i] = temp_arg(find_better_copy(s, ts));
-            }
-        }
+        copy_propagate(&ctx, op, nb_oargs, nb_iargs);

         /* For commutative operations make constant second argument */
         switch (opc) {
-- 
2.25.1


We can use this in gen_goto_tb and for DISAS_JUMP
to indirectly chain to the next TB.

Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/cris/translate.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/target/cris/translate.c b/target/cris/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
         tcg_gen_exit_tb(dc->base.tb, n);
     } else {
         tcg_gen_movi_tl(env_pc, dest);
-        tcg_gen_exit_tb(NULL, 0);
+        tcg_gen_lookup_and_goto_ptr();
     }
 }

@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         tcg_gen_movi_tl(env_pc, npc);
         /* fall through */
     case DISAS_JUMP:
+        tcg_gen_lookup_and_goto_ptr();
+        break;
     case DISAS_UPDATE:
         /* Indicate that interupts must be re-evaluated before the next TB. */
         tcg_gen_exit_tb(NULL, 0);
-- 
2.25.1


Calls are special in that they have a variable number
of arguments, and need to be able to clobber globals.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 41 insertions(+), 22 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }

+static bool fold_call(OptContext *ctx, TCGOp *op)
+{
+    TCGContext *s = ctx->tcg;
+    int nb_oargs = TCGOP_CALLO(op);
+    int nb_iargs = TCGOP_CALLI(op);
+    int flags, i;
+
+    init_arguments(ctx, op, nb_oargs + nb_iargs);
+    copy_propagate(ctx, op, nb_oargs, nb_iargs);
+
+    /* If the function reads or writes globals, reset temp data. */
+    flags = tcg_call_flags(op);
+    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
+        int nb_globals = s->nb_globals;
+
+        for (i = 0; i < nb_globals; i++) {
+            if (test_bit(i, ctx->temps_used.l)) {
+                reset_ts(&ctx->tcg->temps[i]);
+            }
+        }
+    }
+
+    /* Reset temp data for outputs. */
+    for (i = 0; i < nb_oargs; i++) {
+        reset_temp(op->args[i]);
+    }
+
+    /* Stop optimizing MB across calls. */
+    ctx->prev_mb = NULL;
+    return true;
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
-    int nb_temps, nb_globals, i;
+    int nb_temps, i;
     TCGOp *op, *op_next;
     OptContext ctx = { .tcg = s };

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        available through the doubly linked circular list. */

     nb_temps = s->nb_temps;
-    nb_globals = s->nb_globals;
-
     for (i = 0; i < nb_temps; ++i) {
         s->temps[i].state_ptr = NULL;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         uint64_t z_mask, partmask, affected, tmp;
         int nb_oargs, nb_iargs;
         TCGOpcode opc = op->opc;
-        const TCGOpDef *def = &tcg_op_defs[opc];
+        const TCGOpDef *def;

-        /* Count the arguments, and initialize the temps that are
-           going to be used */
+        /* Calls are special. */
         if (opc == INDEX_op_call) {
-            nb_oargs = TCGOP_CALLO(op);
-            nb_iargs = TCGOP_CALLI(op);
-        } else {
-            nb_oargs = def->nb_oargs;
-            nb_iargs = def->nb_iargs;
+            fold_call(&ctx, op);
+            continue;
         }
+
+        def = &tcg_op_defs[opc];
+        nb_oargs = def->nb_oargs;
+        nb_iargs = def->nb_iargs;
         init_arguments(&ctx, op, nb_oargs + nb_iargs);
         copy_propagate(&ctx, op, nb_oargs, nb_iargs);

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         if (def->flags & TCG_OPF_BB_END) {
             memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
         } else {
-            if (opc == INDEX_op_call &&
-                !(tcg_call_flags(op)
-                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
-                for (i = 0; i < nb_globals; i++) {
-                    if (test_bit(i, ctx.temps_used.l)) {
-                        reset_ts(&s->temps[i]);
-                    }
-                }
-            }
-
             for (i = 0; i < nb_oargs; i++) {
                 reset_temp(op->args[i]);
                 /* Save the corresponding known-zero bits mask for the
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_qemu_st_i32:
         case INDEX_op_qemu_st8_i32:
         case INDEX_op_qemu_st_i64:
-        case INDEX_op_call:
             /* Opcodes that touch guest memory stop the optimization.  */
             ctx.prev_mb = NULL;
             break;
-- 
2.25.1
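The handle_rev32 rewrite earlier in this group rests on an identity worth spelling out: byte-swapping each 32-bit half of a 64-bit value in place equals a full 64-bit byte swap followed by a 32-bit rotate. A quick check in ordinary ISO C, with hand-rolled swap helpers rather than any QEMU or compiler intrinsics:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t bswap32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0xff00) |
               ((x << 8) & 0xff0000) | (x << 24);
    }

    static uint64_t bswap64(uint64_t x)
    {
        return ((uint64_t)bswap32((uint32_t)x) << 32) | bswap32(x >> 32);
    }

    static uint64_t rotr64(uint64_t x, unsigned r)
    {
        return (x >> r) | (x << (64 - r));
    }

    int main(void)
    {
        uint64_t x = 0x0123456789abcdefull;
        /* REV32 semantics: swap bytes within each 32-bit half. */
        uint64_t per_half = ((uint64_t)bswap32(x >> 32) << 32)
                          | bswap32((uint32_t)x);
        assert(per_half == rotr64(bswap64(x), 32));
        return 0;
    }

That replaces five TCG ops and a temporary with two ops and no temporary.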
Use a break instead of an ifdefed else.
There's no need to move the values through s->T0.
Remove TCG_BSWAP_IZ and the preceding zero-extension.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/tcg/translate.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         reg = (b & 7) | REX_B(s);
 #ifdef TARGET_X86_64
         if (dflag == MO_64) {
-            gen_op_mov_v_reg(s, MO_64, s->T0, reg);
-            tcg_gen_bswap64_i64(s->T0, s->T0);
-            gen_op_mov_reg_v(s, MO_64, reg, s->T0);
-        } else
-#endif
-        {
-            gen_op_mov_v_reg(s, MO_32, s->T0, reg);
-            tcg_gen_ext32u_tl(s->T0, s->T0);
-            tcg_gen_bswap32_tl(s->T0, s->T0, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
-            gen_op_mov_reg_v(s, MO_32, reg, s->T0);
+            tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
+            break;
         }
+#endif
+        tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
         break;
     case 0xd6: /* salc */
         if (CODE64(s))
-- 
2.25.1


Rather than try to keep these up-to-date across folding,
re-read nb_oargs at the end, after re-reading the opcode.

A couple of asserts need dropping, but that will take care
of itself as we split the function further.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
         uint64_t z_mask, partmask, affected, tmp;
-        int nb_oargs, nb_iargs;
         TCGOpcode opc = op->opc;
         const TCGOpDef *def;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }

         def = &tcg_op_defs[opc];
-        nb_oargs = def->nb_oargs;
-        nb_iargs = def->nb_iargs;
-        init_arguments(&ctx, op, nb_oargs + nb_iargs);
-        copy_propagate(&ctx, op, nb_oargs, nb_iargs);
+        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
+        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

         /* For commutative operations make constant second argument */
         switch (opc) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

         CASE_OP_32_64(qemu_ld):
             {
-                MemOpIdx oi = op->args[nb_oargs + nb_iargs];
+                MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
                 MemOp mop = get_memop(oi);
                 if (!(mop & MO_SIGN)) {
                     z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }

         if (partmask == 0) {
-            tcg_debug_assert(nb_oargs == 1);
             tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
             continue;
         }
         if (affected == 0) {
-            tcg_debug_assert(nb_oargs == 1);
             tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
             continue;
         }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         } else if (args_are_copies(op->args[1], op->args[2])) {
             op->opc = INDEX_op_dup_vec;
             TCGOP_VECE(op) = MO_32;
-            nb_iargs = 1;
         }
         break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             op->opc = opc = (opc == INDEX_op_movcond_i32
                              ? INDEX_op_setcond_i32
                              : INDEX_op_setcond_i64);
-            nb_iargs = 2;
         }
         break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         if (def->flags & TCG_OPF_BB_END) {
             memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
         } else {
+            int nb_oargs = def->nb_oargs;
             for (i = 0; i < nb_oargs; i++) {
                 reset_temp(op->args[i]);
                 /* Save the corresponding known-zero bits mask for the
-- 
2.25.1
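The bswap flag choices made in the i386 and arm hunks follow from the flag semantics introduced by this series: TCG_BSWAP_IZ asserts the input's high bits are already zero, TCG_BSWAP_OZ asks for a zero-extended result, TCG_BSWAP_OS for a sign-extended one, and no output flag leaves the high bits unspecified (fine for stores, which only consume the low bits). A worked 16-bit example in plain C, demonstration arithmetic only:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t loaded = 0x0000abcd;   /* ld16u result: IZ already holds */
        uint32_t swapped = ((loaded & 0xff) << 8) | (loaded >> 8); /* 0xcdab */

        uint32_t oz = swapped & 0xffff; /* MO_UW load: 0x0000cdab */
        int32_t  os = (int16_t)swapped; /* MO_SW load: 0xffffcdab */

        assert(oz == 0x0000cdab);
        assert((uint32_t)os == 0xffffcdab);
        return 0;
    }

Telling the backend which extension the consumer actually needs is what lets the IZ|OS case fold the old separate bswap16s/sign-extend pair into one operation.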
Ever since 2a44f7f17364, flagx_known is always true.
Fold away all of the tests against the flag.

Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/cris/translate.c         | 99 ++++++++-------------------------
 target/cris/translate_v10.c.inc |  6 +--
 2 files changed, 24 insertions(+), 81 deletions(-)

diff --git a/target/cris/translate.c b/target/cris/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {

     int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not up-to-date.  */
     int flags_uptodate; /* Whether or not $ccs is up-to-date.  */
-    int flagx_known; /* Whether or not flags_x has the x flag known at
-                        translation time.  */
     int flags_x;

     int clear_x; /* Clear x after this insn?  */
@@ -XXX,XX +XXX,XX @@ static inline void t_gen_add_flag(TCGv d, int flag)

 static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
 {
-    if (dc->flagx_known) {
-        if (dc->flags_x) {
-            TCGv c;
-
-            c = tcg_temp_new();
-            t_gen_mov_TN_preg(c, PR_CCS);
-            /* C flag is already at bit 0.  */
-            tcg_gen_andi_tl(c, c, C_FLAG);
-            tcg_gen_add_tl(d, d, c);
-            tcg_temp_free(c);
-        }
-    } else {
-        TCGv x, c;
+    if (dc->flags_x) {
+        TCGv c = tcg_temp_new();

-        x = tcg_temp_new();
-        c = tcg_temp_new();
-        t_gen_mov_TN_preg(x, PR_CCS);
-        tcg_gen_mov_tl(c, x);
-
-        /* Propagate carry into d if X is set. Branch free.  */
+        t_gen_mov_TN_preg(c, PR_CCS);
+        /* C flag is already at bit 0.  */
         tcg_gen_andi_tl(c, c, C_FLAG);
-        tcg_gen_andi_tl(x, x, X_FLAG);
-        tcg_gen_shri_tl(x, x, 4);
-
-        tcg_gen_and_tl(x, x, c);
-        tcg_gen_add_tl(d, d, x);
-        tcg_temp_free(x);
+        tcg_gen_add_tl(d, d, c);
         tcg_temp_free(c);
     }
 }

 static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
 {
-    if (dc->flagx_known) {
-        if (dc->flags_x) {
-            TCGv c;
-
-            c = tcg_temp_new();
-            t_gen_mov_TN_preg(c, PR_CCS);
-            /* C flag is already at bit 0.  */
-            tcg_gen_andi_tl(c, c, C_FLAG);
-            tcg_gen_sub_tl(d, d, c);
-            tcg_temp_free(c);
-        }
-    } else {
-        TCGv x, c;
+    if (dc->flags_x) {
+        TCGv c = tcg_temp_new();

-        x = tcg_temp_new();
-        c = tcg_temp_new();
-        t_gen_mov_TN_preg(x, PR_CCS);
-        tcg_gen_mov_tl(c, x);
-
-        /* Propagate carry into d if X is set. Branch free.  */
+        t_gen_mov_TN_preg(c, PR_CCS);
+        /* C flag is already at bit 0.  */
         tcg_gen_andi_tl(c, c, C_FLAG);
-        tcg_gen_andi_tl(x, x, X_FLAG);
-        tcg_gen_shri_tl(x, x, 4);
-
-        tcg_gen_and_tl(x, x, c);
-        tcg_gen_sub_tl(d, d, x);
-        tcg_temp_free(x);
+        tcg_gen_sub_tl(d, d, c);
         tcg_temp_free(c);
     }
 }
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)

 static inline void cris_clear_x_flag(DisasContext *dc)
 {
-    if (dc->flagx_known && dc->flags_x) {
+    if (dc->flags_x) {
         dc->flags_uptodate = 0;
     }
-
-    dc->flagx_known = 1;
     dc->flags_x = 0;
 }

@@ -XXX,XX +XXX,XX @@ static void cris_evaluate_flags(DisasContext *dc)
         break;
     }

-    if (dc->flagx_known) {
-        if (dc->flags_x) {
-            tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], X_FLAG);
-        } else if (dc->cc_op == CC_OP_FLAGS) {
-            tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~X_FLAG);
-        }
+    if (dc->flags_x) {
+        tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], X_FLAG);
+    } else if (dc->cc_op == CC_OP_FLAGS) {
+        tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~X_FLAG);
     }
     dc->flags_uptodate = 1;
 }
@@ -XXX,XX +XXX,XX @@ static void cris_update_cc_op(DisasContext *dc, int op, int size)
 static inline void cris_update_cc_x(DisasContext *dc)
 {
     /* Save the x flag state at the time of the cc snapshot.  */
-    if (dc->flagx_known) {
-        if (dc->cc_x_uptodate == (2 | dc->flags_x)) {
-            return;
-        }
-        tcg_gen_movi_tl(cc_x, dc->flags_x);
-        dc->cc_x_uptodate = 2 | dc->flags_x;
-    } else {
-        tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
-        dc->cc_x_uptodate = 1;
+    if (dc->cc_x_uptodate == (2 | dc->flags_x)) {
+        return;
     }
+    tcg_gen_movi_tl(cc_x, dc->flags_x);
+    dc->cc_x_uptodate = 2 | dc->flags_x;
 }

 /* Update cc prior to executing ALU op. Needs source operands untouched.  */
@@ -XXX,XX +XXX,XX @@ static void gen_store (DisasContext *dc, TCGv addr, TCGv val,

     /* Conditional writes. We only support the kind were X and P are known
        at translation time.  */
-    if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
+    if (dc->flags_x && (dc->tb_flags & P_FLAG)) {
         dc->postinc = 0;
         cris_evaluate_flags(dc);
         tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
@@ -XXX,XX +XXX,XX @@ static void gen_store (DisasContext *dc, TCGv addr, TCGv val,

     tcg_gen_qemu_st_tl(val, addr, mem_index, MO_TE + ctz32(size));

-    if (dc->flagx_known && dc->flags_x) {
+    if (dc->flags_x) {
         cris_evaluate_flags(dc);
         tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
     }
@@ -XXX,XX +XXX,XX @@ static int dec_addc_r(CPUCRISState *env, DisasContext *dc)
     LOG_DIS("addc $r%u, $r%u\n",
             dc->op1, dc->op2);
     cris_evaluate_flags(dc);
+
     /* Set for this insn.  */
-    dc->flagx_known = 1;
     dc->flags_x = X_FLAG;

     cris_cc_mask(dc, CC_MASK_NZVC);
@@ -XXX,XX +XXX,XX @@ static int dec_setclrf(CPUCRISState *env, DisasContext *dc)
     }

     if (flags & X_FLAG) {
-        dc->flagx_known = 1;
         if (set) {
             dc->flags_x = X_FLAG;
         } else {
@@ -XXX,XX +XXX,XX @@ static int dec_addc_mr(CPUCRISState *env, DisasContext *dc)
     cris_evaluate_flags(dc);

     /* Set for this insn.  */
-    dc->flagx_known = 1;
     dc->flags_x = X_FLAG;

     cris_alu_m_alloc_temps(t);
@@ -XXX,XX +XXX,XX @@ static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->ppc = pc_start;
     dc->pc = pc_start;
     dc->flags_uptodate = 1;
-    dc->flagx_known = 1;
     dc->flags_x = tb_flags & X_FLAG;
     dc->cc_x_uptodate = 0;
     dc->cc_mask = 0;
@@ -XXX,XX +XXX,XX @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     }

     /* Fold unhandled changes to X_FLAG into cpustate_changed.  */
-    dc->cpustate_changed |= !dc->flagx_known;
     dc->cpustate_changed |= dc->flags_x != (dc->base.tb->flags & X_FLAG);

     /*
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate_v10.c.inc
+++ b/target/cris/translate_v10.c.inc
@@ -XXX,XX +XXX,XX @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
     cris_store_direct_jmp(dc);
     }

-    /* Conditional writes. We only support the kind were X is known
-       at translation time.  */
-    if (dc->flagx_known && dc->flags_x) {
+    /* Conditional writes.  */
+    if (dc->flags_x) {
         gen_store_v10_conditional(dc, addr, val, size, mem_index);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static unsigned int dec10_setclrf(DisasContext *dc)

     if (flags & X_FLAG) {
-        dc->flagx_known = 1;
         if (set)
             dc->flags_x = X_FLAG;
         else
-- 
2.25.1


Return -1 instead of 2 for failure, so that we can
use comparisons against 0 for all cases.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 145 +++++++++++++++++++++++++------------------------
 1 file changed, 74 insertions(+), 71 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
     }
 }

-/* Return 2 if the condition can't be simplified, and the result
-   of the condition (0 or 1) if it can */
-static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
-                                       TCGArg y, TCGCond c)
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
+                                    TCGArg y, TCGCond c)
 {
     uint64_t xv = arg_info(x)->val;
     uint64_t yv = arg_info(y)->val;
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
         case TCG_COND_GEU:
             return 1;
         default:
-            return 2;
+            return -1;
         }
     }
-    return 2;
+    return -1;
 }

-/* Return 2 if the condition can't be simplified, and the result
-   of the condition (0 or 1) if it can */
-static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
 {
     TCGArg al = p1[0], ah = p1[1];
     TCGArg bl = p2[0], bh = p2[1];
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
     if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
         return do_constant_folding_cond_eq(c);
     }
-    return 2;
+    return -1;
 }

 static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;

         CASE_OP_32_64(setcond):
-            tmp = do_constant_folding_cond(opc, op->args[1],
-                                           op->args[2], op->args[3]);
-            if (tmp != 2) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
+            i = do_constant_folding_cond(opc, op->args[1],
+                                         op->args[2], op->args[3]);
+            if (i >= 0) {
+                tcg_opt_gen_movi(&ctx, op, op->args[0], i);
                 continue;
             }
             break;

         CASE_OP_32_64(brcond):
-            tmp = do_constant_folding_cond(opc, op->args[0],
-                                           op->args[1], op->args[2]);
-            switch (tmp) {
-            case 0:
+            i = do_constant_folding_cond(opc, op->args[0],
+                                         op->args[1], op->args[2]);
+            if (i == 0) {
                 tcg_op_remove(s, op);
                 continue;
-            case 1:
+            } else if (i > 0) {
                 memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
                 op->opc = opc = INDEX_op_br;
                 op->args[0] = op->args[3];
             }
             break;

         CASE_OP_32_64(movcond):
-            tmp = do_constant_folding_cond(opc, op->args[1],
-                                           op->args[2], op->args[5]);
-            if (tmp != 2) {
-                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
+            i = do_constant_folding_cond(opc, op->args[1],
+                                         op->args[2], op->args[5]);
+            if (i >= 0) {
+                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
                 continue;
             }
             if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

         case INDEX_op_brcond2_i32:
-            tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
-                                            op->args[4]);
-            if (tmp == 0) {
+            i = do_constant_folding_cond2(&op->args[0], &op->args[2],
+                                          op->args[4]);
+            if (i == 0) {
             do_brcond_false:
                 tcg_op_remove(s, op);
                 continue;
             }
-            if (tmp == 1) {
+            if (i > 0) {
             do_brcond_true:
                 op->opc = opc = INDEX_op_br;
                 op->args[0] = op->args[5];
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (op->args[4] == TCG_COND_EQ) {
                 /* Simplify EQ comparisons where one of the pairs
                    can be simplified.  */
-                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                               op->args[0], op->args[2],
-                                               TCG_COND_EQ);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_brcond_i32,
+                                             op->args[0], op->args[2],
+                                             TCG_COND_EQ);
+                if (i == 0) {
                     goto do_brcond_false;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_brcond_high;
                 }
-                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                               op->args[1], op->args[3],
-                                               TCG_COND_EQ);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_brcond_i32,
+                                             op->args[1], op->args[3],
+                                             TCG_COND_EQ);
+                if (i == 0) {
                     goto do_brcond_false;
-                } else if (tmp != 1) {
+                } else if (i < 0) {
                     break;
                 }
             do_brcond_low:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (op->args[4] == TCG_COND_NE) {
                 /* Simplify NE comparisons where one of the pairs
                    can be simplified.  */
-                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                               op->args[0], op->args[2],
-                                               TCG_COND_NE);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_brcond_i32,
+                                             op->args[0], op->args[2],
+                                             TCG_COND_NE);
+                if (i == 0) {
                     goto do_brcond_high;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_brcond_true;
                 }
-                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                               op->args[1], op->args[3],
-                                               TCG_COND_NE);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_brcond_i32,
+                                             op->args[1], op->args[3],
+                                             TCG_COND_NE);
+                if (i == 0) {
                     goto do_brcond_low;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_brcond_true;
                 }
             }
             break;

         case INDEX_op_setcond2_i32:
-            tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
-                                            op->args[5]);
-            if (tmp != 2) {
+            i = do_constant_folding_cond2(&op->args[1], &op->args[3],
+                                          op->args[5]);
+            if (i >= 0) {
             do_setcond_const:
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
+                tcg_opt_gen_movi(&ctx, op, op->args[0], i);
                 continue;
             }
             if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (op->args[5] == TCG_COND_EQ) {
                 /* Simplify EQ comparisons where one of the pairs
                    can be simplified.  */
-                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
-                                               op->args[1], op->args[3],
-                                               TCG_COND_EQ);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_setcond_i32,
+                                             op->args[1], op->args[3],
+                                             TCG_COND_EQ);
+                if (i == 0) {
                     goto do_setcond_const;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_setcond_high;
                 }
-                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
-                                               op->args[2], op->args[4],
-                                               TCG_COND_EQ);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_setcond_i32,
+                                             op->args[2], op->args[4],
+                                             TCG_COND_EQ);
+                if (i == 0) {
                     goto do_setcond_high;
-                } else if (tmp != 1) {
+                } else if (i < 0) {
                     break;
                 }
             do_setcond_low:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (op->args[5] == TCG_COND_NE) {
                 /* Simplify NE comparisons where one of the pairs
                    can be simplified.  */
-                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
-                                               op->args[1], op->args[3],
-                                               TCG_COND_NE);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_setcond_i32,
+                                             op->args[1], op->args[3],
+                                             TCG_COND_NE);
+                if (i == 0) {
                     goto do_setcond_high;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_setcond_const;
                 }
-                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
-                                               op->args[2], op->args[4],
-                                               TCG_COND_NE);
-                if (tmp == 0) {
+                i = do_constant_folding_cond(INDEX_op_setcond_i32,
+                                             op->args[2], op->args[4],
+                                             TCG_COND_NE);
+                if (i == 0) {
                     goto do_setcond_low;
-                } else if (tmp == 1) {
+                } else if (i > 0) {
                     goto do_setcond_const;
                 }
             }
-- 
2.25.1
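The motivation for -1 over 2 as the "cannot fold" sentinel: the old value lived in an unsigned TCGArg, so callers could only test "tmp != 2"; with a signed int, the three outcomes partition naturally around zero. A stand-alone sketch of the resulting idiom, with a simplified stand-in for the folding test rather than the real do_constant_folding_cond signature:

    #include <stdio.h>

    /* Returns -1 when the comparison cannot be decided at translate time,
       else the comparison result (0 or 1). */
    static int try_fold_eq(int x, int y, int values_known)
    {
        if (!values_known) {
            return -1;
        }
        return x == y;
    }

    int main(void)
    {
        int i = try_fold_eq(2, 2, 1);
        if (i == 0) {
            puts("branch never taken: delete the brcond");
        } else if (i > 0) {
            puts("branch always taken: rewrite as unconditional br");
        } else {
            puts("not foldable: keep the brcond");
        }
        return 0;
    }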
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/nios2/translate.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -XXX,XX +XXX,XX @@ static void handle_instruction(DisasContext *dc, CPUNios2State *env)
     op = get_opcode(code);

     if (unlikely(op >= ARRAY_SIZE(i_type_instructions))) {
-        goto illegal_op;
+        t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
+        return;
     }

     dc->zero = NULL;
@@ -XXX,XX +XXX,XX @@ static void handle_instruction(DisasContext *dc, CPUNios2State *env)
     if (dc->zero) {
         tcg_temp_free(dc->zero);
     }
-
-    return;
-
-illegal_op:
-    t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
 }

 static const char * const regnames[] = {
-- 
2.25.1


This will allow callers to tail call to these functions
and return true indicating processing complete.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
     return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
 }

-static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
+static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 {
     TCGTemp *dst_ts = arg_temp(dst);
     TCGTemp *src_ts = arg_temp(src);
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)

     if (ts_are_copies(dst_ts, src_ts)) {
         tcg_op_remove(ctx->tcg, op);
-        return;
+        return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
         di->is_const = si->is_const;
         di->val = si->val;
     }
+    return true;
 }

-static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
+static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                              TCGArg dst, uint64_t val)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
     /* Convert movi to mov with constant temp. */
     tv = tcg_constant_internal(type, val);
     init_ts_info(ctx, tv);
-    tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
+    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }

 static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
-- 
2.25.1
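The bool return is purely about call-site shape: a fold helper can finish with "return tcg_opt_gen_mov(...)" and its caller can continue the loop on a true result. A minimal sketch of the pattern in plain C, with a hypothetical fold_sub standing in for the real fold routines:

    #include <stdbool.h>
    #include <stdio.h>

    static bool gen_movi(int dst, long val)
    {
        printf("t%d = %ld\n", dst, val);
        return true;                 /* op fully handled */
    }

    /* x - x == 0: replace the op with a constant and signal "done". */
    static bool fold_sub(int dst, int a1, int a2)
    {
        if (a1 == a2) {
            return gen_movi(dst, 0); /* tail call; result propagates */
        }
        return false;                /* caller keeps processing the op */
    }

    int main(void)
    {
        if (fold_sub(0, 5, 5)) {
            puts("continue to next op");
        }
        return 0;
    }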
Tested-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/avr/translate.c | 234 ++++++++++++++++++++++-------------------
 1 file changed, 128 insertions(+), 106 deletions(-)

diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static bool canonicalize_skip(DisasContext *ctx)
     return true;
 }

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+static void gen_breakpoint(DisasContext *ctx)
 {
+    canonicalize_skip(ctx);
+    tcg_gen_movi_tl(cpu_pc, ctx->npc);
+    gen_helper_debug(cpu_env);
+    ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
     CPUAVRState *env = cs->env_ptr;
-    DisasContext ctx1 = {
-        .base.tb = tb,
-        .base.is_jmp = DISAS_NEXT,
-        .base.pc_first = tb->pc,
-        .base.pc_next = tb->pc,
-        .base.singlestep_enabled = cs->singlestep_enabled,
-        .cs = cs,
-        .env = env,
-        .memidx = 0,
-        .skip_cond = TCG_COND_NEVER,
-    };
-    DisasContext *ctx = &ctx1;
-    target_ulong pc_start = tb->pc / 2;
-    int num_insns = 0;
+    uint32_t tb_flags = ctx->base.tb->flags;

-    if (tb->flags & TB_FLAGS_FULL_ACCESS) {
-        /*
-         * This flag is set by ST/LD instruction we will regenerate it ONLY
-         * with mem/cpu memory access instead of mem access
-         */
-        max_insns = 1;
-    }
-    if (ctx->base.singlestep_enabled) {
-        max_insns = 1;
-    }
+    ctx->cs = cs;
+    ctx->env = env;
+    ctx->npc = ctx->base.pc_first / 2;

-    gen_tb_start(tb);
-
-    ctx->npc = pc_start;
-    if (tb->flags & TB_FLAGS_SKIP) {
+    ctx->skip_cond = TCG_COND_NEVER;
+    if (tb_flags & TB_FLAGS_SKIP) {
         ctx->skip_cond = TCG_COND_ALWAYS;
         ctx->skip_var0 = cpu_skip;
     }

-    do {
-        TCGLabel *skip_label = NULL;
-
-        /* translate current instruction */
-        tcg_gen_insn_start(ctx->npc);
-        num_insns++;
-
+    if (tb_flags & TB_FLAGS_FULL_ACCESS) {
         /*
-         * this is due to some strange GDB behavior
-         * let's assume main has address 0x100
-         * b main - sets breakpoint at address 0x00000100 (code)
-         * b *0x100 - sets breakpoint at address 0x00800100 (data)
+         * This flag is set by ST/LD instruction we will regenerate it ONLY
+         * with mem/cpu memory access instead of mem access
          */
-        if (unlikely(!ctx->base.singlestep_enabled &&
-                     (cpu_breakpoint_test(cs, OFFSET_CODE + ctx->npc * 2, BP_ANY) ||
-                      cpu_breakpoint_test(cs, OFFSET_DATA + ctx->npc * 2, BP_ANY)))) {
-            canonicalize_skip(ctx);
-            tcg_gen_movi_tl(cpu_pc, ctx->npc);
-            gen_helper_debug(cpu_env);
-            goto done_generating;
-        }
+        ctx->base.max_insns = 1;
+    }
+}

-        /* Conditionally skip the next instruction, if indicated.  */
-        if (ctx->skip_cond != TCG_COND_NEVER) {
-            skip_label = gen_new_label();
-            if (ctx->skip_var0 == cpu_skip) {
-                /*
-                 * Copy cpu_skip so that we may zero it before the branch.
-                 * This ensures that cpu_skip is non-zero after the label
-                 * if and only if the skipped insn itself sets a skip.
-                 */
-                ctx->free_skip_var0 = true;
-                ctx->skip_var0 = tcg_temp_new();
-                tcg_gen_mov_tl(ctx->skip_var0, cpu_skip);
-                tcg_gen_movi_tl(cpu_skip, 0);
-            }
-            if (ctx->skip_var1 == NULL) {
-                tcg_gen_brcondi_tl(ctx->skip_cond, ctx->skip_var0,
-                                   0, skip_label);
-            } else {
-                tcg_gen_brcond_tl(ctx->skip_cond, ctx->skip_var0,
-                                  ctx->skip_var1, skip_label);
-                ctx->skip_var1 = NULL;
-            }
-            if (ctx->free_skip_var0) {
-                tcg_temp_free(ctx->skip_var0);
-                ctx->free_skip_var0 = false;
-            }
-            ctx->skip_cond = TCG_COND_NEVER;
-            ctx->skip_var0 = NULL;
-        }
+static void avr_tr_tb_start(DisasContextBase *db, CPUState *cs)
+{
+}

-        translate(ctx);
+static void avr_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);

-        if (skip_label) {
-            canonicalize_skip(ctx);
-            gen_set_label(skip_label);
-            if (ctx->base.is_jmp == DISAS_NORETURN) {
-                ctx->base.is_jmp = DISAS_CHAIN;
-            }
-        }
-    } while (ctx->base.is_jmp == DISAS_NEXT
-             && num_insns < max_insns
-             && (ctx->npc - pc_start) * 2 < TARGET_PAGE_SIZE - 4
-             && !tcg_op_buf_full());
+    tcg_gen_insn_start(ctx->npc);
+}

-    if (tb->cflags & CF_LAST_IO) {
-        gen_io_end();
+static bool avr_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
+                                    const CPUBreakpoint *bp)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    gen_breakpoint(ctx);
+    return true;
+}
+
+static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    TCGLabel *skip_label = NULL;
+
+    /*
+     * This is due to some strange GDB behavior
+     * Let's assume main has address 0x100:
+     * b main   - sets breakpoint at address 0x00000100 (code)
+     * b *0x100 - sets breakpoint at address 0x00800100 (data)
+     *
+     * The translator driver has already taken care of the code pointer.
+     */
+    if (!ctx->base.singlestep_enabled &&
+        cpu_breakpoint_test(cs, OFFSET_DATA + ctx->base.pc_next, BP_ANY)) {
+        gen_breakpoint(ctx);
+        return;
     }

+    /* Conditionally skip the next instruction, if indicated. */


Copy z_mask into OptContext, for writeback to the
first output within the new function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 49 +++++++++++++++++++++++++++++++----------------
 1 file changed, 33 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     TCGContext *tcg;
     TCGOp *prev_mb;
     TCGTempSet temps_used;
+
+    /* In flight values from optimization. */
+    uint64_t z_mask;
 } OptContext;

 static inline TempOptInfo *ts_info(TCGTemp *ts)
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }

+static void finish_folding(OptContext *ctx, TCGOp *op)
+{
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    int i, nb_oargs;
+
+    /*
+     * For an opcode that ends a BB, reset all temp data.
+     * We do no cross-BB optimization.
+     */
+    if (def->flags & TCG_OPF_BB_END) {
+        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+        ctx->prev_mb = NULL;
+        return;
+    }
+ /* Conditionally skip the next instruction, if indicated. */
180
+ if (ctx->skip_cond != TCG_COND_NEVER) {
181
+ skip_label = gen_new_label();
182
+ if (ctx->skip_var0 == cpu_skip) {
183
+ /*
184
+ * Copy cpu_skip so that we may zero it before the branch.
185
+ * This ensures that cpu_skip is non-zero after the label
186
+ * if and only if the skipped insn itself sets a skip.
187
+ */
188
+ ctx->free_skip_var0 = true;
189
+ ctx->skip_var0 = tcg_temp_new();
190
+ tcg_gen_mov_tl(ctx->skip_var0, cpu_skip);
191
+ tcg_gen_movi_tl(cpu_skip, 0);
192
+ }
193
+ if (ctx->skip_var1 == NULL) {
194
+ tcg_gen_brcondi_tl(ctx->skip_cond, ctx->skip_var0, 0, skip_label);
195
+ } else {
196
+ tcg_gen_brcond_tl(ctx->skip_cond, ctx->skip_var0,
197
+ ctx->skip_var1, skip_label);
198
+ ctx->skip_var1 = NULL;
199
+ }
200
+ if (ctx->free_skip_var0) {
201
+ tcg_temp_free(ctx->skip_var0);
202
+ ctx->free_skip_var0 = false;
203
+ }
204
+ ctx->skip_cond = TCG_COND_NEVER;
205
+ ctx->skip_var0 = NULL;
206
+ }
42
+ }
207
+
43
+
208
+ translate(ctx);
44
+ nb_oargs = def->nb_oargs;
209
+
45
+ for (i = 0; i < nb_oargs; i++) {
210
+ ctx->base.pc_next = ctx->npc * 2;
46
+ reset_temp(op->args[i]);
211
+
47
+ /*
212
+ if (skip_label) {
48
+ * Save the corresponding known-zero bits mask for the
213
+ canonicalize_skip(ctx);
49
+ * first output argument (only one supported so far).
214
+ gen_set_label(skip_label);
50
+ */
215
+ if (ctx->base.is_jmp == DISAS_NORETURN) {
51
+ if (i == 0) {
216
+ ctx->base.is_jmp = DISAS_CHAIN;
52
+ arg_info(op->args[i])->z_mask = ctx->z_mask;
217
+ }
218
+ }
219
+
220
+ if (ctx->base.is_jmp == DISAS_NEXT) {
221
+ target_ulong page_first = ctx->base.pc_first & TARGET_PAGE_MASK;
222
+
223
+ if ((ctx->base.pc_next - page_first) >= TARGET_PAGE_SIZE - 4) {
224
+ ctx->base.is_jmp = DISAS_TOO_MANY;
225
+ }
53
+ }
226
+ }
54
+ }
227
+}
55
+}
228
+
56
+
229
+static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
57
static bool fold_call(OptContext *ctx, TCGOp *op)
230
+{
58
{
231
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
59
TCGContext *s = ctx->tcg;
232
bool nonconst_skip = canonicalize_skip(ctx);
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
233
61
partmask &= 0xffffffffu;
234
switch (ctx->base.is_jmp) {
62
affected &= 0xffffffffu;
235
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
63
}
236
default:
64
+ ctx.z_mask = z_mask;
237
g_assert_not_reached();
65
238
}
66
if (partmask == 0) {
239
+}
67
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
240
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
241
-done_generating:
69
break;
242
- gen_tb_end(tb, num_insns);
70
}
243
+static void avr_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
71
244
+{
72
- /* Some of the folding above can change opc. */
245
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
73
- opc = op->opc;
246
+ log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
74
- def = &tcg_op_defs[opc];
247
+}
75
- if (def->flags & TCG_OPF_BB_END) {
248
76
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
249
- tb->size = (ctx->npc - pc_start) * 2;
77
- } else {
250
- tb->icount = num_insns;
78
- int nb_oargs = def->nb_oargs;
251
+static const TranslatorOps avr_tr_ops = {
79
- for (i = 0; i < nb_oargs; i++) {
252
+ .init_disas_context = avr_tr_init_disas_context,
80
- reset_temp(op->args[i]);
253
+ .tb_start = avr_tr_tb_start,
81
- /* Save the corresponding known-zero bits mask for the
254
+ .insn_start = avr_tr_insn_start,
82
- first output argument (only one supported so far). */
255
+ .breakpoint_check = avr_tr_breakpoint_check,
83
- if (i == 0) {
256
+ .translate_insn = avr_tr_translate_insn,
84
- arg_info(op->args[i])->z_mask = z_mask;
257
+ .tb_stop = avr_tr_tb_stop,
85
- }
258
+ .disas_log = avr_tr_disas_log,
86
- }
259
+};
87
- }
260
88
+ finish_folding(&ctx, op);
261
-#ifdef DEBUG_DISAS
89
262
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
90
/* Eliminate duplicate and redundant fence instructions. */
263
- && qemu_log_in_addr_range(tb->pc)) {
91
if (ctx.prev_mb) {
264
- FILE *fd;
265
- fd = qemu_log_lock();
266
- qemu_log("IN: %s\n", lookup_symbol(tb->pc));
267
- log_target_disas(cs, tb->pc, tb->size);
268
- qemu_log("\n");
269
- qemu_log_unlock(fd);
270
- }
271
-#endif
272
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
273
+{
274
+ DisasContext dc = { };
275
+ translator_loop(&avr_tr_ops, &dc.base, cs, tb, max_insns);
276
}
277
278
void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,
279
--
92
--
280
2.25.1
93
2.25.1
281
94
282
95
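
The invariant behind the new finish_folding() above is worth spelling out: the optimizer's knowledge (copies, constants, pending barriers) is strictly per basic block. A minimal sketch, with field names that only mirror the real OptContext:

    #include <string.h>

    struct sketch_fold_state {
        unsigned char temps_used[64]; /* which temps carry known info */
        void *prev_mb;                /* last barrier seen, if any */
    };

    /* At a block-ending opcode, forget everything. */
    static void sketch_reset_at_bb_end(struct sketch_fold_state *st)
    {
        memset(st->temps_used, 0, sizeof(st->temps_used));
        st->prev_mb = NULL;
    }
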
1
Prepare for receiving it as a pointer input.
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
2
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Tested-by: Michael Rolnik <mrolnik@gmail.com>
4
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
target/avr/translate.c | 84 +++++++++++++++++++++---------------------
6
tcg/optimize.c | 9 ++++++---
9
1 file changed, 43 insertions(+), 41 deletions(-)
7
1 file changed, 6 insertions(+), 3 deletions(-)
10
8
11
diff --git a/target/avr/translate.c b/target/avr/translate.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/target/avr/translate.c
11
--- a/tcg/optimize.c
14
+++ b/target/avr/translate.c
12
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ struct DisasContext {
13
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
16
* used in the following manner (sketch)
14
uint64_t z_mask, partmask, affected, tmp;
17
*
15
TCGOpcode opc = op->opc;
18
* TCGLabel *skip_label = NULL;
16
const TCGOpDef *def;
19
- * if (ctx.skip_cond != TCG_COND_NEVER) {
17
+ bool done = false;
20
+ * if (ctx->skip_cond != TCG_COND_NEVER) {
18
21
* skip_label = gen_new_label();
19
/* Calls are special. */
22
* tcg_gen_brcond_tl(skip_cond, skip_var0, skip_var1, skip_label);
20
if (opc == INDEX_op_call) {
23
* }
21
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
24
@@ -XXX,XX +XXX,XX @@ struct DisasContext {
22
allocator where needed and possible. Also detect copies. */
25
* free_skip_var0 = false;
23
switch (opc) {
26
* }
24
CASE_OP_32_64_VEC(mov):
27
*
25
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
28
- * translate(&ctx);
26
- continue;
29
+ * translate(ctx);
27
+ done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
30
*
28
+ break;
31
* if (skip_label) {
29
32
* gen_set_label(skip_label);
30
case INDEX_op_dup_vec:
33
@@ -XXX,XX +XXX,XX @@ static bool canonicalize_skip(DisasContext *ctx)
31
if (arg_is_const(op->args[1])) {
34
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
32
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
35
{
36
CPUAVRState *env = cs->env_ptr;
37
- DisasContext ctx = {
38
+ DisasContext ctx1 = {
39
.base.tb = tb,
40
.base.is_jmp = DISAS_NEXT,
41
.base.pc_first = tb->pc,
42
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
43
.memidx = 0,
44
.skip_cond = TCG_COND_NEVER,
45
};
46
+ DisasContext *ctx = &ctx1;
47
target_ulong pc_start = tb->pc / 2;
48
int num_insns = 0;
49
50
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
51
*/
52
max_insns = 1;
53
}
54
- if (ctx.base.singlestep_enabled) {
55
+ if (ctx->base.singlestep_enabled) {
56
max_insns = 1;
57
}
58
59
gen_tb_start(tb);
60
61
- ctx.npc = pc_start;
62
+ ctx->npc = pc_start;
63
if (tb->flags & TB_FLAGS_SKIP) {
64
- ctx.skip_cond = TCG_COND_ALWAYS;
65
- ctx.skip_var0 = cpu_skip;
66
+ ctx->skip_cond = TCG_COND_ALWAYS;
67
+ ctx->skip_var0 = cpu_skip;
68
}
69
70
do {
71
TCGLabel *skip_label = NULL;
72
73
/* translate current instruction */
74
- tcg_gen_insn_start(ctx.npc);
75
+ tcg_gen_insn_start(ctx->npc);
76
num_insns++;
77
78
/*
79
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
80
* b main - sets breakpoint at address 0x00000100 (code)
81
* b *0x100 - sets breakpoint at address 0x00800100 (data)
82
*/
83
- if (unlikely(!ctx.base.singlestep_enabled &&
84
- (cpu_breakpoint_test(cs, OFFSET_CODE + ctx.npc * 2, BP_ANY) ||
85
- cpu_breakpoint_test(cs, OFFSET_DATA + ctx.npc * 2, BP_ANY)))) {
86
- canonicalize_skip(&ctx);
87
- tcg_gen_movi_tl(cpu_pc, ctx.npc);
88
+ if (unlikely(!ctx->base.singlestep_enabled &&
89
+ (cpu_breakpoint_test(cs, OFFSET_CODE + ctx->npc * 2, BP_ANY) ||
90
+ cpu_breakpoint_test(cs, OFFSET_DATA + ctx->npc * 2, BP_ANY)))) {
91
+ canonicalize_skip(ctx);
92
+ tcg_gen_movi_tl(cpu_pc, ctx->npc);
93
gen_helper_debug(cpu_env);
94
goto done_generating;
95
}
96
97
/* Conditionally skip the next instruction, if indicated. */
98
- if (ctx.skip_cond != TCG_COND_NEVER) {
99
+ if (ctx->skip_cond != TCG_COND_NEVER) {
100
skip_label = gen_new_label();
101
- if (ctx.skip_var0 == cpu_skip) {
102
+ if (ctx->skip_var0 == cpu_skip) {
103
/*
104
* Copy cpu_skip so that we may zero it before the branch.
105
* This ensures that cpu_skip is non-zero after the label
106
* if and only if the skipped insn itself sets a skip.
107
*/
108
- ctx.free_skip_var0 = true;
109
- ctx.skip_var0 = tcg_temp_new();
110
- tcg_gen_mov_tl(ctx.skip_var0, cpu_skip);
111
+ ctx->free_skip_var0 = true;
112
+ ctx->skip_var0 = tcg_temp_new();
113
+ tcg_gen_mov_tl(ctx->skip_var0, cpu_skip);
114
tcg_gen_movi_tl(cpu_skip, 0);
115
}
116
- if (ctx.skip_var1 == NULL) {
117
- tcg_gen_brcondi_tl(ctx.skip_cond, ctx.skip_var0, 0, skip_label);
118
+ if (ctx->skip_var1 == NULL) {
119
+ tcg_gen_brcondi_tl(ctx->skip_cond, ctx->skip_var0,
120
+ 0, skip_label);
121
} else {
122
- tcg_gen_brcond_tl(ctx.skip_cond, ctx.skip_var0,
123
- ctx.skip_var1, skip_label);
124
- ctx.skip_var1 = NULL;
125
+ tcg_gen_brcond_tl(ctx->skip_cond, ctx->skip_var0,
126
+ ctx->skip_var1, skip_label);
127
+ ctx->skip_var1 = NULL;
128
}
129
- if (ctx.free_skip_var0) {
130
- tcg_temp_free(ctx.skip_var0);
131
- ctx.free_skip_var0 = false;
132
+ if (ctx->free_skip_var0) {
133
+ tcg_temp_free(ctx->skip_var0);
134
+ ctx->free_skip_var0 = false;
135
}
136
- ctx.skip_cond = TCG_COND_NEVER;
137
- ctx.skip_var0 = NULL;
138
+ ctx->skip_cond = TCG_COND_NEVER;
139
+ ctx->skip_var0 = NULL;
140
}
141
142
- translate(&ctx);
143
+ translate(ctx);
144
145
if (skip_label) {
146
- canonicalize_skip(&ctx);
147
+ canonicalize_skip(ctx);
148
gen_set_label(skip_label);
149
- if (ctx.base.is_jmp == DISAS_NORETURN) {
150
- ctx.base.is_jmp = DISAS_CHAIN;
151
+ if (ctx->base.is_jmp == DISAS_NORETURN) {
152
+ ctx->base.is_jmp = DISAS_CHAIN;
153
}
154
}
155
- } while (ctx.base.is_jmp == DISAS_NEXT
156
+ } while (ctx->base.is_jmp == DISAS_NEXT
157
&& num_insns < max_insns
158
- && (ctx.npc - pc_start) * 2 < TARGET_PAGE_SIZE - 4
159
+ && (ctx->npc - pc_start) * 2 < TARGET_PAGE_SIZE - 4
160
&& !tcg_op_buf_full());
161
162
if (tb->cflags & CF_LAST_IO) {
163
gen_io_end();
164
}
165
166
- bool nonconst_skip = canonicalize_skip(&ctx);
167
+ bool nonconst_skip = canonicalize_skip(ctx);
168
169
- switch (ctx.base.is_jmp) {
170
+ switch (ctx->base.is_jmp) {
171
case DISAS_NORETURN:
172
assert(!nonconst_skip);
173
break;
174
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
175
case DISAS_CHAIN:
176
if (!nonconst_skip) {
177
/* Note gen_goto_tb checks singlestep. */
178
- gen_goto_tb(&ctx, 1, ctx.npc);
179
+ gen_goto_tb(ctx, 1, ctx->npc);
180
break;
33
break;
181
}
34
}
182
- tcg_gen_movi_tl(cpu_pc, ctx.npc);
35
183
+ tcg_gen_movi_tl(cpu_pc, ctx->npc);
36
- finish_folding(&ctx, op);
184
/* fall through */
37
+ if (!done) {
185
case DISAS_LOOKUP:
38
+ finish_folding(&ctx, op);
186
- if (!ctx.base.singlestep_enabled) {
39
+ }
187
+ if (!ctx->base.singlestep_enabled) {
40
188
tcg_gen_lookup_and_goto_ptr();
41
/* Eliminate duplicate and redundant fence instructions. */
189
break;
42
if (ctx.prev_mb) {
190
}
191
/* fall through */
192
case DISAS_EXIT:
193
- if (ctx.base.singlestep_enabled) {
194
+ if (ctx->base.singlestep_enabled) {
195
gen_helper_debug(cpu_env);
196
} else {
197
tcg_gen_exit_tb(NULL, 0);
198
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
199
done_generating:
200
gen_tb_end(tb, num_insns);
201
202
- tb->size = (ctx.npc - pc_start) * 2;
203
+ tb->size = (ctx->npc - pc_start) * 2;
204
tb->icount = num_insns;
205
206
#ifdef DEBUG_DISAS
207
--
43
--
208
2.25.1
44
2.25.1
209
45
210
46
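
Reduced to its skeleton, the change on the tcg/optimize.c side introduces a "done" protocol: a folder returns true once it has fully rewritten the op, and the generic finish step runs only otherwise. A hedged sketch with stand-in types:

    #include <stdbool.h>

    typedef struct SketchOp SketchOp;
    typedef struct SketchCtx SketchCtx;

    /* Returns true if it replaced the op (e.g. with a plain copy). */
    bool sketch_fold_mov(SketchCtx *ctx, SketchOp *op);
    void sketch_finish_folding(SketchCtx *ctx, SketchOp *op);

    void sketch_process_op(SketchCtx *ctx, SketchOp *op, bool is_mov)
    {
        bool done = false;
        if (is_mov) {
            done = sketch_fold_mov(ctx, op);
        }
        if (!done) {
            sketch_finish_folding(ctx, op); /* default mask writeback */
        }
    }
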
1
We will shortly require these in other contexts;
1
This puts the separate mb optimization into the same framework
2
make the expansion as clear as possible.
2
as the others. While fold_qemu_{ld,st} are currently identical,
3
that won't last as more code gets moved.
3
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
tcg/ppc/tcg-target.c.inc | 31 +++++++++++++++++++++----------
9
tcg/optimize.c | 89 +++++++++++++++++++++++++++++---------------------
9
1 file changed, 21 insertions(+), 10 deletions(-)
10
1 file changed, 51 insertions(+), 38 deletions(-)
10
11
11
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/ppc/tcg-target.c.inc
14
--- a/tcg/optimize.c
14
+++ b/tcg/ppc/tcg-target.c.inc
15
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
16
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
17
return true;
17
}
18
}
18
19
19
+static inline void tcg_out_ext8s(TCGContext *s, TCGReg dst, TCGReg src)
20
+static bool fold_mb(OptContext *ctx, TCGOp *op)
20
+{
21
+{
21
+ tcg_out32(s, EXTSB | RA(dst) | RS(src));
22
+ /* Eliminate duplicate and redundant fence instructions. */
23
+ if (ctx->prev_mb) {
24
+ /*
25
+ * Merge two barriers of the same type into one,
26
+ * or a weaker barrier into a stronger one,
27
+ * or two weaker barriers into a stronger one.
28
+ * mb X; mb Y => mb X|Y
29
+ * mb; strl => mb; st
30
+ * ldaq; mb => ld; mb
31
+ * ldaq; strl => ld; mb; st
32
+ * Other combinations are also merged into a strong
33
+ * barrier. This is stricter than specified but for
34
+ * the purposes of TCG is better than not optimizing.
35
+ */
36
+ ctx->prev_mb->args[0] |= op->args[0];
37
+ tcg_op_remove(ctx->tcg, op);
38
+ } else {
39
+ ctx->prev_mb = op;
40
+ }
41
+ return true;
22
+}
42
+}
23
+
43
+
24
+static inline void tcg_out_ext16s(TCGContext *s, TCGReg dst, TCGReg src)
44
+static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
25
+{
45
+{
26
+ tcg_out32(s, EXTSH | RA(dst) | RS(src));
46
+ /* Opcodes that touch guest memory stop the mb optimization. */
47
+ ctx->prev_mb = NULL;
48
+ return false;
27
+}
49
+}
28
+
50
+
29
+static inline void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
51
+static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
30
+{
52
+{
31
+ tcg_out32(s, EXTSW | RA(dst) | RS(src));
53
+ /* Opcodes that touch guest memory stop the mb optimization. */
54
+ ctx->prev_mb = NULL;
55
+ return false;
32
+}
56
+}
33
+
57
+
34
static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
58
/* Propagate constants and copies, fold constant expressions. */
59
void tcg_optimize(TCGContext *s)
35
{
60
{
36
tcg_out_rld(s, RLDICL, dst, src, 0, 32);
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
62
}
38
const int const_args[TCG_MAX_OP_ARGS])
63
break;
39
{
64
40
TCGArg a0, a1, a2;
65
+ case INDEX_op_mb:
41
- int c;
66
+ done = fold_mb(&ctx, op);
42
67
+ break;
43
switch (opc) {
68
+ case INDEX_op_qemu_ld_i32:
44
case INDEX_op_exit_tb:
69
+ case INDEX_op_qemu_ld_i64:
45
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
70
+ done = fold_qemu_ld(&ctx, op);
46
case INDEX_op_ld8s_i32:
71
+ break;
47
case INDEX_op_ld8s_i64:
72
+ case INDEX_op_qemu_st_i32:
48
tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
73
+ case INDEX_op_qemu_st8_i32:
49
- tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
74
+ case INDEX_op_qemu_st_i64:
50
+ tcg_out_ext8s(s, args[0], args[0]);
75
+ done = fold_qemu_st(&ctx, op);
51
break;
76
+ break;
52
case INDEX_op_ld16u_i32:
77
+
53
case INDEX_op_ld16u_i64:
78
default:
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
79
break;
55
80
}
56
case INDEX_op_ext8s_i32:
81
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
57
case INDEX_op_ext8s_i64:
82
if (!done) {
58
- c = EXTSB;
83
finish_folding(&ctx, op);
59
- goto gen_ext;
84
}
60
+ tcg_out_ext8s(s, args[0], args[1]);
85
-
61
+ break;
86
- /* Eliminate duplicate and redundant fence instructions. */
62
case INDEX_op_ext16s_i32:
87
- if (ctx.prev_mb) {
63
case INDEX_op_ext16s_i64:
88
- switch (opc) {
64
- c = EXTSH;
89
- case INDEX_op_mb:
65
- goto gen_ext;
90
- /* Merge two barriers of the same type into one,
66
+ tcg_out_ext16s(s, args[0], args[1]);
91
- * or a weaker barrier into a stronger one,
67
+ break;
92
- * or two weaker barriers into a stronger one.
68
case INDEX_op_ext_i32_i64:
93
- * mb X; mb Y => mb X|Y
69
case INDEX_op_ext32s_i64:
94
- * mb; strl => mb; st
70
- c = EXTSW;
95
- * ldaq; mb => ld; mb
71
- goto gen_ext;
96
- * ldaq; strl => ld; mb; st
72
- gen_ext:
97
- * Other combinations are also merged into a strong
73
- tcg_out32(s, c | RS(args[1]) | RA(args[0]));
98
- * barrier. This is stricter than specified but for
74
+ tcg_out_ext32s(s, args[0], args[1]);
99
- * the purposes of TCG is better than not optimizing.
75
break;
100
- */
76
case INDEX_op_extu_i32_i64:
101
- ctx.prev_mb->args[0] |= op->args[0];
77
tcg_out_ext32u(s, args[0], args[1]);
102
- tcg_op_remove(s, op);
103
- break;
104
-
105
- default:
106
- /* Opcodes that end the block stop the optimization. */
107
- if ((def->flags & TCG_OPF_BB_END) == 0) {
108
- break;
109
- }
110
- /* fallthru */
111
- case INDEX_op_qemu_ld_i32:
112
- case INDEX_op_qemu_ld_i64:
113
- case INDEX_op_qemu_st_i32:
114
- case INDEX_op_qemu_st8_i32:
115
- case INDEX_op_qemu_st_i64:
116
- /* Opcodes that touch guest memory stop the optimization. */
117
- ctx.prev_mb = NULL;
118
- break;
119
- }
120
- } else if (opc == INDEX_op_mb) {
121
- ctx.prev_mb = op;
122
- }
123
}
124
}
78
--
125
--
79
2.25.1
126
2.25.1
80
127
81
128
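
The comment carried into fold_mb() above ("mb X; mb Y => mb X|Y") amounts to a bitwise union of barrier-type bits: merging can only strengthen a barrier, which is safe even when over-cautious. A sketch with illustrative flag values rather than the real TCG_MO_* encoding:

    /* Combine two adjacent memory barriers into one stronger one. */
    enum { SK_MO_LD_LD = 1, SK_MO_LD_ST = 2, SK_MO_ST_LD = 4, SK_MO_ST_ST = 8 };

    static unsigned sketch_merge_mb(unsigned prev_type, unsigned next_type)
    {
        return prev_type | next_type; /* mb X; mb Y => mb X|Y */
    }

This is also why fold_qemu_{ld,st} must clear prev_mb: a guest memory access between two barriers makes the merge invalid.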
1
We do not need to copy this into DisasContext.
1
Split out a whole bunch of placeholder functions, which are
2
2
currently identical. That won't last as more code gets moved.
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
4
Use CASE_32_64_VEC for some logical operators that previously
5
missed the addition of vectors.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
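
The essential logic that all of the placeholders below share is fold_const2(): if both inputs are already known constants, evaluate now and let the op degenerate into a move-immediate. As a standalone, hypothetical sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* has1/has2: whether each input is a known constant. On success
       the caller replaces the op with "movi dst, *movi_out". */
    static bool sketch_fold_const2(bool has1, uint64_t c1,
                                   bool has2, uint64_t c2,
                                   uint64_t *movi_out)
    {
        if (has1 && has2) {
            *movi_out = c1 + c2; /* stand-in for do_constant_folding() */
            return true;         /* op fully handled */
        }
        return false;            /* keep the op in the stream */
    }
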
5
---
10
---
6
target/nios2/translate.c | 73 +++++++++++++++++++---------------------
11
tcg/optimize.c | 271 +++++++++++++++++++++++++++++++++++++++----------
7
1 file changed, 34 insertions(+), 39 deletions(-)
12
1 file changed, 219 insertions(+), 52 deletions(-)
8
13
9
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
11
--- a/target/nios2/translate.c
16
--- a/tcg/optimize.c
12
+++ b/target/nios2/translate.c
17
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
14
}
15
16
typedef struct DisasContext {
17
- TCGv *cpu_R;
18
TCGv_i32 zero;
19
int is_jmp;
20
target_ulong pc;
21
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
22
bool singlestep_enabled;
23
} DisasContext;
24
25
+static TCGv cpu_R[NUM_CORE_REGS];
26
+
27
typedef struct Nios2Instruction {
28
void (*handler)(DisasContext *dc, uint32_t code, uint32_t flags);
29
uint32_t flags;
30
@@ -XXX,XX +XXX,XX @@ static TCGv load_zero(DisasContext *dc)
31
static TCGv load_gpr(DisasContext *dc, uint8_t reg)
32
{
33
if (likely(reg != R_ZERO)) {
34
- return dc->cpu_R[reg];
35
+ return cpu_R[reg];
36
} else {
37
return load_zero(dc);
38
}
39
@@ -XXX,XX +XXX,XX @@ static void t_gen_helper_raise_exception(DisasContext *dc,
40
{
41
TCGv_i32 tmp = tcg_const_i32(index);
42
43
- tcg_gen_movi_tl(dc->cpu_R[R_PC], dc->pc);
44
+ tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
45
gen_helper_raise_exception(cpu_env, tmp);
46
tcg_temp_free_i32(tmp);
47
dc->is_jmp = DISAS_NORETURN;
48
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
49
50
if (use_goto_tb(dc, dest)) {
51
tcg_gen_goto_tb(n);
52
- tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
53
+ tcg_gen_movi_tl(cpu_R[R_PC], dest);
54
tcg_gen_exit_tb(tb, n);
55
} else {
56
- tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
57
+ tcg_gen_movi_tl(cpu_R[R_PC], dest);
58
tcg_gen_exit_tb(NULL, 0);
59
}
19
}
60
}
20
}
61
@@ -XXX,XX +XXX,XX @@ static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags)
21
62
22
+/*
63
static void call(DisasContext *dc, uint32_t code, uint32_t flags)
23
+ * The fold_* functions return true when processing is complete,
24
+ * usually by folding the operation to a constant or to a copy,
25
+ * and calling tcg_opt_gen_{mov,movi}. They may do other things,
26
+ * like collect information about the value produced, for use in
27
+ * optimizing a subsequent operation.
28
+ *
29
+ * These first fold_* functions are all helpers, used by other
30
+ * folders for more specific operations.
31
+ */
32
+
33
+static bool fold_const1(OptContext *ctx, TCGOp *op)
34
+{
35
+ if (arg_is_const(op->args[1])) {
36
+ uint64_t t;
37
+
38
+ t = arg_info(op->args[1])->val;
39
+ t = do_constant_folding(op->opc, t, 0);
40
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
41
+ }
42
+ return false;
43
+}
44
+
45
+static bool fold_const2(OptContext *ctx, TCGOp *op)
46
+{
47
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
48
+ uint64_t t1 = arg_info(op->args[1])->val;
49
+ uint64_t t2 = arg_info(op->args[2])->val;
50
+
51
+ t1 = do_constant_folding(op->opc, t1, t2);
52
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
53
+ }
54
+ return false;
55
+}
56
+
57
+/*
58
+ * These outermost fold_<op> functions are sorted alphabetically.
59
+ */
60
+
61
+static bool fold_add(OptContext *ctx, TCGOp *op)
62
+{
63
+ return fold_const2(ctx, op);
64
+}
65
+
66
+static bool fold_and(OptContext *ctx, TCGOp *op)
67
+{
68
+ return fold_const2(ctx, op);
69
+}
70
+
71
+static bool fold_andc(OptContext *ctx, TCGOp *op)
72
+{
73
+ return fold_const2(ctx, op);
74
+}
75
+
76
static bool fold_call(OptContext *ctx, TCGOp *op)
64
{
77
{
65
- tcg_gen_movi_tl(dc->cpu_R[R_RA], dc->pc + 4);
78
TCGContext *s = ctx->tcg;
66
+ tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
79
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
67
jmpi(dc, code, flags);
80
return true;
68
}
81
}
69
82
70
@@ -XXX,XX +XXX,XX @@ static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags)
83
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
71
* the Nios2 CPU.
84
+{
72
*/
85
+ return fold_const1(ctx, op);
73
if (likely(instr.b != R_ZERO)) {
86
+}
74
- data = dc->cpu_R[instr.b];
87
+
75
+ data = cpu_R[instr.b];
88
+static bool fold_divide(OptContext *ctx, TCGOp *op)
76
} else {
89
+{
77
data = tcg_temp_new();
90
+ return fold_const2(ctx, op);
78
}
91
+}
79
@@ -XXX,XX +XXX,XX @@ static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
92
+
80
I_TYPE(instr, code);
93
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
81
94
+{
82
TCGLabel *l1 = gen_new_label();
95
+ return fold_const2(ctx, op);
83
- tcg_gen_brcond_tl(flags, dc->cpu_R[instr.a], dc->cpu_R[instr.b], l1);
96
+}
84
+ tcg_gen_brcond_tl(flags, cpu_R[instr.a], cpu_R[instr.b], l1);
97
+
85
gen_goto_tb(dc, 0, dc->pc + 4);
98
+static bool fold_exts(OptContext *ctx, TCGOp *op)
86
gen_set_label(l1);
99
+{
87
gen_goto_tb(dc, 1, dc->pc + 4 + (instr.imm16.s & -4));
100
+ return fold_const1(ctx, op);
88
@@ -XXX,XX +XXX,XX @@ static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
101
+}
89
static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
102
+
90
{ \
103
+static bool fold_extu(OptContext *ctx, TCGOp *op)
91
I_TYPE(instr, (code)); \
104
+{
92
- tcg_gen_setcondi_tl(flags, (dc)->cpu_R[instr.b], (dc)->cpu_R[instr.a], \
105
+ return fold_const1(ctx, op);
93
- (op3)); \
106
+}
94
+ tcg_gen_setcondi_tl(flags, cpu_R[instr.b], cpu_R[instr.a], (op3)); \
107
+
108
static bool fold_mb(OptContext *ctx, TCGOp *op)
109
{
110
/* Eliminate duplicate and redundant fence instructions. */
111
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
112
return true;
95
}
113
}
96
114
97
gen_i_cmpxx(gen_cmpxxsi, instr.imm16.s)
115
+static bool fold_mul(OptContext *ctx, TCGOp *op)
98
@@ -XXX,XX +XXX,XX @@ static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
116
+{
99
if (unlikely(instr.b == R_ZERO)) { /* Store to R_ZERO is ignored */ \
117
+ return fold_const2(ctx, op);
100
return; \
118
+}
101
} else if (instr.a == R_ZERO) { /* MOVxI optimizations */ \
119
+
102
- tcg_gen_movi_tl(dc->cpu_R[instr.b], (resimm) ? (op3) : 0); \
120
+static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
103
+ tcg_gen_movi_tl(cpu_R[instr.b], (resimm) ? (op3) : 0); \
121
+{
104
} else { \
122
+ return fold_const2(ctx, op);
105
- tcg_gen_##insn##_tl((dc)->cpu_R[instr.b], (dc)->cpu_R[instr.a], \
123
+}
106
- (op3)); \
124
+
107
+ tcg_gen_##insn##_tl(cpu_R[instr.b], cpu_R[instr.a], (op3)); \
125
+static bool fold_nand(OptContext *ctx, TCGOp *op)
108
} \
126
+{
127
+ return fold_const2(ctx, op);
128
+}
129
+
130
+static bool fold_neg(OptContext *ctx, TCGOp *op)
131
+{
132
+ return fold_const1(ctx, op);
133
+}
134
+
135
+static bool fold_nor(OptContext *ctx, TCGOp *op)
136
+{
137
+ return fold_const2(ctx, op);
138
+}
139
+
140
+static bool fold_not(OptContext *ctx, TCGOp *op)
141
+{
142
+ return fold_const1(ctx, op);
143
+}
144
+
145
+static bool fold_or(OptContext *ctx, TCGOp *op)
146
+{
147
+ return fold_const2(ctx, op);
148
+}
149
+
150
+static bool fold_orc(OptContext *ctx, TCGOp *op)
151
+{
152
+ return fold_const2(ctx, op);
153
+}
154
+
155
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
156
{
157
/* Opcodes that touch guest memory stop the mb optimization. */
158
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
159
return false;
109
}
160
}
110
161
111
@@ -XXX,XX +XXX,XX @@ static const Nios2Instruction i_type_instructions[] = {
162
+static bool fold_remainder(OptContext *ctx, TCGOp *op)
112
*/
163
+{
113
static void eret(DisasContext *dc, uint32_t code, uint32_t flags)
164
+ return fold_const2(ctx, op);
165
+}
166
+
167
+static bool fold_shift(OptContext *ctx, TCGOp *op)
168
+{
169
+ return fold_const2(ctx, op);
170
+}
171
+
172
+static bool fold_sub(OptContext *ctx, TCGOp *op)
173
+{
174
+ return fold_const2(ctx, op);
175
+}
176
+
177
+static bool fold_xor(OptContext *ctx, TCGOp *op)
178
+{
179
+ return fold_const2(ctx, op);
180
+}
181
+
182
/* Propagate constants and copies, fold constant expressions. */
183
void tcg_optimize(TCGContext *s)
114
{
184
{
115
- tcg_gen_mov_tl(dc->cpu_R[CR_STATUS], dc->cpu_R[CR_ESTATUS]);
185
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
116
- tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_EA]);
186
}
117
+ tcg_gen_mov_tl(cpu_R[CR_STATUS], cpu_R[CR_ESTATUS]);
187
break;
118
+ tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_EA]);
188
119
189
- CASE_OP_32_64(not):
120
dc->is_jmp = DISAS_JUMP;
190
- CASE_OP_32_64(neg):
121
}
191
- CASE_OP_32_64(ext8s):
122
@@ -XXX,XX +XXX,XX @@ static void eret(DisasContext *dc, uint32_t code, uint32_t flags)
192
- CASE_OP_32_64(ext8u):
123
/* PC <- ra */
193
- CASE_OP_32_64(ext16s):
124
static void ret(DisasContext *dc, uint32_t code, uint32_t flags)
194
- CASE_OP_32_64(ext16u):
125
{
195
- CASE_OP_32_64(ctpop):
126
- tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_RA]);
196
- case INDEX_op_ext32s_i64:
127
+ tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_RA]);
197
- case INDEX_op_ext32u_i64:
128
198
- case INDEX_op_ext_i32_i64:
129
dc->is_jmp = DISAS_JUMP;
199
- case INDEX_op_extu_i32_i64:
130
}
200
- case INDEX_op_extrl_i64_i32:
131
@@ -XXX,XX +XXX,XX @@ static void ret(DisasContext *dc, uint32_t code, uint32_t flags)
201
- case INDEX_op_extrh_i64_i32:
132
/* PC <- ba */
202
- if (arg_is_const(op->args[1])) {
133
static void bret(DisasContext *dc, uint32_t code, uint32_t flags)
203
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
134
{
204
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
135
- tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_BA]);
205
- continue;
136
+ tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_BA]);
206
- }
137
207
- break;
138
dc->is_jmp = DISAS_JUMP;
208
-
139
}
209
CASE_OP_32_64(bswap16):
140
@@ -XXX,XX +XXX,XX @@ static void jmp(DisasContext *dc, uint32_t code, uint32_t flags)
210
CASE_OP_32_64(bswap32):
141
{
211
case INDEX_op_bswap64_i64:
142
R_TYPE(instr, code);
212
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
143
213
}
144
- tcg_gen_mov_tl(dc->cpu_R[R_PC], load_gpr(dc, instr.a));
214
break;
145
+ tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
215
146
216
- CASE_OP_32_64(add):
147
dc->is_jmp = DISAS_JUMP;
217
- CASE_OP_32_64(sub):
148
}
218
- CASE_OP_32_64(mul):
149
@@ -XXX,XX +XXX,XX @@ static void nextpc(DisasContext *dc, uint32_t code, uint32_t flags)
219
- CASE_OP_32_64(or):
150
R_TYPE(instr, code);
220
- CASE_OP_32_64(and):
151
221
- CASE_OP_32_64(xor):
152
if (likely(instr.c != R_ZERO)) {
222
- CASE_OP_32_64(shl):
153
- tcg_gen_movi_tl(dc->cpu_R[instr.c], dc->pc + 4);
223
- CASE_OP_32_64(shr):
154
+ tcg_gen_movi_tl(cpu_R[instr.c], dc->pc + 4);
224
- CASE_OP_32_64(sar):
155
}
225
- CASE_OP_32_64(rotl):
156
}
226
- CASE_OP_32_64(rotr):
157
227
- CASE_OP_32_64(andc):
158
@@ -XXX,XX +XXX,XX @@ static void callr(DisasContext *dc, uint32_t code, uint32_t flags)
228
- CASE_OP_32_64(orc):
159
{
229
- CASE_OP_32_64(eqv):
160
R_TYPE(instr, code);
230
- CASE_OP_32_64(nand):
161
231
- CASE_OP_32_64(nor):
162
- tcg_gen_mov_tl(dc->cpu_R[R_PC], load_gpr(dc, instr.a));
232
- CASE_OP_32_64(muluh):
163
- tcg_gen_movi_tl(dc->cpu_R[R_RA], dc->pc + 4);
233
- CASE_OP_32_64(mulsh):
164
+ tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
234
- CASE_OP_32_64(div):
165
+ tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
235
- CASE_OP_32_64(divu):
166
236
- CASE_OP_32_64(rem):
167
dc->is_jmp = DISAS_JUMP;
237
- CASE_OP_32_64(remu):
168
}
238
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
169
@@ -XXX,XX +XXX,XX @@ static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags)
239
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
170
{
240
- arg_info(op->args[2])->val);
171
#if !defined(CONFIG_USER_ONLY)
241
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
172
if (likely(instr.c != R_ZERO)) {
242
- continue;
173
- tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]);
243
- }
174
+ tcg_gen_mov_tl(cpu_R[instr.c], cpu_R[instr.imm5 + CR_BASE]);
244
- break;
175
#ifdef DEBUG_MMU
245
-
176
TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
246
CASE_OP_32_64(clz):
177
- gen_helper_mmu_read_debug(dc->cpu_R[instr.c], cpu_env, tmp);
247
CASE_OP_32_64(ctz):
178
+ gen_helper_mmu_read_debug(cpu_R[instr.c], cpu_env, tmp);
248
if (arg_is_const(op->args[1])) {
179
tcg_temp_free_i32(tmp);
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
180
#endif
250
}
251
break;
252
253
+ default:
254
+ break;
255
+
256
+ /* ---------------------------------------------------------- */
257
+ /* Sorted alphabetically by opcode as much as possible. */
258
+
259
+ CASE_OP_32_64_VEC(add):
260
+ done = fold_add(&ctx, op);
261
+ break;
262
+ CASE_OP_32_64_VEC(and):
263
+ done = fold_and(&ctx, op);
264
+ break;
265
+ CASE_OP_32_64_VEC(andc):
266
+ done = fold_andc(&ctx, op);
267
+ break;
268
+ CASE_OP_32_64(ctpop):
269
+ done = fold_ctpop(&ctx, op);
270
+ break;
271
+ CASE_OP_32_64(div):
272
+ CASE_OP_32_64(divu):
273
+ done = fold_divide(&ctx, op);
274
+ break;
275
+ CASE_OP_32_64(eqv):
276
+ done = fold_eqv(&ctx, op);
277
+ break;
278
+ CASE_OP_32_64(ext8s):
279
+ CASE_OP_32_64(ext16s):
280
+ case INDEX_op_ext32s_i64:
281
+ case INDEX_op_ext_i32_i64:
282
+ done = fold_exts(&ctx, op);
283
+ break;
284
+ CASE_OP_32_64(ext8u):
285
+ CASE_OP_32_64(ext16u):
286
+ case INDEX_op_ext32u_i64:
287
+ case INDEX_op_extu_i32_i64:
288
+ case INDEX_op_extrl_i64_i32:
289
+ case INDEX_op_extrh_i64_i32:
290
+ done = fold_extu(&ctx, op);
291
+ break;
292
case INDEX_op_mb:
293
done = fold_mb(&ctx, op);
294
break;
295
+ CASE_OP_32_64(mul):
296
+ done = fold_mul(&ctx, op);
297
+ break;
298
+ CASE_OP_32_64(mulsh):
299
+ CASE_OP_32_64(muluh):
300
+ done = fold_mul_highpart(&ctx, op);
301
+ break;
302
+ CASE_OP_32_64(nand):
303
+ done = fold_nand(&ctx, op);
304
+ break;
305
+ CASE_OP_32_64(neg):
306
+ done = fold_neg(&ctx, op);
307
+ break;
308
+ CASE_OP_32_64(nor):
309
+ done = fold_nor(&ctx, op);
310
+ break;
311
+ CASE_OP_32_64_VEC(not):
312
+ done = fold_not(&ctx, op);
313
+ break;
314
+ CASE_OP_32_64_VEC(or):
315
+ done = fold_or(&ctx, op);
316
+ break;
317
+ CASE_OP_32_64_VEC(orc):
318
+ done = fold_orc(&ctx, op);
319
+ break;
320
case INDEX_op_qemu_ld_i32:
321
case INDEX_op_qemu_ld_i64:
322
done = fold_qemu_ld(&ctx, op);
323
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
324
case INDEX_op_qemu_st_i64:
325
done = fold_qemu_st(&ctx, op);
326
break;
327
-
328
- default:
329
+ CASE_OP_32_64(rem):
330
+ CASE_OP_32_64(remu):
331
+ done = fold_remainder(&ctx, op);
332
+ break;
333
+ CASE_OP_32_64(rotl):
334
+ CASE_OP_32_64(rotr):
335
+ CASE_OP_32_64(sar):
336
+ CASE_OP_32_64(shl):
337
+ CASE_OP_32_64(shr):
338
+ done = fold_shift(&ctx, op);
339
+ break;
340
+ CASE_OP_32_64_VEC(sub):
341
+ done = fold_sub(&ctx, op);
342
+ break;
343
+ CASE_OP_32_64_VEC(xor):
344
+ done = fold_xor(&ctx, op);
345
break;
181
}
346
}
182
@@ -XXX,XX +XXX,XX @@ static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags)
347
183
184
default:
185
if (likely(instr.c != R_ZERO)) {
186
- tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]);
187
+ tcg_gen_mov_tl(cpu_R[instr.c], cpu_R[instr.imm5 + CR_BASE]);
188
}
189
break;
190
}
191
@@ -XXX,XX +XXX,XX @@ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags)
192
}
193
194
default:
195
- tcg_gen_mov_tl(dc->cpu_R[instr.imm5 + CR_BASE], load_gpr(dc, instr.a));
196
+ tcg_gen_mov_tl(cpu_R[instr.imm5 + CR_BASE], load_gpr(dc, instr.a));
197
break;
198
}
199
200
@@ -XXX,XX +XXX,XX @@ static void gen_cmpxx(DisasContext *dc, uint32_t code, uint32_t flags)
201
{
202
R_TYPE(instr, code);
203
if (likely(instr.c != R_ZERO)) {
204
- tcg_gen_setcond_tl(flags, dc->cpu_R[instr.c], dc->cpu_R[instr.a],
205
- dc->cpu_R[instr.b]);
206
+ tcg_gen_setcond_tl(flags, cpu_R[instr.c], cpu_R[instr.a],
207
+ cpu_R[instr.b]);
208
}
209
}
210
211
@@ -XXX,XX +XXX,XX @@ static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
212
{ \
213
R_TYPE(instr, (code)); \
214
if (likely(instr.c != R_ZERO)) { \
215
- tcg_gen_##insn((dc)->cpu_R[instr.c], load_gpr((dc), instr.a), \
216
- (op3)); \
217
+ tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), (op3)); \
218
} \
219
}
220
221
@@ -XXX,XX +XXX,XX @@ static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
222
R_TYPE(instr, (code)); \
223
if (likely(instr.c != R_ZERO)) { \
224
TCGv t0 = tcg_temp_new(); \
225
- tcg_gen_##insn(t0, dc->cpu_R[instr.c], \
226
- load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \
227
+ tcg_gen_##insn(t0, cpu_R[instr.c], \
228
+ load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \
229
tcg_temp_free(t0); \
230
} \
231
}
232
@@ -XXX,XX +XXX,XX @@ static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
233
if (likely(instr.c != R_ZERO)) { \
234
TCGv t0 = tcg_temp_new(); \
235
tcg_gen_andi_tl(t0, load_gpr((dc), instr.b), 31); \
236
- tcg_gen_##insn((dc)->cpu_R[instr.c], load_gpr((dc), instr.a), t0); \
237
+ tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), t0); \
238
tcg_temp_free(t0); \
239
} \
240
}
241
@@ -XXX,XX +XXX,XX @@ static void divs(DisasContext *dc, uint32_t code, uint32_t flags)
242
tcg_gen_or_tl(t2, t2, t3);
243
tcg_gen_movi_tl(t3, 0);
244
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
245
- tcg_gen_div_tl(dc->cpu_R[instr.c], t0, t1);
246
- tcg_gen_ext32s_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.c]);
247
+ tcg_gen_div_tl(cpu_R[instr.c], t0, t1);
248
+ tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]);
249
250
tcg_temp_free(t3);
251
tcg_temp_free(t2);
252
@@ -XXX,XX +XXX,XX @@ static void divu(DisasContext *dc, uint32_t code, uint32_t flags)
253
tcg_gen_ext32u_tl(t0, load_gpr(dc, instr.a));
254
tcg_gen_ext32u_tl(t1, load_gpr(dc, instr.b));
255
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
256
- tcg_gen_divu_tl(dc->cpu_R[instr.c], t0, t1);
257
- tcg_gen_ext32s_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.c]);
258
+ tcg_gen_divu_tl(cpu_R[instr.c], t0, t1);
259
+ tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]);
260
261
tcg_temp_free(t3);
262
tcg_temp_free(t2);
263
@@ -XXX,XX +XXX,XX @@ static const char * const regnames[] = {
264
"rpc"
265
};
266
267
-static TCGv cpu_R[NUM_CORE_REGS];
268
-
269
#include "exec/gen-icount.h"
270
271
static void gen_exception(DisasContext *dc, uint32_t excp)
272
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
273
int num_insns;
274
275
/* Initialize DC */
276
- dc->cpu_R = cpu_R;
277
dc->is_jmp = DISAS_NEXT;
278
dc->pc = tb->pc;
279
dc->tb = tb;
280
--
348
--
281
2.25.1
349
2.25.1
282
350
283
351
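
For reference, the CASE_OP_32_64_VEC() labels used in the dispatch above group the i32, i64 and vector flavors of one operation under a single arm; schematically the macro expands along these lines (a sketch of the pattern, not the exact definition in tcg/optimize.c):

    #define SKETCH_CASE_OP_32_64_VEC(name) \
        case INDEX_op_##name##_i32:        \
        case INDEX_op_##name##_i64:        \
        case INDEX_op_##name##_vec
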
1
The existing interpreter zero-extends, ignoring high bits.
1
Reduce some code duplication by folding the NE and EQ cases.
2
Simply add a separate sign-extension opcode if required.
3
Ensure that the interpreter supports ext16s when bswap16 is enabled.
4
2
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/tci.c | 3 ++-
7
tcg/optimize.c | 145 ++++++++++++++++++++++++-------------------------
9
tcg/tci/tcg-target.c.inc | 23 ++++++++++++++++++++---
8
1 file changed, 72 insertions(+), 73 deletions(-)
10
2 files changed, 22 insertions(+), 4 deletions(-)
11
9
12
diff --git a/tcg/tci.c b/tcg/tci.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/tci.c
12
--- a/tcg/optimize.c
15
+++ b/tcg/tci.c
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
17
regs[r0] = (int8_t)regs[r1];
15
return fold_const2(ctx, op);
18
break;
16
}
19
#endif
17
20
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
18
+static bool fold_setcond2(OptContext *ctx, TCGOp *op)
21
+#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
19
+{
22
+ TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
20
+ TCGCond cond = op->args[5];
23
CASE_32_64(ext16s)
21
+ int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
24
tci_args_rr(insn, &r0, &r1);
22
+ int inv = 0;
25
regs[r0] = (int16_t)regs[r1];
26
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
27
index XXXXXXX..XXXXXXX 100644
28
--- a/tcg/tci/tcg-target.c.inc
29
+++ b/tcg/tci/tcg-target.c.inc
30
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
31
const TCGArg args[TCG_MAX_OP_ARGS],
32
const int const_args[TCG_MAX_OP_ARGS])
33
{
34
+ TCGOpcode exts;
35
+
23
+
36
switch (opc) {
24
+ if (i >= 0) {
37
case INDEX_op_exit_tb:
25
+ goto do_setcond_const;
38
tcg_out_op_p(s, opc, (void *)args[0]);
26
+ }
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
27
+
40
CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */
28
+ switch (cond) {
41
CASE_64(ext_i32)
29
+ case TCG_COND_LT:
42
CASE_64(extu_i32)
30
+ case TCG_COND_GE:
43
- CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */
31
+ /*
44
- CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */
32
+ * Simplify LT/GE comparisons vs zero to a single compare
45
- CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */
33
+ * vs the high word of the input.
46
CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */
34
+ */
47
+ case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
35
+ if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
48
+ case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
36
+ arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
49
tcg_out_op_rr(s, opc, args[0], args[1]);
37
+ goto do_setcond_high;
50
break;
51
52
+ case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
53
+ exts = INDEX_op_ext16s_i32;
54
+ goto do_bswap;
55
+ case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
56
+ exts = INDEX_op_ext16s_i64;
57
+ goto do_bswap;
58
+ case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
59
+ exts = INDEX_op_ext32s_i64;
60
+ do_bswap:
61
+ /* The base tci bswaps zero-extend, and ignore high bits. */
62
+ tcg_out_op_rr(s, opc, args[0], args[1]);
63
+ if (args[2] & TCG_BSWAP_OS) {
64
+ tcg_out_op_rr(s, exts, args[0], args[0]);
65
+ }
38
+ }
66
+ break;
39
+ break;
67
+
40
+
68
CASE_32_64(add2)
41
+ case TCG_COND_NE:
69
CASE_32_64(sub2)
42
+ inv = 1;
70
tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
43
+ QEMU_FALLTHROUGH;
44
+ case TCG_COND_EQ:
45
+ /*
46
+ * Simplify EQ/NE comparisons where one of the pairs
47
+ * can be simplified.
48
+ */
49
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
50
+ op->args[3], cond);
51
+ switch (i ^ inv) {
52
+ case 0:
53
+ goto do_setcond_const;
54
+ case 1:
55
+ goto do_setcond_high;
56
+ }
57
+
58
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
59
+ op->args[4], cond);
60
+ switch (i ^ inv) {
61
+ case 0:
62
+ goto do_setcond_const;
63
+ case 1:
64
+ op->args[2] = op->args[3];
65
+ op->args[3] = cond;
66
+ op->opc = INDEX_op_setcond_i32;
67
+ break;
68
+ }
69
+ break;
70
+
71
+ default:
72
+ break;
73
+
74
+ do_setcond_high:
75
+ op->args[1] = op->args[2];
76
+ op->args[2] = op->args[4];
77
+ op->args[3] = cond;
78
+ op->opc = INDEX_op_setcond_i32;
79
+ break;
80
+ }
81
+ return false;
82
+
83
+ do_setcond_const:
84
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
85
+}
86
+
87
static bool fold_shift(OptContext *ctx, TCGOp *op)
88
{
89
return fold_const2(ctx, op);
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
}
92
break;
93
94
- case INDEX_op_setcond2_i32:
95
- i = do_constant_folding_cond2(&op->args[1], &op->args[3],
96
- op->args[5]);
97
- if (i >= 0) {
98
- do_setcond_const:
99
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
100
- continue;
101
- }
102
- if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
103
- && arg_is_const(op->args[3])
104
- && arg_info(op->args[3])->val == 0
105
- && arg_is_const(op->args[4])
106
- && arg_info(op->args[4])->val == 0) {
107
- /* Simplify LT/GE comparisons vs zero to a single compare
108
- vs the high word of the input. */
109
- do_setcond_high:
110
- reset_temp(op->args[0]);
111
- arg_info(op->args[0])->z_mask = 1;
112
- op->opc = INDEX_op_setcond_i32;
113
- op->args[1] = op->args[2];
114
- op->args[2] = op->args[4];
115
- op->args[3] = op->args[5];
116
- break;
117
- }
118
- if (op->args[5] == TCG_COND_EQ) {
119
- /* Simplify EQ comparisons where one of the pairs
120
- can be simplified. */
121
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
122
- op->args[1], op->args[3],
123
- TCG_COND_EQ);
124
- if (i == 0) {
125
- goto do_setcond_const;
126
- } else if (i > 0) {
127
- goto do_setcond_high;
128
- }
129
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
130
- op->args[2], op->args[4],
131
- TCG_COND_EQ);
132
- if (i == 0) {
133
- goto do_setcond_high;
134
- } else if (i < 0) {
135
- break;
136
- }
137
- do_setcond_low:
138
- reset_temp(op->args[0]);
139
- arg_info(op->args[0])->z_mask = 1;
140
- op->opc = INDEX_op_setcond_i32;
141
- op->args[2] = op->args[3];
142
- op->args[3] = op->args[5];
143
- break;
144
- }
145
- if (op->args[5] == TCG_COND_NE) {
146
- /* Simplify NE comparisons where one of the pairs
147
- can be simplified. */
148
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
149
- op->args[1], op->args[3],
150
- TCG_COND_NE);
151
- if (i == 0) {
152
- goto do_setcond_high;
153
- } else if (i > 0) {
154
- goto do_setcond_const;
155
- }
156
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
157
- op->args[2], op->args[4],
158
- TCG_COND_NE);
159
- if (i == 0) {
160
- goto do_setcond_low;
161
- } else if (i > 0) {
162
- goto do_setcond_const;
163
- }
164
- }
165
- break;
166
-
167
default:
168
break;
169
170
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
171
CASE_OP_32_64(shr):
172
done = fold_shift(&ctx, op);
173
break;
174
+ case INDEX_op_setcond2_i32:
175
+ done = fold_setcond2(&ctx, op);
176
+ break;
177
CASE_OP_32_64_VEC(sub):
178
done = fold_sub(&ctx, op);
179
break;
71
--
180
--
72
2.25.1
181
2.25.1
73
182
74
183
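
The LT/GE-versus-zero shortcut in fold_setcond2() above rests on a simple property of double-word signed values: the sign is carried entirely by the high word. In standalone form:

    #include <stdbool.h>
    #include <stdint.h>

    /* For a signed 64-bit value split as (hi, lo): x < 0 iff hi < 0.
       The low word never affects the sign. */
    static bool sketch_lt_zero(uint32_t hi, uint32_t lo)
    {
        (void)lo;
        return (int32_t)hi < 0;
    }

Hence setcond2(LT, x, 0) can become a single 32-bit setcond on the high word, which is exactly what the do_setcond_high label rewrites the op into.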
1
For INDEX_op_bswap16_i64, use 64-bit instructions so that we can
1
Reduce some code duplication by folding the NE and EQ cases.
2
easily provide the extension to 64 bits. Drop the special case,
3
previously used, where the input is already zero-extended -- the
4
minor code size savings are not worth the complication.
5
2
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
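
The two output conventions the backend is asked for here (TCG_BSWAP_OS: sign-extend the swapped halfword, TCG_BSWAP_OZ: zero-extend it) behave as in this sketch; the flag names are real, the helpers are only illustrative:

    #include <stdint.h>

    static uint64_t sketch_bswap16_oz(uint64_t x) /* zero-extend */
    {
        return (uint16_t)((x << 8) | ((x >> 8) & 0xff));
    }

    static uint64_t sketch_bswap16_os(uint64_t x) /* sign-extend */
    {
        return (uint64_t)(int64_t)(int16_t)sketch_bswap16_oz(x);
    }
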
8
---
5
---
9
tcg/s390/tcg-target.c.inc | 34 ++++++++++++++++++++++++++++------
6
tcg/optimize.c | 159 +++++++++++++++++++++++++------------------------
10
1 file changed, 28 insertions(+), 6 deletions(-)
7
1 file changed, 81 insertions(+), 78 deletions(-)
11
8
12
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/s390/tcg-target.c.inc
11
--- a/tcg/optimize.c
15
+++ b/tcg/s390/tcg-target.c.inc
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
17
tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
14
return fold_const2(ctx, op);
18
break;
15
}
19
16
20
- OP_32_64(bswap16):
17
+static bool fold_brcond2(OptContext *ctx, TCGOp *op)
21
- /* The TCG bswap definition requires bits 0-47 already be zero.
18
+{
22
- Thus we don't need the G-type insns to implement bswap16_i64. */
19
+ TCGCond cond = op->args[4];
23
- tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
20
+ int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
24
- tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
21
+ TCGArg label = op->args[5];
25
+ case INDEX_op_bswap16_i32:
22
+ int inv = 0;
26
+ a0 = args[0], a1 = args[1], a2 = args[2];
23
+
27
+ tcg_out_insn(s, RRE, LRVR, a0, a1);
24
+ if (i >= 0) {
28
+ if (a2 & TCG_BSWAP_OS) {
25
+ goto do_brcond_const;
29
+ tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
26
+ }
30
+ } else {
27
+
31
+ tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
28
+ switch (cond) {
32
+ }
29
+ case TCG_COND_LT:
33
break;
30
+ case TCG_COND_GE:
34
- OP_32_64(bswap32):
31
+ /*
35
+ case INDEX_op_bswap16_i64:
32
+ * Simplify LT/GE comparisons vs zero to a single compare
36
+ a0 = args[0], a1 = args[1], a2 = args[2];
33
+ * vs the high word of the input.
37
+ tcg_out_insn(s, RRE, LRVGR, a0, a1);
34
+ */
38
+ if (a2 & TCG_BSWAP_OS) {
35
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
39
+ tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
36
+ arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
40
+ } else {
37
+ goto do_brcond_high;
41
+ tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
42
+ }
38
+ }
43
+ break;
39
+ break;
44
+
40
+
45
+ case INDEX_op_bswap32_i32:
41
+ case TCG_COND_NE:
46
tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
42
+ inv = 1;
47
break;
43
+ QEMU_FALLTHROUGH;
48
+ case INDEX_op_bswap32_i64:
44
+ case TCG_COND_EQ:
49
+ a0 = args[0], a1 = args[1], a2 = args[2];
45
+ /*
50
+ tcg_out_insn(s, RRE, LRVR, a0, a1);
46
+ * Simplify EQ/NE comparisons where one of the pairs
51
+ if (a2 & TCG_BSWAP_OS) {
47
+ * can be simplified.
52
+ tgen_ext32s(s, a0, a0);
48
+ */
53
+ } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
49
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
54
+ tgen_ext32u(s, a0, a0);
50
+ op->args[2], cond);
51
+ switch (i ^ inv) {
52
+ case 0:
53
+ goto do_brcond_const;
+        case 1:
+            goto do_brcond_high;
+        }
+
+        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
+                                     op->args[3], cond);
+        switch (i ^ inv) {
+        case 0:
+            goto do_brcond_const;
+        case 1:
+            op->opc = INDEX_op_brcond_i32;
+            op->args[1] = op->args[2];
+            op->args[2] = cond;
+            op->args[3] = label;
+            break;
+        }
+        break;
+
+    default:
+        break;
+
+    do_brcond_high:
+        op->opc = INDEX_op_brcond_i32;
+        op->args[0] = op->args[1];
+        op->args[1] = op->args[3];
+        op->args[2] = cond;
+        op->args[3] = label;
+        break;
+
+    do_brcond_const:
+        if (i == 0) {
+            tcg_op_remove(ctx->tcg, op);
+            return true;
+        }
+        op->opc = INDEX_op_br;
+        op->args[0] = label;
+        break;
+    }
+    return false;
+}
+
 static bool fold_call(OptContext *ctx, TCGOp *op)
 {
     TCGContext *s = ctx->tcg;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
-        case INDEX_op_brcond2_i32:
-            i = do_constant_folding_cond2(&op->args[0], &op->args[2],
-                                          op->args[4]);
-            if (i == 0) {
-    do_brcond_false:
-                tcg_op_remove(s, op);
-                continue;
-            }
-            if (i > 0) {
-    do_brcond_true:
-                op->opc = opc = INDEX_op_br;
-                op->args[0] = op->args[5];
-                break;
-            }
-            if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0
-                && arg_is_const(op->args[3])
-                && arg_info(op->args[3])->val == 0) {
-                /* Simplify LT/GE comparisons vs zero to a single compare
-                   vs the high word of the input. */
-    do_brcond_high:
-                op->opc = opc = INDEX_op_brcond_i32;
-                op->args[0] = op->args[1];
-                op->args[1] = op->args[3];
-                op->args[2] = op->args[4];
-                op->args[3] = op->args[5];
-                break;
-            }
-            if (op->args[4] == TCG_COND_EQ) {
-                /* Simplify EQ comparisons where one of the pairs
-                   can be simplified. */
-                i = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                             op->args[0], op->args[2],
-                                             TCG_COND_EQ);
-                if (i == 0) {
-                    goto do_brcond_false;
-                } else if (i > 0) {
-                    goto do_brcond_high;
-                }
-                i = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                             op->args[1], op->args[3],
-                                             TCG_COND_EQ);
-                if (i == 0) {
-                    goto do_brcond_false;
-                } else if (i < 0) {
-                    break;
-                }
-    do_brcond_low:
-                memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
-                op->opc = INDEX_op_brcond_i32;
-                op->args[1] = op->args[2];
-                op->args[2] = op->args[4];
-                op->args[3] = op->args[5];
-                break;
-            }
-            if (op->args[4] == TCG_COND_NE) {
-                /* Simplify NE comparisons where one of the pairs
-                   can be simplified. */
-                i = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                             op->args[0], op->args[2],
-                                             TCG_COND_NE);
-                if (i == 0) {
-                    goto do_brcond_high;
-                } else if (i > 0) {
-                    goto do_brcond_true;
-                }
-                i = do_constant_folding_cond(INDEX_op_brcond_i32,
-                                             op->args[1], op->args[3],
-                                             TCG_COND_NE);
-                if (i == 0) {
-                    goto do_brcond_low;
-                } else if (i > 0) {
-                    goto do_brcond_true;
-                }
-            }
-            break;
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(andc):
             done = fold_andc(&ctx, op);
             break;
+        case INDEX_op_brcond2_i32:
+            done = fold_brcond2(&ctx, op);
+            break;
         CASE_OP_32_64(ctpop):
             done = fold_ctpop(&ctx, op);
             break;
--
2.25.1
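For illustration only, and not part of the series: the double-word
simplifications that fold_brcond2 performs can be sanity-checked in plain C.
The helper name eq2 below is invented for this sketch.

#include <assert.h>
#include <stdint.h>

/* A 64-bit comparison expressed on 32-bit halves, as brcond2_i32 sees it. */
static int eq2(uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh)
{
    return al == bl && ah == bh;
}

int main(void)
{
    /* EQ reduces to the other half once one half is known equal ... */
    assert(eq2(1, 7, 1, 7));
    /* ... and is known false once either half is known unequal. */
    assert(!eq2(1, 7, 1, 8));

    /* Signed LT/GE against zero depends only on the high half
       (arithmetic right shift assumed, as QEMU itself assumes). */
    int64_t v = -42;
    assert((v < 0) == ((int32_t)(v >> 32) < 0));
    return 0;
}
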
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_brcond(OptContext *ctx, TCGOp *op)
+{
+    TCGCond cond = op->args[2];
+    int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
+
+    if (i == 0) {
+        tcg_op_remove(ctx->tcg, op);
+        return true;
+    }
+    if (i > 0) {
+        op->opc = INDEX_op_br;
+        op->args[0] = op->args[3];
+    }
+    return false;
+}
+
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[4];
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        CASE_OP_32_64(brcond):
-            i = do_constant_folding_cond(opc, op->args[0],
-                                         op->args[1], op->args[2]);
-            if (i == 0) {
-                tcg_op_remove(s, op);
-                continue;
-            } else if (i > 0) {
-                memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
-                op->opc = opc = INDEX_op_br;
-                op->args[0] = op->args[3];
-                break;
-            }
-            break;
-
         CASE_OP_32_64(movcond):
             i = do_constant_folding_cond(opc, op->args[1],
                                          op->args[2], op->args[5]);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(andc):
             done = fold_andc(&ctx, op);
             break;
+        CASE_OP_32_64(brcond):
+            done = fold_brcond(&ctx, op);
+            break;
         case INDEX_op_brcond2_i32:
             done = fold_brcond2(&ctx, op);
             break;
--
2.25.1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_setcond(OptContext *ctx, TCGOp *op)
+{
+    TCGCond cond = op->args[3];
+    int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
+
+    if (i >= 0) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+    }
+    return false;
+}
+
 static bool fold_setcond2(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[5];
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        CASE_OP_32_64(setcond):
-            i = do_constant_folding_cond(opc, op->args[1],
-                                         op->args[2], op->args[3]);
-            if (i >= 0) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0], i);
-                continue;
-            }
-            break;
-
         CASE_OP_32_64(movcond):
             i = do_constant_folding_cond(opc, op->args[1],
                                          op->args[2], op->args[5]);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(shr):
             done = fold_shift(&ctx, op);
             break;
+        CASE_OP_32_64(setcond):
+            done = fold_setcond(&ctx, op);
+            break;
         case INDEX_op_setcond2_i32:
             done = fold_setcond2(&ctx, op);
             break;
--
2.25.1
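Purely as an illustration of the folding above (a sketch, not QEMU code,
with an invented fold_cond_const helper): when both inputs of a setcond are
compile-time constants, the 0/1 result is itself a constant.

#include <assert.h>
#include <stdint.h>

enum cond { COND_EQ, COND_NE, COND_LT, COND_LTU };

/* Evaluate a comparison at "translation time", yielding 0 or 1. */
static int fold_cond_const(uint64_t x, uint64_t y, enum cond c)
{
    switch (c) {
    case COND_EQ:  return x == y;
    case COND_NE:  return x != y;
    case COND_LT:  return (int64_t)x < (int64_t)y;  /* signed */
    default:       return x < y;                    /* unsigned */
    }
}

int main(void)
{
    assert(fold_cond_const(5, 5, COND_EQ) == 1);
    assert(fold_cond_const((uint64_t)-1, 0, COND_LT) == 1);
    assert(fold_cond_const((uint64_t)-1, 0, COND_LTU) == 0);
    return 0;
}
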
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
+        uint32_t a = arg_info(op->args[2])->val;
+        uint32_t b = arg_info(op->args[3])->val;
+        uint64_t r = (uint64_t)a * b;
+        TCGArg rl, rh;
+        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+
+        rl = op->args[0];
+        rh = op->args[1];
+        tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
+        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
+        return true;
+    }
+    return false;
+}
+
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        case INDEX_op_mulu2_i32:
-            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
-                uint32_t a = arg_info(op->args[2])->val;
-                uint32_t b = arg_info(op->args[3])->val;
-                uint64_t r = (uint64_t)a * b;
-                TCGArg rl, rh;
-                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
-
-                rl = op->args[0];
-                rh = op->args[1];
-                tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
-                tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
-                continue;
-            }
-            break;
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(muluh):
             done = fold_mul_highpart(&ctx, op);
             break;
+        case INDEX_op_mulu2_i32:
+            done = fold_mulu2_i32(&ctx, op);
+            break;
         CASE_OP_32_64(nand):
             done = fold_nand(&ctx, op);
             break;
--
2.25.1
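A standalone check of the arithmetic folded above (illustrative only):
mulu2_i32 produces the full 32x32->64 product as two 32-bit halves, which is
exactly what the two generated movi operations carry.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t a = 0xdeadbeef, b = 0x12345678;
    uint64_t r = (uint64_t)a * b;       /* the folded constant product */
    uint32_t rl = (uint32_t)r;          /* low-half movi */
    uint32_t rh = (uint32_t)(r >> 32);  /* high-half movi */

    /* Reassembling the halves gives back the 64-bit product. */
    assert((((uint64_t)rh << 32) | rl) == r);
    return 0;
}
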
Add two additional helpers, fold_add2_i32 and fold_sub2_i32
which will not be simple wrappers forever.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 70 +++++++++++++++++++++++++++++++-------------------
 1 file changed, 44 insertions(+), 26 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
+{
+    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
+        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
+        uint32_t al = arg_info(op->args[2])->val;
+        uint32_t ah = arg_info(op->args[3])->val;
+        uint32_t bl = arg_info(op->args[4])->val;
+        uint32_t bh = arg_info(op->args[5])->val;
+        uint64_t a = ((uint64_t)ah << 32) | al;
+        uint64_t b = ((uint64_t)bh << 32) | bl;
+        TCGArg rl, rh;
+        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+
+        if (add) {
+            a += b;
+        } else {
+            a -= b;
+        }
+
+        rl = op->args[0];
+        rh = op->args[1];
+        tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
+        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
+        return true;
+    }
+    return false;
+}
+
+static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
+{
+    return fold_addsub2_i32(ctx, op, true);
+}
+
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
+{
+    return fold_addsub2_i32(ctx, op, false);
+}
+
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        case INDEX_op_add2_i32:
-        case INDEX_op_sub2_i32:
-            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
-                && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
-                uint32_t al = arg_info(op->args[2])->val;
-                uint32_t ah = arg_info(op->args[3])->val;
-                uint32_t bl = arg_info(op->args[4])->val;
-                uint32_t bh = arg_info(op->args[5])->val;
-                uint64_t a = ((uint64_t)ah << 32) | al;
-                uint64_t b = ((uint64_t)bh << 32) | bl;
-                TCGArg rl, rh;
-                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
-
-                if (opc == INDEX_op_add2_i32) {
-                    a += b;
-                } else {
-                    a -= b;
-                }
-
-                rl = op->args[0];
-                rh = op->args[1];
-                tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
-                tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
-                continue;
-            }
-            break;
 
         default:
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(add):
             done = fold_add(&ctx, op);
             break;
+        case INDEX_op_add2_i32:
+            done = fold_add2_i32(&ctx, op);
+            break;
         CASE_OP_32_64_VEC(and):
             done = fold_and(&ctx, op);
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(sub):
             done = fold_sub(&ctx, op);
             break;
+        case INDEX_op_sub2_i32:
+            done = fold_sub2_i32(&ctx, op);
+            break;
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
--
2.25.1
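For exposition only, a plain C model of the add2_i32 semantics folded above
(add2 is an invented name): the double-word sum of two 32-bit pairs matches
64-bit arithmetic on the reassembled values, carry included.

#include <assert.h>
#include <stdint.h>

/* add2_i32 semantics: double-word add of two 32-bit pairs. */
static void add2(uint32_t *rl, uint32_t *rh,
                 uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh)
{
    uint64_t a = ((uint64_t)ah << 32) | al;
    uint64_t b = ((uint64_t)bh << 32) | bl;
    uint64_t r = a + b;

    *rl = (uint32_t)r;
    *rh = (uint32_t)(r >> 32);
}

int main(void)
{
    uint32_t rl, rh;

    /* 0x00000000ffffffff + 1 = 0x0000000100000000: carry into the high word. */
    add2(&rl, &rh, 0xffffffff, 0, 1, 0);
    assert(rl == 0 && rh == 1);
    return 0;
}
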
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 56 ++++++++++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 25 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_movcond(OptContext *ctx, TCGOp *op)
+{
+    TCGOpcode opc = op->opc;
+    TCGCond cond = op->args[5];
+    int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
+
+    if (i >= 0) {
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
+    }
+
+    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
+        uint64_t tv = arg_info(op->args[3])->val;
+        uint64_t fv = arg_info(op->args[4])->val;
+
+        opc = (opc == INDEX_op_movcond_i32
+               ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
+
+        if (tv == 1 && fv == 0) {
+            op->opc = opc;
+            op->args[3] = cond;
+        } else if (fv == 1 && tv == 0) {
+            op->opc = opc;
+            op->args[3] = tcg_invert_cond(cond);
+        }
+    }
+    return false;
+}
+
 static bool fold_mul(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        CASE_OP_32_64(movcond):
-            i = do_constant_folding_cond(opc, op->args[1],
-                                         op->args[2], op->args[5]);
-            if (i >= 0) {
-                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
-                continue;
-            }
-            if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
-                uint64_t tv = arg_info(op->args[3])->val;
-                uint64_t fv = arg_info(op->args[4])->val;
-                TCGCond cond = op->args[5];
-
-                if (fv == 1 && tv == 0) {
-                    cond = tcg_invert_cond(cond);
-                } else if (!(tv == 1 && fv == 0)) {
-                    break;
-                }
-                op->args[3] = cond;
-                op->opc = opc = (opc == INDEX_op_movcond_i32
-                                 ? INDEX_op_setcond_i32
-                                 : INDEX_op_setcond_i64);
-            }
-            break;
-
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_mb:
             done = fold_mb(&ctx, op);
             break;
+        CASE_OP_32_64(movcond):
+            done = fold_movcond(&ctx, op);
+            break;
         CASE_OP_32_64(mul):
             done = fold_mul(&ctx, op);
             break;
--
2.25.1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_extract2(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t v1 = arg_info(op->args[1])->val;
+        uint64_t v2 = arg_info(op->args[2])->val;
+        int shr = op->args[3];
+
+        if (op->opc == INDEX_op_extract2_i64) {
+            v1 >>= shr;
+            v2 <<= 64 - shr;
+        } else {
+            v1 = (uint32_t)v1 >> shr;
+            v2 = (int32_t)v2 << (32 - shr);
+        }
+        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
+    }
+    return false;
+}
+
 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
     return fold_const1(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        CASE_OP_32_64(extract2):
-            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                uint64_t v1 = arg_info(op->args[1])->val;
-                uint64_t v2 = arg_info(op->args[2])->val;
-                int shr = op->args[3];
-
-                if (opc == INDEX_op_extract2_i64) {
-                    tmp = (v1 >> shr) | (v2 << (64 - shr));
-                } else {
-                    tmp = (int32_t)(((uint32_t)v1 >> shr) |
-                                    ((uint32_t)v2 << (32 - shr)));
-                }
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(eqv):
             done = fold_eqv(&ctx, op);
             break;
+        CASE_OP_32_64(extract2):
+            done = fold_extract2(&ctx, op);
+            break;
         CASE_OP_32_64(ext8s):
         CASE_OP_32_64(ext16s):
         case INDEX_op_ext32s_i64:
--
2.25.1
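An illustrative aside, not part of the patch: extract2 is a funnel shift
over a register pair. A plain C model of the 32-bit case (extract2_i32 here
is a local sketch, valid for shr in 1..31 so neither shift is undefined):

#include <assert.h>
#include <stdint.h>

/* A 64-bit window over the pair {ah:al}, shifted right by shr,
   truncated to 32 bits. */
static uint32_t extract2_i32(uint32_t al, uint32_t ah, int shr)
{
    return (al >> shr) | (ah << (32 - shr));
}

int main(void)
{
    assert(extract2_i32(0x00000000, 0xffffffff, 8) == 0xff000000);
    assert(extract2_i32(0x12345678, 0x9abcdef0, 16) == 0xdef01234);
    return 0;
}
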
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 48 ++++++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_extract(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t;
+
+        t = arg_info(op->args[1])->val;
+        t = extract64(t, op->args[2], op->args[3]);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
 static bool fold_extract2(OptContext *ctx, TCGOp *op)
 {
     if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     return tcg_opt_gen_movi(ctx, op, op->args[0], i);
 }
 
+static bool fold_sextract(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t;
+
+        t = arg_info(op->args[1])->val;
+        t = sextract64(t, op->args[2], op->args[3]);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        CASE_OP_32_64(extract):
-            if (arg_is_const(op->args[1])) {
-                tmp = extract64(arg_info(op->args[1])->val,
-                                op->args[2], op->args[3]);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
-        CASE_OP_32_64(sextract):
-            if (arg_is_const(op->args[1])) {
-                tmp = sextract64(arg_info(op->args[1])->val,
-                                 op->args[2], op->args[3]);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(eqv):
             done = fold_eqv(&ctx, op);
             break;
+        CASE_OP_32_64(extract):
+            done = fold_extract(&ctx, op);
+            break;
         CASE_OP_32_64(extract2):
             done = fold_extract2(&ctx, op);
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_setcond2_i32:
             done = fold_setcond2(&ctx, op);
             break;
+        CASE_OP_32_64(sextract):
+            done = fold_sextract(&ctx, op);
+            break;
         CASE_OP_32_64_VEC(sub):
             done = fold_sub(&ctx, op);
             break;
--
2.25.1
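For exposition, portable re-implementations of the two bitfield primitives
used above (my_extract64 and my_sextract64 are invented names; both assume
0 < len and pos + len <= 64, and an arithmetic right shift for signed types):

#include <assert.h>
#include <stdint.h>

static uint64_t my_extract64(uint64_t v, int pos, int len)
{
    return (v >> pos) & (~0ull >> (64 - len));
}

static int64_t my_sextract64(uint64_t v, int pos, int len)
{
    /* Shift the field to the top, then arithmetic-shift it back down. */
    return (int64_t)(v << (64 - pos - len)) >> (64 - len);
}

int main(void)
{
    assert(my_extract64(0xff00, 8, 8) == 0xff);
    assert(my_sextract64(0xff00, 8, 8) == -1);    /* sign bit propagates */
    assert(my_sextract64(0x7f00, 8, 8) == 0x7f);
    return 0;
}
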
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     return fold_const1(ctx, op);
 }
 
+static bool fold_deposit(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t t1 = arg_info(op->args[1])->val;
+        uint64_t t2 = arg_info(op->args[2])->val;
+
+        t1 = deposit64(t1, op->args[3], op->args[4], t2);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    }
+    return false;
+}
+
 static bool fold_divide(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        CASE_OP_32_64(deposit):
-            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tmp = deposit64(arg_info(op->args[1])->val,
-                                op->args[3], op->args[4],
-                                arg_info(op->args[2])->val);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(ctpop):
             done = fold_ctpop(&ctx, op);
             break;
+        CASE_OP_32_64(deposit):
+            done = fold_deposit(&ctx, op);
+            break;
         CASE_OP_32_64(div):
         CASE_OP_32_64(divu):
             done = fold_divide(&ctx, op);
--
2.25.1
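A standalone model of the deposit64 semantics folded above (my_deposit64 is
an invented name for this sketch): replace the len-bit field at bit position
pos in base with the low bits of val.

#include <assert.h>
#include <stdint.h>

static uint64_t my_deposit64(uint64_t base, int pos, int len, uint64_t val)
{
    uint64_t mask = (~0ull >> (64 - len)) << pos;
    return (base & ~mask) | ((val << pos) & mask);
}

int main(void)
{
    /* Clear byte 1 of an all-ones value. */
    assert(my_deposit64(~0ull, 8, 8, 0) == 0xffffffffffff00ffull);
    /* Place a 32-bit constant in the high half. */
    assert(my_deposit64(0, 32, 32, 0xdeadbeef) == 0xdeadbeef00000000ull);
    return 0;
}
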
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t = arg_info(op->args[1])->val;
+
+        if (t != 0) {
+            t = do_constant_folding(op->opc, t, 0);
+            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+        }
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
+    }
+    return false;
+}
+
 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 {
     return fold_const1(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        CASE_OP_32_64(clz):
-        CASE_OP_32_64(ctz):
-            if (arg_is_const(op->args[1])) {
-                TCGArg v = arg_info(op->args[1])->val;
-                if (v != 0) {
-                    tmp = do_constant_folding(opc, v, 0);
-                    tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                } else {
-                    tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
-                }
-                continue;
-            }
-            break;
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_brcond2_i32:
             done = fold_brcond2(&ctx, op);
             break;
+        CASE_OP_32_64(clz):
+        CASE_OP_32_64(ctz):
+            done = fold_count_zeros(&ctx, op);
+            break;
         CASE_OP_32_64(ctpop):
             done = fold_ctpop(&ctx, op);
             break;
--
2.25.1
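For illustration only: TCG's clz/ctz take a second operand that supplies
the result for a zero input, which is what lets the helper above fold
either way. A standalone sketch, assuming a GCC-style __builtin_clz:

#include <assert.h>
#include <stdint.h>

/* clz with TCG's two-operand semantics: zero_val covers the zero case,
   since __builtin_clz(0) is undefined. */
static uint32_t clz32(uint32_t v, uint32_t zero_val)
{
    return v ? (uint32_t)__builtin_clz(v) : zero_val;
}

int main(void)
{
    assert(clz32(1, 32) == 31);
    assert(clz32(0x80000000u, 32) == 0);
    assert(clz32(0, 32) == 32);   /* falls back to the default operand */
    return 0;
}
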
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 27 ++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+static bool fold_bswap(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t = arg_info(op->args[1])->val;
+
+        t = do_constant_folding(op->opc, t, op->args[2]);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
 static bool fold_call(OptContext *ctx, TCGOp *op)
 {
     TCGContext *s = ctx->tcg;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }
         break;
 
-        CASE_OP_32_64(bswap16):
-        CASE_OP_32_64(bswap32):
-        case INDEX_op_bswap64_i64:
-            if (arg_is_const(op->args[1])) {
-                tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
-                                          op->args[2]);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_brcond2_i32:
             done = fold_brcond2(&ctx, op);
             break;
+        CASE_OP_32_64(bswap16):
+        CASE_OP_32_64(bswap32):
+        case INDEX_op_bswap64_i64:
+            done = fold_bswap(&ctx, op);
+            break;
         CASE_OP_32_64(clz):
         CASE_OP_32_64(ctz):
             done = fold_count_zeros(&ctx, op);
--
2.25.1
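As a standalone illustration of the flag argument carried by the bswap
opcodes folded above (a sketch, not the QEMU implementation): TCG_BSWAP_OS
sign-extends the swapped halfword into the rest of the register, while
TCG_BSWAP_OZ zero-extends it. Modelled by hand:

#include <assert.h>
#include <stdint.h>

/* Swap the low 16 bits, then either zero- or sign-extend the result. */
static uint64_t bswap16_flags(uint64_t x, int sign_extend)
{
    uint16_t s = (uint16_t)((x << 8) | ((x >> 8) & 0xff));
    return sign_extend ? (uint64_t)(int16_t)s : s;
}

int main(void)
{
    /* 0x0080 byteswapped is 0x8000; OS propagates its sign bit. */
    assert(bswap16_flags(0x0080, 0) == 0x8000);
    assert(bswap16_flags(0x0080, 1) == 0xffffffffffff8000ull);
    return 0;
}
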
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 53 +++++++++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 22 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_dup(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t = arg_info(op->args[1])->val;
+        t = dup_const(TCGOP_VECE(op), t);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
+static bool fold_dup2(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
+                               arg_info(op->args[2])->val);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+
+    if (args_are_copies(op->args[1], op->args[2])) {
+        op->opc = INDEX_op_dup_vec;
+        TCGOP_VECE(op) = MO_32;
+    }
+    return false;
+}
+
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
             break;
 
-        case INDEX_op_dup_vec:
-            if (arg_is_const(op->args[1])) {
-                tmp = arg_info(op->args[1])->val;
-                tmp = dup_const(TCGOP_VECE(op), tmp);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
-        case INDEX_op_dup2_vec:
-            assert(TCG_TARGET_REG_BITS == 32);
-            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0],
-                                 deposit64(arg_info(op->args[1])->val, 32, 32,
-                                           arg_info(op->args[2])->val));
-                continue;
-            } else if (args_are_copies(op->args[1], op->args[2])) {
-                op->opc = INDEX_op_dup_vec;
-                TCGOP_VECE(op) = MO_32;
-            }
-            break;
-
         default:
             break;
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(divu):
             done = fold_divide(&ctx, op);
             break;
+        case INDEX_op_dup_vec:
+            done = fold_dup(&ctx, op);
+            break;
+        case INDEX_op_dup2_vec:
+            done = fold_dup2(&ctx, op);
+            break;
         CASE_OP_32_64(eqv):
             done = fold_eqv(&ctx, op);
             break;
--
2.25.1
diff view generated by jsdifflib
1
We really do this already, by including them into the same test.
1
This is the final entry in the main switch that was in a
2
This just hoists the expression up a bit.
2
different form. After this, we have the option to convert
3
the switch into a function dispatch table.
3
4
4
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
target/cris/translate.c | 13 ++++++-------
9
tcg/optimize.c | 27 ++++++++++++++-------------
9
1 file changed, 6 insertions(+), 7 deletions(-)
10
1 file changed, 14 insertions(+), 13 deletions(-)
10
11
11
diff --git a/target/cris/translate.c b/target/cris/translate.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/target/cris/translate.c
14
--- a/tcg/optimize.c
14
+++ b/target/cris/translate.c
15
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
16
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
16
cris_clear_x_flag(dc);
17
return true;
17
}
18
}
18
19
19
+ /* Fold unhandled changes to X_FLAG into cpustate_changed. */
20
+static bool fold_mov(OptContext *ctx, TCGOp *op)
20
+ dc->cpustate_changed |= !dc->flagx_known;
21
+{
21
+ dc->cpustate_changed |= dc->flags_x != (dc->base.tb->flags & X_FLAG);
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
23
+}
22
+
24
+
23
/*
25
static bool fold_movcond(OptContext *ctx, TCGOp *op)
24
* Check for delayed branches here. If we do it before
26
{
25
* actually generating any host code, the simulator will just
27
TCGOpcode opc = op->opc;
26
@@ -XXX,XX +XXX,XX @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
28
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
27
t_gen_movi_env_TN(dslot, 0);
29
break;
28
}
30
}
29
31
30
- if (dc->cpustate_changed
32
- /* Propagate constants through copy operations and do constant
31
- || !dc->flagx_known
33
- folding. Constants will be substituted to arguments by register
32
- || (dc->flags_x != (dc->base.tb->flags & X_FLAG))) {
34
- allocator where needed and possible. Also detect copies. */
33
+ if (dc->cpustate_changed) {
35
+ /*
34
cris_store_direct_jmp(dc);
36
+ * Process each opcode.
37
+ * Sorted alphabetically by opcode as much as possible.
38
+ */
39
switch (opc) {
40
- CASE_OP_32_64_VEC(mov):
41
- done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
42
- break;
43
-
44
- default:
45
- break;
46
-
47
- /* ---------------------------------------------------------- */
48
- /* Sorted alphabetically by opcode as much as possible. */
49
-
50
CASE_OP_32_64_VEC(add):
51
done = fold_add(&ctx, op);
52
break;
53
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
54
case INDEX_op_mb:
55
done = fold_mb(&ctx, op);
56
break;
57
+ CASE_OP_32_64_VEC(mov):
58
+ done = fold_mov(&ctx, op);
59
+ break;
60
CASE_OP_32_64(movcond):
61
done = fold_movcond(&ctx, op);
62
break;
63
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
64
CASE_OP_32_64_VEC(xor):
65
done = fold_xor(&ctx, op);
66
break;
67
+ default:
68
+ break;
35
}
69
}
36
70
37
@@ -XXX,XX +XXX,XX @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
71
if (!done) {
38
}
39
40
/* Force an update if the per-tb cpu state has changed. */
41
- if (dc->base.is_jmp == DISAS_NEXT
42
- && (dc->cpustate_changed
43
- || !dc->flagx_known
44
- || (dc->flags_x != (dc->base.tb->flags & X_FLAG)))) {
45
+ if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
46
dc->base.is_jmp = DISAS_UPDATE;
47
tcg_gen_movi_tl(env_pc, dc->pc);
48
}
49
--
72
--
50
2.25.1
73
2.25.1
51
74
52
75
diff view generated by jsdifflib
1
The only semantic of DISAS_TB_JUMP is that we've done goto_tb,
1
Pull the "op r, a, a => movi r, 0" optimization into a function,
2
which is the same as DISAS_NORETURN -- we've exited the tb.
2
and use it in the outer opcode fold functions.
3
3
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
target/nios2/translate.c | 8 +++-----
8
tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
8
1 file changed, 3 insertions(+), 5 deletions(-)
9
1 file changed, 24 insertions(+), 17 deletions(-)
9
10
10
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/target/nios2/translate.c
13
--- a/tcg/optimize.c
13
+++ b/target/nios2/translate.c
14
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
15
/* is_jmp field values */
16
return false;
16
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
17
}
17
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
18
18
-#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
19
+/* If the binary operation has both arguments equal, fold to @i. */
19
20
+static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
20
#define INSTRUCTION_FLG(func, flags) { (func), (flags) }
21
+{
21
#define INSTRUCTION(func) \
22
+ if (args_are_copies(op->args[1], op->args[2])) {
22
@@ -XXX,XX +XXX,XX @@ static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags)
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
+ }
25
+ return false;
26
+}
27
+
28
/*
29
* These outermost fold_<op> functions are sorted alphabetically.
30
*/
31
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
32
33
static bool fold_andc(OptContext *ctx, TCGOp *op)
23
{
34
{
24
J_TYPE(instr, code);
35
- return fold_const2(ctx, op);
25
gen_goto_tb(dc, 0, (dc->pc & 0xF0000000) | (instr.imm26 << 2));
36
+ if (fold_const2(ctx, op) ||
26
- dc->is_jmp = DISAS_TB_JUMP;
37
+ fold_xx_to_i(ctx, op, 0)) {
27
+ dc->is_jmp = DISAS_NORETURN;
38
+ return true;
39
+ }
40
+ return false;
28
}
41
}
29
42
30
static void call(DisasContext *dc, uint32_t code, uint32_t flags)
43
static bool fold_brcond(OptContext *ctx, TCGOp *op)
31
@@ -XXX,XX +XXX,XX @@ static void br(DisasContext *dc, uint32_t code, uint32_t flags)
44
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
32
I_TYPE(instr, code);
45
33
46
static bool fold_sub(OptContext *ctx, TCGOp *op)
34
gen_goto_tb(dc, 0, dc->pc + 4 + (instr.imm16.s & -4));
47
{
35
- dc->is_jmp = DISAS_TB_JUMP;
48
- return fold_const2(ctx, op);
36
+ dc->is_jmp = DISAS_NORETURN;
49
+ if (fold_const2(ctx, op) ||
50
+ fold_xx_to_i(ctx, op, 0)) {
51
+ return true;
52
+ }
53
+ return false;
37
}
54
}
38
55
39
static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
56
static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
40
@@ -XXX,XX +XXX,XX @@ static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
57
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
41
gen_goto_tb(dc, 0, dc->pc + 4);
58
42
gen_set_label(l1);
59
static bool fold_xor(OptContext *ctx, TCGOp *op)
43
gen_goto_tb(dc, 1, dc->pc + 4 + (instr.imm16.s & -4));
60
{
44
- dc->is_jmp = DISAS_TB_JUMP;
61
- return fold_const2(ctx, op);
45
+ dc->is_jmp = DISAS_NORETURN;
62
+ if (fold_const2(ctx, op) ||
63
+ fold_xx_to_i(ctx, op, 0)) {
64
+ return true;
65
+ }
66
+ return false;
46
}
67
}
47
68
48
/* Comparison instructions */
69
/* Propagate constants and copies, fold constant expressions. */
49
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
50
break;
71
break;
51
72
}
52
case DISAS_NORETURN:
73
53
- case DISAS_TB_JUMP:
74
- /* Simplify expression for "op r, a, a => movi r, 0" cases */
54
/* nothing more to generate */
75
- switch (opc) {
55
break;
76
- CASE_OP_32_64_VEC(andc):
56
}
77
- CASE_OP_32_64_VEC(sub):
78
- CASE_OP_32_64_VEC(xor):
79
- if (args_are_copies(op->args[1], op->args[2])) {
80
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
81
- continue;
82
- }
83
- break;
84
- default:
85
- break;
86
- }
87
-
88
/*
89
* Process each opcode.
90
* Sorted alphabetically by opcode as much as possible.
57
--
91
--
58
2.25.1
92
2.25.1
59
93
60
94
diff view generated by jsdifflib
Deleted patch
1
We do not need to copy this into DisasContext.
2
1
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/nios2/translate.c | 10 ++++------
7
1 file changed, 4 insertions(+), 6 deletions(-)
8
9
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/nios2/translate.c
12
+++ b/target/nios2/translate.c
13
@@ -XXX,XX +XXX,XX @@
14
}
15
16
typedef struct DisasContext {
17
- TCGv_ptr cpu_env;
18
TCGv *cpu_R;
19
TCGv_i32 zero;
20
int is_jmp;
21
@@ -XXX,XX +XXX,XX @@ static void t_gen_helper_raise_exception(DisasContext *dc,
22
TCGv_i32 tmp = tcg_const_i32(index);
23
24
tcg_gen_movi_tl(dc->cpu_R[R_PC], dc->pc);
25
- gen_helper_raise_exception(dc->cpu_env, tmp);
26
+ gen_helper_raise_exception(cpu_env, tmp);
27
tcg_temp_free_i32(tmp);
28
dc->is_jmp = DISAS_NORETURN;
29
}
30
@@ -XXX,XX +XXX,XX @@ static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags)
31
tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]);
32
#ifdef DEBUG_MMU
33
TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
34
- gen_helper_mmu_read_debug(dc->cpu_R[instr.c], dc->cpu_env, tmp);
35
+ gen_helper_mmu_read_debug(dc->cpu_R[instr.c], cpu_env, tmp);
36
tcg_temp_free_i32(tmp);
37
#endif
38
}
39
@@ -XXX,XX +XXX,XX @@ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags)
40
{
41
#if !defined(CONFIG_USER_ONLY)
42
TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
43
- gen_helper_mmu_write(dc->cpu_env, tmp, load_gpr(dc, instr.a));
44
+ gen_helper_mmu_write(cpu_env, tmp, load_gpr(dc, instr.a));
45
tcg_temp_free_i32(tmp);
46
#endif
47
break;
48
@@ -XXX,XX +XXX,XX @@ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags)
49
if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
50
gen_io_start();
51
}
52
- gen_helper_check_interrupts(dc->cpu_env);
53
+ gen_helper_check_interrupts(cpu_env);
54
dc->is_jmp = DISAS_UPDATE;
55
}
56
#endif
57
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
58
int num_insns;
59
60
/* Initialize DC */
61
- dc->cpu_env = cpu_env;
62
dc->cpu_R = cpu_R;
63
dc->is_jmp = DISAS_NEXT;
64
dc->pc = tb->pc;
65
--
66
2.25.1
67
68
diff view generated by jsdifflib
1
For INDEX_op_bswap32_i32, pass 0 for flags: input not zero-extended,
1
Pull the "op r, a, a => mov r, a" optimization into a function,
2
output does not need extension within the host 64-bit register.
2
and use it in the outer opcode fold functions.
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
tcg/ppc/tcg-target.c.inc | 22 ++++++++++++++++------
8
tcg/optimize.c | 39 ++++++++++++++++++++++++---------------
8
1 file changed, 16 insertions(+), 6 deletions(-)
9
1 file changed, 24 insertions(+), 15 deletions(-)
9
10
10
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.c.inc
13
--- a/tcg/optimize.c
13
+++ b/tcg/ppc/tcg-target.c.inc
14
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
15
tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
16
return false;
16
}
17
}
17
18
18
-static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src)
19
+/* If the binary operation has both arguments equal, fold to identity. */
19
+static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
20
+static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
21
+{
22
+ if (args_are_copies(op->args[1], op->args[2])) {
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
24
+ }
25
+ return false;
26
+}
27
+
28
/*
29
* These outermost fold_<op> functions are sorted alphabetically.
30
+ *
31
+ * The ordering of the transformations should be:
32
+ * 1) those that produce a constant
33
+ * 2) those that produce a copy
34
+ * 3) those that produce information about the result value.
35
*/
36
37
static bool fold_add(OptContext *ctx, TCGOp *op)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
39
40
static bool fold_and(OptContext *ctx, TCGOp *op)
20
{
41
{
21
TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
42
- return fold_const2(ctx, op);
22
43
+ if (fold_const2(ctx, op) ||
23
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src)
44
+ fold_xx_to_x(ctx, op)) {
24
/* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */
45
+ return true;
25
tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
26
27
- tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
28
+ if (flags & TCG_BSWAP_OS) {
29
+ tcg_out_ext16s(s, dst, tmp);
30
+ } else {
31
+ tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
32
+ }
46
+ }
47
+ return false;
33
}
48
}
34
49
35
-static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src)
50
static bool fold_andc(OptContext *ctx, TCGOp *op)
36
+static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
51
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
52
53
static bool fold_or(OptContext *ctx, TCGOp *op)
37
{
54
{
38
TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
55
- return fold_const2(ctx, op);
39
56
+ if (fold_const2(ctx, op) ||
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src)
57
+ fold_xx_to_x(ctx, op)) {
41
/* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
58
+ return true;
42
tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
43
44
- tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
45
+ if (flags & TCG_BSWAP_OS) {
46
+ tcg_out_ext32s(s, dst, tmp);
47
+ } else {
48
+ tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
49
+ }
59
+ }
60
+ return false;
50
}
61
}
51
62
52
static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
63
static bool fold_orc(OptContext *ctx, TCGOp *op)
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
54
65
break;
55
case INDEX_op_bswap16_i32:
66
}
56
case INDEX_op_bswap16_i64:
67
57
- tcg_out_bswap16(s, args[0], args[1]);
68
- /* Simplify expression for "op r, a, a => mov r, a" cases */
58
+ tcg_out_bswap16(s, args[0], args[1], args[2]);
69
- switch (opc) {
59
break;
70
- CASE_OP_32_64_VEC(or):
60
case INDEX_op_bswap32_i32:
71
- CASE_OP_32_64_VEC(and):
61
+ tcg_out_bswap32(s, args[0], args[1], 0);
72
- if (args_are_copies(op->args[1], op->args[2])) {
62
+ break;
73
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
63
case INDEX_op_bswap32_i64:
74
- continue;
64
- tcg_out_bswap32(s, args[0], args[1]);
75
- }
65
+ tcg_out_bswap32(s, args[0], args[1], args[2]);
76
- break;
66
break;
77
- default:
67
case INDEX_op_bswap64_i64:
78
- break;
68
tcg_out_bswap64(s, args[0], args[1]);
79
- }
80
-
81
/*
82
* Process each opcode.
83
* Sorted alphabetically by opcode as much as possible.
69
--
84
--
70
2.25.1
85
2.25.1
71
86
72
87
diff view generated by jsdifflib
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
Pull the "op r, a, 0 => movi r, 0" optimization into a function,
2
and use it in the outer opcode fold functions.
2
3
3
Implement tcg_gen_vec_shl{shr}{sar}16i_tl by adding corresponging i32 OP.
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
6
Message-Id: <20210624105023.3852-4-zhiwei_liu@c-sky.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
include/tcg/tcg-op-gvec.h | 10 ++++++++++
8
tcg/optimize.c | 38 ++++++++++++++++++++------------------
10
tcg/tcg-op-gvec.c | 28 ++++++++++++++++++++++++++++
9
1 file changed, 20 insertions(+), 18 deletions(-)
11
2 files changed, 38 insertions(+)
12
10
13
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-op-gvec.h
13
--- a/tcg/optimize.c
16
+++ b/include/tcg/tcg-op-gvec.h
14
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
18
void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
16
return false;
19
void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
20
21
+void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
22
+void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
23
+void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
24
+
25
#if TARGET_LONG_BITS == 64
26
#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i64
27
#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i64
28
#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i64
29
#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i64
30
+#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i64
31
+#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i64
32
+#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i64
33
#else
34
#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i32
35
#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i32
36
#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i32
37
#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i32
38
+#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i32
39
+#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i32
40
+#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i32
41
#endif
42
43
#endif
44
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/tcg-op-gvec.c
47
+++ b/tcg/tcg-op-gvec.c
48
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
49
tcg_gen_andi_i64(d, d, mask);
50
}
17
}
51
18
52
+void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
19
+/* If the binary operation has second argument @i, fold to @i. */
20
+static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
53
+{
21
+{
54
+ uint32_t mask = dup_const(MO_16, 0xffff << c);
22
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
55
+ tcg_gen_shli_i32(d, a, c);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
56
+ tcg_gen_andi_i32(d, d, mask);
24
+ }
25
+ return false;
57
+}
26
+}
58
+
27
+
59
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
28
/* If the binary operation has both arguments equal, fold to @i. */
60
int64_t shift, uint32_t oprsz, uint32_t maxsz)
29
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
61
{
30
{
62
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
31
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
63
tcg_gen_andi_i64(d, d, mask);
32
static bool fold_and(OptContext *ctx, TCGOp *op)
33
{
34
if (fold_const2(ctx, op) ||
35
+ fold_xi_to_i(ctx, op, 0) ||
36
fold_xx_to_x(ctx, op)) {
37
return true;
38
}
39
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
40
41
static bool fold_mul(OptContext *ctx, TCGOp *op)
42
{
43
- return fold_const2(ctx, op);
44
+ if (fold_const2(ctx, op) ||
45
+ fold_xi_to_i(ctx, op, 0)) {
46
+ return true;
47
+ }
48
+ return false;
64
}
49
}
65
50
66
+void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
51
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
67
+{
68
+ uint32_t mask = dup_const(MO_16, 0xffff >> c);
69
+ tcg_gen_shri_i32(d, a, c);
70
+ tcg_gen_andi_i32(d, d, mask);
71
+}
72
+
73
void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
74
int64_t shift, uint32_t oprsz, uint32_t maxsz)
75
{
52
{
76
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
53
- return fold_const2(ctx, op);
77
tcg_temp_free_i64(s);
54
+ if (fold_const2(ctx, op) ||
55
+ fold_xi_to_i(ctx, op, 0)) {
56
+ return true;
57
+ }
58
+ return false;
78
}
59
}
79
60
80
+void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
61
static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
81
+{
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
82
+ uint32_t s_mask = dup_const(MO_16, 0x8000 >> c);
63
continue;
83
+ uint32_t c_mask = dup_const(MO_16, 0xffff >> c);
64
}
84
+ TCGv_i32 s = tcg_temp_new_i32();
65
85
+
66
- /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
86
+ tcg_gen_shri_i32(d, a, c);
67
- switch (opc) {
87
+ tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */
68
- CASE_OP_32_64_VEC(and):
88
+ tcg_gen_andi_i32(d, d, c_mask); /* clear out bits above sign */
69
- CASE_OP_32_64_VEC(mul):
89
+ tcg_gen_muli_i32(s, s, (2 << c) - 2); /* replicate isolated signs */
70
- CASE_OP_32_64(muluh):
90
+ tcg_gen_or_i32(d, d, s); /* include sign extension */
71
- CASE_OP_32_64(mulsh):
91
+ tcg_temp_free_i32(s);
72
- if (arg_is_const(op->args[2])
92
+}
73
- && arg_info(op->args[2])->val == 0) {
93
+
74
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
94
void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
75
- continue;
95
int64_t shift, uint32_t oprsz, uint32_t maxsz)
76
- }
96
{
77
- break;
78
- default:
79
- break;
80
- }
81
-
82
/*
83
* Process each opcode.
84
* Sorted alphabetically by opcode as much as possible.
97
--
85
--
98
2.25.1
86
2.25.1
99
87
100
88
diff view generated by jsdifflib
1
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
1
Compute the type of the operation early.
2
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
2
3
There are at least 4 places that used a def->flags ladder
4
to determine the type of the operation being optimized.
5
6
There were two places that assumed !TCG_OPF_64BIT means
7
TCG_TYPE_I32, and so could potentially compute incorrect
8
results for vector operations.
9
10
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
12
---
5
target/cris/translate.c | 317 ++++++++++++++++++++++------------------
13
tcg/optimize.c | 149 +++++++++++++++++++++++++++++--------------------
6
1 file changed, 174 insertions(+), 143 deletions(-)
14
1 file changed, 89 insertions(+), 60 deletions(-)
7
15
8
diff --git a/target/cris/translate.c b/target/cris/translate.c
16
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
10
--- a/target/cris/translate.c
18
--- a/tcg/optimize.c
11
+++ b/target/cris/translate.c
19
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
20
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
13
*
21
14
*/
22
/* In flight values from optimization. */
15
23
uint64_t z_mask;
16
-/* generate intermediate code for basic block 'tb'. */
24
+ TCGType type;
17
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
25
} OptContext;
18
+static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
26
19
{
27
static inline TempOptInfo *ts_info(TCGTemp *ts)
20
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
28
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
21
CPUCRISState *env = cs->env_ptr;
29
{
22
+ uint32_t tb_flags = dc->base.tb->flags;
30
TCGTemp *dst_ts = arg_temp(dst);
23
uint32_t pc_start;
31
TCGTemp *src_ts = arg_temp(src);
24
- unsigned int insn_len;
32
- const TCGOpDef *def;
25
- struct DisasContext ctx;
33
TempOptInfo *di;
26
- struct DisasContext *dc = &ctx;
34
TempOptInfo *si;
27
- uint32_t page_start;
35
uint64_t z_mask;
28
- target_ulong npc;
36
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
- int num_insns;
37
reset_ts(dst_ts);
30
38
di = ts_info(dst_ts);
31
if (env->pregs[PR_VR] == 32) {
39
si = ts_info(src_ts);
32
dc->decoder = crisv32_decoder;
40
- def = &tcg_op_defs[op->opc];
33
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
41
- if (def->flags & TCG_OPF_VECTOR) {
34
dc->clear_locked_irq = 1;
42
- new_op = INDEX_op_mov_vec;
35
}
43
- } else if (def->flags & TCG_OPF_64BIT) {
36
44
- new_op = INDEX_op_mov_i64;
37
- /* Odd PC indicates that branch is rexecuting due to exception in the
45
- } else {
38
+ /*
39
+ * Odd PC indicates that branch is rexecuting due to exception in the
40
* delayslot, like in real hw.
41
*/
42
- pc_start = tb->pc & ~1;
43
-
44
- dc->base.tb = tb;
45
+ pc_start = dc->base.pc_first & ~1;
46
dc->base.pc_first = pc_start;
47
dc->base.pc_next = pc_start;
48
- dc->base.is_jmp = DISAS_NEXT;
49
- dc->base.singlestep_enabled = cs->singlestep_enabled;
50
51
dc->cpu = env_archcpu(env);
52
dc->ppc = pc_start;
53
dc->pc = pc_start;
54
dc->flags_uptodate = 1;
55
dc->flagx_known = 1;
56
- dc->flags_x = tb->flags & X_FLAG;
57
+ dc->flags_x = tb_flags & X_FLAG;
58
dc->cc_x_uptodate = 0;
59
dc->cc_mask = 0;
60
dc->update_cc = 0;
61
dc->clear_prefix = 0;
62
+ dc->cpustate_changed = 0;
63
64
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
65
dc->cc_size_uptodate = -1;
66
67
/* Decode TB flags. */
68
- dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
69
- | X_FLAG | PFIX_FLAG);
70
- dc->delayed_branch = !!(tb->flags & 7);
71
+ dc->tb_flags = tb_flags & (S_FLAG | P_FLAG | U_FLAG | X_FLAG | PFIX_FLAG);
72
+ dc->delayed_branch = !!(tb_flags & 7);
73
if (dc->delayed_branch) {
74
dc->jmp = JMP_INDIRECT;
75
} else {
76
dc->jmp = JMP_NOJMP;
77
}
78
+}
79
80
- dc->cpustate_changed = 0;
81
+static void cris_tr_tb_start(DisasContextBase *db, CPUState *cpu)
82
+{
83
+}
84
85
- page_start = pc_start & TARGET_PAGE_MASK;
86
- num_insns = 0;
87
+static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
88
+{
89
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
90
91
- gen_tb_start(tb);
92
- do {
93
- tcg_gen_insn_start(dc->delayed_branch == 1
94
- ? dc->ppc | 1 : dc->pc);
95
- num_insns++;
96
+ tcg_gen_insn_start(dc->delayed_branch == 1 ? dc->ppc | 1 : dc->pc);
97
+}
98
99
- if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
100
+static bool cris_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
101
+ const CPUBreakpoint *bp)
102
+{
103
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
104
+
46
+
105
+ cris_evaluate_flags(dc);
47
+ switch (ctx->type) {
106
+ tcg_gen_movi_tl(env_pc, dc->pc);
48
+ case TCG_TYPE_I32:
107
+ t_gen_raise_exception(EXCP_DEBUG);
49
new_op = INDEX_op_mov_i32;
108
+ dc->base.is_jmp = DISAS_NORETURN;
109
+ /*
110
+ * The address covered by the breakpoint must be included in
111
+ * [tb->pc, tb->pc + tb->size) in order to for it to be
112
+ * properly cleared -- thus we increment the PC here so that
113
+ * the logic setting tb->size below does the right thing.
114
+ */
115
+ dc->pc += 2;
116
+ return true;
117
+}
118
+
119
+static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
120
+{
121
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
122
+ CPUCRISState *env = cs->env_ptr;
123
+ unsigned int insn_len;
124
+
125
+ /* Pretty disas. */
126
+ LOG_DIS("%8.8x:\t", dc->pc);
127
+
128
+ dc->clear_x = 1;
129
+
130
+ insn_len = dc->decoder(env, dc);
131
+ dc->ppc = dc->pc;
132
+ dc->pc += insn_len;
133
+ dc->base.pc_next += insn_len;
134
+
135
+ if (dc->base.is_jmp == DISAS_NORETURN) {
136
+ return;
137
+ }
138
+
139
+ if (dc->clear_x) {
140
+ cris_clear_x_flag(dc);
141
+ }
142
+
143
+ /*
144
+ * Check for delayed branches here. If we do it before
145
+ * actually generating any host code, the simulator will just
146
+ * loop doing nothing for on this program location.
147
+ */
148
+ if (dc->delayed_branch && --dc->delayed_branch == 0) {
149
+ if (dc->base.tb->flags & 7) {
150
+ t_gen_movi_env_TN(dslot, 0);
151
+ }
152
+
153
+ if (dc->cpustate_changed
154
+ || !dc->flagx_known
155
+ || (dc->flags_x != (dc->base.tb->flags & X_FLAG))) {
156
+ cris_store_direct_jmp(dc);
157
+ }
158
+
159
+ if (dc->clear_locked_irq) {
160
+ dc->clear_locked_irq = 0;
161
+ t_gen_movi_env_TN(locked_irq, 0);
162
+ }
163
+
164
+ if (dc->jmp == JMP_DIRECT_CC) {
165
+ TCGLabel *l1 = gen_new_label();
166
cris_evaluate_flags(dc);
167
- tcg_gen_movi_tl(env_pc, dc->pc);
168
- t_gen_raise_exception(EXCP_DEBUG);
169
+
170
+ /* Conditional jmp. */
171
+ tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
172
+ gen_goto_tb(dc, 1, dc->jmp_pc);
173
+ gen_set_label(l1);
174
+ gen_goto_tb(dc, 0, dc->pc);
175
dc->base.is_jmp = DISAS_NORETURN;
176
- /* The address covered by the breakpoint must be included in
177
- [tb->pc, tb->pc + tb->size) in order to for it to be
178
- properly cleared -- thus we increment the PC here so that
179
- the logic setting tb->size below does the right thing. */
180
- dc->pc += 2;
181
- break;
182
+ dc->jmp = JMP_NOJMP;
183
+ } else if (dc->jmp == JMP_DIRECT) {
184
+ cris_evaluate_flags(dc);
185
+ gen_goto_tb(dc, 0, dc->jmp_pc);
186
+ dc->base.is_jmp = DISAS_NORETURN;
187
+ dc->jmp = JMP_NOJMP;
188
+ } else {
189
+ TCGv c = tcg_const_tl(dc->pc);
190
+ t_gen_cc_jmp(env_btarget, c);
191
+ tcg_temp_free(c);
192
+ dc->base.is_jmp = DISAS_JUMP;
193
}
194
+ }
195
196
- /* Pretty disas. */
197
- LOG_DIS("%8.8x:\t", dc->pc);
198
+ /* Force an update if the per-tb cpu state has changed. */
199
+ if (dc->base.is_jmp == DISAS_NEXT
200
+ && (dc->cpustate_changed
201
+ || !dc->flagx_known
202
+ || (dc->flags_x != (dc->base.tb->flags & X_FLAG)))) {
203
+ dc->base.is_jmp = DISAS_UPDATE;
204
+ tcg_gen_movi_tl(env_pc, dc->pc);
205
+ }
206
207
- if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
208
- gen_io_start();
209
- }
210
- dc->clear_x = 1;
211
+ /*
212
+ * FIXME: Only the first insn in the TB should cross a page boundary.
213
+ * If we can detect the length of the next insn easily, we should.
214
+ * In the meantime, simply stop when we do cross.
215
+ */
216
+ if (dc->base.is_jmp == DISAS_NEXT
217
+ && ((dc->pc ^ dc->base.pc_first) & TARGET_PAGE_MASK) != 0) {
218
+ dc->base.is_jmp = DISAS_TOO_MANY;
219
+ }
220
+}
221
222
- insn_len = dc->decoder(env, dc);
223
- dc->ppc = dc->pc;
224
- dc->pc += insn_len;
225
- if (dc->clear_x) {
226
- cris_clear_x_flag(dc);
227
- }
228
+static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
229
+{
230
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
231
+ DisasJumpType is_jmp = dc->base.is_jmp;
232
+ target_ulong npc = dc->pc;
233
234
- /* Check for delayed branches here. If we do it before
235
- actually generating any host code, the simulator will just
236
- loop doing nothing for on this program location. */
237
- if (dc->delayed_branch) {
238
- dc->delayed_branch--;
239
- if (dc->delayed_branch == 0) {
240
- if (tb->flags & 7) {
241
- t_gen_movi_env_TN(dslot, 0);
242
- }
243
- if (dc->cpustate_changed || !dc->flagx_known
244
- || (dc->flags_x != (tb->flags & X_FLAG))) {
245
- cris_store_direct_jmp(dc);
246
- }
247
-
248
- if (dc->clear_locked_irq) {
249
- dc->clear_locked_irq = 0;
250
- t_gen_movi_env_TN(locked_irq, 0);
251
- }
252
-
253
- if (dc->jmp == JMP_DIRECT_CC) {
254
- TCGLabel *l1 = gen_new_label();
255
- cris_evaluate_flags(dc);
256
-
257
- /* Conditional jmp. */
258
- tcg_gen_brcondi_tl(TCG_COND_EQ,
259
- env_btaken, 0, l1);
260
- gen_goto_tb(dc, 1, dc->jmp_pc);
261
- gen_set_label(l1);
262
- gen_goto_tb(dc, 0, dc->pc);
263
- dc->base.is_jmp = DISAS_NORETURN;
264
- dc->jmp = JMP_NOJMP;
265
- } else if (dc->jmp == JMP_DIRECT) {
266
- cris_evaluate_flags(dc);
267
- gen_goto_tb(dc, 0, dc->jmp_pc);
268
- dc->base.is_jmp = DISAS_NORETURN;
269
- dc->jmp = JMP_NOJMP;
270
- } else {
271
- TCGv c = tcg_const_tl(dc->pc);
272
- t_gen_cc_jmp(env_btarget, c);
273
- tcg_temp_free(c);
274
- dc->base.is_jmp = DISAS_JUMP;
275
- }
276
- break;
277
- }
278
- }
279
-
280
- /* If we are rexecuting a branch due to exceptions on
281
- delay slots don't break. */
282
- if (!(tb->pc & 1) && cs->singlestep_enabled) {
283
- break;
284
- }
285
- } while (!dc->base.is_jmp && !dc->cpustate_changed
286
- && !tcg_op_buf_full()
287
- && !singlestep
288
- && (dc->pc - page_start < TARGET_PAGE_SIZE)
289
- && num_insns < max_insns);
290
+ if (is_jmp == DISAS_NORETURN) {
291
+ /* If we have a broken branch+delayslot sequence, it's too late. */
292
+ assert(dc->delayed_branch != 1);
293
+ return;
294
+ }
295
296
if (dc->clear_locked_irq) {
297
t_gen_movi_env_TN(locked_irq, 0);
298
}
299
300
- npc = dc->pc;
301
-
302
- /* Force an update if the per-tb cpu state has changed. */
303
- if (dc->base.is_jmp == DISAS_NEXT
304
- && (dc->cpustate_changed || !dc->flagx_known
305
- || (dc->flags_x != (tb->flags & X_FLAG)))) {
306
- dc->base.is_jmp = DISAS_UPDATE;
307
- tcg_gen_movi_tl(env_pc, npc);
308
- }
309
/* Broken branch+delayslot sequence. */
310
if (dc->delayed_branch == 1) {
311
/* Set env->dslot to the size of the branch insn. */
312
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
313
314
cris_evaluate_flags(dc);
315
316
- if (unlikely(cs->singlestep_enabled)) {
317
- if (dc->base.is_jmp == DISAS_NEXT) {
318
+ if (unlikely(dc->base.singlestep_enabled)) {
319
+ switch (is_jmp) {
320
+ case DISAS_TOO_MANY:
321
tcg_gen_movi_tl(env_pc, npc);
322
- }
323
- t_gen_raise_exception(EXCP_DEBUG);
324
- } else {
325
- switch (dc->base.is_jmp) {
326
- case DISAS_NEXT:
327
- gen_goto_tb(dc, 1, npc);
328
- break;
329
- default:
330
+ /* fall through */
331
case DISAS_JUMP:
332
case DISAS_UPDATE:
333
- /* indicate that the hash table must be used
334
- to find the next TB */
335
- tcg_gen_exit_tb(NULL, 0);
336
- break;
337
- case DISAS_NORETURN:
338
- /* nothing more to generate */
339
+ t_gen_raise_exception(EXCP_DEBUG);
340
+ return;
341
+ default:
342
break;
343
}
344
+ g_assert_not_reached();
345
}
346
- gen_tb_end(tb, num_insns);
347
348
- tb->size = dc->pc - pc_start;
349
- tb->icount = num_insns;
350
-
351
-#ifdef DEBUG_DISAS
352
-#if !DISAS_CRIS
353
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
354
- && qemu_log_in_addr_range(pc_start)) {
355
- FILE *logfile = qemu_log_lock();
356
- qemu_log("--------------\n");
357
- qemu_log("IN: %s\n", lookup_symbol(pc_start));
358
- log_target_disas(cs, pc_start, dc->pc - pc_start);
359
- qemu_log_unlock(logfile);
360
+ switch (is_jmp) {
361
+ case DISAS_TOO_MANY:
362
+ gen_goto_tb(dc, 0, npc);
363
+ break;
50
+ break;
364
+ case DISAS_JUMP:
51
+ case TCG_TYPE_I64:
365
+ case DISAS_UPDATE:
52
+ new_op = INDEX_op_mov_i64;
366
+ /* Indicate that interupts must be re-evaluated before the next TB. */
53
+ break;
367
+ tcg_gen_exit_tb(NULL, 0);
54
+ case TCG_TYPE_V64:
55
+ case TCG_TYPE_V128:
56
+ case TCG_TYPE_V256:
57
+ /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
58
+ new_op = INDEX_op_mov_vec;
368
+ break;
59
+ break;
369
+ default:
60
+ default:
370
+ g_assert_not_reached();
61
+ g_assert_not_reached();
371
}
62
}
372
-#endif
63
op->opc = new_op;
373
-#endif
64
- /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
374
+}
65
op->args[0] = dst;
66
op->args[1] = src;
67
68
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
69
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
70
TCGArg dst, uint64_t val)
71
{
72
- const TCGOpDef *def = &tcg_op_defs[op->opc];
73
- TCGType type;
74
- TCGTemp *tv;
75
-
76
- if (def->flags & TCG_OPF_VECTOR) {
77
- type = TCGOP_VECL(op) + TCG_TYPE_V64;
78
- } else if (def->flags & TCG_OPF_64BIT) {
79
- type = TCG_TYPE_I64;
80
- } else {
81
- type = TCG_TYPE_I32;
82
- }
83
-
84
/* Convert movi to mov with constant temp. */
85
- tv = tcg_constant_internal(type, val);
86
+ TCGTemp *tv = tcg_constant_internal(ctx->type, val);
375
+
87
+
376
+static void cris_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
88
init_ts_info(ctx, tv);
377
+{
89
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
378
+ if (!DISAS_CRIS) {
90
}
379
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
91
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
380
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
92
}
381
+ }
93
}
382
+}
94
95
-static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
96
+static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
97
+ uint64_t x, uint64_t y)
98
{
99
- const TCGOpDef *def = &tcg_op_defs[op];
100
uint64_t res = do_constant_folding_2(op, x, y);
101
- if (!(def->flags & TCG_OPF_64BIT)) {
102
+ if (type == TCG_TYPE_I32) {
103
res = (int32_t)res;
104
}
105
return res;
106
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
107
* Return -1 if the condition can't be simplified,
108
* and the result of the condition (0 or 1) if it can.
109
*/
110
-static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
111
+static int do_constant_folding_cond(TCGType type, TCGArg x,
112
TCGArg y, TCGCond c)
113
{
114
uint64_t xv = arg_info(x)->val;
115
uint64_t yv = arg_info(y)->val;
116
117
if (arg_is_const(x) && arg_is_const(y)) {
118
- const TCGOpDef *def = &tcg_op_defs[op];
119
- tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
120
- if (def->flags & TCG_OPF_64BIT) {
121
- return do_constant_folding_cond_64(xv, yv, c);
122
- } else {
123
+ switch (type) {
124
+ case TCG_TYPE_I32:
125
return do_constant_folding_cond_32(xv, yv, c);
126
+ case TCG_TYPE_I64:
127
+ return do_constant_folding_cond_64(xv, yv, c);
128
+ default:
129
+ /* Only scalar comparisons are optimizable */
130
+ return -1;
131
}
132
} else if (args_are_copies(x, y)) {
133
return do_constant_folding_cond_eq(c);
134
@@ -XXX,XX +XXX,XX @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
135
uint64_t t;
136
137
t = arg_info(op->args[1])->val;
138
- t = do_constant_folding(op->opc, t, 0);
139
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
140
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
141
}
142
return false;
143
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
144
uint64_t t1 = arg_info(op->args[1])->val;
145
uint64_t t2 = arg_info(op->args[2])->val;
146
147
- t1 = do_constant_folding(op->opc, t1, t2);
148
+ t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
149
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
150
}
151
return false;
152
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
153
static bool fold_brcond(OptContext *ctx, TCGOp *op)
154
{
155
TCGCond cond = op->args[2];
156
- int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
157
+ int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
158
159
if (i == 0) {
160
tcg_op_remove(ctx->tcg, op);
161
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
162
* Simplify EQ/NE comparisons where one of the pairs
163
* can be simplified.
164
*/
165
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
166
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
167
op->args[2], cond);
168
switch (i ^ inv) {
169
case 0:
170
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
171
goto do_brcond_high;
172
}
173
174
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
175
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
176
op->args[3], cond);
177
switch (i ^ inv) {
178
case 0:
179
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
180
if (arg_is_const(op->args[1])) {
181
uint64_t t = arg_info(op->args[1])->val;
182
183
- t = do_constant_folding(op->opc, t, op->args[2]);
184
+ t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
185
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
186
}
187
return false;
188
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
189
uint64_t t = arg_info(op->args[1])->val;
190
191
if (t != 0) {
192
- t = do_constant_folding(op->opc, t, 0);
193
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
194
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
195
}
196
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
197
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
198
199
static bool fold_movcond(OptContext *ctx, TCGOp *op)
200
{
201
- TCGOpcode opc = op->opc;
202
TCGCond cond = op->args[5];
203
- int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
204
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
205
206
if (i >= 0) {
207
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
208
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
209
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
210
uint64_t tv = arg_info(op->args[3])->val;
211
uint64_t fv = arg_info(op->args[4])->val;
212
+ TCGOpcode opc;
213
214
- opc = (opc == INDEX_op_movcond_i32
215
- ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
216
+ switch (ctx->type) {
217
+ case TCG_TYPE_I32:
218
+ opc = INDEX_op_setcond_i32;
219
+ break;
220
+ case TCG_TYPE_I64:
221
+ opc = INDEX_op_setcond_i64;
222
+ break;
223
+ default:
224
+ g_assert_not_reached();
225
+ }
226
227
if (tv == 1 && fv == 0) {
228
op->opc = opc;
229
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
230
static bool fold_setcond(OptContext *ctx, TCGOp *op)
231
{
232
TCGCond cond = op->args[3];
233
- int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
234
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
235
236
if (i >= 0) {
237
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
238
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
239
* Simplify EQ/NE comparisons where one of the pairs
240
* can be simplified.
241
*/
242
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
243
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
244
op->args[3], cond);
245
switch (i ^ inv) {
246
case 0:
247
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
248
goto do_setcond_high;
249
}
250
251
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
252
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
253
op->args[4], cond);
254
switch (i ^ inv) {
255
case 0:
256
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
257
init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
258
copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
259
260
+ /* Pre-compute the type of the operation. */
261
+ if (def->flags & TCG_OPF_VECTOR) {
262
+ ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
263
+ } else if (def->flags & TCG_OPF_64BIT) {
264
+ ctx.type = TCG_TYPE_I64;
265
+ } else {
266
+ ctx.type = TCG_TYPE_I32;
267
+ }
383
+
268
+
384
+static const TranslatorOps cris_tr_ops = {
269
/* For commutative operations make constant second argument */
385
+ .init_disas_context = cris_tr_init_disas_context,
270
switch (opc) {
386
+ .tb_start = cris_tr_tb_start,
271
CASE_OP_32_64_VEC(add):
387
+ .insn_start = cris_tr_insn_start,
272
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
388
+ .breakpoint_check = cris_tr_breakpoint_check,
273
/* Proceed with possible constant folding. */
389
+ .translate_insn = cris_tr_translate_insn,
274
break;
390
+ .tb_stop = cris_tr_tb_stop,
275
}
391
+ .disas_log = cris_tr_disas_log,
276
- if (opc == INDEX_op_sub_i32) {
392
+};
277
+ switch (ctx.type) {
393
+
278
+ case TCG_TYPE_I32:
394
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
279
neg_op = INDEX_op_neg_i32;
395
+{
280
have_neg = TCG_TARGET_HAS_neg_i32;
396
+ DisasContext dc;
281
- } else if (opc == INDEX_op_sub_i64) {
397
+ translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
282
+ break;
398
}
283
+ case TCG_TYPE_I64:
399
284
neg_op = INDEX_op_neg_i64;
400
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
285
have_neg = TCG_TARGET_HAS_neg_i64;
286
- } else if (TCG_TARGET_HAS_neg_vec) {
287
- TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
288
- unsigned vece = TCGOP_VECE(op);
289
- neg_op = INDEX_op_neg_vec;
290
- have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
291
- } else {
292
break;
293
+ case TCG_TYPE_V64:
294
+ case TCG_TYPE_V128:
295
+ case TCG_TYPE_V256:
296
+ neg_op = INDEX_op_neg_vec;
297
+ have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
298
+ TCGOP_VECE(op)) > 0;
299
+ break;
300
+ default:
301
+ g_assert_not_reached();
302
}
303
if (!have_neg) {
304
break;
305
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
306
TCGOpcode not_op;
307
bool have_not;
308
309
- if (def->flags & TCG_OPF_VECTOR) {
310
- not_op = INDEX_op_not_vec;
311
- have_not = TCG_TARGET_HAS_not_vec;
312
- } else if (def->flags & TCG_OPF_64BIT) {
313
- not_op = INDEX_op_not_i64;
314
- have_not = TCG_TARGET_HAS_not_i64;
315
- } else {
316
+ switch (ctx.type) {
317
+ case TCG_TYPE_I32:
318
not_op = INDEX_op_not_i32;
319
have_not = TCG_TARGET_HAS_not_i32;
320
+ break;
321
+ case TCG_TYPE_I64:
322
+ not_op = INDEX_op_not_i64;
323
+ have_not = TCG_TARGET_HAS_not_i64;
324
+ break;
325
+ case TCG_TYPE_V64:
326
+ case TCG_TYPE_V128:
327
+ case TCG_TYPE_V256:
328
+ not_op = INDEX_op_not_vec;
329
+ have_not = TCG_TARGET_HAS_not_vec;
330
+ break;
331
+ default:
332
+ g_assert_not_reached();
333
}
334
if (!have_not) {
335
break;
336
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
337
below, we can ignore high bits, but for further optimizations we
338
need to record that the high bits contain garbage. */
339
partmask = z_mask;
340
- if (!(def->flags & TCG_OPF_64BIT)) {
341
+ if (ctx.type == TCG_TYPE_I32) {
342
z_mask |= ~(tcg_target_ulong)0xffffffffu;
343
partmask &= 0xffffffffu;
344
affected &= 0xffffffffu;
401
--
345
--
402
2.25.1
346
2.25.1
403
347
404
348
diff view generated by jsdifflib
1
We will shortly require sari in other context;
1
Split out the conditional conversion from a more complex logical
2
split out both for cleanliness sake.
2
operation to a simple NOT. Create a couple more helpers to make
3
3
this easy for the outer-most logical operations.
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
tcg/ppc/tcg-target.c.inc | 17 +++++++++++++----
8
tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
8
1 file changed, 13 insertions(+), 4 deletions(-)
9
1 file changed, 86 insertions(+), 72 deletions(-)
9
10
10
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.c.inc
13
--- a/tcg/optimize.c
13
+++ b/tcg/ppc/tcg-target.c.inc
14
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
15
tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
16
return false;
16
}
17
}
17
18
18
+static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
19
+/*
20
+ * Convert @op to NOT, if NOT is supported by the host.
21
+ * Return true f the conversion is successful, which will still
22
+ * indicate that the processing is complete.
23
+ */
24
+static bool fold_not(OptContext *ctx, TCGOp *op);
25
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
19
+{
26
+{
20
+ /* Limit immediate shift count lest we create an illegal insn. */
27
+ TCGOpcode not_op;
21
+ tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
28
+ bool have_not;
29
+
30
+ switch (ctx->type) {
31
+ case TCG_TYPE_I32:
32
+ not_op = INDEX_op_not_i32;
33
+ have_not = TCG_TARGET_HAS_not_i32;
34
+ break;
35
+ case TCG_TYPE_I64:
36
+ not_op = INDEX_op_not_i64;
37
+ have_not = TCG_TARGET_HAS_not_i64;
38
+ break;
39
+ case TCG_TYPE_V64:
40
+ case TCG_TYPE_V128:
41
+ case TCG_TYPE_V256:
42
+ not_op = INDEX_op_not_vec;
43
+ have_not = TCG_TARGET_HAS_not_vec;
44
+ break;
45
+ default:
46
+ g_assert_not_reached();
47
+ }
48
+ if (have_not) {
49
+ op->opc = not_op;
50
+ op->args[1] = op->args[idx];
51
+ return fold_not(ctx, op);
52
+ }
53
+ return false;
22
+}
54
+}
23
+
55
+
24
static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
56
+/* If the binary operation has first argument @i, fold to NOT. */
25
{
57
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
26
tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
27
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
28
tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
29
}
30
31
+static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
32
+{
58
+{
33
+ tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
59
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
60
+ return fold_to_not(ctx, op, 2);
61
+ }
62
+ return false;
34
+}
63
+}
35
+
64
+
36
/* Emit a move into ret of arg, if it can be done in one insn. */
65
/* If the binary operation has second argument @i, fold to @i. */
37
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
66
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
38
{
67
{
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
68
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
40
break;
69
return false;
41
case INDEX_op_sar_i32:
70
}
42
if (const_args[2]) {
71
43
- /* Limit immediate shift count lest we create an illegal insn. */
72
+/* If the binary operation has second argument @i, fold to NOT. */
44
- tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
73
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
45
+ tcg_out_sari32(s, args[0], args[1], args[2]);
74
+{
46
} else {
75
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
47
tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
76
+ return fold_to_not(ctx, op, 1);
48
}
77
+ }
49
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
78
+ return false;
50
break;
79
+}
51
case INDEX_op_sar_i64:
80
+
52
if (const_args[2]) {
81
/* If the binary operation has both arguments equal, fold to @i. */
53
- int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
82
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
54
- tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
83
{
55
+ tcg_out_sari64(s, args[0], args[1], args[2]);
84
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
56
} else {
85
static bool fold_andc(OptContext *ctx, TCGOp *op)
57
tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
86
{
87
if (fold_const2(ctx, op) ||
88
- fold_xx_to_i(ctx, op, 0)) {
89
+ fold_xx_to_i(ctx, op, 0) ||
90
+ fold_ix_to_not(ctx, op, -1)) {
91
return true;
92
}
93
return false;
94
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
95
96
static bool fold_eqv(OptContext *ctx, TCGOp *op)
97
{
98
- return fold_const2(ctx, op);
99
+ if (fold_const2(ctx, op) ||
100
+ fold_xi_to_not(ctx, op, 0)) {
101
+ return true;
102
+ }
103
+ return false;
104
}
105
106
static bool fold_extract(OptContext *ctx, TCGOp *op)
107
@@ -XXX,XX +XXX,XX @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
108
109
static bool fold_nand(OptContext *ctx, TCGOp *op)
110
{
111
- return fold_const2(ctx, op);
112
+ if (fold_const2(ctx, op) ||
113
+ fold_xi_to_not(ctx, op, -1)) {
114
+ return true;
115
+ }
116
+ return false;
117
}
118
119
static bool fold_neg(OptContext *ctx, TCGOp *op)
120
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
121
122
static bool fold_nor(OptContext *ctx, TCGOp *op)
123
{
124
- return fold_const2(ctx, op);
125
+ if (fold_const2(ctx, op) ||
126
+ fold_xi_to_not(ctx, op, 0)) {
127
+ return true;
128
+ }
129
+ return false;
130
}
131
132
static bool fold_not(OptContext *ctx, TCGOp *op)
133
{
134
- return fold_const1(ctx, op);
135
+ if (fold_const1(ctx, op)) {
136
+ return true;
137
+ }
138
+
139
+ /* Because of fold_to_not, we want to always return true, via finish. */
140
+ finish_folding(ctx, op);
141
+ return true;
142
}
143
144
static bool fold_or(OptContext *ctx, TCGOp *op)
145
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
146
147
static bool fold_orc(OptContext *ctx, TCGOp *op)
148
{
149
- return fold_const2(ctx, op);
150
+ if (fold_const2(ctx, op) ||
151
+ fold_ix_to_not(ctx, op, 0)) {
152
+ return true;
153
+ }
154
+ return false;
155
}
156
157
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
158
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
159
static bool fold_xor(OptContext *ctx, TCGOp *op)
160
{
161
if (fold_const2(ctx, op) ||
162
- fold_xx_to_i(ctx, op, 0)) {
163
+ fold_xx_to_i(ctx, op, 0) ||
164
+ fold_xi_to_not(ctx, op, -1)) {
165
return true;
166
}
167
return false;
168
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
169
}
170
}
171
break;
172
- CASE_OP_32_64_VEC(xor):
173
- CASE_OP_32_64(nand):
174
- if (!arg_is_const(op->args[1])
175
- && arg_is_const(op->args[2])
176
- && arg_info(op->args[2])->val == -1) {
177
- i = 1;
178
- goto try_not;
179
- }
180
- break;
181
- CASE_OP_32_64(nor):
182
- if (!arg_is_const(op->args[1])
183
- && arg_is_const(op->args[2])
184
- && arg_info(op->args[2])->val == 0) {
185
- i = 1;
186
- goto try_not;
187
- }
188
- break;
189
- CASE_OP_32_64_VEC(andc):
190
- if (!arg_is_const(op->args[2])
191
- && arg_is_const(op->args[1])
192
- && arg_info(op->args[1])->val == -1) {
193
- i = 2;
194
- goto try_not;
195
- }
196
- break;
197
- CASE_OP_32_64_VEC(orc):
198
- CASE_OP_32_64(eqv):
199
- if (!arg_is_const(op->args[2])
200
- && arg_is_const(op->args[1])
201
- && arg_info(op->args[1])->val == 0) {
202
- i = 2;
203
- goto try_not;
204
- }
205
- break;
206
- try_not:
207
- {
208
- TCGOpcode not_op;
209
- bool have_not;
210
-
211
- switch (ctx.type) {
212
- case TCG_TYPE_I32:
213
- not_op = INDEX_op_not_i32;
214
- have_not = TCG_TARGET_HAS_not_i32;
215
- break;
216
- case TCG_TYPE_I64:
217
- not_op = INDEX_op_not_i64;
218
- have_not = TCG_TARGET_HAS_not_i64;
219
- break;
220
- case TCG_TYPE_V64:
221
- case TCG_TYPE_V128:
222
- case TCG_TYPE_V256:
223
- not_op = INDEX_op_not_vec;
224
- have_not = TCG_TARGET_HAS_not_vec;
225
- break;
226
- default:
227
- g_assert_not_reached();
228
- }
229
- if (!have_not) {
230
- break;
231
- }
232
- op->opc = not_op;
233
- reset_temp(op->args[0]);
234
- op->args[1] = op->args[i];
235
- continue;
236
- }
237
default:
238
break;
58
}
239
}
59
--
240
--
60
2.25.1
241
2.25.1
61
242
62
243
1
The only semantic of DISAS_TB_JUMP is that we've done goto_tb,
1
Even though there is only one user, place this more complex
2
which is the same as DISAS_NORETURN -- we've exited the tb.
2
conversion into its own helper.
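For reference, the conversion being split out rewrites "sub r, 0, b" as "neg r, b"; in two's complement 0 - b and -b are the same value, so the rewrite is valid whenever a negate is available. A minimal scalar sketch under those assumptions (names invented here, not the TCG API):

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical scalar model of the sub -> neg rewrite: it applies
     * only when the first operand is the constant 0 and the host (or
     * vector type) provides a negate. */
    static bool try_sub_to_neg(bool arg1_is_const_zero, bool have_neg,
                               uint64_t arg2, uint64_t *result)
    {
        if (!arg1_is_const_zero || !have_neg) {
            return false;               /* keep the subtract as-is */
        }
        *result = -arg2;                /* unsigned negation: 0 - arg2 */
        return true;
    }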
3
3
4
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
target/cris/translate.c | 7 +++----
7
tcg/optimize.c | 89 ++++++++++++++++++++++++++------------------------
9
1 file changed, 3 insertions(+), 4 deletions(-)
8
1 file changed, 47 insertions(+), 42 deletions(-)
10
9
11
diff --git a/target/cris/translate.c b/target/cris/translate.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/target/cris/translate.c
12
--- a/tcg/optimize.c
14
+++ b/target/cris/translate.c
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
16
/* is_jmp field values */
15
17
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
16
static bool fold_neg(OptContext *ctx, TCGOp *op)
18
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
17
{
19
-#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
18
- return fold_const1(ctx, op);
20
19
+ if (fold_const1(ctx, op)) {
21
/* Used by the decoder. */
20
+ return true;
22
#define EXTRACT_FIELD(src, start, end) \
21
+ }
23
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
22
+ /*
24
gen_goto_tb(dc, 1, dc->jmp_pc);
23
+ * Because of fold_sub_to_neg, we want to always return true,
25
gen_set_label(l1);
24
+ * via finish_folding.
26
gen_goto_tb(dc, 0, dc->pc);
25
+ */
27
- dc->base.is_jmp = DISAS_TB_JUMP;
26
+ finish_folding(ctx, op);
28
+ dc->base.is_jmp = DISAS_NORETURN;
27
+ return true;
29
dc->jmp = JMP_NOJMP;
28
}
30
} else if (dc->jmp == JMP_DIRECT) {
29
31
cris_evaluate_flags(dc);
30
static bool fold_nor(OptContext *ctx, TCGOp *op)
32
gen_goto_tb(dc, 0, dc->jmp_pc);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
33
- dc->base.is_jmp = DISAS_TB_JUMP;
32
return fold_const2(ctx, op);
34
+ dc->base.is_jmp = DISAS_NORETURN;
33
}
35
dc->jmp = JMP_NOJMP;
34
36
} else {
35
+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
37
TCGv c = tcg_const_tl(dc->pc);
36
+{
38
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
37
+ TCGOpcode neg_op;
39
to find the next TB */
38
+ bool have_neg;
40
tcg_gen_exit_tb(NULL, 0);
39
+
40
+ if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
41
+ return false;
42
+ }
43
+
44
+ switch (ctx->type) {
45
+ case TCG_TYPE_I32:
46
+ neg_op = INDEX_op_neg_i32;
47
+ have_neg = TCG_TARGET_HAS_neg_i32;
48
+ break;
49
+ case TCG_TYPE_I64:
50
+ neg_op = INDEX_op_neg_i64;
51
+ have_neg = TCG_TARGET_HAS_neg_i64;
52
+ break;
53
+ case TCG_TYPE_V64:
54
+ case TCG_TYPE_V128:
55
+ case TCG_TYPE_V256:
56
+ neg_op = INDEX_op_neg_vec;
57
+ have_neg = (TCG_TARGET_HAS_neg_vec &&
58
+ tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
+ }
63
+ if (have_neg) {
64
+ op->opc = neg_op;
65
+ op->args[1] = op->args[2];
66
+ return fold_neg(ctx, op);
67
+ }
68
+ return false;
69
+}
70
+
71
static bool fold_sub(OptContext *ctx, TCGOp *op)
72
{
73
if (fold_const2(ctx, op) ||
74
- fold_xx_to_i(ctx, op, 0)) {
75
+ fold_xx_to_i(ctx, op, 0) ||
76
+ fold_sub_to_neg(ctx, op)) {
77
return true;
78
}
79
return false;
80
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
81
continue;
82
}
41
break;
83
break;
42
- case DISAS_TB_JUMP:
84
- CASE_OP_32_64_VEC(sub):
43
+ case DISAS_NORETURN:
85
- {
44
/* nothing more to generate */
86
- TCGOpcode neg_op;
87
- bool have_neg;
88
-
89
- if (arg_is_const(op->args[2])) {
90
- /* Proceed with possible constant folding. */
91
- break;
92
- }
93
- switch (ctx.type) {
94
- case TCG_TYPE_I32:
95
- neg_op = INDEX_op_neg_i32;
96
- have_neg = TCG_TARGET_HAS_neg_i32;
97
- break;
98
- case TCG_TYPE_I64:
99
- neg_op = INDEX_op_neg_i64;
100
- have_neg = TCG_TARGET_HAS_neg_i64;
101
- break;
102
- case TCG_TYPE_V64:
103
- case TCG_TYPE_V128:
104
- case TCG_TYPE_V256:
105
- neg_op = INDEX_op_neg_vec;
106
- have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
107
- TCGOP_VECE(op)) > 0;
108
- break;
109
- default:
110
- g_assert_not_reached();
111
- }
112
- if (!have_neg) {
113
- break;
114
- }
115
- if (arg_is_const(op->args[1])
116
- && arg_info(op->args[1])->val == 0) {
117
- op->opc = neg_op;
118
- reset_temp(op->args[0]);
119
- op->args[1] = op->args[2];
120
- continue;
121
- }
122
- }
123
- break;
124
default:
45
break;
125
break;
46
}
126
}
47
--
127
--
48
2.25.1
128
2.25.1
49
129
50
130
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
Pull the "op r, a, i => mov r, a" optimization into a function,
2
and use it in the outermost logical operations.
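For reference, the constants treated as identities by this rule; a standalone sketch (the enum and names are invented for illustration, not the optimizer's code):

    #include <stdint.h>
    #include <stdbool.h>

    /* Which constant i makes "op r, a, i" the same as "mov r, a"?
     * and/orc/eqv use -1; add/sub/or/xor/andc and the shifts use 0. */
    enum model_op { M_AND, M_ORC, M_EQV, M_ADD, M_SUB, M_OR, M_XOR,
                    M_ANDC, M_SHIFT };

    static bool second_arg_is_identity(enum model_op op, uint64_t i)
    {
        switch (op) {
        case M_AND:                     /* a & -1 == a */
        case M_ORC:                     /* a | ~(-1) == a */
        case M_EQV:                     /* ~(a ^ -1) == a */
            return i == UINT64_MAX;
        default:                        /* a + 0, a - 0, a | 0, a ^ 0,
                                         * a & ~0, a << 0, ... == a */
            return i == 0;
        }
    }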
2
3
3
Implement tcg_gen_vec_add{sub}16_tl by adding the corresponding i32 ops.
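A plain-C model of the masking trick used in the i32 implementations below, to show why no carry can cross the lane boundary (illustrative only, not the TCG code):

    #include <stdint.h>

    /* Two independent 16-bit lane adds packed into one 32-bit value:
     * the low lane comes from a full-width add; the high lane from an
     * add whose low input half was cleared, so the carry out of bit 15
     * cannot reach it.  The sub variant masks b instead of a. */
    static uint32_t model_vec_add16_i32(uint32_t a, uint32_t b)
    {
        uint32_t hi = (a & ~0xffffu) + b;   /* high lane correct */
        uint32_t lo = a + b;                /* low lane correct */
        return (hi & ~0xffffu) | (lo & 0xffffu);
    }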
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
5
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
6
Message-Id: <20210624105023.3852-2-zhiwei_liu@c-sky.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
include/tcg/tcg-op-gvec.h | 13 +++++++++++++
7
tcg/optimize.c | 61 +++++++++++++++++++++-----------------------------
10
tcg/tcg-op-gvec.c | 28 ++++++++++++++++++++++++++++
8
1 file changed, 26 insertions(+), 35 deletions(-)
11
2 files changed, 41 insertions(+)
12
9
13
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-op-gvec.h
12
--- a/tcg/optimize.c
16
+++ b/include/tcg/tcg-op-gvec.h
13
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
18
void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
15
return false;
19
void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
20
21
+/* 32-bit vector operations. */
22
+void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
23
+
24
+void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
25
+
26
+#if TARGET_LONG_BITS == 64
27
+#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i64
28
+#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i64
29
+#else
30
+#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i32
31
+#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i32
32
+#endif
33
+
34
#endif
35
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/tcg/tcg-op-gvec.c
38
+++ b/tcg/tcg-op-gvec.c
39
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
40
gen_addv_mask(d, a, b, m);
41
}
16
}
42
17
43
+void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
18
+/* If the binary operation has second argument @i, fold to identity. */
19
+static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
44
+{
20
+{
45
+ TCGv_i32 t1 = tcg_temp_new_i32();
21
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
46
+ TCGv_i32 t2 = tcg_temp_new_i32();
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
47
+
23
+ }
48
+ tcg_gen_andi_i32(t1, a, ~0xffff);
24
+ return false;
49
+ tcg_gen_add_i32(t2, a, b);
50
+ tcg_gen_add_i32(t1, t1, b);
51
+ tcg_gen_deposit_i32(d, t1, t2, 0, 16);
52
+
53
+ tcg_temp_free_i32(t1);
54
+ tcg_temp_free_i32(t2);
55
+}
25
+}
56
+
26
+
57
void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
27
/* If the binary operation has second argument @i, fold to NOT. */
28
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
58
{
29
{
59
TCGv_i64 t1 = tcg_temp_new_i64();
30
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
60
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
31
61
gen_subv_mask(d, a, b, m);
32
static bool fold_add(OptContext *ctx, TCGOp *op)
33
{
34
- return fold_const2(ctx, op);
35
+ if (fold_const2(ctx, op) ||
36
+ fold_xi_to_x(ctx, op, 0)) {
37
+ return true;
38
+ }
39
+ return false;
62
}
40
}
63
41
64
+void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
42
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
65
+{
43
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
66
+ TCGv_i32 t1 = tcg_temp_new_i32();
67
+ TCGv_i32 t2 = tcg_temp_new_i32();
68
+
69
+ tcg_gen_andi_i32(t1, b, ~0xffff);
70
+ tcg_gen_sub_i32(t2, a, b);
71
+ tcg_gen_sub_i32(t1, a, t1);
72
+ tcg_gen_deposit_i32(d, t1, t2, 0, 16);
73
+
74
+ tcg_temp_free_i32(t1);
75
+ tcg_temp_free_i32(t2);
76
+}
77
+
78
void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
79
{
44
{
80
TCGv_i64 t1 = tcg_temp_new_i64();
45
if (fold_const2(ctx, op) ||
46
fold_xi_to_i(ctx, op, 0) ||
47
+ fold_xi_to_x(ctx, op, -1) ||
48
fold_xx_to_x(ctx, op)) {
49
return true;
50
}
51
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
52
{
53
if (fold_const2(ctx, op) ||
54
fold_xx_to_i(ctx, op, 0) ||
55
+ fold_xi_to_x(ctx, op, 0) ||
56
fold_ix_to_not(ctx, op, -1)) {
57
return true;
58
}
59
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
60
static bool fold_eqv(OptContext *ctx, TCGOp *op)
61
{
62
if (fold_const2(ctx, op) ||
63
+ fold_xi_to_x(ctx, op, -1) ||
64
fold_xi_to_not(ctx, op, 0)) {
65
return true;
66
}
67
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
68
static bool fold_or(OptContext *ctx, TCGOp *op)
69
{
70
if (fold_const2(ctx, op) ||
71
+ fold_xi_to_x(ctx, op, 0) ||
72
fold_xx_to_x(ctx, op)) {
73
return true;
74
}
75
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
76
static bool fold_orc(OptContext *ctx, TCGOp *op)
77
{
78
if (fold_const2(ctx, op) ||
79
+ fold_xi_to_x(ctx, op, -1) ||
80
fold_ix_to_not(ctx, op, 0)) {
81
return true;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
84
85
static bool fold_shift(OptContext *ctx, TCGOp *op)
86
{
87
- return fold_const2(ctx, op);
88
+ if (fold_const2(ctx, op) ||
89
+ fold_xi_to_x(ctx, op, 0)) {
90
+ return true;
91
+ }
92
+ return false;
93
}
94
95
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
96
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
97
{
98
if (fold_const2(ctx, op) ||
99
fold_xx_to_i(ctx, op, 0) ||
100
+ fold_xi_to_x(ctx, op, 0) ||
101
fold_sub_to_neg(ctx, op)) {
102
return true;
103
}
104
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
105
{
106
if (fold_const2(ctx, op) ||
107
fold_xx_to_i(ctx, op, 0) ||
108
+ fold_xi_to_x(ctx, op, 0) ||
109
fold_xi_to_not(ctx, op, -1)) {
110
return true;
111
}
112
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
113
break;
114
}
115
116
- /* Simplify expression for "op r, a, const => mov r, a" cases */
117
- switch (opc) {
118
- CASE_OP_32_64_VEC(add):
119
- CASE_OP_32_64_VEC(sub):
120
- CASE_OP_32_64_VEC(or):
121
- CASE_OP_32_64_VEC(xor):
122
- CASE_OP_32_64_VEC(andc):
123
- CASE_OP_32_64(shl):
124
- CASE_OP_32_64(shr):
125
- CASE_OP_32_64(sar):
126
- CASE_OP_32_64(rotl):
127
- CASE_OP_32_64(rotr):
128
- if (!arg_is_const(op->args[1])
129
- && arg_is_const(op->args[2])
130
- && arg_info(op->args[2])->val == 0) {
131
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
132
- continue;
133
- }
134
- break;
135
- CASE_OP_32_64_VEC(and):
136
- CASE_OP_32_64_VEC(orc):
137
- CASE_OP_32_64(eqv):
138
- if (!arg_is_const(op->args[1])
139
- && arg_is_const(op->args[2])
140
- && arg_info(op->args[2])->val == -1) {
141
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
142
- continue;
143
- }
144
- break;
145
- default:
146
- break;
147
- }
148
-
149
/* Simplify using known-zero bits. Currently only ops with a single
150
output argument is supported. */
151
z_mask = -1;
81
--
152
--
82
2.25.1
153
2.25.1
83
154
84
155
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
1
Pull the "op r, 0, b => movi r, 0" optimization into a function,
2
and use it in fold_shift.
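The rule is safe because shifting or rotating the constant 0 by any in-range count yields 0 again; a trivial illustration (function name invented):

    #include <stdint.h>

    /* Why "shl/shr/sar/rotl/rotr r, 0, b" may become "movi r, 0".
     * Rotate-left shown; the other four cases are analogous. */
    static uint64_t model_rotl_of_zero(unsigned count)
    {
        const uint64_t zero = 0;
        count &= 63;                                        /* valid counts */
        return (zero << count) | (zero >> (-count & 63));   /* always 0 */
    }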
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
7
---
4
tcg/ppc/tcg-target.c.inc | 64 +++++++++++++++++++++-------------------
8
tcg/optimize.c | 28 ++++++++++------------------
5
1 file changed, 34 insertions(+), 30 deletions(-)
9
1 file changed, 10 insertions(+), 18 deletions(-)
6
10
7
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/ppc/tcg-target.c.inc
13
--- a/tcg/optimize.c
10
+++ b/tcg/ppc/tcg-target.c.inc
14
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
12
tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
16
return false;
13
}
17
}
14
18
15
+static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
19
+/* If the binary operation has first argument @i, fold to @i. */
20
+static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
16
+{
21
+{
17
+ TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
22
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
18
+ TCGReg t1 = dst == src ? dst : TCG_REG_R0;
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
19
+
24
+ }
20
+ /*
25
+ return false;
21
+ * In the following,
22
+ * dep(a, b, m) -> (a & ~m) | (b & m)
23
+ *
24
+ * Begin with: src = abcdefgh
25
+ */
26
+ /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
27
+ tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
28
+ /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
29
+ tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
30
+ /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
31
+ tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
32
+
33
+ /* t0 = rol64(t0, 32) = hgfe0000 */
34
+ tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
35
+ /* t1 = rol64(src, 32) = efghabcd */
36
+ tcg_out_rld(s, RLDICL, t1, src, 32, 0);
37
+
38
+ /* t0 = dep(t0, rol32(t1, 8), 0xffffffff) = hgfebcda */
39
+ tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
40
+ /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
41
+ tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
42
+ /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
43
+ tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
44
+
45
+ tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
46
+}
26
+}
47
+
27
+
48
/* Emit a move into ret of arg, if it can be done in one insn. */
28
/* If the binary operation has first argument @i, fold to NOT. */
49
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
29
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
50
{
30
{
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
31
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
52
case INDEX_op_bswap32_i64:
32
static bool fold_shift(OptContext *ctx, TCGOp *op)
53
tcg_out_bswap32(s, args[0], args[1]);
33
{
54
break;
34
if (fold_const2(ctx, op) ||
55
-
35
+ fold_ix_to_i(ctx, op, 0) ||
56
case INDEX_op_bswap64_i64:
36
fold_xi_to_x(ctx, op, 0)) {
57
- a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
37
return true;
58
- if (a0 == a1) {
38
}
59
- a0 = TCG_REG_R0;
39
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
60
- a2 = a1;
40
break;
41
}
42
43
- /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
44
- and "sub r, 0, a => neg r, a" case. */
45
- switch (opc) {
46
- CASE_OP_32_64(shl):
47
- CASE_OP_32_64(shr):
48
- CASE_OP_32_64(sar):
49
- CASE_OP_32_64(rotl):
50
- CASE_OP_32_64(rotr):
51
- if (arg_is_const(op->args[1])
52
- && arg_info(op->args[1])->val == 0) {
53
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
54
- continue;
55
- }
56
- break;
57
- default:
58
- break;
61
- }
59
- }
62
-
60
-
63
- /* a1 = # abcd efgh */
61
/* Simplify using known-zero bits. Currently only ops with a single
64
- /* a0 = rl32(a1, 8) # 0000 fghe */
62
output argument is supported. */
65
- tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
63
z_mask = -1;
66
- /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
67
- tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
68
- /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
69
- tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
70
-
71
- /* a0 = rl64(a0, 32) # hgfe 0000 */
72
- /* a2 = rl64(a1, 32) # efgh abcd */
73
- tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
74
- tcg_out_rld(s, RLDICL, a2, a1, 32, 0);
75
-
76
- /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
77
- tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
78
- /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
79
- tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
80
- /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
81
- tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);
82
-
83
- if (a0 == 0) {
84
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
85
- }
86
+ tcg_out_bswap64(s, args[0], args[1]);
87
break;
88
89
case INDEX_op_deposit_i32:
90
--
64
--
91
2.25.1
65
2.25.1
92
66
93
67
1
Now that the middle-end can replicate the same tricks as tcg/arm
1
Move all of the known-zero optimizations into the per-opcode
2
used for optimizing bswap for signed loads and for stores, do not
2
functions. Use fold_masks when there is a possibility of the
3
pretend to have these memory ops in the backend.
3
result being determined, and simply set ctx->z_mask otherwise.
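Regarding the middle-end tricks the tcg/arm patch relies on: the bswap output flags behave roughly as follows for a 16-bit swap. A sketch (TCG_BSWAP_OZ/TCG_BSWAP_OS are the real flags; the helper itself is invented for illustration):

    #include <stdint.h>
    #include <stdbool.h>

    /* OZ zero-extends the swapped halfword into the result; OS
     * sign-extends it, which is what lets a byte-swapped signed load
     * be expressed without a dedicated backend memory op. */
    static uint64_t model_bswap16(uint64_t x, bool sign_extend)
    {
        uint16_t s = (uint16_t)(((x & 0xff) << 8) | ((x >> 8) & 0xff));
        return sign_extend ? (uint64_t)(int64_t)(int16_t)s : (uint64_t)s;
    }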
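The two masks introduced by the optimize patch read as follows; a compact decision sketch mirroring fold_masks in the hunk below (the enum and names are invented):

    #include <stdint.h>

    /* z_mask: a result bit can be 1 only where z_mask has a 1.
     * a_mask: the result can differ from the first input only where
     * a_mask has a 1. */
    enum model_action { KEEP_OP, FOLD_TO_ZERO, FOLD_TO_COPY };

    static enum model_action model_fold_masks(uint64_t z_mask,
                                              uint64_t a_mask)
    {
        if (z_mask == 0) {
            return FOLD_TO_ZERO;    /* every result bit known zero */
        }
        if (a_mask == 0) {
            return FOLD_TO_COPY;    /* result equals the first input */
        }
        return KEEP_OP;             /* masks still recorded for later */
    }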
4
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
tcg/arm/tcg-target.h | 2 +-
9
tcg/optimize.c | 545 ++++++++++++++++++++++++++-----------------------
9
tcg/arm/tcg-target.c.inc | 214 ++++++++++++++-------------------------
10
1 file changed, 294 insertions(+), 251 deletions(-)
10
2 files changed, 77 insertions(+), 139 deletions(-)
11
11
12
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/arm/tcg-target.h
14
--- a/tcg/optimize.c
15
+++ b/tcg/arm/tcg-target.h
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
16
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
17
#define TCG_TARGET_HAS_cmpsel_vec 0
17
TCGTempSet temps_used;
18
18
19
#define TCG_TARGET_DEFAULT_MO (0)
19
/* In flight values from optimization. */
20
-#define TCG_TARGET_HAS_MEMORY_BSWAP 1
20
- uint64_t z_mask;
21
+#define TCG_TARGET_HAS_MEMORY_BSWAP 0
21
+ uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
22
22
+ uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
23
/* not defined -- call should be eliminated at compile time */
23
TCGType type;
24
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
24
} OptContext;
25
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
25
26
index XXXXXXX..XXXXXXX 100644
26
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
27
--- a/tcg/arm/tcg-target.c.inc
27
return false;
28
+++ b/tcg/arm/tcg-target.c.inc
28
}
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
29
30
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
30
+static bool fold_masks(OptContext *ctx, TCGOp *op)
31
* int mmu_idx, uintptr_t ra)
31
+{
32
*/
32
+ uint64_t a_mask = ctx->a_mask;
33
-static void * const qemu_ld_helpers[16] = {
33
+ uint64_t z_mask = ctx->z_mask;
34
+static void * const qemu_ld_helpers[8] = {
34
+
35
[MO_UB] = helper_ret_ldub_mmu,
35
+ /*
36
[MO_SB] = helper_ret_ldsb_mmu,
36
+ * 32-bit ops generate 32-bit results. For the result is zero test
37
-
37
+ * below, we can ignore high bits, but for further optimizations we
38
- [MO_LEUW] = helper_le_lduw_mmu,
38
+ * need to record that the high bits contain garbage.
39
- [MO_LEUL] = helper_le_ldul_mmu,
39
+ */
40
- [MO_LEQ] = helper_le_ldq_mmu,
40
+ if (ctx->type == TCG_TYPE_I32) {
41
- [MO_LESW] = helper_le_ldsw_mmu,
41
+ ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
42
- [MO_LESL] = helper_le_ldul_mmu,
42
+ a_mask &= MAKE_64BIT_MASK(0, 32);
43
-
43
+ z_mask &= MAKE_64BIT_MASK(0, 32);
44
- [MO_BEUW] = helper_be_lduw_mmu,
44
+ }
45
- [MO_BEUL] = helper_be_ldul_mmu,
45
+
46
- [MO_BEQ] = helper_be_ldq_mmu,
46
+ if (z_mask == 0) {
47
- [MO_BESW] = helper_be_ldsw_mmu,
47
+ return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
48
- [MO_BESL] = helper_be_ldul_mmu,
48
+ }
49
+#ifdef HOST_WORDS_BIGENDIAN
49
+ if (a_mask == 0) {
50
+ [MO_UW] = helper_be_lduw_mmu,
50
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
51
+ [MO_UL] = helper_be_ldul_mmu,
51
+ }
52
+ [MO_Q] = helper_be_ldq_mmu,
52
+ return false;
53
+ [MO_SW] = helper_be_ldsw_mmu,
53
+}
54
+ [MO_SL] = helper_be_ldul_mmu,
54
+
55
+#else
55
/*
56
+ [MO_UW] = helper_le_lduw_mmu,
56
* Convert @op to NOT, if NOT is supported by the host.
57
+ [MO_UL] = helper_le_ldul_mmu,
57
* Return true f the conversion is successful, which will still
58
+ [MO_Q] = helper_le_ldq_mmu,
58
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
59
+ [MO_SW] = helper_le_ldsw_mmu,
59
60
+ [MO_SL] = helper_le_ldul_mmu,
60
static bool fold_and(OptContext *ctx, TCGOp *op)
61
+#endif
61
{
62
};
62
+ uint64_t z1, z2;
63
63
+
64
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
64
if (fold_const2(ctx, op) ||
65
* uintxx_t val, int mmu_idx, uintptr_t ra)
65
fold_xi_to_i(ctx, op, 0) ||
66
*/
66
fold_xi_to_x(ctx, op, -1) ||
67
-static void * const qemu_st_helpers[16] = {
67
fold_xx_to_x(ctx, op)) {
68
- [MO_UB] = helper_ret_stb_mmu,
68
return true;
69
- [MO_LEUW] = helper_le_stw_mmu,
69
}
70
- [MO_LEUL] = helper_le_stl_mmu,
70
- return false;
71
- [MO_LEQ] = helper_le_stq_mmu,
71
+
72
- [MO_BEUW] = helper_be_stw_mmu,
72
+ z1 = arg_info(op->args[1])->z_mask;
73
- [MO_BEUL] = helper_be_stl_mmu,
73
+ z2 = arg_info(op->args[2])->z_mask;
74
- [MO_BEQ] = helper_be_stq_mmu,
74
+ ctx->z_mask = z1 & z2;
75
+static void * const qemu_st_helpers[4] = {
75
+
76
+ [MO_8] = helper_ret_stb_mmu,
76
+ /*
77
+#ifdef HOST_WORDS_BIGENDIAN
77
+ * Known-zeros does not imply known-ones. Therefore unless
78
+ [MO_16] = helper_be_stw_mmu,
78
+ * arg2 is constant, we can't infer affected bits from it.
79
+ [MO_32] = helper_be_stl_mmu,
79
+ */
80
+ [MO_64] = helper_be_stq_mmu,
80
+ if (arg_is_const(op->args[2])) {
81
+#else
81
+ ctx->a_mask = z1 & ~z2;
82
+ [MO_16] = helper_le_stw_mmu,
82
+ }
83
+ [MO_32] = helper_le_stl_mmu,
83
+
84
+ [MO_64] = helper_le_stq_mmu,
84
+ return fold_masks(ctx, op);
85
+#endif
85
}
86
};
86
87
87
static bool fold_andc(OptContext *ctx, TCGOp *op)
88
/* Helper routines for marshalling helper function arguments into
88
{
89
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
89
+ uint64_t z1;
90
icache usage. For pre-armv6, use the signed helpers since we do
90
+
91
not have a single insn sign-extend. */
91
if (fold_const2(ctx, op) ||
92
if (use_armv6_instructions) {
92
fold_xx_to_i(ctx, op, 0) ||
93
- func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
93
fold_xi_to_x(ctx, op, 0) ||
94
+ func = qemu_ld_helpers[opc & MO_SIZE];
94
fold_ix_to_not(ctx, op, -1)) {
95
} else {
95
return true;
96
- func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
96
}
97
+ func = qemu_ld_helpers[opc & MO_SSIZE];
97
- return false;
98
if (opc & MO_SIGN) {
98
+
99
opc = MO_UL;
99
+ z1 = arg_info(op->args[1])->z_mask;
100
}
100
+
101
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
101
+ /*
102
argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
102
+ * Known-zeros does not imply known-ones. Therefore unless
103
103
+ * arg2 is constant, we can't infer anything from it.
104
/* Tail-call to the helper, which will return to the fast path. */
104
+ */
105
- tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
105
+ if (arg_is_const(op->args[2])) {
106
+ tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
106
+ uint64_t z2 = ~arg_info(op->args[2])->z_mask;
107
return true;
107
+ ctx->a_mask = z1 & ~z2;
108
}
108
+ z1 &= z2;
109
#endif /* SOFTMMU */
109
+ }
110
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
110
+ ctx->z_mask = z1;
111
TCGReg datalo, TCGReg datahi,
111
+
112
TCGReg addrlo, TCGReg addend)
112
+ return fold_masks(ctx, op);
113
{
113
}
114
- MemOp bswap = opc & MO_BSWAP;
114
115
+ /* Byte swapping is left to middle-end expansion. */
115
static bool fold_brcond(OptContext *ctx, TCGOp *op)
116
+ tcg_debug_assert((opc & MO_BSWAP) == 0);
116
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
117
117
118
switch (opc & MO_SSIZE) {
118
static bool fold_bswap(OptContext *ctx, TCGOp *op)
119
case MO_UB:
119
{
120
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
120
+ uint64_t z_mask, sign;
121
break;
121
+
122
case MO_UW:
122
if (arg_is_const(op->args[1])) {
123
tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
123
uint64_t t = arg_info(op->args[1])->val;
124
- if (bswap) {
124
125
- tcg_out_bswap16(s, COND_AL, datalo, datalo,
125
t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
126
- TCG_BSWAP_IZ | TCG_BSWAP_OZ);
126
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
127
- }
127
}
128
break;
128
- return false;
129
case MO_SW:
129
+
130
- if (bswap) {
130
+ z_mask = arg_info(op->args[1])->z_mask;
131
- tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
131
+ switch (op->opc) {
132
- tcg_out_bswap16(s, COND_AL, datalo, datalo,
132
+ case INDEX_op_bswap16_i32:
133
- TCG_BSWAP_IZ | TCG_BSWAP_OS);
133
+ case INDEX_op_bswap16_i64:
134
- } else {
134
+ z_mask = bswap16(z_mask);
135
- tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
135
+ sign = INT16_MIN;
136
- }
136
+ break;
137
+ tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
137
+ case INDEX_op_bswap32_i32:
138
break;
138
+ case INDEX_op_bswap32_i64:
139
case MO_UL:
139
+ z_mask = bswap32(z_mask);
140
- default:
140
+ sign = INT32_MIN;
141
tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
141
+ break;
142
- if (bswap) {
142
+ case INDEX_op_bswap64_i64:
143
- tcg_out_bswap32(s, COND_AL, datalo, datalo);
143
+ z_mask = bswap64(z_mask);
144
- }
144
+ sign = INT64_MIN;
145
break;
145
+ break;
146
case MO_Q:
147
- {
148
- TCGReg dl = (bswap ? datahi : datalo);
149
- TCGReg dh = (bswap ? datalo : datahi);
150
-
151
- /* Avoid ldrd for user-only emulation, to handle unaligned. */
152
- if (USING_SOFTMMU && use_armv6_instructions
153
- && (dl & 1) == 0 && dh == dl + 1) {
154
- tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
155
- } else if (dl != addend) {
156
- tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
157
- tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
158
- } else {
159
- tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
160
- addend, addrlo, SHIFT_IMM_LSL(0));
161
- tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
162
- tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
163
- }
164
- if (bswap) {
165
- tcg_out_bswap32(s, COND_AL, dl, dl);
166
- tcg_out_bswap32(s, COND_AL, dh, dh);
167
- }
168
+ /* Avoid ldrd for user-only emulation, to handle unaligned. */
169
+ if (USING_SOFTMMU && use_armv6_instructions
170
+ && (datalo & 1) == 0 && datahi == datalo + 1) {
171
+ tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
172
+ } else if (datalo != addend) {
173
+ tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
174
+ tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
175
+ } else {
176
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
177
+ addend, addrlo, SHIFT_IMM_LSL(0));
178
+ tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
179
+ tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
180
}
181
break;
182
+ default:
146
+ default:
183
+ g_assert_not_reached();
147
+ g_assert_not_reached();
184
}
148
+ }
185
}
149
+
186
150
+ switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
187
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
151
+ case TCG_BSWAP_OZ:
188
TCGReg datalo, TCGReg datahi,
152
+ break;
189
TCGReg addrlo)
153
+ case TCG_BSWAP_OS:
190
{
154
+ /* If the sign bit may be 1, force all the bits above to 1. */
191
- MemOp bswap = opc & MO_BSWAP;
155
+ if (z_mask & sign) {
192
+ /* Byte swapping is left to middle-end expansion. */
156
+ z_mask |= sign;
193
+ tcg_debug_assert((opc & MO_BSWAP) == 0);
157
+ }
194
158
+ break;
195
switch (opc & MO_SSIZE) {
159
+ default:
196
case MO_UB:
160
+ /* The high bits are undefined: force all bits above the sign to 1. */
197
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
161
+ z_mask |= sign << 1;
198
break;
162
+ break;
199
case MO_UW:
163
+ }
200
tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
164
+ ctx->z_mask = z_mask;
201
- if (bswap) {
165
+
202
- tcg_out_bswap16(s, COND_AL, datalo, datalo,
166
+ return fold_masks(ctx, op);
203
- TCG_BSWAP_IZ | TCG_BSWAP_OZ);
167
}
204
- }
168
205
break;
169
static bool fold_call(OptContext *ctx, TCGOp *op)
206
case MO_SW:
170
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
207
- if (bswap) {
171
208
- tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
172
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
209
- tcg_out_bswap16(s, COND_AL, datalo, datalo,
173
{
210
- TCG_BSWAP_IZ | TCG_BSWAP_OS);
174
+ uint64_t z_mask;
211
- } else {
175
+
212
- tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
176
if (arg_is_const(op->args[1])) {
213
- }
177
uint64_t t = arg_info(op->args[1])->val;
214
+ tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
178
215
break;
179
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
216
case MO_UL:
217
- default:
218
tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
219
- if (bswap) {
220
- tcg_out_bswap32(s, COND_AL, datalo, datalo);
221
- }
222
break;
223
case MO_Q:
224
- {
225
- TCGReg dl = (bswap ? datahi : datalo);
226
- TCGReg dh = (bswap ? datalo : datahi);
227
-
228
- /* Avoid ldrd for user-only emulation, to handle unaligned. */
229
- if (USING_SOFTMMU && use_armv6_instructions
230
- && (dl & 1) == 0 && dh == dl + 1) {
231
- tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
232
- } else if (dl == addrlo) {
233
- tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
234
- tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
235
- } else {
236
- tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
237
- tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
238
- }
239
- if (bswap) {
240
- tcg_out_bswap32(s, COND_AL, dl, dl);
241
- tcg_out_bswap32(s, COND_AL, dh, dh);
242
- }
243
+ /* Avoid ldrd for user-only emulation, to handle unaligned. */
244
+ if (USING_SOFTMMU && use_armv6_instructions
245
+ && (datalo & 1) == 0 && datahi == datalo + 1) {
246
+ tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
247
+ } else if (datalo == addrlo) {
248
+ tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
249
+ tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
250
+ } else {
251
+ tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
252
+ tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
253
}
180
}
254
break;
181
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
182
}
183
+
184
+ switch (ctx->type) {
185
+ case TCG_TYPE_I32:
186
+ z_mask = 31;
187
+ break;
188
+ case TCG_TYPE_I64:
189
+ z_mask = 63;
190
+ break;
255
+ default:
191
+ default:
256
+ g_assert_not_reached();
192
+ g_assert_not_reached();
257
}
193
+ }
258
}
194
+ ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
259
195
+
260
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
196
return false;
261
TCGReg datalo, TCGReg datahi,
197
}
262
TCGReg addrlo, TCGReg addend)
198
263
{
199
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
264
- MemOp bswap = opc & MO_BSWAP;
200
{
265
+ /* Byte swapping is left to middle-end expansion. */
201
- return fold_const1(ctx, op);
266
+ tcg_debug_assert((opc & MO_BSWAP) == 0);
202
+ if (fold_const1(ctx, op)) {
267
203
+ return true;
268
switch (opc & MO_SIZE) {
204
+ }
269
case MO_8:
205
+
270
tcg_out_st8_r(s, cond, datalo, addrlo, addend);
206
+ switch (ctx->type) {
271
break;
207
+ case TCG_TYPE_I32:
272
case MO_16:
208
+ ctx->z_mask = 32 | 31;
273
- if (bswap) {
209
+ break;
274
- tcg_out_bswap16(s, cond, TCG_REG_R0, datalo, 0);
210
+ case TCG_TYPE_I64:
275
- tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
211
+ ctx->z_mask = 64 | 63;
276
- } else {
212
+ break;
277
- tcg_out_st16_r(s, cond, datalo, addrlo, addend);
278
- }
279
+ tcg_out_st16_r(s, cond, datalo, addrlo, addend);
280
break;
281
case MO_32:
282
- default:
283
- if (bswap) {
284
- tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
285
- tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
286
- } else {
287
- tcg_out_st32_r(s, cond, datalo, addrlo, addend);
288
- }
289
+ tcg_out_st32_r(s, cond, datalo, addrlo, addend);
290
break;
291
case MO_64:
292
/* Avoid strd for user-only emulation, to handle unaligned. */
293
- if (bswap) {
294
- tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
295
- tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
296
- tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
297
- tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
298
- } else if (USING_SOFTMMU && use_armv6_instructions
299
- && (datalo & 1) == 0 && datahi == datalo + 1) {
300
+ if (USING_SOFTMMU && use_armv6_instructions
301
+ && (datalo & 1) == 0 && datahi == datalo + 1) {
302
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
303
} else {
304
tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
305
tcg_out_st32_12(s, cond, datahi, addend, 4);
306
}
307
break;
308
+ default:
213
+ default:
309
+ g_assert_not_reached();
214
+ g_assert_not_reached();
310
}
215
+ }
311
}
216
+ return false;
312
217
}
313
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
218
314
TCGReg datalo, TCGReg datahi,
219
static bool fold_deposit(OptContext *ctx, TCGOp *op)
315
TCGReg addrlo)
220
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
316
{
221
t1 = deposit64(t1, op->args[3], op->args[4], t2);
317
- MemOp bswap = opc & MO_BSWAP;
222
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
318
+ /* Byte swapping is left to middle-end expansion. */
223
}
319
+ tcg_debug_assert((opc & MO_BSWAP) == 0);
224
+
320
225
+ ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
321
switch (opc & MO_SIZE) {
226
+ op->args[3], op->args[4],
322
case MO_8:
227
+ arg_info(op->args[2])->z_mask);
323
tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
228
return false;
324
break;
229
}
325
case MO_16:
230
326
- if (bswap) {
231
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
327
- tcg_out_bswap16(s, COND_AL, TCG_REG_R0, datalo, 0);
232
328
- tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
233
static bool fold_extract(OptContext *ctx, TCGOp *op)
329
- } else {
234
{
330
- tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
235
+ uint64_t z_mask_old, z_mask;
331
- }
236
+
332
+ tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
237
if (arg_is_const(op->args[1])) {
333
break;
238
uint64_t t;
334
case MO_32:
239
335
- default:
240
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
336
- if (bswap) {
241
t = extract64(t, op->args[2], op->args[3]);
337
- tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
242
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
338
- tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
243
}
339
- } else {
244
- return false;
340
- tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
245
+
341
- }
246
+ z_mask_old = arg_info(op->args[1])->z_mask;
342
+ tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
247
+ z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
343
break;
248
+ if (op->args[2] == 0) {
344
case MO_64:
249
+ ctx->a_mask = z_mask_old ^ z_mask;
345
/* Avoid strd for user-only emulation, to handle unaligned. */
250
+ }
346
- if (bswap) {
251
+ ctx->z_mask = z_mask;
347
- tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
252
+
348
- tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
253
+ return fold_masks(ctx, op);
349
- tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
254
}
350
- tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
255
351
- } else if (USING_SOFTMMU && use_armv6_instructions
256
static bool fold_extract2(OptContext *ctx, TCGOp *op)
352
- && (datalo & 1) == 0 && datahi == datalo + 1) {
257
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
353
+ if (USING_SOFTMMU && use_armv6_instructions
258
354
+ && (datalo & 1) == 0 && datahi == datalo + 1) {
259
static bool fold_exts(OptContext *ctx, TCGOp *op)
355
tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
260
{
356
} else {
261
- return fold_const1(ctx, op);
357
tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
262
+ uint64_t z_mask_old, z_mask, sign;
358
tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
263
+ bool type_change = false;
359
}
264
+
360
break;
265
+ if (fold_const1(ctx, op)) {
266
+ return true;
267
+ }
268
+
269
+ z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
270
+
271
+ switch (op->opc) {
272
+ CASE_OP_32_64(ext8s):
273
+ sign = INT8_MIN;
274
+ z_mask = (uint8_t)z_mask;
275
+ break;
276
+ CASE_OP_32_64(ext16s):
277
+ sign = INT16_MIN;
278
+ z_mask = (uint16_t)z_mask;
279
+ break;
280
+ case INDEX_op_ext_i32_i64:
281
+ type_change = true;
282
+ QEMU_FALLTHROUGH;
283
+ case INDEX_op_ext32s_i64:
284
+ sign = INT32_MIN;
285
+ z_mask = (uint32_t)z_mask;
286
+ break;
361
+ default:
287
+ default:
362
+ g_assert_not_reached();
288
+ g_assert_not_reached();
363
}
289
+ }
364
}
290
+
365
291
+ if (z_mask & sign) {
292
+ z_mask |= sign;
293
+ } else if (!type_change) {
294
+ ctx->a_mask = z_mask_old ^ z_mask;
295
+ }
296
+ ctx->z_mask = z_mask;
297
+
298
+ return fold_masks(ctx, op);
299
}
300
301
static bool fold_extu(OptContext *ctx, TCGOp *op)
302
{
303
- return fold_const1(ctx, op);
304
+ uint64_t z_mask_old, z_mask;
305
+ bool type_change = false;
306
+
307
+ if (fold_const1(ctx, op)) {
308
+ return true;
309
+ }
310
+
311
+ z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
312
+
313
+ switch (op->opc) {
314
+ CASE_OP_32_64(ext8u):
315
+ z_mask = (uint8_t)z_mask;
316
+ break;
317
+ CASE_OP_32_64(ext16u):
318
+ z_mask = (uint16_t)z_mask;
319
+ break;
320
+ case INDEX_op_extrl_i64_i32:
321
+ case INDEX_op_extu_i32_i64:
322
+ type_change = true;
323
+ QEMU_FALLTHROUGH;
324
+ case INDEX_op_ext32u_i64:
325
+ z_mask = (uint32_t)z_mask;
326
+ break;
327
+ case INDEX_op_extrh_i64_i32:
328
+ type_change = true;
329
+ z_mask >>= 32;
330
+ break;
331
+ default:
332
+ g_assert_not_reached();
333
+ }
334
+
335
+ ctx->z_mask = z_mask;
336
+ if (!type_change) {
337
+ ctx->a_mask = z_mask_old ^ z_mask;
338
+ }
339
+ return fold_masks(ctx, op);
340
}
341
342
static bool fold_mb(OptContext *ctx, TCGOp *op)
343
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
344
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
345
}
346
347
+ ctx->z_mask = arg_info(op->args[3])->z_mask
348
+ | arg_info(op->args[4])->z_mask;
349
+
350
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
351
uint64_t tv = arg_info(op->args[3])->val;
352
uint64_t fv = arg_info(op->args[4])->val;
353
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
354
355
static bool fold_neg(OptContext *ctx, TCGOp *op)
356
{
357
+ uint64_t z_mask;
358
+
359
if (fold_const1(ctx, op)) {
360
return true;
361
}
362
+
363
+ /* Set to 1 all bits to the left of the rightmost. */
364
+ z_mask = arg_info(op->args[1])->z_mask;
365
+ ctx->z_mask = -(z_mask & -z_mask);
366
+
367
/*
368
* Because of fold_sub_to_neg, we want to always return true,
369
* via finish_folding.
370
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
371
fold_xx_to_x(ctx, op)) {
372
return true;
373
}
374
- return false;
375
+
376
+ ctx->z_mask = arg_info(op->args[1])->z_mask
377
+ | arg_info(op->args[2])->z_mask;
378
+ return fold_masks(ctx, op);
379
}
380
381
static bool fold_orc(OptContext *ctx, TCGOp *op)
382
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
383
384
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
385
{
386
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
387
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
388
+ MemOp mop = get_memop(oi);
389
+ int width = 8 * memop_size(mop);
390
+
391
+ if (!(mop & MO_SIGN) && width < 64) {
392
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
393
+ }
394
+
395
/* Opcodes that touch guest memory stop the mb optimization. */
396
ctx->prev_mb = NULL;
397
return false;
398
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
399
if (i >= 0) {
400
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
401
}
402
+
403
+ ctx->z_mask = 1;
404
return false;
405
}
406
407
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
408
op->opc = INDEX_op_setcond_i32;
409
break;
410
}
411
+
412
+ ctx->z_mask = 1;
413
return false;
414
415
do_setcond_const:
416
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
417
418
static bool fold_sextract(OptContext *ctx, TCGOp *op)
419
{
420
+ int64_t z_mask_old, z_mask;
421
+
422
if (arg_is_const(op->args[1])) {
423
uint64_t t;
424
425
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
426
t = sextract64(t, op->args[2], op->args[3]);
427
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
428
}
429
- return false;
430
+
431
+ z_mask_old = arg_info(op->args[1])->z_mask;
432
+ z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
433
+ if (op->args[2] == 0 && z_mask >= 0) {
434
+ ctx->a_mask = z_mask_old ^ z_mask;
435
+ }
436
+ ctx->z_mask = z_mask;
437
+
438
+ return fold_masks(ctx, op);
439
}
440
441
static bool fold_shift(OptContext *ctx, TCGOp *op)
442
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
443
fold_xi_to_x(ctx, op, 0)) {
444
return true;
445
}
446
+
447
+ if (arg_is_const(op->args[2])) {
448
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type,
449
+ arg_info(op->args[1])->z_mask,
450
+ arg_info(op->args[2])->val);
451
+ return fold_masks(ctx, op);
452
+ }
453
return false;
454
}
455
456
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
457
return fold_addsub2_i32(ctx, op, false);
458
}
459
460
+static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
461
+{
462
+ /* We can't do any folding with a load, but we can record bits. */
463
+ switch (op->opc) {
464
+ CASE_OP_32_64(ld8u):
465
+ ctx->z_mask = MAKE_64BIT_MASK(0, 8);
466
+ break;
467
+ CASE_OP_32_64(ld16u):
468
+ ctx->z_mask = MAKE_64BIT_MASK(0, 16);
469
+ break;
470
+ case INDEX_op_ld32u_i64:
471
+ ctx->z_mask = MAKE_64BIT_MASK(0, 32);
472
+ break;
473
+ default:
474
+ g_assert_not_reached();
475
+ }
476
+ return false;
477
+}
478
+
479
static bool fold_xor(OptContext *ctx, TCGOp *op)
480
{
481
if (fold_const2(ctx, op) ||
482
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
483
fold_xi_to_not(ctx, op, -1)) {
484
return true;
485
}
486
- return false;
487
+
488
+ ctx->z_mask = arg_info(op->args[1])->z_mask
489
+ | arg_info(op->args[2])->z_mask;
490
+ return fold_masks(ctx, op);
491
}
492
493
/* Propagate constants and copies, fold constant expressions. */
494
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
495
}
496
497
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
498
- uint64_t z_mask, partmask, affected, tmp;
499
TCGOpcode opc = op->opc;
500
const TCGOpDef *def;
501
bool done = false;
502
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
503
break;
504
}
505
506
- /* Simplify using known-zero bits. Currently only ops with a single
507
- output argument is supported. */
508
- z_mask = -1;
509
- affected = -1;
510
- switch (opc) {
511
- CASE_OP_32_64(ext8s):
512
- if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
513
- break;
514
- }
515
- QEMU_FALLTHROUGH;
516
- CASE_OP_32_64(ext8u):
517
- z_mask = 0xff;
518
- goto and_const;
519
- CASE_OP_32_64(ext16s):
520
- if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
521
- break;
522
- }
523
- QEMU_FALLTHROUGH;
524
- CASE_OP_32_64(ext16u):
525
- z_mask = 0xffff;
526
- goto and_const;
527
- case INDEX_op_ext32s_i64:
528
- if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
529
- break;
530
- }
531
- QEMU_FALLTHROUGH;
532
- case INDEX_op_ext32u_i64:
533
- z_mask = 0xffffffffU;
534
- goto and_const;
535
-
536
- CASE_OP_32_64(and):
537
- z_mask = arg_info(op->args[2])->z_mask;
538
- if (arg_is_const(op->args[2])) {
539
- and_const:
540
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
541
- }
542
- z_mask = arg_info(op->args[1])->z_mask & z_mask;
543
- break;
544
-
545
- case INDEX_op_ext_i32_i64:
546
- if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
547
- break;
548
- }
549
- QEMU_FALLTHROUGH;
550
- case INDEX_op_extu_i32_i64:
551
- /* We do not compute affected as it is a size changing op. */
552
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
553
- break;
554
-
555
- CASE_OP_32_64(andc):
556
- /* Known-zeros does not imply known-ones. Therefore unless
557
- op->args[2] is constant, we can't infer anything from it. */
558
- if (arg_is_const(op->args[2])) {
559
- z_mask = ~arg_info(op->args[2])->z_mask;
560
- goto and_const;
561
- }
562
- /* But we certainly know nothing outside args[1] may be set. */
563
- z_mask = arg_info(op->args[1])->z_mask;
564
- break;
565
-
566
- case INDEX_op_sar_i32:
567
- if (arg_is_const(op->args[2])) {
568
- tmp = arg_info(op->args[2])->val & 31;
569
- z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
570
- }
571
- break;
572
- case INDEX_op_sar_i64:
573
- if (arg_is_const(op->args[2])) {
574
- tmp = arg_info(op->args[2])->val & 63;
575
- z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
576
- }
577
- break;
578
-
579
- case INDEX_op_shr_i32:
580
- if (arg_is_const(op->args[2])) {
581
- tmp = arg_info(op->args[2])->val & 31;
582
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
583
- }
584
- break;
585
- case INDEX_op_shr_i64:
586
- if (arg_is_const(op->args[2])) {
587
- tmp = arg_info(op->args[2])->val & 63;
588
- z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
589
- }
590
- break;
591
-
592
- case INDEX_op_extrl_i64_i32:
593
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
594
- break;
595
- case INDEX_op_extrh_i64_i32:
596
- z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
597
- break;
598
-
599
- CASE_OP_32_64(shl):
600
- if (arg_is_const(op->args[2])) {
601
- tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
602
- z_mask = arg_info(op->args[1])->z_mask << tmp;
603
- }
604
- break;
605
-
606
- CASE_OP_32_64(neg):
607
- /* Set to 1 all bits to the left of the rightmost. */
608
- z_mask = -(arg_info(op->args[1])->z_mask
609
- & -arg_info(op->args[1])->z_mask);
610
- break;
611
-
612
- CASE_OP_32_64(deposit):
613
- z_mask = deposit64(arg_info(op->args[1])->z_mask,
614
- op->args[3], op->args[4],
615
- arg_info(op->args[2])->z_mask);
616
- break;
617
-
618
- CASE_OP_32_64(extract):
619
- z_mask = extract64(arg_info(op->args[1])->z_mask,
620
- op->args[2], op->args[3]);
621
- if (op->args[2] == 0) {
622
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
623
- }
624
- break;
625
- CASE_OP_32_64(sextract):
626
- z_mask = sextract64(arg_info(op->args[1])->z_mask,
627
- op->args[2], op->args[3]);
628
- if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
629
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
630
- }
631
- break;
632
-
633
- CASE_OP_32_64(or):
634
- CASE_OP_32_64(xor):
635
- z_mask = arg_info(op->args[1])->z_mask
636
- | arg_info(op->args[2])->z_mask;
637
- break;
638
-
639
- case INDEX_op_clz_i32:
640
- case INDEX_op_ctz_i32:
641
- z_mask = arg_info(op->args[2])->z_mask | 31;
642
- break;
643
-
644
- case INDEX_op_clz_i64:
645
- case INDEX_op_ctz_i64:
646
- z_mask = arg_info(op->args[2])->z_mask | 63;
647
- break;
648
-
649
- case INDEX_op_ctpop_i32:
650
- z_mask = 32 | 31;
651
- break;
652
- case INDEX_op_ctpop_i64:
653
- z_mask = 64 | 63;
654
- break;
655
-
656
- CASE_OP_32_64(setcond):
657
- case INDEX_op_setcond2_i32:
658
- z_mask = 1;
659
- break;
660
-
661
- CASE_OP_32_64(movcond):
662
- z_mask = arg_info(op->args[3])->z_mask
663
- | arg_info(op->args[4])->z_mask;
664
- break;
665
-
666
- CASE_OP_32_64(ld8u):
667
- z_mask = 0xff;
668
- break;
669
- CASE_OP_32_64(ld16u):
670
- z_mask = 0xffff;
671
- break;
672
- case INDEX_op_ld32u_i64:
673
- z_mask = 0xffffffffu;
674
- break;
675
-
676
- CASE_OP_32_64(qemu_ld):
677
- {
678
- MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
679
- MemOp mop = get_memop(oi);
680
- if (!(mop & MO_SIGN)) {
681
- z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
682
- }
683
- }
684
- break;
685
-
686
- CASE_OP_32_64(bswap16):
687
- z_mask = arg_info(op->args[1])->z_mask;
688
- if (z_mask <= 0xffff) {
689
- op->args[2] |= TCG_BSWAP_IZ;
690
- }
691
- z_mask = bswap16(z_mask);
692
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
693
- case TCG_BSWAP_OZ:
694
- break;
695
- case TCG_BSWAP_OS:
696
- z_mask = (int16_t)z_mask;
697
- break;
698
- default: /* undefined high bits */
699
- z_mask |= MAKE_64BIT_MASK(16, 48);
700
- break;
701
- }
702
- break;
703
-
704
- case INDEX_op_bswap32_i64:
705
- z_mask = arg_info(op->args[1])->z_mask;
706
- if (z_mask <= 0xffffffffu) {
707
- op->args[2] |= TCG_BSWAP_IZ;
708
- }
709
- z_mask = bswap32(z_mask);
710
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
711
- case TCG_BSWAP_OZ:
712
- break;
713
- case TCG_BSWAP_OS:
714
- z_mask = (int32_t)z_mask;
715
- break;
716
- default: /* undefined high bits */
717
- z_mask |= MAKE_64BIT_MASK(32, 32);
718
- break;
719
- }
720
- break;
721
-
722
- default:
723
- break;
724
- }
725
-
726
- /* 32-bit ops generate 32-bit results. For the result is zero test
727
- below, we can ignore high bits, but for further optimizations we
728
- need to record that the high bits contain garbage. */
729
- partmask = z_mask;
730
- if (ctx.type == TCG_TYPE_I32) {
731
- z_mask |= ~(tcg_target_ulong)0xffffffffu;
732
- partmask &= 0xffffffffu;
733
- affected &= 0xffffffffu;
734
- }
735
- ctx.z_mask = z_mask;
736
-
737
- if (partmask == 0) {
738
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
739
- continue;
740
- }
741
- if (affected == 0) {
742
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
743
- continue;
744
- }
745
+ /* Assume all bits affected, and no bits known zero. */
746
+ ctx.a_mask = -1;
747
+ ctx.z_mask = -1;
748
749
/*
750
* Process each opcode.
751
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
752
case INDEX_op_extrh_i64_i32:
753
done = fold_extu(&ctx, op);
754
break;
755
+ CASE_OP_32_64(ld8u):
756
+ CASE_OP_32_64(ld16u):
757
+ case INDEX_op_ld32u_i64:
758
+ done = fold_tcg_ld(&ctx, op);
759
+ break;
760
case INDEX_op_mb:
761
done = fold_mb(&ctx, op);
762
break;
366
--
763
--
367
2.25.1
764
2.25.1
368
765
369
766
1
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
1
Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
2
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
2
and muls2_i64.
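For reference, plain-C models of the 32-bit pair being constant-folded here (the 64-bit pair uses mulu64/muls64; these names are illustrative):

    #include <stdint.h>

    static void model_mulu2_i32(uint32_t a, uint32_t b,
                                uint32_t *lo, uint32_t *hi)
    {
        uint64_t p = (uint64_t)a * b;   /* exact 32x32 -> 64 product */
        *lo = (uint32_t)p;
        *hi = (uint32_t)(p >> 32);
    }

    static void model_muls2_i32(int32_t a, int32_t b,
                                int32_t *lo, int32_t *hi)
    {
        int64_t p = (int64_t)a * b;     /* exact signed product */
        *lo = (int32_t)p;
        *hi = (int32_t)(p >> 32);
    }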
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
7
---
5
target/cris/translate.c | 19 ++++++++++---------
8
tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
6
target/cris/translate_v10.c.inc | 6 +++---
9
1 file changed, 35 insertions(+), 9 deletions(-)
7
2 files changed, 13 insertions(+), 12 deletions(-)
8
10
9
diff --git a/target/cris/translate.c b/target/cris/translate.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
11
--- a/target/cris/translate.c
13
--- a/tcg/optimize.c
12
+++ b/target/cris/translate.c
14
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static void gen_BUG(DisasContext *dc, const char *file, int line)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
14
cpu_abort(CPU(dc->cpu), "%s:%d pc=%x\n", file, line, dc->pc);
16
return false;
15
}
17
}
16
18
17
-static const char *regnames_v32[] =
19
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
18
+static const char * const regnames_v32[] =
20
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
19
{
21
{
20
"$r0", "$r1", "$r2", "$r3",
22
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
21
"$r4", "$r5", "$r6", "$r7",
23
- uint32_t a = arg_info(op->args[2])->val;
22
"$r8", "$r9", "$r10", "$r11",
24
- uint32_t b = arg_info(op->args[3])->val;
23
"$r12", "$r13", "$sp", "$acr",
25
- uint64_t r = (uint64_t)a * b;
24
};
26
+ uint64_t a = arg_info(op->args[2])->val;
25
-static const char *pregnames_v32[] =
27
+ uint64_t b = arg_info(op->args[3])->val;
28
+ uint64_t h, l;
29
TCGArg rl, rh;
30
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
31
+ TCGOp *op2;
26
+
32
+
27
+static const char * const pregnames_v32[] =
33
+ switch (op->opc) {
28
{
34
+ case INDEX_op_mulu2_i32:
29
"$bz", "$vr", "$pid", "$srs",
35
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
30
"$wz", "$exs", "$eda", "$mof",
36
+ h = (int32_t)(l >> 32);
31
@@ -XXX,XX +XXX,XX @@ static const char *pregnames_v32[] =
37
+ l = (int32_t)l;
32
};
38
+ break;
33
39
+ case INDEX_op_muls2_i32:
34
/* We need this table to handle preg-moves with implicit width. */
40
+ l = (int64_t)(int32_t)a * (int32_t)b;
35
-static int preg_sizes[] = {
41
+ h = l >> 32;
36
+static const int preg_sizes[] = {
42
+ l = (int32_t)l;
37
1, /* bz. */
43
+ break;
38
1, /* vr. */
44
+ case INDEX_op_mulu2_i64:
39
4, /* pid. */
45
+ mulu64(&l, &h, a, b);
40
@@ -XXX,XX +XXX,XX @@ static inline void t_gen_swapw(TCGv d, TCGv s)
46
+ break;
41
((T0 >> 5) & 0x02020202) |
47
+ case INDEX_op_muls2_i64:
42
((T0 >> 7) & 0x01010101));
48
+ muls64(&l, &h, a, b);
43
*/
49
+ break;
44
-static inline void t_gen_swapr(TCGv d, TCGv s)
50
+ default:
45
+static void t_gen_swapr(TCGv d, TCGv s)
51
+ g_assert_not_reached();
46
{
52
+ }
47
- struct {
53
48
+ static const struct {
54
rl = op->args[0];
49
int shift; /* LSL when positive, LSR when negative. */
55
rh = op->args[1];
50
uint32_t mask;
56
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
51
} bitrev[] = {
57
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
52
@@ -XXX,XX +XXX,XX @@ static int dec_prep_alu_m(CPUCRISState *env, DisasContext *dc,
58
+
53
#if DISAS_CRIS
59
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
54
static const char *cc_name(int cc)
60
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
55
{
61
+
56
- static const char *cc_names[16] = {
62
+ tcg_opt_gen_movi(ctx, op, rl, l);
57
+ static const char * const cc_names[16] = {
63
+ tcg_opt_gen_movi(ctx, op2, rh, h);
58
"cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
64
return true;
59
"ls", "hi", "ge", "lt", "gt", "le", "a", "p"
65
}
60
};
66
return false;
61
@@ -XXX,XX +XXX,XX @@ static int dec_null(CPUCRISState *env, DisasContext *dc)
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
62
return 2;
68
CASE_OP_32_64(muluh):
63
}
69
done = fold_mul_highpart(&ctx, op);
64
70
break;
65
-static struct decoder_info {
71
- case INDEX_op_mulu2_i32:
66
+static const struct decoder_info {
72
- done = fold_mulu2_i32(&ctx, op);
67
struct {
73
+ CASE_OP_32_64(muls2):
68
uint32_t bits;
74
+ CASE_OP_32_64(mulu2):
69
uint32_t mask;
75
+ done = fold_multiply2(&ctx, op);
70
@@ -XXX,XX +XXX,XX @@ void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
76
break;
71
{
77
CASE_OP_32_64(nand):
72
CRISCPU *cpu = CRIS_CPU(cs);
78
done = fold_nand(&ctx, op);
73
CPUCRISState *env = &cpu->env;
74
- const char **regnames;
75
- const char **pregnames;
76
+ const char * const *regnames;
77
+ const char * const *pregnames;
78
int i;
79
80
if (!env) {
81
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
82
index XXXXXXX..XXXXXXX 100644
83
--- a/target/cris/translate_v10.c.inc
84
+++ b/target/cris/translate_v10.c.inc
85
@@ -XXX,XX +XXX,XX @@
86
#include "qemu/osdep.h"
87
#include "crisv10-decode.h"
88
89
-static const char *regnames_v10[] =
90
+static const char * const regnames_v10[] =
91
{
92
"$r0", "$r1", "$r2", "$r3",
93
"$r4", "$r5", "$r6", "$r7",
94
@@ -XXX,XX +XXX,XX @@ static const char *regnames_v10[] =
95
"$r12", "$r13", "$sp", "$pc",
96
};
97
98
-static const char *pregnames_v10[] =
99
+static const char * const pregnames_v10[] =
100
{
101
"$bz", "$vr", "$p2", "$p3",
102
"$wz", "$ccr", "$p6-prefix", "$mof",
103
@@ -XXX,XX +XXX,XX @@ static const char *pregnames_v10[] =
104
};
105
106
/* We need this table to handle preg-moves with implicit width. */
107
-static int preg_sizes_v10[] = {
108
+static const int preg_sizes_v10[] = {
109
1, /* bz. */
110
1, /* vr. */
111
1, /* pid. */
112
--
79
--
113
2.25.1
80
2.25.1
114
81
115
82
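To make the fold_multiply2 conversion above concrete, here is a minimal standalone sketch of the constant-folding arithmetic it performs. This is illustrative C, not the QEMU code; it uses __int128 (where the compiler provides it) as a stand-in for QEMU's mulu64()/muls64() helpers.

    #include <stdint.h>

    /* Sketch: fold a two-output multiply to constants.  For the i32
     * opcodes, both result halves are kept sign-extended in 64 bits,
     * matching how the optimizer represents 32-bit constants. */
    static void fold_mul2_sketch(uint64_t a, uint64_t b, int is_signed,
                                 int is_64, uint64_t *lo, uint64_t *hi)
    {
        if (!is_64) {
            uint64_t l = is_signed
                ? (uint64_t)((int64_t)(int32_t)a * (int32_t)b)
                : (uint64_t)(uint32_t)a * (uint32_t)b;
            *hi = (uint64_t)(int32_t)(l >> 32); /* sign-extended high half */
            *lo = (uint64_t)(int32_t)l;         /* sign-extended low half */
        } else if (is_signed) {
            __int128 p = (__int128)(int64_t)a * (int64_t)b;  /* muls64() */
            *lo = (uint64_t)p;
            *hi = (uint64_t)((unsigned __int128)p >> 64);
        } else {
            unsigned __int128 p = (unsigned __int128)a * b;  /* mulu64() */
            *lo = (uint64_t)p;
            *hi = (uint64_t)(p >> 64);
        }
    }
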
1
We have pre-computed the next instruction address into
1
Rename to fold_addsub2.
2
dc->base.pc_next, so we might as well use it.
2
Use Int128 to implement the wider operation.
3
3
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
target/nios2/translate.c | 12 ++++++------
9
tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++----------------
9
1 file changed, 6 insertions(+), 6 deletions(-)
10
1 file changed, 44 insertions(+), 21 deletions(-)
10
11
11
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/target/nios2/translate.c
14
--- a/tcg/optimize.c
14
+++ b/target/nios2/translate.c
15
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags)
16
@@ -XXX,XX +XXX,XX @@
16
17
*/
17
static void call(DisasContext *dc, uint32_t code, uint32_t flags)
18
19
#include "qemu/osdep.h"
20
+#include "qemu/int128.h"
21
#include "tcg/tcg-op.h"
22
#include "tcg-internal.h"
23
24
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
25
return false;
26
}
27
28
-static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
29
+static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
18
{
30
{
19
- tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
31
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
20
+ tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next);
32
arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
21
jmpi(dc, code, flags);
33
- uint32_t al = arg_info(op->args[2])->val;
34
- uint32_t ah = arg_info(op->args[3])->val;
35
- uint32_t bl = arg_info(op->args[4])->val;
36
- uint32_t bh = arg_info(op->args[5])->val;
37
- uint64_t a = ((uint64_t)ah << 32) | al;
38
- uint64_t b = ((uint64_t)bh << 32) | bl;
39
+ uint64_t al = arg_info(op->args[2])->val;
40
+ uint64_t ah = arg_info(op->args[3])->val;
41
+ uint64_t bl = arg_info(op->args[4])->val;
42
+ uint64_t bh = arg_info(op->args[5])->val;
43
TCGArg rl, rh;
44
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
45
+ TCGOp *op2;
46
47
- if (add) {
48
- a += b;
49
+ if (ctx->type == TCG_TYPE_I32) {
50
+ uint64_t a = deposit64(al, 32, 32, ah);
51
+ uint64_t b = deposit64(bl, 32, 32, bh);
52
+
53
+ if (add) {
54
+ a += b;
55
+ } else {
56
+ a -= b;
57
+ }
58
+
59
+ al = sextract64(a, 0, 32);
60
+ ah = sextract64(a, 32, 32);
61
} else {
62
- a -= b;
63
+ Int128 a = int128_make128(al, ah);
64
+ Int128 b = int128_make128(bl, bh);
65
+
66
+ if (add) {
67
+ a = int128_add(a, b);
68
+ } else {
69
+ a = int128_sub(a, b);
70
+ }
71
+
72
+ al = int128_getlo(a);
73
+ ah = int128_gethi(a);
74
}
75
76
rl = op->args[0];
77
rh = op->args[1];
78
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
79
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
80
+
81
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
82
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
83
+
84
+ tcg_opt_gen_movi(ctx, op, rl, al);
85
+ tcg_opt_gen_movi(ctx, op2, rh, ah);
86
return true;
87
}
88
return false;
22
}
89
}
23
90
24
@@ -XXX,XX +XXX,XX @@ static void br(DisasContext *dc, uint32_t code, uint32_t flags)
91
-static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
92
+static bool fold_add2(OptContext *ctx, TCGOp *op)
25
{
93
{
26
I_TYPE(instr, code);
94
- return fold_addsub2_i32(ctx, op, true);
27
95
+ return fold_addsub2(ctx, op, true);
28
- gen_goto_tb(dc, 0, dc->pc + 4 + (instr.imm16.s & -4));
29
+ gen_goto_tb(dc, 0, dc->base.pc_next + (instr.imm16.s & -4));
30
dc->base.is_jmp = DISAS_NORETURN;
31
}
96
}
32
97
33
@@ -XXX,XX +XXX,XX @@ static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
98
static bool fold_and(OptContext *ctx, TCGOp *op)
34
99
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
35
TCGLabel *l1 = gen_new_label();
100
return false;
36
tcg_gen_brcond_tl(flags, cpu_R[instr.a], cpu_R[instr.b], l1);
37
- gen_goto_tb(dc, 0, dc->pc + 4);
38
+ gen_goto_tb(dc, 0, dc->base.pc_next);
39
gen_set_label(l1);
40
- gen_goto_tb(dc, 1, dc->pc + 4 + (instr.imm16.s & -4));
41
+ gen_goto_tb(dc, 1, dc->base.pc_next + (instr.imm16.s & -4));
42
dc->base.is_jmp = DISAS_NORETURN;
43
}
101
}
44
102
45
@@ -XXX,XX +XXX,XX @@ static void nextpc(DisasContext *dc, uint32_t code, uint32_t flags)
103
-static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
46
R_TYPE(instr, code);
104
+static bool fold_sub2(OptContext *ctx, TCGOp *op)
47
105
{
48
if (likely(instr.c != R_ZERO)) {
106
- return fold_addsub2_i32(ctx, op, false);
49
- tcg_gen_movi_tl(cpu_R[instr.c], dc->pc + 4);
107
+ return fold_addsub2(ctx, op, false);
50
+ tcg_gen_movi_tl(cpu_R[instr.c], dc->base.pc_next);
51
}
52
}
108
}
53
109
54
@@ -XXX,XX +XXX,XX @@ static void callr(DisasContext *dc, uint32_t code, uint32_t flags)
110
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
55
R_TYPE(instr, code);
111
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
112
CASE_OP_32_64_VEC(add):
57
tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
113
done = fold_add(&ctx, op);
58
- tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
114
break;
59
+ tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next);
115
- case INDEX_op_add2_i32:
60
116
- done = fold_add2_i32(&ctx, op);
61
dc->base.is_jmp = DISAS_JUMP;
117
+ CASE_OP_32_64(add2):
62
}
118
+ done = fold_add2(&ctx, op);
119
break;
120
CASE_OP_32_64_VEC(and):
121
done = fold_and(&ctx, op);
122
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
123
CASE_OP_32_64_VEC(sub):
124
done = fold_sub(&ctx, op);
125
break;
126
- case INDEX_op_sub2_i32:
127
- done = fold_sub2_i32(&ctx, op);
128
+ CASE_OP_32_64(sub2):
129
+ done = fold_sub2(&ctx, op);
130
break;
131
CASE_OP_32_64_VEC(xor):
132
done = fold_xor(&ctx, op);
63
--
133
--
64
2.25.1
134
2.25.1
65
135
66
136
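The Int128 arithmetic in the fold_addsub2 patch above reduces, on a 64-bit host, to an add or subtract with explicit carry between the halves. A sketch of what int128_add()/int128_sub() compute for this fold (illustrative C only):

    #include <stdint.h>

    /* Sketch: double-word add/sub on (hi:lo) pairs, as fold_addsub2
     * now does for the _i64 opcodes via Int128. */
    static void addsub2_sketch(uint64_t al, uint64_t ah,
                               uint64_t bl, uint64_t bh, int add,
                               uint64_t *rl, uint64_t *rh)
    {
        if (add) {
            uint64_t l = al + bl;
            *rl = l;
            *rh = ah + bh + (l < al);  /* carry out of the low half */
        } else {
            *rl = al - bl;
            *rh = ah - bh - (al < bl); /* borrow into the high half */
        }
    }

For the _i32 opcodes the same fold simply packs both halves into one uint64_t, which is why the patch only needs Int128 for the 64-bit case.
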
1
Migrate the is_jmp, tb and singlestep_enabled fields from
1
Most of these are handled by creating a fold_const2_commutative
2
DisasContext into the base. Use pc_first instead of tb->pc.
2
to handle all of the binary operators. The rest were already
3
Increment pc_next prior to decode, leaving the address of
3
handled on a case-by-case basis in the switch, and have their
4
the current insn in dc->pc.
4
own fold function in which to place the call.
5
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
We now have only one major switch on TCGOpcode.
7
8
Introduce NO_DEST and a block comment for swap_commutative in
9
order to make the handling of brcond and movcond opcodes cleaner.
10
11
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
13
---
9
target/nios2/translate.c | 70 +++++++++++++++++++++-------------------
14
tcg/optimize.c | 142 ++++++++++++++++++++++++-------------------------
10
1 file changed, 36 insertions(+), 34 deletions(-)
15
1 file changed, 70 insertions(+), 72 deletions(-)
11
16
12
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
17
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
14
--- a/target/nios2/translate.c
19
--- a/tcg/optimize.c
15
+++ b/target/nios2/translate.c
20
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
17
}
22
return -1;
18
19
typedef struct DisasContext {
20
+ DisasContextBase base;
21
TCGv_i32 zero;
22
- int is_jmp;
23
target_ulong pc;
24
- TranslationBlock *tb;
25
int mem_idx;
26
- bool singlestep_enabled;
27
} DisasContext;
28
29
static TCGv cpu_R[NUM_CORE_REGS];
30
@@ -XXX,XX +XXX,XX @@ static void t_gen_helper_raise_exception(DisasContext *dc,
31
tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
32
gen_helper_raise_exception(cpu_env, tmp);
33
tcg_temp_free_i32(tmp);
34
- dc->is_jmp = DISAS_NORETURN;
35
+ dc->base.is_jmp = DISAS_NORETURN;
36
}
23
}
37
24
38
static bool use_goto_tb(DisasContext *dc, uint32_t dest)
25
+/**
39
{
26
+ * swap_commutative:
40
- if (unlikely(dc->singlestep_enabled)) {
27
+ * @dest: TCGArg of the destination argument, or NO_DEST.
41
+ if (unlikely(dc->base.singlestep_enabled)) {
28
+ * @p1: first paired argument
42
return false;
29
+ * @p2: second paired argument
43
}
30
+ *
44
31
+ * If *@p1 is a constant and *@p2 is not, swap.
45
#ifndef CONFIG_USER_ONLY
32
+ * If *@p2 matches @dest, swap.
46
- return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
33
+ * Return true if a swap was performed.
47
+ return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
34
+ */
48
#else
35
+
49
return true;
36
+#define NO_DEST temp_arg(NULL)
50
#endif
37
+
51
@@ -XXX,XX +XXX,XX @@ static bool use_goto_tb(DisasContext *dc, uint32_t dest)
38
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
52
39
{
53
static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
40
TCGArg a1 = *p1, a2 = *p2;
54
{
41
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
55
- TranslationBlock *tb = dc->tb;
42
return false;
56
+ const TranslationBlock *tb = dc->base.tb;
57
58
if (use_goto_tb(dc, dest)) {
59
tcg_gen_goto_tb(n);
60
@@ -XXX,XX +XXX,XX @@ static void gen_excp(DisasContext *dc, uint32_t code, uint32_t flags)
61
62
static void gen_check_supervisor(DisasContext *dc)
63
{
64
- if (dc->tb->flags & CR_STATUS_U) {
65
+ if (dc->base.tb->flags & CR_STATUS_U) {
66
/* CPU in user mode, privileged instruction called, stop. */
67
t_gen_helper_raise_exception(dc, EXCP_SUPERI);
68
}
69
@@ -XXX,XX +XXX,XX @@ static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags)
70
{
71
J_TYPE(instr, code);
72
gen_goto_tb(dc, 0, (dc->pc & 0xF0000000) | (instr.imm26 << 2));
73
- dc->is_jmp = DISAS_NORETURN;
74
+ dc->base.is_jmp = DISAS_NORETURN;
75
}
43
}
76
44
77
static void call(DisasContext *dc, uint32_t code, uint32_t flags)
45
+static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
78
@@ -XXX,XX +XXX,XX @@ static void br(DisasContext *dc, uint32_t code, uint32_t flags)
46
+{
79
I_TYPE(instr, code);
47
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
80
48
+ return fold_const2(ctx, op);
81
gen_goto_tb(dc, 0, dc->pc + 4 + (instr.imm16.s & -4));
49
+}
82
- dc->is_jmp = DISAS_NORETURN;
50
+
83
+ dc->base.is_jmp = DISAS_NORETURN;
51
static bool fold_masks(OptContext *ctx, TCGOp *op)
52
{
53
uint64_t a_mask = ctx->a_mask;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
55
56
static bool fold_add(OptContext *ctx, TCGOp *op)
57
{
58
- if (fold_const2(ctx, op) ||
59
+ if (fold_const2_commutative(ctx, op) ||
60
fold_xi_to_x(ctx, op, 0)) {
61
return true;
62
}
63
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
64
65
static bool fold_add2(OptContext *ctx, TCGOp *op)
66
{
67
+ /* Note that the high and low parts may be independently swapped. */
68
+ swap_commutative(op->args[0], &op->args[2], &op->args[4]);
69
+ swap_commutative(op->args[1], &op->args[3], &op->args[5]);
70
+
71
return fold_addsub2(ctx, op, true);
84
}
72
}
85
73
86
static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
74
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
87
@@ -XXX,XX +XXX,XX @@ static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
75
{
88
gen_goto_tb(dc, 0, dc->pc + 4);
76
uint64_t z1, z2;
89
gen_set_label(l1);
77
90
gen_goto_tb(dc, 1, dc->pc + 4 + (instr.imm16.s & -4));
78
- if (fold_const2(ctx, op) ||
91
- dc->is_jmp = DISAS_NORETURN;
79
+ if (fold_const2_commutative(ctx, op) ||
92
+ dc->base.is_jmp = DISAS_NORETURN;
80
fold_xi_to_i(ctx, op, 0) ||
93
}
81
fold_xi_to_x(ctx, op, -1) ||
94
82
fold_xx_to_x(ctx, op)) {
95
/* Comparison instructions */
83
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
96
@@ -XXX,XX +XXX,XX @@ static void eret(DisasContext *dc, uint32_t code, uint32_t flags)
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
97
tcg_gen_mov_tl(cpu_R[CR_STATUS], cpu_R[CR_ESTATUS]);
85
{
98
tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_EA]);
86
TCGCond cond = op->args[2];
99
87
- int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
100
- dc->is_jmp = DISAS_JUMP;
88
+ int i;
101
+ dc->base.is_jmp = DISAS_JUMP;
89
102
}
90
+ if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
103
91
+ op->args[2] = cond = tcg_swap_cond(cond);
104
/* PC <- ra */
92
+ }
105
@@ -XXX,XX +XXX,XX @@ static void ret(DisasContext *dc, uint32_t code, uint32_t flags)
93
+
106
{
94
+ i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
107
tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_RA]);
95
if (i == 0) {
108
96
tcg_op_remove(ctx->tcg, op);
109
- dc->is_jmp = DISAS_JUMP;
97
return true;
110
+ dc->base.is_jmp = DISAS_JUMP;
98
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
111
}
99
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
112
100
{
113
/* PC <- ba */
101
TCGCond cond = op->args[4];
114
@@ -XXX,XX +XXX,XX @@ static void bret(DisasContext *dc, uint32_t code, uint32_t flags)
102
- int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
115
{
103
TCGArg label = op->args[5];
116
tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_BA]);
104
- int inv = 0;
117
105
+ int i, inv = 0;
118
- dc->is_jmp = DISAS_JUMP;
106
119
+ dc->base.is_jmp = DISAS_JUMP;
107
+ if (swap_commutative2(&op->args[0], &op->args[2])) {
120
}
108
+ op->args[4] = cond = tcg_swap_cond(cond);
121
109
+ }
122
/* PC <- rA */
110
+
123
@@ -XXX,XX +XXX,XX @@ static void jmp(DisasContext *dc, uint32_t code, uint32_t flags)
111
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
124
112
if (i >= 0) {
125
tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
113
goto do_brcond_const;
126
114
}
127
- dc->is_jmp = DISAS_JUMP;
115
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
128
+ dc->base.is_jmp = DISAS_JUMP;
116
129
}
117
static bool fold_eqv(OptContext *ctx, TCGOp *op)
130
118
{
131
/* rC <- PC + 4 */
119
- if (fold_const2(ctx, op) ||
132
@@ -XXX,XX +XXX,XX @@ static void callr(DisasContext *dc, uint32_t code, uint32_t flags)
120
+ if (fold_const2_commutative(ctx, op) ||
133
tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
121
fold_xi_to_x(ctx, op, -1) ||
134
tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
122
fold_xi_to_not(ctx, op, 0)) {
135
123
return true;
136
- dc->is_jmp = DISAS_JUMP;
124
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
137
+ dc->base.is_jmp = DISAS_JUMP;
125
static bool fold_movcond(OptContext *ctx, TCGOp *op)
138
}
126
{
139
127
TCGCond cond = op->args[5];
140
/* rC <- ctlN */
128
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
141
@@ -XXX,XX +XXX,XX @@ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags)
129
+ int i;
142
/* If interrupts were enabled using WRCTL, trigger them. */
130
143
#if !defined(CONFIG_USER_ONLY)
131
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
144
if ((instr.imm5 + CR_BASE) == CR_STATUS) {
132
+ op->args[5] = cond = tcg_swap_cond(cond);
145
- if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
133
+ }
146
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
134
+ /*
147
gen_io_start();
135
+ * Canonicalize the "false" input reg to match the destination reg so
136
+ * that the tcg backend can implement a "move if true" operation.
137
+ */
138
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
139
+ op->args[5] = cond = tcg_invert_cond(cond);
140
+ }
141
+
142
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
143
if (i >= 0) {
144
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
145
}
146
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
147
148
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
149
{
150
- if (fold_const2(ctx, op) ||
151
+ if (fold_const2_commutative(ctx, op) ||
152
fold_xi_to_i(ctx, op, 0)) {
153
return true;
154
}
155
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
156
157
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
158
{
159
+ swap_commutative(op->args[0], &op->args[2], &op->args[3]);
160
+
161
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
162
uint64_t a = arg_info(op->args[2])->val;
163
uint64_t b = arg_info(op->args[3])->val;
164
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
165
166
static bool fold_nand(OptContext *ctx, TCGOp *op)
167
{
168
- if (fold_const2(ctx, op) ||
169
+ if (fold_const2_commutative(ctx, op) ||
170
fold_xi_to_not(ctx, op, -1)) {
171
return true;
172
}
173
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
174
175
static bool fold_nor(OptContext *ctx, TCGOp *op)
176
{
177
- if (fold_const2(ctx, op) ||
178
+ if (fold_const2_commutative(ctx, op) ||
179
fold_xi_to_not(ctx, op, 0)) {
180
return true;
181
}
182
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
183
184
static bool fold_or(OptContext *ctx, TCGOp *op)
185
{
186
- if (fold_const2(ctx, op) ||
187
+ if (fold_const2_commutative(ctx, op) ||
188
fold_xi_to_x(ctx, op, 0) ||
189
fold_xx_to_x(ctx, op)) {
190
return true;
191
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
192
static bool fold_setcond(OptContext *ctx, TCGOp *op)
193
{
194
TCGCond cond = op->args[3];
195
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
196
+ int i;
197
198
+ if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
199
+ op->args[3] = cond = tcg_swap_cond(cond);
200
+ }
201
+
202
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
203
if (i >= 0) {
204
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
205
}
206
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
207
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
208
{
209
TCGCond cond = op->args[5];
210
- int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
211
- int inv = 0;
212
+ int i, inv = 0;
213
214
+ if (swap_commutative2(&op->args[1], &op->args[3])) {
215
+ op->args[5] = cond = tcg_swap_cond(cond);
216
+ }
217
+
218
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
219
if (i >= 0) {
220
goto do_setcond_const;
221
}
222
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
223
224
static bool fold_xor(OptContext *ctx, TCGOp *op)
225
{
226
- if (fold_const2(ctx, op) ||
227
+ if (fold_const2_commutative(ctx, op) ||
228
fold_xx_to_i(ctx, op, 0) ||
229
fold_xi_to_x(ctx, op, 0) ||
230
fold_xi_to_not(ctx, op, -1)) {
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
ctx.type = TCG_TYPE_I32;
148
}
233
}
149
gen_helper_check_interrupts(cpu_env);
234
150
- dc->is_jmp = DISAS_UPDATE;
235
- /* For commutative operations make constant second argument */
151
+ dc->base.is_jmp = DISAS_UPDATE;
236
- switch (opc) {
152
}
237
- CASE_OP_32_64_VEC(add):
153
#endif
238
- CASE_OP_32_64_VEC(mul):
154
}
239
- CASE_OP_32_64_VEC(and):
155
@@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *dc, uint32_t excp)
240
- CASE_OP_32_64_VEC(or):
156
tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
241
- CASE_OP_32_64_VEC(xor):
157
gen_helper_raise_exception(cpu_env, tmp);
242
- CASE_OP_32_64(eqv):
158
tcg_temp_free_i32(tmp);
243
- CASE_OP_32_64(nand):
159
- dc->is_jmp = DISAS_NORETURN;
244
- CASE_OP_32_64(nor):
160
+ dc->base.is_jmp = DISAS_NORETURN;
245
- CASE_OP_32_64(muluh):
161
}
246
- CASE_OP_32_64(mulsh):
162
247
- swap_commutative(op->args[0], &op->args[1], &op->args[2]);
163
/* generate intermediate code for basic block 'tb'. */
248
- break;
164
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
249
- CASE_OP_32_64(brcond):
165
int num_insns;
250
- if (swap_commutative(-1, &op->args[0], &op->args[1])) {
166
251
- op->args[2] = tcg_swap_cond(op->args[2]);
167
/* Initialize DC */
252
- }
168
- dc->is_jmp = DISAS_NEXT;
253
- break;
169
- dc->pc = tb->pc;
254
- CASE_OP_32_64(setcond):
170
- dc->tb = tb;
255
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
171
+
256
- op->args[3] = tcg_swap_cond(op->args[3]);
172
+ dc->base.tb = tb;
257
- }
173
+ dc->base.singlestep_enabled = cs->singlestep_enabled;
258
- break;
174
+ dc->base.is_jmp = DISAS_NEXT;
259
- CASE_OP_32_64(movcond):
175
+ dc->base.pc_first = tb->pc;
260
- if (swap_commutative(-1, &op->args[1], &op->args[2])) {
176
+ dc->base.pc_next = tb->pc;
261
- op->args[5] = tcg_swap_cond(op->args[5]);
177
+
262
- }
178
dc->mem_idx = cpu_mmu_index(env, false);
263
- /* For movcond, we canonicalize the "false" input reg to match
179
- dc->singlestep_enabled = cs->singlestep_enabled;
264
- the destination reg so that the tcg backend can implement
180
265
- a "move if true" operation. */
181
/* Set up instruction counts */
266
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
182
num_insns = 0;
267
- op->args[5] = tcg_invert_cond(op->args[5]);
183
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
268
- }
184
269
- break;
185
gen_tb_start(tb);
270
- CASE_OP_32_64(add2):
186
do {
271
- swap_commutative(op->args[0], &op->args[2], &op->args[4]);
187
- tcg_gen_insn_start(dc->pc);
272
- swap_commutative(op->args[1], &op->args[3], &op->args[5]);
188
+ tcg_gen_insn_start(dc->base.pc_next);
273
- break;
189
num_insns++;
274
- CASE_OP_32_64(mulu2):
190
275
- CASE_OP_32_64(muls2):
191
- if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
276
- swap_commutative(op->args[0], &op->args[2], &op->args[3]);
192
+ if (unlikely(cpu_breakpoint_test(cs, dc->base.pc_next, BP_ANY))) {
277
- break;
193
gen_exception(dc, EXCP_DEBUG);
278
- case INDEX_op_brcond2_i32:
194
/* The address covered by the breakpoint must be included in
279
- if (swap_commutative2(&op->args[0], &op->args[2])) {
195
[tb->pc, tb->pc + tb->size) in order to for it to be
280
- op->args[4] = tcg_swap_cond(op->args[4]);
196
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
281
- }
197
gen_io_start();
282
- break;
198
}
283
- case INDEX_op_setcond2_i32:
199
284
- if (swap_commutative2(&op->args[1], &op->args[3])) {
200
+ dc->pc = dc->base.pc_next;
285
- op->args[5] = tcg_swap_cond(op->args[5]);
201
+ dc->base.pc_next += 4;
286
- }
202
+
287
- break;
203
/* Decode an instruction */
288
- default:
204
handle_instruction(dc, env);
289
- break;
205
290
- }
206
- dc->pc += 4;
207
-
291
-
208
/* Translation stops when a conditional branch is encountered.
292
/* Assume all bits affected, and no bits known zero. */
209
* Otherwise the subsequent code could get translated several times.
293
ctx.a_mask = -1;
210
* Also stop translation when a page boundary is reached. This
294
ctx.z_mask = -1;
211
* ensures prefetch aborts occur at the right place. */
212
- } while (!dc->is_jmp &&
213
+ } while (!dc->base.is_jmp &&
214
!tcg_op_buf_full() &&
215
num_insns < max_insns);
216
217
/* Indicate where the next block should start */
218
- switch (dc->is_jmp) {
219
+ switch (dc->base.is_jmp) {
220
case DISAS_NEXT:
221
case DISAS_UPDATE:
222
/* Save the current PC back into the CPU register */
223
- tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
224
+ tcg_gen_movi_tl(cpu_R[R_PC], dc->base.pc_next);
225
tcg_gen_exit_tb(NULL, 0);
226
break;
227
228
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
229
gen_tb_end(tb, num_insns);
230
231
/* Mark instruction starts for the final generated instruction */
232
- tb->size = dc->pc - tb->pc;
233
+ tb->size = dc->base.pc_next - dc->base.pc_first;
234
tb->icount = num_insns;
235
236
#ifdef DEBUG_DISAS
237
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
238
- && qemu_log_in_addr_range(tb->pc)) {
239
+ && qemu_log_in_addr_range(dc->base.pc_first)) {
240
FILE *logfile = qemu_log_lock();
241
- qemu_log("IN: %s\n", lookup_symbol(tb->pc));
242
- log_target_disas(cs, tb->pc, dc->pc - tb->pc);
243
+ qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
244
+ log_target_disas(cs, tb->pc, tb->size);
245
qemu_log("\n");
246
qemu_log_unlock(logfile);
247
}
248
--
295
--
249
2.25.1
296
2.25.1
250
297
251
298
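The canonicalization that fold_const2_commutative builds on is small enough to sketch in full. Below is a simplified model of the swap_commutative rule described above; the Arg type and is_const() helper are stand-ins for the real TCG types, not its API.

    #include <stdbool.h>

    typedef int Arg;              /* stand-in for TCGArg */
    extern bool is_const(Arg a);  /* stand-in for arg_is_const() */
    #define NO_DEST (-1)          /* no output operand, as for brcond */

    /* Put a constant operand second, so every fold only has to check
     * the second slot; otherwise prefer the form "op a, a, b", which
     * backends that operate in place handle better.  The caller must
     * also swap the comparison condition, if one is attached. */
    static bool swap_commutative_sketch(Arg dest, Arg *p1, Arg *p2)
    {
        Arg a1 = *p1, a2 = *p2;
        int sum = (int)is_const(a1) - (int)is_const(a2);

        if (sum > 0 || (sum == 0 && dest == a2)) {
            *p1 = a2;
            *p2 = a1;
            return true;
        }
        return false;
    }
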
1
We always know the exact value of X; that's all that matters.
1
This "garbage" setting pre-dates the addition of the type
2
This avoids splitting the TB e.g. between "ax" and "addq".
2
changing opcodes INDEX_op_ext_i32_i64, INDEX_op_extu_i32_i64,
3
and INDEX_op_extr{l,h}_i64_i32.
3
4
4
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
5
So now we have a definitive point at which to adjust z_mask
5
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
to eliminate such bits from the 32-bit operands.
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
11
---
8
target/cris/translate.c | 3 ---
12
tcg/optimize.c | 35 ++++++++++++++++-------------------
9
1 file changed, 3 deletions(-)
13
1 file changed, 16 insertions(+), 19 deletions(-)
10
14
11
diff --git a/target/cris/translate.c b/target/cris/translate.c
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/target/cris/translate.c
17
--- a/tcg/optimize.c
14
+++ b/target/cris/translate.c
18
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
19
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
16
cris_clear_x_flag(dc);
20
ti->is_const = true;
21
ti->val = ts->val;
22
ti->z_mask = ts->val;
23
- if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
24
- /* High bits of a 32-bit quantity are garbage. */
25
- ti->z_mask |= ~0xffffffffull;
26
- }
27
} else {
28
ti->is_const = false;
29
ti->z_mask = -1;
30
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
31
TCGTemp *src_ts = arg_temp(src);
32
TempOptInfo *di;
33
TempOptInfo *si;
34
- uint64_t z_mask;
35
TCGOpcode new_op;
36
37
if (ts_are_copies(dst_ts, src_ts)) {
38
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
39
op->args[0] = dst;
40
op->args[1] = src;
41
42
- z_mask = si->z_mask;
43
- if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
44
- /* High bits of the destination are now garbage. */
45
- z_mask |= ~0xffffffffull;
46
- }
47
- di->z_mask = z_mask;
48
+ di->z_mask = si->z_mask;
49
50
if (src_ts->type == dst_ts->type) {
51
TempOptInfo *ni = ts_info(si->next_copy);
52
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
53
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
54
TCGArg dst, uint64_t val)
55
{
56
- /* Convert movi to mov with constant temp. */
57
- TCGTemp *tv = tcg_constant_internal(ctx->type, val);
58
+ TCGTemp *tv;
59
60
+ if (ctx->type == TCG_TYPE_I32) {
61
+ val = (int32_t)val;
62
+ }
63
+
64
+ /* Convert movi to mov with constant temp. */
65
+ tv = tcg_constant_internal(ctx->type, val);
66
init_ts_info(ctx, tv);
67
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
68
}
69
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
70
uint64_t z_mask = ctx->z_mask;
71
72
/*
73
- * 32-bit ops generate 32-bit results. For the result is zero test
74
- * below, we can ignore high bits, but for further optimizations we
75
- * need to record that the high bits contain garbage.
76
+ * 32-bit ops generate 32-bit results, which for the purpose of
77
+ * simplifying tcg are sign-extended. Certainly that's how we
78
+ * represent our constants elsewhere. Note that the bits will
79
+ * be reset properly for a 64-bit value when encountering the
80
+ * type changing opcodes.
81
*/
82
if (ctx->type == TCG_TYPE_I32) {
83
- ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
84
- a_mask &= MAKE_64BIT_MASK(0, 32);
85
- z_mask &= MAKE_64BIT_MASK(0, 32);
86
+ a_mask = (int32_t)a_mask;
87
+ z_mask = (int32_t)z_mask;
88
+ ctx->z_mask = z_mask;
17
}
89
}
18
90
19
- /* Fold unhandled changes to X_FLAG into cpustate_changed. */
91
if (z_mask == 0) {
20
- dc->cpustate_changed |= dc->flags_x != (dc->base.tb->flags & X_FLAG);
21
-
22
/*
23
* All branches are delayed branches, handled immediately below.
24
* We don't expect to see odd combinations of exit conditions.
25
--
92
--
26
2.25.1
93
2.25.1
27
94
28
95
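The invariant adopted by the "garbage"-removal patch above is that a constant attached to a 32-bit temp is always stored sign-extended to 64 bits. A tiny self-contained illustration of the canonical form (illustrative C, not the QEMU code):

    #include <assert.h>
    #include <stdint.h>

    /* Sketch: canonicalize a constant for a TCG_TYPE_I32 value, as
     * tcg_opt_gen_movi now does before creating the constant temp. */
    static uint64_t canon_i32_const(uint64_t val)
    {
        return (uint64_t)(int32_t)val; /* bits 63..32 replicate bit 31 */
    }

    int main(void)
    {
        assert(canon_i32_const(0x00000000ffffffffull) == ~0ull);
        assert(canon_i32_const(0x123456780ull) == 0x23456780ull);
        return 0;
    }
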
1
We can perform any required sign-extension via TCG_BSWAP_OS.
1
Recognize the constant function for or-complement.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
tcg/tcg-op.c | 24 ++++++++++--------------
8
tcg/optimize.c | 1 +
8
1 file changed, 10 insertions(+), 14 deletions(-)
9
1 file changed, 1 insertion(+)
9
10
10
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg-op.c
13
--- a/tcg/optimize.c
13
+++ b/tcg/tcg-op.c
14
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
15
orig_memop = memop;
16
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
17
{
17
memop &= ~MO_BSWAP;
18
if (fold_const2(ctx, op) ||
18
- /* The bswap primitive requires zero-extended input. */
19
+ fold_xx_to_i(ctx, op, -1) ||
19
+ /* The bswap primitive benefits from zero-extended input. */
20
fold_xi_to_x(ctx, op, -1) ||
20
if ((memop & MO_SSIZE) == MO_SW) {
21
fold_ix_to_not(ctx, op, 0)) {
21
memop &= ~MO_SIGN;
22
return true;
22
}
23
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
24
if ((orig_memop ^ memop) & MO_BSWAP) {
25
switch (orig_memop & MO_SIZE) {
26
case MO_16:
27
- tcg_gen_bswap16_i32(val, val, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
28
- if (orig_memop & MO_SIGN) {
29
- tcg_gen_ext16s_i32(val, val);
30
- }
31
+ tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
32
+ ? TCG_BSWAP_IZ | TCG_BSWAP_OS
33
+ : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
34
break;
35
case MO_32:
36
tcg_gen_bswap32_i32(val, val);
37
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
38
orig_memop = memop;
39
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
40
memop &= ~MO_BSWAP;
41
- /* The bswap primitive requires zero-extended input. */
42
+ /* The bswap primitive benefits from zero-extended input. */
43
if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
44
memop &= ~MO_SIGN;
45
}
46
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
47
plugin_gen_mem_callbacks(addr, info);
48
49
if ((orig_memop ^ memop) & MO_BSWAP) {
50
+ int flags = (orig_memop & MO_SIGN
51
+ ? TCG_BSWAP_IZ | TCG_BSWAP_OS
52
+ : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
53
switch (orig_memop & MO_SIZE) {
54
case MO_16:
55
- tcg_gen_bswap16_i64(val, val, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
56
- if (orig_memop & MO_SIGN) {
57
- tcg_gen_ext16s_i64(val, val);
58
- }
59
+ tcg_gen_bswap16_i64(val, val, flags);
60
break;
61
case MO_32:
62
- tcg_gen_bswap32_i64(val, val, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
63
- if (orig_memop & MO_SIGN) {
64
- tcg_gen_ext32s_i64(val, val);
65
- }
66
+ tcg_gen_bswap32_i64(val, val, flags);
67
break;
68
case MO_64:
69
tcg_gen_bswap64_i64(val, val);
70
--
23
--
71
2.25.1
24
2.25.1
72
25
73
26
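The TCG_BSWAP_* flags used in the qemu_ld changes above form a small contract: IZ promises the input is already zero-extended (so an expansion may skip masking), while OZ/OS say how the result must be extended. A reference model for the 16-bit case, with shortened flag names; this is a sketch of the semantics, not the TCG implementation:

    #include <stdint.h>

    #define BS_IZ 1  /* input known zero-extended from bit 15 */
    #define BS_OZ 2  /* output must be zero-extended */
    #define BS_OS 4  /* output must be sign-extended */

    static uint32_t bswap16_model(uint32_t x, int flags)
    {
        /* BS_IZ does not change the result here; it only licenses an
         * implementation to omit masking the input's high bits. */
        uint32_t swapped = ((x & 0xff) << 8) | ((x >> 8) & 0xff);

        if (flags & BS_OS) {
            return (uint32_t)(int32_t)(int16_t)swapped; /* sign-extend */
        }
        /* BS_OZ: swapped is already zero-extended.  With neither flag,
         * bits 31..16 are undefined, so this value is also acceptable. */
        return swapped;
    }

This is why the patch can drop the separate ext16s/ext32s steps after a byte-swapped signed load: passing IZ|OS folds the extension into the swap itself.
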
1
Direct assignments to env during translation do not work.
1
Recognize the identity function for low-part multiply.
2
2
3
As it happens, the only way we can get here is if env->pc
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
4
is already set to dc->pc. We will trap on the first insn
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
we execute anywhere on the page.
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
target/nios2/translate.c | 3 ++-
8
tcg/optimize.c | 3 ++-
11
1 file changed, 2 insertions(+), 1 deletion(-)
9
1 file changed, 2 insertions(+), 1 deletion(-)
12
10
13
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/target/nios2/translate.c
13
--- a/tcg/optimize.c
16
+++ b/target/nios2/translate.c
14
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static void handle_instruction(DisasContext *dc, CPUNios2State *env)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
18
uint32_t code;
16
static bool fold_mul(OptContext *ctx, TCGOp *op)
19
uint8_t op;
17
{
20
const Nios2Instruction *instr;
18
if (fold_const2(ctx, op) ||
21
+
19
- fold_xi_to_i(ctx, op, 0)) {
22
#if defined(CONFIG_USER_ONLY)
20
+ fold_xi_to_i(ctx, op, 0) ||
23
/* FIXME: Is this needed ? */
21
+ fold_xi_to_x(ctx, op, 1)) {
24
if (dc->pc >= 0x1000 && dc->pc < 0x2000) {
22
return true;
25
- env->regs[R_PC] = dc->pc;
26
t_gen_helper_raise_exception(dc, 0xaa);
27
return;
28
}
23
}
29
#endif
24
return false;
30
+
31
code = cpu_ldl_code(env, dc->pc);
32
op = get_opcode(code);
33
34
--
25
--
35
2.25.1
26
2.25.1
36
27
37
28
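The fold_mul change above is one more instance of the algebraic-identity pattern used throughout tcg/optimize.c: with any constant canonicalized into the second operand, each identity is a one-line check. A sketch of the rewrite decision it adds (illustrative, simplified types):

    #include <stdint.h>

    enum rewrite { KEEP, TO_ZERO, TO_COPY };

    /* Sketch of fold_mul's identities, assuming a constant operand
     * has already been swapped into y:
     *   x * 0 -> 0  (fold_xi_to_i with 0)
     *   x * 1 -> x  (fold_xi_to_x with 1, the new case) */
    static enum rewrite fold_mul_sketch(int y_is_const, uint64_t y)
    {
        if (y_is_const && y == 0) {
            return TO_ZERO; /* becomes movi dest, 0 */
        }
        if (y_is_const && y == 1) {
            return TO_COPY; /* becomes mov dest, x */
        }
        return KEEP;
    }
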
1
The new bswap flags can implement the semantics exactly.
1
Recognize the identity function for division.
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
target/arm/translate.c | 4 +---
8
tcg/optimize.c | 6 +++++-
8
1 file changed, 1 insertion(+), 3 deletions(-)
9
1 file changed, 5 insertions(+), 1 deletion(-)
9
10
10
diff --git a/target/arm/translate.c b/target/arm/translate.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/target/arm/translate.c
13
--- a/tcg/optimize.c
13
+++ b/target/arm/translate.c
14
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
15
/* Byteswap low halfword and sign extend. */
16
16
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
17
static bool fold_divide(OptContext *ctx, TCGOp *op)
17
{
18
{
18
- tcg_gen_ext16u_i32(var, var);
19
- return fold_const2(ctx, op);
19
- tcg_gen_bswap16_i32(var, var, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
20
+ if (fold_const2(ctx, op) ||
20
- tcg_gen_ext16s_i32(dest, var);
21
+ fold_xi_to_x(ctx, op, 1)) {
21
+ tcg_gen_bswap16_i32(var, var, TCG_BSWAP_OS);
22
+ return true;
23
+ }
24
+ return false;
22
}
25
}
23
26
24
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
27
static bool fold_dup(OptContext *ctx, TCGOp *op)
25
--
28
--
26
2.25.1
29
2.25.1
27
30
28
31
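The gen_revsh rewrite above leans on the new flags to collapse three ops into one. As plain C, the before/after sequences are equivalent; this is a model of the semantics, not the TCG expansion:

    #include <stdint.h>

    /* Old revsh lowering: ext16u, bswap16 (IZ|OZ), ext16s. */
    static int32_t revsh_old(uint32_t x)
    {
        uint16_t lo = (uint16_t)x;                       /* ext16u  */
        uint16_t sw = (uint16_t)((lo << 8) | (lo >> 8)); /* bswap16 */
        return (int16_t)sw;                              /* ext16s  */
    }

    /* New lowering: one bswap16 with TCG_BSWAP_OS; the swap reads only
     * the low 16 bits and the result is sign-extended directly. */
    static int32_t revsh_new(uint32_t x)
    {
        uint16_t sw = (uint16_t)(((x & 0xff) << 8) | ((x >> 8) & 0xff));
        return (int16_t)sw;
    }
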
1
Merge tcg_out_bswap16 and tcg_out_bswap16s. Use the flags
1
Recognize the constant function for remainder.
2
for the internal uses in the load and store paths.
3
2
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/mips/tcg-target.c.inc | 63 +++++++++++++++++++--------------------
7
tcg/optimize.c | 6 +++++-
8
1 file changed, 30 insertions(+), 33 deletions(-)
8
1 file changed, 5 insertions(+), 1 deletion(-)
9
9
10
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/mips/tcg-target.c.inc
12
--- a/tcg/optimize.c
13
+++ b/tcg/mips/tcg-target.c.inc
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
15
}
15
16
static bool fold_remainder(OptContext *ctx, TCGOp *op)
17
{
18
- return fold_const2(ctx, op);
19
+ if (fold_const2(ctx, op) ||
20
+ fold_xx_to_i(ctx, op, 0)) {
21
+ return true;
22
+ }
23
+ return false;
16
}
24
}
17
25
18
-static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
26
static bool fold_setcond(OptContext *ctx, TCGOp *op)
19
+static void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg, int flags)
20
{
21
+ /* ret and arg can't be register tmp0 */
22
+ tcg_debug_assert(ret != TCG_TMP0);
23
+ tcg_debug_assert(arg != TCG_TMP0);
24
+
25
+ /* With arg = abcd: */
26
if (use_mips32r2_instructions) {
27
- tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
28
- } else {
29
- /* ret and arg can't be register at */
30
- if (ret == TCG_TMP0 || arg == TCG_TMP0) {
31
- tcg_abort();
32
+ tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); /* badc */
33
+ if (flags & TCG_BSWAP_OS) {
34
+ tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); /* ssdc */
35
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
36
+ tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xffff); /* 00dc */
37
}
38
-
39
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
40
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
41
- tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
42
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
43
+ return;
44
}
45
-}
46
47
-static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
48
-{
49
- if (use_mips32r2_instructions) {
50
- tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
51
- tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
52
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); /* 0abc */
53
+ if (!(flags & TCG_BSWAP_IZ)) {
54
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0x00ff); /* 000c */
55
+ }
56
+ if (flags & TCG_BSWAP_OS) {
57
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); /* d000 */
58
+ tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); /* ssd0 */
59
} else {
60
- /* ret and arg can't be register at */
61
- if (ret == TCG_TMP0 || arg == TCG_TMP0) {
62
- tcg_abort();
63
+ tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); /* bcd0 */
64
+ if (flags & TCG_BSWAP_OZ) {
65
+ tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); /* 00d0 */
66
}
67
-
68
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
69
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
70
- tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
71
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
72
}
73
+ tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); /* ssdc */
74
}
75
76
static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub)
77
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
78
break;
79
case MO_UW | MO_BSWAP:
80
tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
81
- tcg_out_bswap16(s, lo, TCG_TMP1);
82
+ tcg_out_bswap16(s, lo, TCG_TMP1, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
83
break;
84
case MO_UW:
85
tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
86
break;
87
case MO_SW | MO_BSWAP:
88
tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
89
- tcg_out_bswap16s(s, lo, TCG_TMP1);
90
+ tcg_out_bswap16(s, lo, TCG_TMP1, TCG_BSWAP_IZ | TCG_BSWAP_OS);
91
break;
92
case MO_SW:
93
tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
94
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
95
break;
96
97
case MO_16 | MO_BSWAP:
98
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, lo, 0xffff);
99
- tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1);
100
+ tcg_out_bswap16(s, TCG_TMP1, lo, 0);
101
lo = TCG_TMP1;
102
/* FALLTHRU */
103
case MO_16:
104
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
105
case INDEX_op_not_i64:
106
i1 = OPC_NOR;
107
goto do_unary;
108
- case INDEX_op_bswap16_i32:
109
- case INDEX_op_bswap16_i64:
110
- i1 = OPC_WSBH;
111
- goto do_unary;
112
case INDEX_op_ext8s_i32:
113
case INDEX_op_ext8s_i64:
114
i1 = OPC_SEB;
115
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
116
tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
117
break;
118
119
+ case INDEX_op_bswap16_i32:
120
+ case INDEX_op_bswap16_i64:
121
+ tcg_out_bswap16(s, a0, a1, a2);
122
+ break;
123
case INDEX_op_bswap32_i32:
124
tcg_out_bswap32(s, a0, a1);
125
break;
126
--
27
--
127
2.25.1
28
2.25.1
128
29
129
30
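The byte-lane comments in the MIPS patch above ("abcd" becoming "00dc", "ssdc", and so on) translate directly into shifts and masks. A C model of the pre-R2 sequence, with the flag handling mirroring how the patch uses it; this is an illustration only, not the backend code:

    #include <stdint.h>

    /* Bytes of arg, msb to lsb, are a b c d; the goal is the low
     * halfword byte-swapped: ...dc. */
    static uint32_t mips_bswap16_model(uint32_t arg, int in_zero_ext,
                                       int out_sign_ext)
    {
        uint32_t t = arg >> 8;                   /* 0abc */
        uint32_t r;

        if (!in_zero_ext) {
            t &= 0x00ff;                         /* 000c */
        }
        if (out_sign_ext) {
            /* d000, then an arithmetic shift right: ssd0 */
            r = (uint32_t)((int32_t)(arg << 24) >> 16);
        } else {
            r = (arg << 8) & 0xff00;             /* bcd0 -> 00d0 */
        }
        return r | t;                            /* ssdc or 00dc */
    }
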
1
Move delayed branch handling to tb_stop, where we can re-use other
1
Certain targets, like riscv, produce signed 32-bit results.
2
end-of-tb code, e.g. the evaluation of flags. Honor single stepping.
2
This can lead to lots of redundant extensions as values are
3
Validate that we aren't losing state by overwriting is_jmp.
3
manipulated.
4
4
5
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
5
Begin by tracking only the obvious sign-extensions, and
6
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
converting them to simple copies when possible.
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
11
---
9
target/cris/translate.c | 96 ++++++++++++++++++++++++-----------------
12
tcg/optimize.c | 123 ++++++++++++++++++++++++++++++++++++++++---------
10
1 file changed, 56 insertions(+), 40 deletions(-)
13
1 file changed, 102 insertions(+), 21 deletions(-)
11
14
12
diff --git a/target/cris/translate.c b/target/cris/translate.c
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
14
--- a/target/cris/translate.c
17
--- a/tcg/optimize.c
15
+++ b/target/cris/translate.c
18
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
17
#define DISAS_UPDATE DISAS_TARGET_1
20
TCGTemp *next_copy;
18
/* Cpu state was modified dynamically, excluding pc -- use npc */
21
uint64_t val;
19
#define DISAS_UPDATE_NEXT DISAS_TARGET_2
22
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
20
+/* PC update for delayed branch, see cpustate_changed otherwise */
23
+ uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
21
+#define DISAS_DBRANCH DISAS_TARGET_3
24
} TempOptInfo;
22
25
23
/* Used by the decoder. */
26
typedef struct OptContext {
24
#define EXTRACT_FIELD(src, start, end) \
27
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
25
@@ -XXX,XX +XXX,XX @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
28
/* In flight values from optimization. */
26
dc->cpustate_changed |= dc->flags_x != (dc->base.tb->flags & X_FLAG);
29
uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
30
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
31
+ uint64_t s_mask; /* mask of clrsb(value) bits */
32
TCGType type;
33
} OptContext;
34
35
+/* Calculate the smask for a specific value. */
36
+static uint64_t smask_from_value(uint64_t value)
37
+{
38
+ int rep = clrsb64(value);
39
+ return ~(~0ull >> rep);
40
+}
41
+
42
+/*
43
+ * Calculate the smask for a given set of known-zeros.
44
+ * If there are lots of zeros on the left, we can consider the remainder
45
+ * an unsigned field, and thus the corresponding signed field is one bit
46
+ * larger.
47
+ */
48
+static uint64_t smask_from_zmask(uint64_t zmask)
49
+{
50
+ /*
51
+ * Only the 0 bits are significant for zmask, thus the msb itself
52
+ * must be zero, else we have no sign information.
53
+ */
54
+ int rep = clz64(zmask);
55
+ if (rep == 0) {
56
+ return 0;
57
+ }
58
+ rep -= 1;
59
+ return ~(~0ull >> rep);
60
+}
61
+
62
static inline TempOptInfo *ts_info(TCGTemp *ts)
63
{
64
return ts->state_ptr;
65
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
66
ti->prev_copy = ts;
67
ti->is_const = false;
68
ti->z_mask = -1;
69
+ ti->s_mask = 0;
70
}
71
72
static void reset_temp(TCGArg arg)
73
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
74
ti->is_const = true;
75
ti->val = ts->val;
76
ti->z_mask = ts->val;
77
+ ti->s_mask = smask_from_value(ts->val);
78
} else {
79
ti->is_const = false;
80
ti->z_mask = -1;
81
+ ti->s_mask = 0;
82
}
83
}
84
85
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
86
op->args[1] = src;
87
88
di->z_mask = si->z_mask;
89
+ di->s_mask = si->s_mask;
90
91
if (src_ts->type == dst_ts->type) {
92
TempOptInfo *ni = ts_info(si->next_copy);
93
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
94
95
nb_oargs = def->nb_oargs;
96
for (i = 0; i < nb_oargs; i++) {
97
- reset_temp(op->args[i]);
98
+ TCGTemp *ts = arg_temp(op->args[i]);
99
+ reset_ts(ts);
100
/*
101
- * Save the corresponding known-zero bits mask for the
102
+ * Save the corresponding known-zero/sign bits mask for the
103
* first output argument (only one supported so far).
104
*/
105
if (i == 0) {
106
- arg_info(op->args[i])->z_mask = ctx->z_mask;
107
+ ts_info(ts)->z_mask = ctx->z_mask;
108
+ ts_info(ts)->s_mask = ctx->s_mask;
109
}
110
}
111
}
112
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
113
{
114
uint64_t a_mask = ctx->a_mask;
115
uint64_t z_mask = ctx->z_mask;
116
+ uint64_t s_mask = ctx->s_mask;
27
117
28
/*
118
/*
29
- * Check for delayed branches here. If we do it before
119
* 32-bit ops generate 32-bit results, which for the purpose of
30
- * actually generating any host code, the simulator will just
120
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
31
- * loop doing nothing for on this program location.
121
if (ctx->type == TCG_TYPE_I32) {
32
+ * All branches are delayed branches, handled immediately below.
122
a_mask = (int32_t)a_mask;
33
+ * We don't expect to see odd combinations of exit conditions.
123
z_mask = (int32_t)z_mask;
34
*/
124
+ s_mask |= MAKE_64BIT_MASK(32, 32);
35
+ assert(dc->base.is_jmp == DISAS_NEXT || dc->cpustate_changed);
125
ctx->z_mask = z_mask;
36
+
126
+ ctx->s_mask = s_mask;
37
if (dc->delayed_branch && --dc->delayed_branch == 0) {
127
}
38
- if (dc->base.tb->flags & 7) {
128
39
- t_gen_movi_env_TN(dslot, 0);
129
if (z_mask == 0) {
40
- }
130
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
41
+ dc->base.is_jmp = DISAS_DBRANCH;
131
42
+ return;
132
static bool fold_bswap(OptContext *ctx, TCGOp *op)
133
{
134
- uint64_t z_mask, sign;
135
+ uint64_t z_mask, s_mask, sign;
136
137
if (arg_is_const(op->args[1])) {
138
uint64_t t = arg_info(op->args[1])->val;
139
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
140
}
141
142
z_mask = arg_info(op->args[1])->z_mask;
143
+
144
switch (op->opc) {
145
case INDEX_op_bswap16_i32:
146
case INDEX_op_bswap16_i64:
147
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
148
default:
149
g_assert_not_reached();
150
}
151
+ s_mask = smask_from_zmask(z_mask);
152
153
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
154
case TCG_BSWAP_OZ:
155
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
156
/* If the sign bit may be 1, force all the bits above to 1. */
157
if (z_mask & sign) {
158
z_mask |= sign;
159
+ s_mask = sign << 1;
160
}
161
break;
162
default:
163
/* The high bits are undefined: force all bits above the sign to 1. */
164
z_mask |= sign << 1;
165
+ s_mask = 0;
166
break;
167
}
168
ctx->z_mask = z_mask;
169
+ ctx->s_mask = s_mask;
170
171
return fold_masks(ctx, op);
172
}
173
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
174
static bool fold_extract(OptContext *ctx, TCGOp *op)
175
{
176
uint64_t z_mask_old, z_mask;
177
+ int pos = op->args[2];
178
+ int len = op->args[3];
179
180
if (arg_is_const(op->args[1])) {
181
uint64_t t;
182
183
t = arg_info(op->args[1])->val;
184
- t = extract64(t, op->args[2], op->args[3]);
185
+ t = extract64(t, pos, len);
186
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
187
}
188
189
z_mask_old = arg_info(op->args[1])->z_mask;
190
- z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
191
- if (op->args[2] == 0) {
192
+ z_mask = extract64(z_mask_old, pos, len);
193
+ if (pos == 0) {
194
ctx->a_mask = z_mask_old ^ z_mask;
195
}
196
ctx->z_mask = z_mask;
197
+ ctx->s_mask = smask_from_zmask(z_mask);
198
199
return fold_masks(ctx, op);
200
}
201
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
202
203
static bool fold_exts(OptContext *ctx, TCGOp *op)
204
{
205
- uint64_t z_mask_old, z_mask, sign;
206
+ uint64_t s_mask_old, s_mask, z_mask, sign;
207
bool type_change = false;
208
209
if (fold_const1(ctx, op)) {
210
return true;
211
}
212
213
- z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
214
+ z_mask = arg_info(op->args[1])->z_mask;
215
+ s_mask = arg_info(op->args[1])->s_mask;
216
+ s_mask_old = s_mask;
217
218
switch (op->opc) {
219
CASE_OP_32_64(ext8s):
220
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
221
222
if (z_mask & sign) {
223
z_mask |= sign;
224
- } else if (!type_change) {
225
- ctx->a_mask = z_mask_old ^ z_mask;
226
}
227
+ s_mask |= sign << 1;
228
+
229
ctx->z_mask = z_mask;
230
+ ctx->s_mask = s_mask;
231
+ if (!type_change) {
232
+ ctx->a_mask = s_mask & ~s_mask_old;
43
+ }
233
+ }
44
234
45
- if (dc->cpustate_changed) {
235
return fold_masks(ctx, op);
46
- cris_store_direct_jmp(dc);
236
}
47
- }
237
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
48
-
238
}
49
- if (dc->clear_locked_irq) {
239
50
- dc->clear_locked_irq = 0;
240
ctx->z_mask = z_mask;
51
- t_gen_movi_env_TN(locked_irq, 0);
241
+ ctx->s_mask = smask_from_zmask(z_mask);
52
- }
242
if (!type_change) {
53
-
243
ctx->a_mask = z_mask_old ^ z_mask;
54
- if (dc->jmp == JMP_DIRECT_CC) {
244
}
55
- TCGLabel *l1 = gen_new_label();
245
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
56
- cris_evaluate_flags(dc);
246
MemOp mop = get_memop(oi);
57
-
247
int width = 8 * memop_size(mop);
58
- /* Conditional jmp. */
248
59
- tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
249
- if (!(mop & MO_SIGN) && width < 64) {
60
- gen_goto_tb(dc, 1, dc->jmp_pc);
250
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
61
- gen_set_label(l1);
251
+ if (width < 64) {
62
- gen_goto_tb(dc, 0, dc->pc);
252
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
63
- dc->base.is_jmp = DISAS_NORETURN;
253
+ if (!(mop & MO_SIGN)) {
64
- dc->jmp = JMP_NOJMP;
254
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
65
- } else if (dc->jmp == JMP_DIRECT) {
255
+ ctx->s_mask <<= 1;
66
- cris_evaluate_flags(dc);
67
- gen_goto_tb(dc, 0, dc->jmp_pc);
68
- dc->base.is_jmp = DISAS_NORETURN;
69
- dc->jmp = JMP_NOJMP;
70
- } else {
71
- TCGv c = tcg_const_tl(dc->pc);
72
- t_gen_cc_jmp(env_btarget, c);
73
- tcg_temp_free(c);
74
- dc->base.is_jmp = DISAS_JUMP;
75
- }
76
+ if (dc->base.is_jmp != DISAS_NEXT) {
77
+ return;
78
}
79
80
/* Force an update if the per-tb cpu state has changed. */
81
- if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
82
+ if (dc->cpustate_changed) {
83
dc->base.is_jmp = DISAS_UPDATE_NEXT;
84
return;
85
}
86
@@ -XXX,XX +XXX,XX @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
87
* If we can detect the length of the next insn easily, we should.
88
* In the meantime, simply stop when we do cross.
89
*/
90
- if (dc->base.is_jmp == DISAS_NEXT
91
- && ((dc->pc ^ dc->base.pc_first) & TARGET_PAGE_MASK) != 0) {
92
+ if ((dc->pc ^ dc->base.pc_first) & TARGET_PAGE_MASK) {
93
dc->base.is_jmp = DISAS_TOO_MANY;
94
}
95
}
96
@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
97
98
cris_evaluate_flags(dc);
99
100
+ /* Evaluate delayed branch destination and fold to another is_jmp case. */
101
+ if (is_jmp == DISAS_DBRANCH) {
102
+ if (dc->base.tb->flags & 7) {
103
+ t_gen_movi_env_TN(dslot, 0);
104
+ }
256
+ }
105
+
257
}
106
+ switch (dc->jmp) {
258
107
+ case JMP_DIRECT:
259
/* Opcodes that touch guest memory stop the mb optimization. */
108
+ npc = dc->jmp_pc;
260
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
109
+ is_jmp = dc->cpustate_changed ? DISAS_UPDATE_NEXT : DISAS_TOO_MANY;
261
110
+ break;
262
static bool fold_sextract(OptContext *ctx, TCGOp *op)
111
+
263
{
112
+ case JMP_DIRECT_CC:
264
- int64_t z_mask_old, z_mask;
113
+ /*
265
+ uint64_t z_mask, s_mask, s_mask_old;
114
+ * Use a conditional branch if either taken or not-taken path
266
+ int pos = op->args[2];
115
+ * can use goto_tb. If neither can, then treat it as indirect.
267
+ int len = op->args[3];
116
+ */
268
117
+ if (likely(!dc->base.singlestep_enabled)
269
if (arg_is_const(op->args[1])) {
118
+ && likely(!dc->cpustate_changed)
270
uint64_t t;
119
+ && (use_goto_tb(dc, dc->jmp_pc) || use_goto_tb(dc, npc))) {
271
120
+ TCGLabel *not_taken = gen_new_label();
272
t = arg_info(op->args[1])->val;
121
+
273
- t = sextract64(t, op->args[2], op->args[3]);
122
+ tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, not_taken);
274
+ t = sextract64(t, pos, len);
123
+ gen_goto_tb(dc, 1, dc->jmp_pc);
275
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
124
+ gen_set_label(not_taken);
276
}
125
+
277
126
+ /* not-taken case handled below. */
278
- z_mask_old = arg_info(op->args[1])->z_mask;
127
+ is_jmp = DISAS_TOO_MANY;
279
- z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
128
+ break;
280
- if (op->args[2] == 0 && z_mask >= 0) {
129
+ }
281
- ctx->a_mask = z_mask_old ^ z_mask;
130
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
282
- }
131
+ /* fall through */
283
+ z_mask = arg_info(op->args[1])->z_mask;
132
+
284
+ z_mask = sextract64(z_mask, pos, len);
133
+ case JMP_INDIRECT:
285
ctx->z_mask = z_mask;
134
+ t_gen_cc_jmp(env_btarget, tcg_constant_tl(npc));
286
135
+ is_jmp = dc->cpustate_changed ? DISAS_UPDATE : DISAS_JUMP;
287
+ s_mask_old = arg_info(op->args[1])->s_mask;
136
+ break;
288
+ s_mask = sextract64(s_mask_old, pos, len);
137
+
289
+ s_mask |= MAKE_64BIT_MASK(len, 64 - len);
138
+ default:
290
+ ctx->s_mask = s_mask;
139
+ g_assert_not_reached();
291
+
140
+ }
292
+ if (pos == 0) {
293
+ ctx->a_mask = s_mask & ~s_mask_old;
141
+ }
294
+ }
142
+
295
+
143
if (unlikely(dc->base.singlestep_enabled)) {
296
return fold_masks(ctx, op);
144
switch (is_jmp) {
297
}
145
case DISAS_TOO_MANY:
298
299
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
300
{
301
/* We can't do any folding with a load, but we can record bits. */
302
switch (op->opc) {
303
+ CASE_OP_32_64(ld8s):
304
+ ctx->s_mask = MAKE_64BIT_MASK(8, 56);
305
+ break;
306
CASE_OP_32_64(ld8u):
307
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
308
+ ctx->s_mask = MAKE_64BIT_MASK(9, 55);
309
+ break;
310
+ CASE_OP_32_64(ld16s):
311
+ ctx->s_mask = MAKE_64BIT_MASK(16, 48);
312
break;
313
CASE_OP_32_64(ld16u):
314
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
315
+ ctx->s_mask = MAKE_64BIT_MASK(17, 47);
316
+ break;
317
+ case INDEX_op_ld32s_i64:
318
+ ctx->s_mask = MAKE_64BIT_MASK(32, 32);
319
break;
320
case INDEX_op_ld32u_i64:
321
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
322
+ ctx->s_mask = MAKE_64BIT_MASK(33, 31);
323
break;
324
default:
325
g_assert_not_reached();
326
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
327
ctx.type = TCG_TYPE_I32;
328
}
329
330
- /* Assume all bits affected, and no bits known zero. */
331
+ /* Assume all bits affected, no bits known zero, no sign reps. */
332
ctx.a_mask = -1;
333
ctx.z_mask = -1;
334
+ ctx.s_mask = 0;
335
336
/*
337
* Process each opcode.
338
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
339
case INDEX_op_extrh_i64_i32:
340
done = fold_extu(&ctx, op);
341
break;
342
+ CASE_OP_32_64(ld8s):
343
CASE_OP_32_64(ld8u):
344
+ CASE_OP_32_64(ld16s):
345
CASE_OP_32_64(ld16u):
346
+ case INDEX_op_ld32s_i64:
347
case INDEX_op_ld32u_i64:
348
done = fold_tcg_ld(&ctx, op);
349
break;
146
--
350
--
147
2.25.1
351
2.25.1
148
352
149
353
diff view generated by jsdifflib
1
Implement the new semantics in the fallback expansion.
1
Sign repetitions are perforce all identical, whether they are 1 or 0.
2
Change all callers to supply the flags that keep the
2
Bitwise operations preserve the relative quantity of the repetitions.
3
semantics unchanged locally.
4
3
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
include/tcg/tcg-op.h | 8 +--
9
tcg/optimize.c | 29 +++++++++++++++++++++++++++++
10
target/arm/translate-a64.c | 12 ++--
10
1 file changed, 29 insertions(+)
11
target/arm/translate.c | 2 +-
12
target/i386/tcg/translate.c | 2 +-
13
target/mips/tcg/mxu_translate.c | 2 +-
14
target/s390x/translate.c | 4 +-
15
target/sh4/translate.c | 2 +-
16
tcg/tcg-op.c | 121 ++++++++++++++++++++++----------
17
8 files changed, 99 insertions(+), 54 deletions(-)
18
11
19
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
20
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
21
--- a/include/tcg/tcg-op.h
14
--- a/tcg/optimize.c
22
+++ b/include/tcg/tcg-op.h
15
+++ b/tcg/optimize.c
23
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
16
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
24
void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
17
z2 = arg_info(op->args[2])->z_mask;
25
void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
18
ctx->z_mask = z1 & z2;
26
void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
19
27
-void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg);
20
+ /*
28
+void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags);
21
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
29
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
22
+ * Bitwise operations preserve the relative quantity of the repetitions.
30
void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
23
+ */
31
void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
24
+ ctx->s_mask = arg_info(op->args[1])->s_mask
32
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg);
25
+ & arg_info(op->args[2])->s_mask;
33
void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg);
26
+
34
void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg);
27
/*
35
void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
28
* Known-zeros does not imply known-ones. Therefore unless
36
-void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg);
29
* arg2 is constant, we can't infer affected bits from it.
37
-void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg);
30
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
38
+void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
39
+void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
40
void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
41
void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
42
void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
43
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
44
#define tcg_gen_ext32u_tl tcg_gen_mov_i32
45
#define tcg_gen_ext32s_tl tcg_gen_mov_i32
46
#define tcg_gen_bswap16_tl tcg_gen_bswap16_i32
47
-#define tcg_gen_bswap32_tl tcg_gen_bswap32_i32
48
+#define tcg_gen_bswap32_tl(D, S, F) tcg_gen_bswap32_i32(D, S)
49
#define tcg_gen_bswap_tl tcg_gen_bswap32_i32
50
#define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64
51
#define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32
52
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/translate-a64.c
55
+++ b/target/arm/translate-a64.c
56
@@ -XXX,XX +XXX,XX @@ static void handle_rev32(DisasContext *s, unsigned int sf,
57
58
/* bswap32_i64 requires zero high word */
59
tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
60
- tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
61
+ tcg_gen_bswap32_i64(tcg_rd, tcg_tmp, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
62
tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
63
- tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
64
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
65
tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
66
67
tcg_temp_free_i64(tcg_tmp);
68
} else {
69
tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
70
- tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
71
+ tcg_gen_bswap32_i64(tcg_rd, tcg_rd, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
72
}
31
}
32
ctx->z_mask = z1;
33
34
+ ctx->s_mask = arg_info(op->args[1])->s_mask
35
+ & arg_info(op->args[2])->s_mask;
36
return fold_masks(ctx, op);
73
}
37
}
74
38
75
@@ -XXX,XX +XXX,XX @@ static void handle_rev(DisasContext *s, int opcode, bool u,
39
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
76
read_vec_element(s, tcg_tmp, rn, i, grp_size);
40
fold_xi_to_not(ctx, op, 0)) {
77
switch (grp_size) {
41
return true;
78
case MO_16:
42
}
79
- tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
43
+
80
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp,
44
+ ctx->s_mask = arg_info(op->args[1])->s_mask
81
+ TCG_BSWAP_IZ | TCG_BSWAP_OZ);
45
+ & arg_info(op->args[2])->s_mask;
82
break;
46
return false;
83
case MO_32:
84
- tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
85
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp,
86
+ TCG_BSWAP_IZ | TCG_BSWAP_OZ);
87
break;
88
case MO_64:
89
tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
90
diff --git a/target/arm/translate.c b/target/arm/translate.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/translate.c
93
+++ b/target/arm/translate.c
94
@@ -XXX,XX +XXX,XX @@ void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
95
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
96
{
97
tcg_gen_ext16u_i32(var, var);
98
- tcg_gen_bswap16_i32(var, var);
99
+ tcg_gen_bswap16_i32(var, var, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
100
tcg_gen_ext16s_i32(dest, var);
101
}
47
}
102
48
103
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
49
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
104
index XXXXXXX..XXXXXXX 100644
50
105
--- a/target/i386/tcg/translate.c
51
ctx->z_mask = arg_info(op->args[3])->z_mask
106
+++ b/target/i386/tcg/translate.c
52
| arg_info(op->args[4])->z_mask;
107
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
53
+ ctx->s_mask = arg_info(op->args[3])->s_mask
108
{
54
+ & arg_info(op->args[4])->s_mask;
109
gen_op_mov_v_reg(s, MO_32, s->T0, reg);
55
110
tcg_gen_ext32u_tl(s->T0, s->T0);
56
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
111
- tcg_gen_bswap32_tl(s->T0, s->T0);
57
uint64_t tv = arg_info(op->args[3])->val;
112
+ tcg_gen_bswap32_tl(s->T0, s->T0, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
58
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
113
gen_op_mov_reg_v(s, MO_32, reg, s->T0);
59
fold_xi_to_not(ctx, op, -1)) {
114
}
60
return true;
115
break;
116
diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/target/mips/tcg/mxu_translate.c
119
+++ b/target/mips/tcg/mxu_translate.c
120
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx)
121
122
if (sel == 1) {
123
/* S32LDDR */
124
- tcg_gen_bswap32_tl(t1, t1);
125
+ tcg_gen_bswap32_tl(t1, t1, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
126
}
61
}
127
gen_store_mxu_gpr(t1, XRa);
62
+
128
63
+ ctx->s_mask = arg_info(op->args[1])->s_mask
129
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
64
+ & arg_info(op->args[2])->s_mask;
130
index XXXXXXX..XXXXXXX 100644
65
return false;
131
--- a/target/s390x/translate.c
132
+++ b/target/s390x/translate.c
133
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
134
135
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
136
{
137
- tcg_gen_bswap16_i64(o->out, o->in2);
138
+ tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
139
return DISAS_NEXT;
140
}
66
}
141
67
142
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
68
@@ -XXX,XX +XXX,XX @@ static bool fold_nor(OptContext *ctx, TCGOp *op)
143
{
69
fold_xi_to_not(ctx, op, 0)) {
144
- tcg_gen_bswap32_i64(o->out, o->in2);
70
return true;
145
+ tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
71
}
146
return DISAS_NEXT;
72
+
73
+ ctx->s_mask = arg_info(op->args[1])->s_mask
74
+ & arg_info(op->args[2])->s_mask;
75
return false;
147
}
76
}
148
77
149
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
78
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
150
index XXXXXXX..XXXXXXX 100644
79
return true;
151
--- a/target/sh4/translate.c
152
+++ b/target/sh4/translate.c
153
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
154
    {
155
TCGv low = tcg_temp_new();
156
     tcg_gen_ext16u_i32(low, REG(B7_4));
157
-     tcg_gen_bswap16_i32(low, low);
158
+     tcg_gen_bswap16_i32(low, low, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
159
tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
160
     tcg_temp_free(low);
161
    }
162
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
163
index XXXXXXX..XXXXXXX 100644
164
--- a/tcg/tcg-op.c
165
+++ b/tcg/tcg-op.c
166
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
167
}
80
}
81
82
+ ctx->s_mask = arg_info(op->args[1])->s_mask;
83
+
84
/* Because of fold_to_not, we want to always return true, via finish. */
85
finish_folding(ctx, op);
86
return true;
87
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
88
89
ctx->z_mask = arg_info(op->args[1])->z_mask
90
| arg_info(op->args[2])->z_mask;
91
+ ctx->s_mask = arg_info(op->args[1])->s_mask
92
+ & arg_info(op->args[2])->s_mask;
93
return fold_masks(ctx, op);
168
}
94
}
169
95
170
-/* Note: we assume the two high bytes are set to zero */
96
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
171
-void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
97
fold_ix_to_not(ctx, op, 0)) {
172
+void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
98
return true;
173
{
99
}
174
+ /* Only one extension flag may be present. */
175
+ tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ));
176
+
100
+
177
if (TCG_TARGET_HAS_bswap16_i32) {
101
+ ctx->s_mask = arg_info(op->args[1])->s_mask
178
- tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg,
102
+ & arg_info(op->args[2])->s_mask;
179
- TCG_BSWAP_IZ | TCG_BSWAP_OZ);
103
return false;
180
+ tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags);
181
} else {
182
TCGv_i32 t0 = tcg_temp_new_i32();
183
+ TCGv_i32 t1 = tcg_temp_new_i32();
184
185
- tcg_gen_ext8u_i32(t0, arg);
186
- tcg_gen_shli_i32(t0, t0, 8);
187
- tcg_gen_shri_i32(ret, arg, 8);
188
- tcg_gen_or_i32(ret, ret, t0);
189
+ tcg_gen_shri_i32(t0, arg, 8);
190
+ if (!(flags & TCG_BSWAP_IZ)) {
191
+ tcg_gen_ext8u_i32(t0, t0);
192
+ }
193
+
194
+ if (flags & TCG_BSWAP_OS) {
195
+ tcg_gen_shli_i32(t1, arg, 24);
196
+ tcg_gen_sari_i32(t1, t1, 16);
197
+ } else if (flags & TCG_BSWAP_OZ) {
198
+ tcg_gen_ext8u_i32(t1, arg);
199
+ tcg_gen_shli_i32(t1, t1, 8);
200
+ } else {
201
+ tcg_gen_shli_i32(t1, arg, 8);
202
+ }
203
+
204
+ tcg_gen_or_i32(ret, t0, t1);
205
tcg_temp_free_i32(t0);
206
+ tcg_temp_free_i32(t1);
207
}
208
}
104
}
209
105
210
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
106
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
211
}
107
108
ctx->z_mask = arg_info(op->args[1])->z_mask
109
| arg_info(op->args[2])->z_mask;
110
+ ctx->s_mask = arg_info(op->args[1])->s_mask
111
+ & arg_info(op->args[2])->s_mask;
112
return fold_masks(ctx, op);
212
}
113
}
213
114
214
-/* Note: we assume the six high bytes are set to zero */
215
-void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
216
+void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
217
{
218
+ /* Only one extension flag may be present. */
219
+ tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ));
220
+
221
if (TCG_TARGET_REG_BITS == 32) {
222
- tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
223
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
224
+ tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg), flags);
225
+ if (flags & TCG_BSWAP_OS) {
226
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
227
+ } else {
228
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
229
+ }
230
} else if (TCG_TARGET_HAS_bswap16_i64) {
231
- tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg,
232
- TCG_BSWAP_IZ | TCG_BSWAP_OZ);
233
+ tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags);
234
} else {
235
TCGv_i64 t0 = tcg_temp_new_i64();
236
+ TCGv_i64 t1 = tcg_temp_new_i64();
237
238
- tcg_gen_ext8u_i64(t0, arg);
239
- tcg_gen_shli_i64(t0, t0, 8);
240
- tcg_gen_shri_i64(ret, arg, 8);
241
- tcg_gen_or_i64(ret, ret, t0);
242
+ tcg_gen_shri_i64(t0, arg, 8);
243
+ if (!(flags & TCG_BSWAP_IZ)) {
244
+ tcg_gen_ext8u_i64(t0, t0);
245
+ }
246
+
247
+ if (flags & TCG_BSWAP_OS) {
248
+ tcg_gen_shli_i64(t1, arg, 56);
249
+ tcg_gen_sari_i64(t1, t1, 48);
250
+ } else if (flags & TCG_BSWAP_OZ) {
251
+ tcg_gen_ext8u_i64(t1, arg);
252
+ tcg_gen_shli_i64(t1, t1, 8);
253
+ } else {
254
+ tcg_gen_shli_i64(t1, arg, 8);
255
+ }
256
+
257
+ tcg_gen_or_i64(ret, t0, t1);
258
tcg_temp_free_i64(t0);
259
+ tcg_temp_free_i64(t1);
260
}
261
}
262
263
-/* Note: we assume the four high bytes are set to zero */
264
-void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
265
+void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
266
{
267
+ /* Only one extension flag may be present. */
268
+ tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ));
269
+
270
if (TCG_TARGET_REG_BITS == 32) {
271
tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
272
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
273
+ if (flags & TCG_BSWAP_OS) {
274
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
275
+ } else {
276
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
277
+ }
278
} else if (TCG_TARGET_HAS_bswap32_i64) {
279
- tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg,
280
- TCG_BSWAP_IZ | TCG_BSWAP_OZ);
281
+ tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags);
282
} else {
283
TCGv_i64 t0 = tcg_temp_new_i64();
284
TCGv_i64 t1 = tcg_temp_new_i64();
285
TCGv_i64 t2 = tcg_constant_i64(0x00ff00ff);
286
287
- /* arg = ....abcd */
288
- tcg_gen_shri_i64(t0, arg, 8); /* t0 = .....abc */
289
- tcg_gen_and_i64(t1, arg, t2); /* t1 = .....b.d */
290
- tcg_gen_and_i64(t0, t0, t2); /* t0 = .....a.c */
291
- tcg_gen_shli_i64(t1, t1, 8); /* t1 = ....b.d. */
292
- tcg_gen_or_i64(ret, t0, t1); /* ret = ....badc */
293
+ /* arg = xxxxabcd */
294
+ tcg_gen_shri_i64(t0, arg, 8); /* t0 = .xxxxabc */
295
+ tcg_gen_and_i64(t1, arg, t2); /* t1 = .....b.d */
296
+ tcg_gen_and_i64(t0, t0, t2); /* t0 = .....a.c */
297
+ tcg_gen_shli_i64(t1, t1, 8); /* t1 = ....b.d. */
298
+ tcg_gen_or_i64(ret, t0, t1); /* ret = ....badc */
299
300
- tcg_gen_shli_i64(t1, ret, 48); /* t1 = dc...... */
301
- tcg_gen_shri_i64(t0, ret, 16); /* t0 = ......ba */
302
- tcg_gen_shri_i64(t1, t1, 32); /* t1 = ....dc.. */
303
- tcg_gen_or_i64(ret, t0, t1); /* ret = ....dcba */
304
+ tcg_gen_shli_i64(t1, ret, 48); /* t1 = dc...... */
305
+ tcg_gen_shri_i64(t0, ret, 16); /* t0 = ......ba */
306
+ if (flags & TCG_BSWAP_OS) {
307
+ tcg_gen_sari_i64(t1, t1, 32); /* t1 = ssssdc.. */
308
+ } else {
309
+ tcg_gen_shri_i64(t1, t1, 32); /* t1 = ....dc.. */
310
+ }
311
+ tcg_gen_or_i64(ret, t0, t1); /* ret = ssssdcba */
312
313
tcg_temp_free_i64(t0);
314
tcg_temp_free_i64(t1);
315
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
316
if ((orig_memop ^ memop) & MO_BSWAP) {
317
switch (orig_memop & MO_SIZE) {
318
case MO_16:
319
- tcg_gen_bswap16_i32(val, val);
320
+ tcg_gen_bswap16_i32(val, val, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
321
if (orig_memop & MO_SIGN) {
322
tcg_gen_ext16s_i32(val, val);
323
}
324
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
325
switch (memop & MO_SIZE) {
326
case MO_16:
327
tcg_gen_ext16u_i32(swap, val);
328
- tcg_gen_bswap16_i32(swap, swap);
329
+ tcg_gen_bswap16_i32(swap, swap, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
330
break;
331
case MO_32:
332
tcg_gen_bswap32_i32(swap, val);
333
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
334
if ((orig_memop ^ memop) & MO_BSWAP) {
335
switch (orig_memop & MO_SIZE) {
336
case MO_16:
337
- tcg_gen_bswap16_i64(val, val);
338
+ tcg_gen_bswap16_i64(val, val, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
339
if (orig_memop & MO_SIGN) {
340
tcg_gen_ext16s_i64(val, val);
341
}
342
break;
343
case MO_32:
344
- tcg_gen_bswap32_i64(val, val);
345
+ tcg_gen_bswap32_i64(val, val, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
346
if (orig_memop & MO_SIGN) {
347
tcg_gen_ext32s_i64(val, val);
348
}
349
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
350
switch (memop & MO_SIZE) {
351
case MO_16:
352
tcg_gen_ext16u_i64(swap, val);
353
- tcg_gen_bswap16_i64(swap, swap);
354
+ tcg_gen_bswap16_i64(swap, swap, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
355
break;
356
case MO_32:
357
tcg_gen_ext32u_i64(swap, val);
358
- tcg_gen_bswap32_i64(swap, swap);
359
+ tcg_gen_bswap32_i64(swap, swap, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
360
break;
361
case MO_64:
362
tcg_gen_bswap64_i64(swap, val);
363
--
115
--
364
2.25.1
116
2.25.1
365
117
366
118
diff view generated by jsdifflib
1
After we've raised the exception, we have left the TB.
1
The result is either 0 or 1, which means that we have
2
a 2 bit signed result, and thus 62 bits of sign.
3
For clarity, use the smask_from_zmask function.
2
4
3
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
8
---
7
target/cris/translate.c | 5 +++--
9
tcg/optimize.c | 2 ++
8
target/cris/translate_v10.c.inc | 3 ++-
10
1 file changed, 2 insertions(+)
9
2 files changed, 5 insertions(+), 3 deletions(-)
10
11
11
diff --git a/target/cris/translate.c b/target/cris/translate.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/target/cris/translate.c
14
--- a/tcg/optimize.c
14
+++ b/target/cris/translate.c
15
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
16
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
16
-offsetof(CRISCPU, env) + offsetof(CPUState, halted));
17
tcg_gen_movi_tl(env_pc, dc->pc + 2);
18
t_gen_raise_exception(EXCP_HLT);
19
+ dc->base.is_jmp = DISAS_NORETURN;
20
return 2;
21
}
17
}
22
18
23
@@ -XXX,XX +XXX,XX @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
19
ctx->z_mask = 1;
24
/* Breaks start at 16 in the exception vector. */
20
+ ctx->s_mask = smask_from_zmask(1);
25
t_gen_movi_env_TN(trap_vector, dc->op1 + 16);
21
return false;
26
t_gen_raise_exception(EXCP_BREAK);
27
- dc->base.is_jmp = DISAS_UPDATE;
28
+ dc->base.is_jmp = DISAS_NORETURN;
29
break;
30
default:
31
printf("op2=%x\n", dc->op2);
32
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
33
cris_evaluate_flags(dc);
34
tcg_gen_movi_tl(env_pc, dc->pc);
35
t_gen_raise_exception(EXCP_DEBUG);
36
- dc->base.is_jmp = DISAS_UPDATE;
37
+ dc->base.is_jmp = DISAS_NORETURN;
38
/* The address covered by the breakpoint must be included in
39
[tb->pc, tb->pc + tb->size) in order to for it to be
40
properly cleared -- thus we increment the PC here so that
41
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/cris/translate_v10.c.inc
44
+++ b/target/cris/translate_v10.c.inc
45
@@ -XXX,XX +XXX,XX @@ static inline void cris_illegal_insn(DisasContext *dc)
46
{
47
qemu_log_mask(LOG_GUEST_ERROR, "illegal insn at pc=%x\n", dc->pc);
48
t_gen_raise_exception(EXCP_BREAK);
49
+ dc->base.is_jmp = DISAS_NORETURN;
50
}
22
}
51
23
52
static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
53
@@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
25
}
54
t_gen_mov_env_TN(trap_vector, c);
26
55
tcg_temp_free(c);
27
ctx->z_mask = 1;
56
t_gen_raise_exception(EXCP_BREAK);
28
+ ctx->s_mask = smask_from_zmask(1);
57
- dc->base.is_jmp = DISAS_UPDATE;
29
return false;
58
+ dc->base.is_jmp = DISAS_NORETURN;
30
59
return insn_len;
31
do_setcond_const:
60
}
61
LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size,
62
--
32
--
63
2.25.1
33
2.25.1
64
34
65
35
diff view generated by jsdifflib
Deleted patch
1
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
2
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
target/cris/helper.h | 2 +-
6
1 file changed, 1 insertion(+), 1 deletion(-)
7
1
8
diff --git a/target/cris/helper.h b/target/cris/helper.h
9
index XXXXXXX..XXXXXXX 100644
10
--- a/target/cris/helper.h
11
+++ b/target/cris/helper.h
12
@@ -XXX,XX +XXX,XX @@
13
-DEF_HELPER_2(raise_exception, void, env, i32)
14
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
15
DEF_HELPER_2(tlb_flush_pid, void, env, i32)
16
DEF_HELPER_2(spc_write, void, env, i32)
17
DEF_HELPER_1(rfe, void, env)
18
--
19
2.25.1
20
21
diff view generated by jsdifflib
Deleted patch
1
These insns set DISAS_UPDATE without cpustate_changed,
2
which isn't quite right.
3
1
4
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
5
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/cris/translate.c | 2 ++
9
1 file changed, 2 insertions(+)
10
11
diff --git a/target/cris/translate.c b/target/cris/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/cris/translate.c
14
+++ b/target/cris/translate.c
15
@@ -XXX,XX +XXX,XX @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
16
cris_evaluate_flags(dc);
17
gen_helper_rfe(cpu_env);
18
dc->base.is_jmp = DISAS_UPDATE;
19
+ dc->cpustate_changed = true;
20
break;
21
case 5:
22
/* rfn. */
23
@@ -XXX,XX +XXX,XX @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
24
cris_evaluate_flags(dc);
25
gen_helper_rfn(cpu_env);
26
dc->base.is_jmp = DISAS_UPDATE;
27
+ dc->cpustate_changed = true;
28
break;
29
case 6:
30
LOG_DIS("break %d\n", dc->op1);
31
--
32
2.25.1
33
34
diff view generated by jsdifflib
Deleted patch
1
Move this pc update into tb_stop.
2
We will be able to re-use this code shortly.
3
1
4
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
5
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/cris/translate.c | 20 +++++++++++++++-----
9
1 file changed, 15 insertions(+), 5 deletions(-)
10
11
diff --git a/target/cris/translate.c b/target/cris/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/cris/translate.c
14
+++ b/target/cris/translate.c
15
@@ -XXX,XX +XXX,XX @@
16
#define BUG() (gen_BUG(dc, __FILE__, __LINE__))
17
#define BUG_ON(x) ({if (x) BUG();})
18
19
-/* is_jmp field values */
20
-#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
21
-#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
22
+/*
23
+ * Target-specific is_jmp field values
24
+ */
25
+/* Only pc was modified dynamically */
26
+#define DISAS_JUMP DISAS_TARGET_0
27
+/* Cpu state was modified dynamically, including pc */
28
+#define DISAS_UPDATE DISAS_TARGET_1
29
+/* Cpu state was modified dynamically, excluding pc -- use npc */
30
+#define DISAS_UPDATE_NEXT DISAS_TARGET_2
31
32
/* Used by the decoder. */
33
#define EXTRACT_FIELD(src, start, end) \
34
@@ -XXX,XX +XXX,XX @@ static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
35
36
/* Force an update if the per-tb cpu state has changed. */
37
if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
38
- dc->base.is_jmp = DISAS_UPDATE;
39
- tcg_gen_movi_tl(env_pc, dc->pc);
40
+ dc->base.is_jmp = DISAS_UPDATE_NEXT;
41
+ return;
42
}
43
44
/*
45
@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
46
if (unlikely(dc->base.singlestep_enabled)) {
47
switch (is_jmp) {
48
case DISAS_TOO_MANY:
49
+ case DISAS_UPDATE_NEXT:
50
tcg_gen_movi_tl(env_pc, npc);
51
/* fall through */
52
case DISAS_JUMP:
53
@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
54
case DISAS_TOO_MANY:
55
gen_goto_tb(dc, 0, npc);
56
break;
57
+ case DISAS_UPDATE_NEXT:
58
+ tcg_gen_movi_tl(env_pc, npc);
59
+ /* fall through */
60
case DISAS_JUMP:
61
case DISAS_UPDATE:
62
/* Indicate that interupts must be re-evaluated before the next TB. */
63
--
64
2.25.1
65
66
diff view generated by jsdifflib
Deleted patch
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
2
1
3
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
4
Message-Id: <20210624105023.3852-6-zhiwei_liu@c-sky.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg-op-gvec.h | 4 ++++
8
1 file changed, 4 insertions(+)
9
10
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/tcg/tcg-op-gvec.h
13
+++ b/include/tcg/tcg-op-gvec.h
14
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
15
#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i64
16
#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i64
17
#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i64
18
+#define tcg_gen_vec_add32_tl tcg_gen_vec_add32_i64
19
+#define tcg_gen_vec_sub32_tl tcg_gen_vec_sub32_i64
20
#define tcg_gen_vec_shl8i_tl tcg_gen_vec_shl8i_i64
21
#define tcg_gen_vec_shr8i_tl tcg_gen_vec_shr8i_i64
22
#define tcg_gen_vec_sar8i_tl tcg_gen_vec_sar8i_i64
23
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
24
#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i32
25
#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i32
26
#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i32
27
+#define tcg_gen_vec_add32_tl tcg_gen_add_i32
28
+#define tcg_gen_vec_sub32_tl tcg_gen_sub_i32
29
#define tcg_gen_vec_shl8i_tl tcg_gen_vec_shl8i_i32
30
#define tcg_gen_vec_shr8i_tl tcg_gen_vec_shr8i_i32
31
#define tcg_gen_vec_sar8i_tl tcg_gen_vec_sar8i_i32
32
--
33
2.25.1
34
35
diff view generated by jsdifflib
1
From: Warner Losh <imp@bsdimp.com>
1
The results are generally 6 bit unsigned values, though
2
the count leading and trailing bits may produce any value
3
for a zero input.
2
4
3
The trap number for a page fault on BSD systems is T_PAGEFLT
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
not 0xe -- 0xe is used by Linux and represents the intel hardware
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
trap vector. The BSD kernels, however, translate this to T_PAGEFLT
6
in their Xpage, Xtrap0e, Xtrap14, etc fault handlers. This is true
7
for i386 and x86_64, though the name of the trap hanlder can very
8
on the flavor of BSD. As far as I can tell, Linux doesn't provide
9
a define for this value. Invent a new one (PAGE_FAULT_TRAP) and
10
use it instead to avoid uglier ifdefs.
11
12
Signed-off-by: Mark Johnston <markj@FreeBSD.org>
13
Signed-off-by: Juergen Lock <nox@FreeBSD.org>
14
[ Rework to avoid ifdefs and expand it to i386 ]
15
Signed-off-by: Warner Losh <imp@bsdimp.com>
16
Message-Id: <20210625045707.84534-3-imp@bsdimp.com>
17
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
18
---
8
---
19
accel/tcg/user-exec.c | 20 ++++++++++++++++++--
9
tcg/optimize.c | 3 ++-
20
1 file changed, 18 insertions(+), 2 deletions(-)
10
1 file changed, 2 insertions(+), 1 deletion(-)
21
11
22
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
23
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
24
--- a/accel/tcg/user-exec.c
14
--- a/tcg/optimize.c
25
+++ b/accel/tcg/user-exec.c
15
+++ b/tcg/optimize.c
26
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
16
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
17
g_assert_not_reached();
28
#if defined(__NetBSD__)
18
}
29
#include <ucontext.h>
19
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
30
+#include <machine/trap.h>
20
-
31
21
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
32
#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
22
return false;
33
#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
34
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
35
#define MASK_sig(context) ((context)->uc_sigmask)
36
+#define PAGE_FAULT_TRAP T_PAGEFLT
37
#elif defined(__FreeBSD__) || defined(__DragonFly__)
38
#include <ucontext.h>
39
+#include <machine/trap.h>
40
41
#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
42
#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
43
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
44
#define MASK_sig(context) ((context)->uc_sigmask)
45
+#define PAGE_FAULT_TRAP T_PAGEFLT
46
#elif defined(__OpenBSD__)
47
+#include <machine/trap.h>
48
#define EIP_sig(context) ((context)->sc_eip)
49
#define TRAP_sig(context) ((context)->sc_trapno)
50
#define ERROR_sig(context) ((context)->sc_err)
51
#define MASK_sig(context) ((context)->sc_mask)
52
+#define PAGE_FAULT_TRAP T_PAGEFLT
53
#else
54
#define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
55
#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
56
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
57
#define MASK_sig(context) ((context)->uc_sigmask)
58
+#define PAGE_FAULT_TRAP 0xe
59
#endif
60
61
int cpu_signal_handler(int host_signum, void *pinfo,
62
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
63
pc = EIP_sig(uc);
64
trapno = TRAP_sig(uc);
65
return handle_cpu_signal(pc, info,
66
- trapno == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
67
+ trapno == PAGE_FAULT_TRAP ?
68
+ (ERROR_sig(uc) >> 1) & 1 : 0,
69
&MASK_sig(uc));
70
}
23
}
71
24
72
#elif defined(__x86_64__)
25
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
73
26
default:
74
#ifdef __NetBSD__
27
g_assert_not_reached();
75
+#include <machine/trap.h>
28
}
76
#define PC_sig(context) _UC_MACHINE_PC(context)
29
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
77
#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
30
return false;
78
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
79
#define MASK_sig(context) ((context)->uc_sigmask)
80
+#define PAGE_FAULT_TRAP T_PAGEFLT
81
#elif defined(__OpenBSD__)
82
+#include <machine/trap.h>
83
#define PC_sig(context) ((context)->sc_rip)
84
#define TRAP_sig(context) ((context)->sc_trapno)
85
#define ERROR_sig(context) ((context)->sc_err)
86
#define MASK_sig(context) ((context)->sc_mask)
87
+#define PAGE_FAULT_TRAP T_PAGEFLT
88
#elif defined(__FreeBSD__) || defined(__DragonFly__)
89
#include <ucontext.h>
90
+#include <machine/trap.h>
91
92
#define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
93
#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
94
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
95
#define MASK_sig(context) ((context)->uc_sigmask)
96
+#define PAGE_FAULT_TRAP T_PAGEFLT
97
#else
98
#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
99
#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
100
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
101
#define MASK_sig(context) ((context)->uc_sigmask)
102
+#define PAGE_FAULT_TRAP 0xe
103
#endif
104
105
int cpu_signal_handler(int host_signum, void *pinfo,
106
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
107
108
pc = PC_sig(uc);
109
return handle_cpu_signal(pc, info,
110
- TRAP_sig(uc) == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
111
+ TRAP_sig(uc) == PAGE_FAULT_TRAP ?
112
+ (ERROR_sig(uc) >> 1) & 1 : 0,
113
&MASK_sig(uc));
114
}
31
}
115
32
116
--
33
--
117
2.25.1
34
2.25.1
118
35
119
36
diff view generated by jsdifflib
1
This will eventually simplify front-end usage, and will allow
1
For constant shifts, we can simply shift the s_mask.
2
backends to unset TCG_TARGET_HAS_MEMORY_BSWAP without loss of
3
optimization.
4
2
5
The argument is added during expansion, not currently exposed to the
3
For variable shifts, we know that sar does not reduce
6
front end translators. The backends currently only support a flags
4
the s_mask, which helps for sequences like
7
value of either TCG_BSWAP_IZ, or (TCG_BSWAP_IZ | TCG_BSWAP_OZ),
8
since they all require zero top bytes and leave them that way.
9
At the existing call sites we pass in (TCG_BSWAP_IZ | TCG_BSWAP_OZ),
10
except for the flags-ignored cases of a 32-bit swap of a 32-bit
11
value and or a 64-bit swap of a 64-bit value, where we pass 0.
12
5
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
ext32s_i64 t, in
7
sar_i64 t, t, v
8
ext32s_i64 out, t
9
10
allowing the final extend to be eliminated.
11
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
15
---
16
include/tcg/tcg-opc.h | 10 +++++-----
16
tcg/optimize.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++---
17
include/tcg/tcg.h | 12 ++++++++++++
17
1 file changed, 47 insertions(+), 3 deletions(-)
18
tcg/tcg-op.c | 13 ++++++++-----
19
tcg/tcg.c | 28 ++++++++++++++++++++++++++++
20
tcg/README | 22 ++++++++++++++--------
21
5 files changed, 67 insertions(+), 18 deletions(-)
22
18
23
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
19
diff --git a/tcg/optimize.c b/tcg/optimize.c
24
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
25
--- a/include/tcg/tcg-opc.h
21
--- a/tcg/optimize.c
26
+++ b/include/tcg/tcg-opc.h
22
+++ b/tcg/optimize.c
27
@@ -XXX,XX +XXX,XX @@ DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32))
23
@@ -XXX,XX +XXX,XX @@ static uint64_t smask_from_zmask(uint64_t zmask)
28
DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32))
24
return ~(~0ull >> rep);
29
DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32))
25
}
30
DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32))
31
-DEF(bswap16_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap16_i32))
32
-DEF(bswap32_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap32_i32))
33
+DEF(bswap16_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap16_i32))
34
+DEF(bswap32_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap32_i32))
35
DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32))
36
DEF(neg_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_neg_i32))
37
DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32))
38
@@ -XXX,XX +XXX,XX @@ DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64))
39
DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64))
40
DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64))
41
DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64))
42
-DEF(bswap16_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
43
-DEF(bswap32_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
44
-DEF(bswap64_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
45
+DEF(bswap16_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
46
+DEF(bswap32_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
47
+DEF(bswap64_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
48
DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64))
49
DEF(neg_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_neg_i64))
50
DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64))
51
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
52
index XXXXXXX..XXXXXXX 100644
53
--- a/include/tcg/tcg.h
54
+++ b/include/tcg/tcg.h
55
@@ -XXX,XX +XXX,XX @@ typedef TCGv_ptr TCGv_env;
56
/* Used to align parameters. See the comment before tcgv_i32_temp. */
57
#define TCG_CALL_DUMMY_ARG ((TCGArg)0)
58
26
59
+/*
27
+/*
60
+ * Flags for the bswap opcodes.
28
+ * Recreate a properly left-aligned smask after manipulation.
61
+ * If IZ, the input is zero-extended, otherwise unknown.
29
+ * Some bit-shuffling, particularly shifts and rotates, may
62
+ * If OZ or OS, the output is zero- or sign-extended respectively,
30
+ * retain sign bits on the left, but may scatter disconnected
63
+ * otherwise the high bits are undefined.
31
+ * sign bits on the right. Retain only what remains to the left.
64
+ */
32
+ */
65
+enum {
33
+static uint64_t smask_from_smask(int64_t smask)
66
+ TCG_BSWAP_IZ = 1,
34
+{
67
+ TCG_BSWAP_OZ = 2,
35
+ /* Only the 1 bits are significant for smask */
68
+ TCG_BSWAP_OS = 4,
36
+ return smask_from_zmask(~smask);
69
+};
37
+}
70
+
38
+
71
typedef enum TCGTempVal {
39
static inline TempOptInfo *ts_info(TCGTemp *ts)
72
TEMP_VAL_DEAD,
73
TEMP_VAL_REG,
74
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/tcg/tcg-op.c
77
+++ b/tcg/tcg-op.c
78
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
79
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
80
{
40
{
81
if (TCG_TARGET_HAS_bswap16_i32) {
41
return ts->state_ptr;
82
- tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
42
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
83
+ tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg,
43
84
+ TCG_BSWAP_IZ | TCG_BSWAP_OZ);
44
static bool fold_shift(OptContext *ctx, TCGOp *op)
85
} else {
86
TCGv_i32 t0 = tcg_temp_new_i32();
87
88
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
89
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
90
{
45
{
91
if (TCG_TARGET_HAS_bswap32_i32) {
46
+ uint64_t s_mask, z_mask, sign;
92
- tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
93
+ tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0);
94
} else {
95
TCGv_i32 t0 = tcg_temp_new_i32();
96
TCGv_i32 t1 = tcg_temp_new_i32();
97
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
98
tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
99
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
100
} else if (TCG_TARGET_HAS_bswap16_i64) {
101
- tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
102
+ tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg,
103
+ TCG_BSWAP_IZ | TCG_BSWAP_OZ);
104
} else {
105
TCGv_i64 t0 = tcg_temp_new_i64();
106
107
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
108
tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
109
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
110
} else if (TCG_TARGET_HAS_bswap32_i64) {
111
- tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
112
+ tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg,
113
+ TCG_BSWAP_IZ | TCG_BSWAP_OZ);
114
} else {
115
TCGv_i64 t0 = tcg_temp_new_i64();
116
TCGv_i64 t1 = tcg_temp_new_i64();
117
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
118
tcg_temp_free_i32(t0);
119
tcg_temp_free_i32(t1);
120
} else if (TCG_TARGET_HAS_bswap64_i64) {
121
- tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
122
+ tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0);
123
} else {
124
TCGv_i64 t0 = tcg_temp_new_i64();
125
TCGv_i64 t1 = tcg_temp_new_i64();
126
diff --git a/tcg/tcg.c b/tcg/tcg.c
127
index XXXXXXX..XXXXXXX 100644
128
--- a/tcg/tcg.c
129
+++ b/tcg/tcg.c
130
@@ -XXX,XX +XXX,XX @@ static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
131
[MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
132
};
133
134
+static const char bswap_flag_name[][6] = {
135
+ [TCG_BSWAP_IZ] = "iz",
136
+ [TCG_BSWAP_OZ] = "oz",
137
+ [TCG_BSWAP_OS] = "os",
138
+ [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
139
+ [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
140
+};
141
+
47
+
142
static inline bool tcg_regset_single(TCGRegSet d)
48
if (fold_const2(ctx, op) ||
143
{
49
fold_ix_to_i(ctx, op, 0) ||
144
return (d & (d - 1)) == 0;
50
fold_xi_to_x(ctx, op, 0)) {
145
@@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
51
return true;
146
i = 1;
52
}
147
}
53
148
break;
54
+ s_mask = arg_info(op->args[1])->s_mask;
149
+ case INDEX_op_bswap16_i32:
55
+ z_mask = arg_info(op->args[1])->z_mask;
150
+ case INDEX_op_bswap16_i64:
151
+ case INDEX_op_bswap32_i32:
152
+ case INDEX_op_bswap32_i64:
153
+ case INDEX_op_bswap64_i64:
154
+ {
155
+ TCGArg flags = op->args[k];
156
+ const char *name = NULL;
157
+
56
+
158
+ if (flags < ARRAY_SIZE(bswap_flag_name)) {
57
if (arg_is_const(op->args[2])) {
159
+ name = bswap_flag_name[flags];
58
- ctx->z_mask = do_constant_folding(op->opc, ctx->type,
160
+ }
59
- arg_info(op->args[1])->z_mask,
161
+ if (name) {
60
- arg_info(op->args[2])->val);
162
+ col += qemu_log(",%s", name);
61
+ int sh = arg_info(op->args[2])->val;
163
+ } else {
62
+
164
+ col += qemu_log(",$0x%" TCG_PRIlx, flags);
63
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
165
+ }
64
+
166
+ i = k = 1;
65
+ s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
167
+ }
66
+ ctx->s_mask = smask_from_smask(s_mask);
168
+ break;
67
+
169
default:
68
return fold_masks(ctx, op);
170
i = 0;
69
}
171
break;
70
+
172
diff --git a/tcg/README b/tcg/README
71
+ switch (op->opc) {
173
index XXXXXXX..XXXXXXX 100644
72
+ CASE_OP_32_64(sar):
174
--- a/tcg/README
73
+ /*
175
+++ b/tcg/README
74
+ * Arithmetic right shift will not reduce the number of
176
@@ -XXX,XX +XXX,XX @@ ext32u_i64 t0, t1
75
+ * input sign repetitions.
177
76
+ */
178
8, 16 or 32 bit sign/zero extension (both operands must have the same type)
77
+ ctx->s_mask = s_mask;
179
78
+ break;
180
-* bswap16_i32/i64 t0, t1
79
+ CASE_OP_32_64(shr):
181
+* bswap16_i32/i64 t0, t1, flags
80
+ /*
182
81
+ * If the sign bit is known zero, then logical right shift
183
-16 bit byte swap on a 32/64 bit value. It assumes that the two/six high order
82
+ * will not reduced the number of input sign repetitions.
184
-bytes are set to zero.
83
+ */
185
+16 bit byte swap on the low bits of a 32/64 bit input.
84
+ sign = (s_mask & -s_mask) >> 1;
186
+If flags & TCG_BSWAP_IZ, then t1 is known to be zero-extended from bit 15.
85
+ if (!(z_mask & sign)) {
187
+If flags & TCG_BSWAP_OZ, then t0 will be zero-extended from bit 15.
86
+ ctx->s_mask = s_mask;
188
+If flags & TCG_BSWAP_OS, then t0 will be sign-extended from bit 15.
87
+ }
189
+If neither TCG_BSWAP_OZ nor TCG_BSWAP_OS are set, then the bits of
88
+ break;
190
+t0 above bit 15 may contain any value.
89
+ default:
191
90
+ break;
192
-* bswap32_i32/i64 t0, t1
91
+ }
193
+* bswap32_i64 t0, t1, flags
92
+
194
93
return false;
195
-32 bit byte swap on a 32/64 bit value. With a 64 bit value, it assumes that
94
}
196
-the four high order bytes are set to zero.
197
+32 bit byte swap on a 64-bit value. The flags are the same as for bswap16,
198
+except they apply from bit 31 instead of bit 15.
199
200
-* bswap64_i64 t0, t1
201
+* bswap32_i32 t0, t1, flags
202
+* bswap64_i64 t0, t1, flags
203
204
-64 bit byte swap
205
+32/64 bit byte swap. The flags are ignored, but still present
206
+for consistency with the other bswap opcodes.
207
208
* discard_i32/i64 t0
209
95
210
--
96
--
211
2.25.1
97
2.25.1
212
98
213
99
diff view generated by jsdifflib
Deleted patch
1
Retain the current rorw bswap16 expansion for the zero-in/zero-out case.
2
Otherwise, perform a wider bswap plus a right-shift or extend.
3
1
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/i386/tcg-target.c.inc | 20 +++++++++++++++++++-
8
1 file changed, 19 insertions(+), 1 deletion(-)
9
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
13
+++ b/tcg/i386/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
15
break;
16
17
OP_32_64(bswap16):
18
- tcg_out_rolw_8(s, a0);
19
+ if (a2 & TCG_BSWAP_OS) {
20
+ /* Output must be sign-extended. */
21
+ if (rexw) {
22
+ tcg_out_bswap64(s, a0);
23
+ tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48);
24
+ } else {
25
+ tcg_out_bswap32(s, a0);
26
+ tcg_out_shifti(s, SHIFT_SAR, a0, 16);
27
+ }
28
+ } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
29
+ /* Output must be zero-extended, but input isn't. */
30
+ tcg_out_bswap32(s, a0);
31
+ tcg_out_shifti(s, SHIFT_SHR, a0, 16);
32
+ } else {
33
+ tcg_out_rolw_8(s, a0);
34
+ }
35
break;
36
OP_32_64(bswap32):
37
tcg_out_bswap32(s, a0);
38
+ if (rexw && (a2 & TCG_BSWAP_OS)) {
39
+ tcg_out_ext32s(s, a0, a0);
40
+ }
41
break;
42
43
OP_32_64(neg):
44
--
45
2.25.1
46
47
diff view generated by jsdifflib
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/aarch64/tcg-target.c.inc | 12 ++++++++++++
6
1 file changed, 12 insertions(+)
7
1
8
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/aarch64/tcg-target.c.inc
11
+++ b/tcg/aarch64/tcg-target.c.inc
12
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
13
tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
14
break;
15
case INDEX_op_bswap32_i64:
16
+ tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
17
+ if (a2 & TCG_BSWAP_OS) {
18
+ tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a0);
19
+ }
20
+ break;
21
case INDEX_op_bswap32_i32:
22
tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
23
break;
24
case INDEX_op_bswap16_i64:
25
case INDEX_op_bswap16_i32:
26
tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
27
+ if (a2 & TCG_BSWAP_OS) {
28
+ /* Output must be sign-extended. */
29
+ tcg_out_sxt(s, ext, MO_16, a0, a0);
30
+ } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
31
+ /* Output must be zero-extended, but input isn't. */
32
+ tcg_out_uxt(s, MO_16, a0, a0);
33
+ }
34
break;
35
36
case INDEX_op_ext8s_i64:
37
--
38
2.25.1
39
40
diff view generated by jsdifflib