1 | The following changes since commit c52d69e7dbaaed0ffdef8125e79218672c30161d: | 1 | The following changes since commit a9fe9e191b4305b88c356a1ed9ac3baf89eb18aa: |
---|---|---|---|
2 | 2 | ||
3 | Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20211027' into staging (2021-10-27 11:45:18 -0700) | 3 | Merge tag 'pull-riscv-to-apply-20230505-1' of https://github.com/alistair23/qemu into staging (2023-05-05 09:25:13 +0100) |
4 | 4 | ||
5 | are available in the Git repository at: | 5 | are available in the Git repository at: |
6 | 6 | ||
7 | https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211027 | 7 | https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230505 |
8 | 8 | ||
9 | for you to fetch changes up to 820c025f0dcacf2f3c12735b1f162893fbfa7bc6: | 9 | for you to fetch changes up to 35a0bd63b458f30389b6bc6b7471c1665fe7b9d8: |
10 | 10 | ||
11 | tcg/optimize: Propagate sign info for shifting (2021-10-27 17:11:23 -0700) | 11 | tcg: Widen helper_*_st[bw]_mmu val arguments (2023-05-05 17:21:03 +0100) |
12 | 12 | ||
13 | ---------------------------------------------------------------- | 13 | ---------------------------------------------------------------- |
14 | Improvements to qemu/int128 | 14 | softfloat: Fix the incorrect computation in float32_exp2 |
15 | Fixes for 128/64 division. | 15 | tcg: Remove compatibility helpers for qemu ld/st |
16 | Cleanup tcg/optimize.c | 16 | target/alpha: Remove TARGET_ALIGNED_ONLY |
17 | Optimize redundant sign extensions | 17 | target/hppa: Remove TARGET_ALIGNED_ONLY |
18 | target/sparc: Remove TARGET_ALIGNED_ONLY | ||
19 | tcg: Cleanups preparing to unify calls to qemu_ld/st helpers | ||
18 | 20 | ||
19 | ---------------------------------------------------------------- | 21 | ---------------------------------------------------------------- |
20 | Frédéric Pétrot (1): | 22 | Richard Henderson (41): |
21 | qemu/int128: Add int128_{not,xor} | 23 | target/avr: Finish conversion to tcg_gen_qemu_{ld,st}_* |
24 | target/cris: Finish conversion to tcg_gen_qemu_{ld,st}_* | ||
25 | target/Hexagon: Finish conversion to tcg_gen_qemu_{ld, st}_* | ||
26 | target/m68k: Finish conversion to tcg_gen_qemu_{ld,st}_* | ||
27 | target/mips: Finish conversion to tcg_gen_qemu_{ld,st}_* | ||
28 | target/s390x: Finish conversion to tcg_gen_qemu_{ld, st}_* | ||
29 | target/sparc: Finish conversion to tcg_gen_qemu_{ld, st}_* | ||
30 | target/xtensa: Finish conversion to tcg_gen_qemu_{ld, st}_* | ||
31 | tcg: Remove compatibility helpers for qemu ld/st | ||
32 | target/alpha: Use MO_ALIGN for system UNALIGN() | ||
33 | target/alpha: Use MO_ALIGN where required | ||
34 | target/alpha: Remove TARGET_ALIGNED_ONLY | ||
35 | target/hppa: Use MO_ALIGN for system UNALIGN() | ||
36 | target/hppa: Remove TARGET_ALIGNED_ONLY | ||
37 | target/sparc: Use MO_ALIGN where required | ||
38 | target/sparc: Use cpu_ld*_code_mmu | ||
39 | target/sparc: Remove TARGET_ALIGNED_ONLY | ||
40 | tcg/i386: Rationalize args to tcg_out_qemu_{ld,st} | ||
41 | tcg/i386: Generalize multi-part load overlap test | ||
42 | tcg/i386: Introduce HostAddress | ||
43 | tcg/i386: Drop r0+r1 local variables from tcg_out_tlb_load | ||
44 | tcg/i386: Introduce tcg_out_testi | ||
45 | tcg/aarch64: Rationalize args to tcg_out_qemu_{ld,st} | ||
46 | tcg/aarch64: Introduce HostAddress | ||
47 | tcg/arm: Rationalize args to tcg_out_qemu_{ld,st} | ||
48 | tcg/arm: Introduce HostAddress | ||
49 | tcg/loongarch64: Rationalize args to tcg_out_qemu_{ld,st} | ||
50 | tcg/loongarch64: Introduce HostAddress | ||
51 | tcg/mips: Rationalize args to tcg_out_qemu_{ld,st} | ||
52 | tcg/ppc: Rationalize args to tcg_out_qemu_{ld,st} | ||
53 | tcg/ppc: Introduce HostAddress | ||
54 | tcg/riscv: Require TCG_TARGET_REG_BITS == 64 | ||
55 | tcg/riscv: Rationalize args to tcg_out_qemu_{ld,st} | ||
56 | tcg/s390x: Pass TCGType to tcg_out_qemu_{ld,st} | ||
57 | tcg/s390x: Introduce HostAddress | ||
58 | tcg/sparc64: Drop is_64 test from tcg_out_qemu_ld data return | ||
59 | tcg/sparc64: Pass TCGType to tcg_out_qemu_{ld,st} | ||
60 | tcg: Move TCGLabelQemuLdst to tcg.c | ||
61 | tcg: Replace REG_P with arg_loc_reg_p | ||
62 | tcg: Introduce arg_slot_stk_ofs | ||
63 | tcg: Widen helper_*_st[bw]_mmu val arguments | ||
22 | 64 | ||
23 | Luis Pires (4): | 65 | Shivaprasad G Bhat (1): |
24 | host-utils: move checks out of divu128/divs128 | 66 | softfloat: Fix the incorrect computation in float32_exp2 |
25 | host-utils: move udiv_qrnnd() to host-utils | ||
26 | host-utils: add 128-bit quotient support to divu128/divs128 | ||
27 | host-utils: add unit tests for divu128/divs128 | ||
28 | 67 | ||
29 | Richard Henderson (51): | 68 | configs/targets/alpha-linux-user.mak | 1 - |
30 | tcg/optimize: Rename "mask" to "z_mask" | 69 | configs/targets/alpha-softmmu.mak | 1 - |
31 | tcg/optimize: Split out OptContext | 70 | configs/targets/hppa-linux-user.mak | 1 - |
32 | tcg/optimize: Remove do_default label | 71 | configs/targets/hppa-softmmu.mak | 1 - |
33 | tcg/optimize: Change tcg_opt_gen_{mov,movi} interface | 72 | configs/targets/sparc-linux-user.mak | 1 - |
34 | tcg/optimize: Move prev_mb into OptContext | 73 | configs/targets/sparc-softmmu.mak | 1 - |
35 | tcg/optimize: Split out init_arguments | 74 | configs/targets/sparc32plus-linux-user.mak | 1 - |
36 | tcg/optimize: Split out copy_propagate | 75 | configs/targets/sparc64-linux-user.mak | 1 - |
37 | tcg/optimize: Split out fold_call | 76 | configs/targets/sparc64-softmmu.mak | 1 - |
38 | tcg/optimize: Drop nb_oargs, nb_iargs locals | 77 | include/tcg/tcg-ldst.h | 10 +- |
39 | tcg/optimize: Change fail return for do_constant_folding_cond* | 78 | include/tcg/tcg-op.h | 55 ----- |
40 | tcg/optimize: Return true from tcg_opt_gen_{mov,movi} | 79 | target/hexagon/macros.h | 14 +- |
41 | tcg/optimize: Split out finish_folding | 80 | tcg/riscv/tcg-target-con-set.h | 8 - |
42 | tcg/optimize: Use a boolean to avoid a mass of continues | 81 | tcg/riscv/tcg-target.h | 22 +- |
43 | tcg/optimize: Split out fold_mb, fold_qemu_{ld,st} | 82 | tcg/tcg-internal.h | 4 - |
44 | tcg/optimize: Split out fold_const{1,2} | 83 | accel/tcg/cputlb.c | 6 +- |
45 | tcg/optimize: Split out fold_setcond2 | 84 | fpu/softfloat.c | 2 +- |
46 | tcg/optimize: Split out fold_brcond2 | 85 | target/alpha/translate.c | 38 +-- |
47 | tcg/optimize: Split out fold_brcond | 86 | target/avr/translate.c | 16 +- |
48 | tcg/optimize: Split out fold_setcond | 87 | target/hexagon/genptr.c | 8 +- |
49 | tcg/optimize: Split out fold_mulu2_i32 | 88 | target/hexagon/idef-parser/parser-helpers.c | 28 +-- |
50 | tcg/optimize: Split out fold_addsub2_i32 | 89 | target/hexagon/translate.c | 32 +-- |
51 | tcg/optimize: Split out fold_movcond | 90 | target/hppa/translate.c | 2 +- |
52 | tcg/optimize: Split out fold_extract2 | 91 | target/m68k/translate.c | 76 ++---- |
53 | tcg/optimize: Split out fold_extract, fold_sextract | 92 | target/mips/tcg/translate.c | 8 +- |
54 | tcg/optimize: Split out fold_deposit | 93 | target/s390x/tcg/translate.c | 152 ++++++------ |
55 | tcg/optimize: Split out fold_count_zeros | 94 | target/sparc/ldst_helper.c | 10 +- |
56 | tcg/optimize: Split out fold_bswap | 95 | target/sparc/translate.c | 85 ++++--- |
57 | tcg/optimize: Split out fold_dup, fold_dup2 | 96 | target/xtensa/translate.c | 4 +- |
58 | tcg/optimize: Split out fold_mov | 97 | tcg/tcg.c | 58 +++-- |
59 | tcg/optimize: Split out fold_xx_to_i | 98 | target/cris/translate_v10.c.inc | 18 +- |
60 | tcg/optimize: Split out fold_xx_to_x | 99 | target/mips/tcg/nanomips_translate.c.inc | 2 +- |
61 | tcg/optimize: Split out fold_xi_to_i | 100 | tcg/aarch64/tcg-target.c.inc | 108 ++++++--- |
62 | tcg/optimize: Add type to OptContext | 101 | tcg/arm/tcg-target.c.inc | 357 +++++++++++++--------------- |
63 | tcg/optimize: Split out fold_to_not | 102 | tcg/i386/tcg-target.c.inc | 345 ++++++++++++++------------- |
64 | tcg/optimize: Split out fold_sub_to_neg | 103 | tcg/loongarch64/tcg-target.c.inc | 135 +++++------ |
65 | tcg/optimize: Split out fold_xi_to_x | 104 | tcg/mips/tcg-target.c.inc | 186 ++++++++------- |
66 | tcg/optimize: Split out fold_ix_to_i | 105 | tcg/ppc/tcg-target.c.inc | 192 ++++++++------- |
67 | tcg/optimize: Split out fold_masks | 106 | tcg/riscv/tcg-target.c.inc | 268 ++++++--------------- |
68 | tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies | 107 | tcg/s390x/tcg-target.c.inc | 131 +++++----- |
69 | tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops | 108 | tcg/sparc64/tcg-target.c.inc | 8 +- |
70 | tcg/optimize: Sink commutative operand swapping into fold functions | 109 | tcg/tcg-ldst.c.inc | 14 -- |
71 | tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values | 110 | 42 files changed, 1120 insertions(+), 1291 deletions(-) |
72 | tcg/optimize: Use fold_xx_to_i for orc | ||
73 | tcg/optimize: Use fold_xi_to_x for mul | ||
74 | tcg/optimize: Use fold_xi_to_x for div | ||
75 | tcg/optimize: Use fold_xx_to_i for rem | ||
76 | tcg/optimize: Optimize sign extensions | ||
77 | tcg/optimize: Propagate sign info for logical operations | ||
78 | tcg/optimize: Propagate sign info for setcond | ||
79 | tcg/optimize: Propagate sign info for bit counting | ||
80 | tcg/optimize: Propagate sign info for shifting | ||
81 | |||
82 | include/fpu/softfloat-macros.h | 82 -- | ||
83 | include/hw/clock.h | 5 +- | ||
84 | include/qemu/host-utils.h | 121 +- | ||
85 | include/qemu/int128.h | 20 + | ||
86 | target/ppc/int_helper.c | 23 +- | ||
87 | tcg/optimize.c | 2644 ++++++++++++++++++++++++---------------- | ||
88 | tests/unit/test-div128.c | 197 +++ | ||
89 | util/host-utils.c | 147 ++- | ||
90 | tests/unit/meson.build | 1 + | ||
91 | 9 files changed, 2053 insertions(+), 1187 deletions(-) | ||
92 | create mode 100644 tests/unit/test-div128.c | ||
1 | This "garbage" setting pre-dates the addition of the type | 1 | From: Shivaprasad G Bhat <sbhat@linux.ibm.com> |
---|---|---|---|
2 | changing opcodes INDEX_op_ext_i32_i64, INDEX_op_extu_i32_i64, | ||
3 | and INDEX_op_extr{l,h}_i64_i32. | ||
4 | 2 | ||
5 | So now we have definitive points at which to adjust z_mask | 3 | The float32_exp2 function is computing the wrong exponent of 2. |
6 | to eliminate such bits from the 32-bit operands. | ||
7 | 4 | ||
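(As a rough illustration of the representation change described above -- standalone C, not QEMU code, with made-up mask values: the old scheme forced the high 32 bits of a 32-bit value's z_mask to 1 ("garbage"), while the new scheme keeps the mask sign-extended, matching how 32-bit constants are represented.)

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* z_mask for a 32-bit value held in a 64-bit host register:
         * a value bit may be nonzero only where the mask bit is 1. */
        uint64_t z_mask = 0x0000ff00u;

        /* Old scheme: high 32 bits declared garbage (forced to 1). */
        uint64_t old_mask = z_mask | ~0xffffffffull;

        /* New scheme: canonicalize by sign-extending the 32-bit mask. */
        uint64_t new_mask = (uint64_t)(int64_t)(int32_t)z_mask;

        printf("old=%016" PRIx64 " new=%016" PRIx64 "\n", old_mask, new_mask);
        return 0;
    }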
8 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 5 | For example, with the following set of values {0.1, 2.0, 2.0, -1.0}, |
9 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 6 | the expected output would be {1.071773, 4.000000, 4.000000, 0.500000}. |
7 | Instead, the function is computing {1.119102, 3.382044, 3.382044, -0.191022}. | ||
8 | |||
9 | Looking at the code, float32_exp2() attempts to compute this: | ||
10 | |||
11 | e^x = 1 + x/1! + x^2/2! + x^3/3! + x^4/4! + x^5/5! + ... + x^n/n! + ... | ||
15 | |||
16 | But because of the typo it ends up doing | ||
17 | |||
18 | e^x = 1 + x/1! + x/2! + x/3! + x/4! + x/5! + ... + x/n! + ... | ||
21 | |||
22 | This is because parts_muladd is passed xp, which is just 'x', instead of | ||
23 | xnp, which holds the running numerator x^n. Commit 572c4d862ff2 refactored | ||
24 | this function and mistakenly used xp instead of xnp. | ||
25 | |||
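(For illustration, a minimal standalone sketch of the intended series in plain double arithmetic -- the function name and the running factorial are mine, not QEMU's FloatParts code; the point is that each term must use the accumulated power xnp, never the fixed xp.)

    #include <stdio.h>

    static double exp2_series(double x)
    {
        double xp = x * 0.6931471805599453;  /* x * ln(2), so 2**x == e**xp */
        double xnp = xp;                     /* running numerator: xp**n */
        double r = 1.0;                      /* partial sum, term n = 0 */
        double fact = 1.0;

        for (int n = 1; n <= 15; n++) {
            fact *= n;
            r += xnp / fact;   /* add xp**n / n!  (the bug effectively used xp here) */
            xnp *= xp;         /* advance the numerator to xp**(n+1) */
        }
        return r;
    }

    int main(void)
    {
        /* expect ~1.071773 4.000000 0.500000 */
        printf("%f %f %f\n", exp2_series(0.1), exp2_series(2.0),
               exp2_series(-1.0));
        return 0;
    }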
26 | Cc: qemu-stable@nongnu.org | ||
27 | Fixes: 572c4d862ff2 "softfloat: Convert float32_exp2 to FloatParts" | ||
28 | Partially-Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1623 | ||
29 | Reported-By: Luca Barbato (https://gitlab.com/lu-zero) | ||
30 | Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com> | ||
31 | Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com> | ||
32 | Message-Id: <168304110865.537992.13059030916325018670.stgit@localhost.localdomain> | ||
33 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 34 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 35 | --- |
12 | tcg/optimize.c | 35 ++++++++++++++++------------------- | 36 | fpu/softfloat.c | 2 +- |
13 | 1 file changed, 16 insertions(+), 19 deletions(-) | 37 | 1 file changed, 1 insertion(+), 1 deletion(-) |
14 | 38 | ||
15 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 39 | diff --git a/fpu/softfloat.c b/fpu/softfloat.c |
16 | index XXXXXXX..XXXXXXX 100644 | 40 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/tcg/optimize.c | 41 | --- a/fpu/softfloat.c |
18 | +++ b/tcg/optimize.c | 42 | +++ b/fpu/softfloat.c |
19 | @@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts) | 43 | @@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status) |
20 | ti->is_const = true; | 44 | float64_unpack_canonical(&rp, float64_one, status); |
21 | ti->val = ts->val; | 45 | for (i = 0 ; i < 15 ; i++) { |
22 | ti->z_mask = ts->val; | 46 | float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status); |
23 | - if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) { | 47 | - rp = *parts_muladd(&tp, &xp, &rp, 0, status); |
24 | - /* High bits of a 32-bit quantity are garbage. */ | 48 | + rp = *parts_muladd(&tp, &xnp, &rp, 0, status); |
25 | - ti->z_mask |= ~0xffffffffull; | 49 | xnp = *parts_mul(&xnp, &xp, status); |
26 | - } | ||
27 | } else { | ||
28 | ti->is_const = false; | ||
29 | ti->z_mask = -1; | ||
30 | @@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | ||
31 | TCGTemp *src_ts = arg_temp(src); | ||
32 | TempOptInfo *di; | ||
33 | TempOptInfo *si; | ||
34 | - uint64_t z_mask; | ||
35 | TCGOpcode new_op; | ||
36 | |||
37 | if (ts_are_copies(dst_ts, src_ts)) { | ||
38 | @@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | ||
39 | op->args[0] = dst; | ||
40 | op->args[1] = src; | ||
41 | |||
42 | - z_mask = si->z_mask; | ||
43 | - if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { | ||
44 | - /* High bits of the destination are now garbage. */ | ||
45 | - z_mask |= ~0xffffffffull; | ||
46 | - } | ||
47 | - di->z_mask = z_mask; | ||
48 | + di->z_mask = si->z_mask; | ||
49 | |||
50 | if (src_ts->type == dst_ts->type) { | ||
51 | TempOptInfo *ni = ts_info(si->next_copy); | ||
52 | @@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | ||
53 | static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, | ||
54 | TCGArg dst, uint64_t val) | ||
55 | { | ||
56 | - /* Convert movi to mov with constant temp. */ | ||
57 | - TCGTemp *tv = tcg_constant_internal(ctx->type, val); | ||
58 | + TCGTemp *tv; | ||
59 | |||
60 | + if (ctx->type == TCG_TYPE_I32) { | ||
61 | + val = (int32_t)val; | ||
62 | + } | ||
63 | + | ||
64 | + /* Convert movi to mov with constant temp. */ | ||
65 | + tv = tcg_constant_internal(ctx->type, val); | ||
66 | init_ts_info(ctx, tv); | ||
67 | return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv)); | ||
68 | } | ||
69 | @@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op) | ||
70 | uint64_t z_mask = ctx->z_mask; | ||
71 | |||
72 | /* | ||
73 | - * 32-bit ops generate 32-bit results. For the result is zero test | ||
74 | - * below, we can ignore high bits, but for further optimizations we | ||
75 | - * need to record that the high bits contain garbage. | ||
76 | + * 32-bit ops generate 32-bit results, which for the purpose of | ||
77 | + * simplifying tcg are sign-extended. Certainly that's how we | ||
78 | + * represent our constants elsewhere. Note that the bits will | ||
79 | + * be reset properly for a 64-bit value when encountering the | ||
80 | + * type changing opcodes. | ||
81 | */ | ||
82 | if (ctx->type == TCG_TYPE_I32) { | ||
83 | - ctx->z_mask |= MAKE_64BIT_MASK(32, 32); | ||
84 | - a_mask &= MAKE_64BIT_MASK(0, 32); | ||
85 | - z_mask &= MAKE_64BIT_MASK(0, 32); | ||
86 | + a_mask = (int32_t)a_mask; | ||
87 | + z_mask = (int32_t)z_mask; | ||
88 | + ctx->z_mask = z_mask; | ||
89 | } | 50 | } |
90 | 51 | ||
91 | if (z_mask == 0) { | ||
92 | -- | 52 | -- |
93 | 2.25.1 | 53 | 2.34.1 |
94 | |||
1 | Move all of the known-zero optimizations into the per-opcode | 1 | Convert away from the old interface with the implicit |
---|---|---|---|
2 | functions. Use fold_masks when there is a possibility of the | 2 | MemOp argument. |
3 | result being determined, and simply set ctx->z_mask otherwise. | ||
4 | 3 | ||
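(A rough standalone illustration of the two masks -- the values are made up; only the z_mask/a_mask roles come from the patch: for AND, the known-zero masks intersect, and when arg2 is constant, a_mask marks the bits of arg1 that the AND can actually change.)

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t z1 = 0x00ff;   /* arg1: only the low 8 bits may be nonzero */
        uint64_t z2 = 0x0f0f;   /* arg2: a known constant */

        /* A result bit can be 1 only if it may be 1 in both inputs. */
        uint64_t z_mask = z1 & z2;    /* 0x000f */

        /* Only bits possibly 1 in arg1 but cleared by arg2 can make
         * the result differ from arg1. */
        uint64_t a_mask = z1 & ~z2;   /* 0x00f0 */

        printf("z_mask=%#" PRIx64 " a_mask=%#" PRIx64 "\n", z_mask, a_mask);
        /* z_mask == 0 folds the op to movi 0; a_mask == 0 folds it to
         * a plain mov of arg1, as fold_masks does below. */
        return 0;
    }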
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Reviewed-by: Anton Johansson <anjo@rev.ng> | ||
6 | Message-Id: <20230502135741.1158035-2-richard.henderson@linaro.org> | ||
8 | --- | 7 | --- |
9 | tcg/optimize.c | 545 ++++++++++++++++++++++++++----------------------- | 8 | target/avr/translate.c | 16 ++++++++-------- |
10 | 1 file changed, 294 insertions(+), 251 deletions(-) | 9 | 1 file changed, 8 insertions(+), 8 deletions(-) |
11 | 10 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/target/avr/translate.c b/target/avr/translate.c |
13 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 13 | --- a/target/avr/translate.c |
15 | +++ b/tcg/optimize.c | 14 | +++ b/target/avr/translate.c |
16 | @@ -XXX,XX +XXX,XX @@ typedef struct OptContext { | 15 | @@ -XXX,XX +XXX,XX @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr) |
17 | TCGTempSet temps_used; | 16 | if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) { |
18 | 17 | gen_helper_fullwr(cpu_env, data, addr); | |
19 | /* In flight values from optimization. */ | 18 | } else { |
20 | - uint64_t z_mask; | 19 | - tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */ |
21 | + uint64_t a_mask; /* mask bit is 0 iff value identical to first input */ | 20 | + tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB); |
22 | + uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */ | 21 | } |
23 | TCGType type; | ||
24 | } OptContext; | ||
25 | |||
26 | @@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op) | ||
27 | return false; | ||
28 | } | 22 | } |
29 | 23 | ||
30 | +static bool fold_masks(OptContext *ctx, TCGOp *op) | 24 | @@ -XXX,XX +XXX,XX @@ static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr) |
31 | +{ | 25 | if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) { |
32 | + uint64_t a_mask = ctx->a_mask; | 26 | gen_helper_fullrd(data, cpu_env, addr); |
33 | + uint64_t z_mask = ctx->z_mask; | 27 | } else { |
34 | + | 28 | - tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */ |
35 | + /* | 29 | + tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB); |
36 | + * 32-bit ops generate 32-bit results. For the result is zero test | ||
37 | + * below, we can ignore high bits, but for further optimizations we | ||
38 | + * need to record that the high bits contain garbage. | ||
39 | + */ | ||
40 | + if (ctx->type == TCG_TYPE_I32) { | ||
41 | + ctx->z_mask |= MAKE_64BIT_MASK(32, 32); | ||
42 | + a_mask &= MAKE_64BIT_MASK(0, 32); | ||
43 | + z_mask &= MAKE_64BIT_MASK(0, 32); | ||
44 | + } | ||
45 | + | ||
46 | + if (z_mask == 0) { | ||
47 | + return tcg_opt_gen_movi(ctx, op, op->args[0], 0); | ||
48 | + } | ||
49 | + if (a_mask == 0) { | ||
50 | + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); | ||
51 | + } | ||
52 | + return false; | ||
53 | +} | ||
54 | + | ||
55 | /* | ||
56 | * Convert @op to NOT, if NOT is supported by the host. | ||
57 | * Return true if the conversion is successful, which will still | ||
58 | @@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op) | ||
59 | |||
60 | static bool fold_and(OptContext *ctx, TCGOp *op) | ||
61 | { | ||
62 | + uint64_t z1, z2; | ||
63 | + | ||
64 | if (fold_const2(ctx, op) || | ||
65 | fold_xi_to_i(ctx, op, 0) || | ||
66 | fold_xi_to_x(ctx, op, -1) || | ||
67 | fold_xx_to_x(ctx, op)) { | ||
68 | return true; | ||
69 | } | 30 | } |
70 | - return false; | ||
71 | + | ||
72 | + z1 = arg_info(op->args[1])->z_mask; | ||
73 | + z2 = arg_info(op->args[2])->z_mask; | ||
74 | + ctx->z_mask = z1 & z2; | ||
75 | + | ||
76 | + /* | ||
77 | + * Known-zeros does not imply known-ones. Therefore unless | ||
78 | + * arg2 is constant, we can't infer affected bits from it. | ||
79 | + */ | ||
80 | + if (arg_is_const(op->args[2])) { | ||
81 | + ctx->a_mask = z1 & ~z2; | ||
82 | + } | ||
83 | + | ||
84 | + return fold_masks(ctx, op); | ||
85 | } | 31 | } |
86 | 32 | ||
87 | static bool fold_andc(OptContext *ctx, TCGOp *op) | 33 | @@ -XXX,XX +XXX,XX @@ static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a) |
88 | { | 34 | |
89 | + uint64_t z1; | 35 | tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */ |
90 | + | 36 | tcg_gen_or_tl(addr, addr, L); |
91 | if (fold_const2(ctx, op) || | 37 | - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ |
92 | fold_xx_to_i(ctx, op, 0) || | 38 | + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); |
93 | fold_xi_to_x(ctx, op, 0) || | 39 | return true; |
94 | fold_ix_to_not(ctx, op, -1)) { | ||
95 | return true; | ||
96 | } | ||
97 | - return false; | ||
98 | + | ||
99 | + z1 = arg_info(op->args[1])->z_mask; | ||
100 | + | ||
101 | + /* | ||
102 | + * Known-zeros does not imply known-ones. Therefore unless | ||
103 | + * arg2 is constant, we can't infer anything from it. | ||
104 | + */ | ||
105 | + if (arg_is_const(op->args[2])) { | ||
106 | + uint64_t z2 = ~arg_info(op->args[2])->z_mask; | ||
107 | + ctx->a_mask = z1 & ~z2; | ||
108 | + z1 &= z2; | ||
109 | + } | ||
110 | + ctx->z_mask = z1; | ||
111 | + | ||
112 | + return fold_masks(ctx, op); | ||
113 | } | 40 | } |
114 | 41 | ||
115 | static bool fold_brcond(OptContext *ctx, TCGOp *op) | 42 | @@ -XXX,XX +XXX,XX @@ static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a) |
116 | @@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) | 43 | |
117 | 44 | tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */ | |
118 | static bool fold_bswap(OptContext *ctx, TCGOp *op) | 45 | tcg_gen_or_tl(addr, addr, L); |
119 | { | 46 | - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ |
120 | + uint64_t z_mask, sign; | 47 | + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); |
121 | + | 48 | return true; |
122 | if (arg_is_const(op->args[1])) { | ||
123 | uint64_t t = arg_info(op->args[1])->val; | ||
124 | |||
125 | t = do_constant_folding(op->opc, ctx->type, t, op->args[2]); | ||
126 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
127 | } | ||
128 | - return false; | ||
129 | + | ||
130 | + z_mask = arg_info(op->args[1])->z_mask; | ||
131 | + switch (op->opc) { | ||
132 | + case INDEX_op_bswap16_i32: | ||
133 | + case INDEX_op_bswap16_i64: | ||
134 | + z_mask = bswap16(z_mask); | ||
135 | + sign = INT16_MIN; | ||
136 | + break; | ||
137 | + case INDEX_op_bswap32_i32: | ||
138 | + case INDEX_op_bswap32_i64: | ||
139 | + z_mask = bswap32(z_mask); | ||
140 | + sign = INT32_MIN; | ||
141 | + break; | ||
142 | + case INDEX_op_bswap64_i64: | ||
143 | + z_mask = bswap64(z_mask); | ||
144 | + sign = INT64_MIN; | ||
145 | + break; | ||
146 | + default: | ||
147 | + g_assert_not_reached(); | ||
148 | + } | ||
149 | + | ||
150 | + switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { | ||
151 | + case TCG_BSWAP_OZ: | ||
152 | + break; | ||
153 | + case TCG_BSWAP_OS: | ||
154 | + /* If the sign bit may be 1, force all the bits above to 1. */ | ||
155 | + if (z_mask & sign) { | ||
156 | + z_mask |= sign; | ||
157 | + } | ||
158 | + break; | ||
159 | + default: | ||
160 | + /* The high bits are undefined: force all bits above the sign to 1. */ | ||
161 | + z_mask |= sign << 1; | ||
162 | + break; | ||
163 | + } | ||
164 | + ctx->z_mask = z_mask; | ||
165 | + | ||
166 | + return fold_masks(ctx, op); | ||
167 | } | 49 | } |
168 | 50 | ||
169 | static bool fold_call(OptContext *ctx, TCGOp *op) | 51 | @@ -XXX,XX +XXX,XX @@ static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a) |
170 | @@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op) | 52 | |
171 | 53 | tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */ | |
172 | static bool fold_count_zeros(OptContext *ctx, TCGOp *op) | 54 | tcg_gen_or_tl(addr, addr, L); |
173 | { | 55 | - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ |
174 | + uint64_t z_mask; | 56 | + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); |
175 | + | 57 | tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ |
176 | if (arg_is_const(op->args[1])) { | 58 | tcg_gen_andi_tl(L, addr, 0xff); |
177 | uint64_t t = arg_info(op->args[1])->val; | 59 | tcg_gen_shri_tl(addr, addr, 8); |
178 | 60 | @@ -XXX,XX +XXX,XX @@ static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a) | |
179 | @@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op) | 61 | TCGv Rd = cpu_r[0]; |
180 | } | 62 | TCGv addr = gen_get_zaddr(); |
181 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); | 63 | |
182 | } | 64 | - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ |
183 | + | 65 | + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); |
184 | + switch (ctx->type) { | 66 | return true; |
185 | + case TCG_TYPE_I32: | ||
186 | + z_mask = 31; | ||
187 | + break; | ||
188 | + case TCG_TYPE_I64: | ||
189 | + z_mask = 63; | ||
190 | + break; | ||
191 | + default: | ||
192 | + g_assert_not_reached(); | ||
193 | + } | ||
194 | + ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask; | ||
195 | + | ||
196 | return false; | ||
197 | } | 67 | } |
198 | 68 | ||
199 | static bool fold_ctpop(OptContext *ctx, TCGOp *op) | 69 | @@ -XXX,XX +XXX,XX @@ static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a) |
200 | { | 70 | TCGv Rd = cpu_r[a->rd]; |
201 | - return fold_const1(ctx, op); | 71 | TCGv addr = gen_get_zaddr(); |
202 | + if (fold_const1(ctx, op)) { | 72 | |
203 | + return true; | 73 | - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ |
204 | + } | 74 | + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); |
205 | + | 75 | return true; |
206 | + switch (ctx->type) { | ||
207 | + case TCG_TYPE_I32: | ||
208 | + ctx->z_mask = 32 | 31; | ||
209 | + break; | ||
210 | + case TCG_TYPE_I64: | ||
211 | + ctx->z_mask = 64 | 63; | ||
212 | + break; | ||
213 | + default: | ||
214 | + g_assert_not_reached(); | ||
215 | + } | ||
216 | + return false; | ||
217 | } | 76 | } |
218 | 77 | ||
219 | static bool fold_deposit(OptContext *ctx, TCGOp *op) | 78 | @@ -XXX,XX +XXX,XX @@ static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a) |
220 | @@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op) | 79 | TCGv Rd = cpu_r[a->rd]; |
221 | t1 = deposit64(t1, op->args[3], op->args[4], t2); | 80 | TCGv addr = gen_get_zaddr(); |
222 | return tcg_opt_gen_movi(ctx, op, op->args[0], t1); | 81 | |
223 | } | 82 | - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ |
224 | + | 83 | + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); |
225 | + ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask, | 84 | tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ |
226 | + op->args[3], op->args[4], | 85 | gen_set_zaddr(addr); |
227 | + arg_info(op->args[2])->z_mask); | 86 | return true; |
228 | return false; | ||
229 | } | ||
230 | |||
231 | @@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op) | ||
232 | |||
233 | static bool fold_extract(OptContext *ctx, TCGOp *op) | ||
234 | { | ||
235 | + uint64_t z_mask_old, z_mask; | ||
236 | + | ||
237 | if (arg_is_const(op->args[1])) { | ||
238 | uint64_t t; | ||
239 | |||
240 | @@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op) | ||
241 | t = extract64(t, op->args[2], op->args[3]); | ||
242 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
243 | } | ||
244 | - return false; | ||
245 | + | ||
246 | + z_mask_old = arg_info(op->args[1])->z_mask; | ||
247 | + z_mask = extract64(z_mask_old, op->args[2], op->args[3]); | ||
248 | + if (op->args[2] == 0) { | ||
249 | + ctx->a_mask = z_mask_old ^ z_mask; | ||
250 | + } | ||
251 | + ctx->z_mask = z_mask; | ||
252 | + | ||
253 | + return fold_masks(ctx, op); | ||
254 | } | ||
255 | |||
256 | static bool fold_extract2(OptContext *ctx, TCGOp *op) | ||
257 | @@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op) | ||
258 | |||
259 | static bool fold_exts(OptContext *ctx, TCGOp *op) | ||
260 | { | ||
261 | - return fold_const1(ctx, op); | ||
262 | + uint64_t z_mask_old, z_mask, sign; | ||
263 | + bool type_change = false; | ||
264 | + | ||
265 | + if (fold_const1(ctx, op)) { | ||
266 | + return true; | ||
267 | + } | ||
268 | + | ||
269 | + z_mask_old = z_mask = arg_info(op->args[1])->z_mask; | ||
270 | + | ||
271 | + switch (op->opc) { | ||
272 | + CASE_OP_32_64(ext8s): | ||
273 | + sign = INT8_MIN; | ||
274 | + z_mask = (uint8_t)z_mask; | ||
275 | + break; | ||
276 | + CASE_OP_32_64(ext16s): | ||
277 | + sign = INT16_MIN; | ||
278 | + z_mask = (uint16_t)z_mask; | ||
279 | + break; | ||
280 | + case INDEX_op_ext_i32_i64: | ||
281 | + type_change = true; | ||
282 | + QEMU_FALLTHROUGH; | ||
283 | + case INDEX_op_ext32s_i64: | ||
284 | + sign = INT32_MIN; | ||
285 | + z_mask = (uint32_t)z_mask; | ||
286 | + break; | ||
287 | + default: | ||
288 | + g_assert_not_reached(); | ||
289 | + } | ||
290 | + | ||
291 | + if (z_mask & sign) { | ||
292 | + z_mask |= sign; | ||
293 | + } else if (!type_change) { | ||
294 | + ctx->a_mask = z_mask_old ^ z_mask; | ||
295 | + } | ||
296 | + ctx->z_mask = z_mask; | ||
297 | + | ||
298 | + return fold_masks(ctx, op); | ||
299 | } | ||
300 | |||
301 | static bool fold_extu(OptContext *ctx, TCGOp *op) | ||
302 | { | ||
303 | - return fold_const1(ctx, op); | ||
304 | + uint64_t z_mask_old, z_mask; | ||
305 | + bool type_change = false; | ||
306 | + | ||
307 | + if (fold_const1(ctx, op)) { | ||
308 | + return true; | ||
309 | + } | ||
310 | + | ||
311 | + z_mask_old = z_mask = arg_info(op->args[1])->z_mask; | ||
312 | + | ||
313 | + switch (op->opc) { | ||
314 | + CASE_OP_32_64(ext8u): | ||
315 | + z_mask = (uint8_t)z_mask; | ||
316 | + break; | ||
317 | + CASE_OP_32_64(ext16u): | ||
318 | + z_mask = (uint16_t)z_mask; | ||
319 | + break; | ||
320 | + case INDEX_op_extrl_i64_i32: | ||
321 | + case INDEX_op_extu_i32_i64: | ||
322 | + type_change = true; | ||
323 | + QEMU_FALLTHROUGH; | ||
324 | + case INDEX_op_ext32u_i64: | ||
325 | + z_mask = (uint32_t)z_mask; | ||
326 | + break; | ||
327 | + case INDEX_op_extrh_i64_i32: | ||
328 | + type_change = true; | ||
329 | + z_mask >>= 32; | ||
330 | + break; | ||
331 | + default: | ||
332 | + g_assert_not_reached(); | ||
333 | + } | ||
334 | + | ||
335 | + ctx->z_mask = z_mask; | ||
336 | + if (!type_change) { | ||
337 | + ctx->a_mask = z_mask_old ^ z_mask; | ||
338 | + } | ||
339 | + return fold_masks(ctx, op); | ||
340 | } | ||
341 | |||
342 | static bool fold_mb(OptContext *ctx, TCGOp *op) | ||
343 | @@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op) | ||
344 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); | ||
345 | } | ||
346 | |||
347 | + ctx->z_mask = arg_info(op->args[3])->z_mask | ||
348 | + | arg_info(op->args[4])->z_mask; | ||
349 | + | ||
350 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | ||
351 | uint64_t tv = arg_info(op->args[3])->val; | ||
352 | uint64_t fv = arg_info(op->args[4])->val; | ||
353 | @@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op) | ||
354 | |||
355 | static bool fold_neg(OptContext *ctx, TCGOp *op) | ||
356 | { | ||
357 | + uint64_t z_mask; | ||
358 | + | ||
359 | if (fold_const1(ctx, op)) { | ||
360 | return true; | ||
361 | } | ||
362 | + | ||
363 | + /* Set to 1 all bits to the left of the rightmost. */ | ||
364 | + z_mask = arg_info(op->args[1])->z_mask; | ||
365 | + ctx->z_mask = -(z_mask & -z_mask); | ||
366 | + | ||
367 | /* | ||
368 | * Because of fold_sub_to_neg, we want to always return true, | ||
369 | * via finish_folding. | ||
370 | @@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op) | ||
371 | fold_xx_to_x(ctx, op)) { | ||
372 | return true; | ||
373 | } | ||
374 | - return false; | ||
375 | + | ||
376 | + ctx->z_mask = arg_info(op->args[1])->z_mask | ||
377 | + | arg_info(op->args[2])->z_mask; | ||
378 | + return fold_masks(ctx, op); | ||
379 | } | ||
380 | |||
381 | static bool fold_orc(OptContext *ctx, TCGOp *op) | ||
382 | @@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op) | ||
383 | |||
384 | static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) | ||
385 | { | ||
386 | + const TCGOpDef *def = &tcg_op_defs[op->opc]; | ||
387 | + MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; | ||
388 | + MemOp mop = get_memop(oi); | ||
389 | + int width = 8 * memop_size(mop); | ||
390 | + | ||
391 | + if (!(mop & MO_SIGN) && width < 64) { | ||
392 | + ctx->z_mask = MAKE_64BIT_MASK(0, width); | ||
393 | + } | ||
394 | + | ||
395 | /* Opcodes that touch guest memory stop the mb optimization. */ | ||
396 | ctx->prev_mb = NULL; | ||
397 | return false; | ||
398 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op) | ||
399 | if (i >= 0) { | ||
400 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | ||
401 | } | ||
402 | + | ||
403 | + ctx->z_mask = 1; | ||
404 | return false; | ||
405 | } | ||
406 | |||
407 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
408 | op->opc = INDEX_op_setcond_i32; | ||
409 | break; | ||
410 | } | ||
411 | + | ||
412 | + ctx->z_mask = 1; | ||
413 | return false; | ||
414 | |||
415 | do_setcond_const: | ||
416 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
417 | |||
418 | static bool fold_sextract(OptContext *ctx, TCGOp *op) | ||
419 | { | ||
420 | + int64_t z_mask_old, z_mask; | ||
421 | + | ||
422 | if (arg_is_const(op->args[1])) { | ||
423 | uint64_t t; | ||
424 | |||
425 | @@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op) | ||
426 | t = sextract64(t, op->args[2], op->args[3]); | ||
427 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
428 | } | ||
429 | - return false; | ||
430 | + | ||
431 | + z_mask_old = arg_info(op->args[1])->z_mask; | ||
432 | + z_mask = sextract64(z_mask_old, op->args[2], op->args[3]); | ||
433 | + if (op->args[2] == 0 && z_mask >= 0) { | ||
434 | + ctx->a_mask = z_mask_old ^ z_mask; | ||
435 | + } | ||
436 | + ctx->z_mask = z_mask; | ||
437 | + | ||
438 | + return fold_masks(ctx, op); | ||
439 | } | ||
440 | |||
441 | static bool fold_shift(OptContext *ctx, TCGOp *op) | ||
442 | @@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op) | ||
443 | fold_xi_to_x(ctx, op, 0)) { | ||
444 | return true; | ||
445 | } | ||
446 | + | ||
447 | + if (arg_is_const(op->args[2])) { | ||
448 | + ctx->z_mask = do_constant_folding(op->opc, ctx->type, | ||
449 | + arg_info(op->args[1])->z_mask, | ||
450 | + arg_info(op->args[2])->val); | ||
451 | + return fold_masks(ctx, op); | ||
452 | + } | ||
453 | return false; | ||
454 | } | ||
455 | |||
456 | @@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op) | ||
457 | return fold_addsub2_i32(ctx, op, false); | ||
458 | } | ||
459 | |||
460 | +static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) | ||
461 | +{ | ||
462 | + /* We can't do any folding with a load, but we can record bits. */ | ||
463 | + switch (op->opc) { | ||
464 | + CASE_OP_32_64(ld8u): | ||
465 | + ctx->z_mask = MAKE_64BIT_MASK(0, 8); | ||
466 | + break; | ||
467 | + CASE_OP_32_64(ld16u): | ||
468 | + ctx->z_mask = MAKE_64BIT_MASK(0, 16); | ||
469 | + break; | ||
470 | + case INDEX_op_ld32u_i64: | ||
471 | + ctx->z_mask = MAKE_64BIT_MASK(0, 32); | ||
472 | + break; | ||
473 | + default: | ||
474 | + g_assert_not_reached(); | ||
475 | + } | ||
476 | + return false; | ||
477 | +} | ||
478 | + | ||
479 | static bool fold_xor(OptContext *ctx, TCGOp *op) | ||
480 | { | ||
481 | if (fold_const2(ctx, op) || | ||
482 | @@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op) | ||
483 | fold_xi_to_not(ctx, op, -1)) { | ||
484 | return true; | ||
485 | } | ||
486 | - return false; | ||
487 | + | ||
488 | + ctx->z_mask = arg_info(op->args[1])->z_mask | ||
489 | + | arg_info(op->args[2])->z_mask; | ||
490 | + return fold_masks(ctx, op); | ||
491 | } | ||
492 | |||
493 | /* Propagate constants and copies, fold constant expressions. */ | ||
494 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
495 | } | ||
496 | |||
497 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { | ||
498 | - uint64_t z_mask, partmask, affected, tmp; | ||
499 | TCGOpcode opc = op->opc; | ||
500 | const TCGOpDef *def; | ||
501 | bool done = false; | ||
502 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
503 | break; | ||
504 | } | ||
505 | |||
506 | - /* Simplify using known-zero bits. Currently only ops with a single | ||
507 | - output argument is supported. */ | ||
508 | - z_mask = -1; | ||
509 | - affected = -1; | ||
510 | - switch (opc) { | ||
511 | - CASE_OP_32_64(ext8s): | ||
512 | - if ((arg_info(op->args[1])->z_mask & 0x80) != 0) { | ||
513 | - break; | ||
514 | - } | ||
515 | - QEMU_FALLTHROUGH; | ||
516 | - CASE_OP_32_64(ext8u): | ||
517 | - z_mask = 0xff; | ||
518 | - goto and_const; | ||
519 | - CASE_OP_32_64(ext16s): | ||
520 | - if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) { | ||
521 | - break; | ||
522 | - } | ||
523 | - QEMU_FALLTHROUGH; | ||
524 | - CASE_OP_32_64(ext16u): | ||
525 | - z_mask = 0xffff; | ||
526 | - goto and_const; | ||
527 | - case INDEX_op_ext32s_i64: | ||
528 | - if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) { | ||
529 | - break; | ||
530 | - } | ||
531 | - QEMU_FALLTHROUGH; | ||
532 | - case INDEX_op_ext32u_i64: | ||
533 | - z_mask = 0xffffffffU; | ||
534 | - goto and_const; | ||
535 | - | ||
536 | - CASE_OP_32_64(and): | ||
537 | - z_mask = arg_info(op->args[2])->z_mask; | ||
538 | - if (arg_is_const(op->args[2])) { | ||
539 | - and_const: | ||
540 | - affected = arg_info(op->args[1])->z_mask & ~z_mask; | ||
541 | - } | ||
542 | - z_mask = arg_info(op->args[1])->z_mask & z_mask; | ||
543 | - break; | ||
544 | - | ||
545 | - case INDEX_op_ext_i32_i64: | ||
546 | - if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) { | ||
547 | - break; | ||
548 | - } | ||
549 | - QEMU_FALLTHROUGH; | ||
550 | - case INDEX_op_extu_i32_i64: | ||
551 | - /* We do not compute affected as it is a size changing op. */ | ||
552 | - z_mask = (uint32_t)arg_info(op->args[1])->z_mask; | ||
553 | - break; | ||
554 | - | ||
555 | - CASE_OP_32_64(andc): | ||
556 | - /* Known-zeros does not imply known-ones. Therefore unless | ||
557 | - op->args[2] is constant, we can't infer anything from it. */ | ||
558 | - if (arg_is_const(op->args[2])) { | ||
559 | - z_mask = ~arg_info(op->args[2])->z_mask; | ||
560 | - goto and_const; | ||
561 | - } | ||
562 | - /* But we certainly know nothing outside args[1] may be set. */ | ||
563 | - z_mask = arg_info(op->args[1])->z_mask; | ||
564 | - break; | ||
565 | - | ||
566 | - case INDEX_op_sar_i32: | ||
567 | - if (arg_is_const(op->args[2])) { | ||
568 | - tmp = arg_info(op->args[2])->val & 31; | ||
569 | - z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp; | ||
570 | - } | ||
571 | - break; | ||
572 | - case INDEX_op_sar_i64: | ||
573 | - if (arg_is_const(op->args[2])) { | ||
574 | - tmp = arg_info(op->args[2])->val & 63; | ||
575 | - z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp; | ||
576 | - } | ||
577 | - break; | ||
578 | - | ||
579 | - case INDEX_op_shr_i32: | ||
580 | - if (arg_is_const(op->args[2])) { | ||
581 | - tmp = arg_info(op->args[2])->val & 31; | ||
582 | - z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp; | ||
583 | - } | ||
584 | - break; | ||
585 | - case INDEX_op_shr_i64: | ||
586 | - if (arg_is_const(op->args[2])) { | ||
587 | - tmp = arg_info(op->args[2])->val & 63; | ||
588 | - z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp; | ||
589 | - } | ||
590 | - break; | ||
591 | - | ||
592 | - case INDEX_op_extrl_i64_i32: | ||
593 | - z_mask = (uint32_t)arg_info(op->args[1])->z_mask; | ||
594 | - break; | ||
595 | - case INDEX_op_extrh_i64_i32: | ||
596 | - z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32; | ||
597 | - break; | ||
598 | - | ||
599 | - CASE_OP_32_64(shl): | ||
600 | - if (arg_is_const(op->args[2])) { | ||
601 | - tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1); | ||
602 | - z_mask = arg_info(op->args[1])->z_mask << tmp; | ||
603 | - } | ||
604 | - break; | ||
605 | - | ||
606 | - CASE_OP_32_64(neg): | ||
607 | - /* Set to 1 all bits to the left of the rightmost. */ | ||
608 | - z_mask = -(arg_info(op->args[1])->z_mask | ||
609 | - & -arg_info(op->args[1])->z_mask); | ||
610 | - break; | ||
611 | - | ||
612 | - CASE_OP_32_64(deposit): | ||
613 | - z_mask = deposit64(arg_info(op->args[1])->z_mask, | ||
614 | - op->args[3], op->args[4], | ||
615 | - arg_info(op->args[2])->z_mask); | ||
616 | - break; | ||
617 | - | ||
618 | - CASE_OP_32_64(extract): | ||
619 | - z_mask = extract64(arg_info(op->args[1])->z_mask, | ||
620 | - op->args[2], op->args[3]); | ||
621 | - if (op->args[2] == 0) { | ||
622 | - affected = arg_info(op->args[1])->z_mask & ~z_mask; | ||
623 | - } | ||
624 | - break; | ||
625 | - CASE_OP_32_64(sextract): | ||
626 | - z_mask = sextract64(arg_info(op->args[1])->z_mask, | ||
627 | - op->args[2], op->args[3]); | ||
628 | - if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) { | ||
629 | - affected = arg_info(op->args[1])->z_mask & ~z_mask; | ||
630 | - } | ||
631 | - break; | ||
632 | - | ||
633 | - CASE_OP_32_64(or): | ||
634 | - CASE_OP_32_64(xor): | ||
635 | - z_mask = arg_info(op->args[1])->z_mask | ||
636 | - | arg_info(op->args[2])->z_mask; | ||
637 | - break; | ||
638 | - | ||
639 | - case INDEX_op_clz_i32: | ||
640 | - case INDEX_op_ctz_i32: | ||
641 | - z_mask = arg_info(op->args[2])->z_mask | 31; | ||
642 | - break; | ||
643 | - | ||
644 | - case INDEX_op_clz_i64: | ||
645 | - case INDEX_op_ctz_i64: | ||
646 | - z_mask = arg_info(op->args[2])->z_mask | 63; | ||
647 | - break; | ||
648 | - | ||
649 | - case INDEX_op_ctpop_i32: | ||
650 | - z_mask = 32 | 31; | ||
651 | - break; | ||
652 | - case INDEX_op_ctpop_i64: | ||
653 | - z_mask = 64 | 63; | ||
654 | - break; | ||
655 | - | ||
656 | - CASE_OP_32_64(setcond): | ||
657 | - case INDEX_op_setcond2_i32: | ||
658 | - z_mask = 1; | ||
659 | - break; | ||
660 | - | ||
661 | - CASE_OP_32_64(movcond): | ||
662 | - z_mask = arg_info(op->args[3])->z_mask | ||
663 | - | arg_info(op->args[4])->z_mask; | ||
664 | - break; | ||
665 | - | ||
666 | - CASE_OP_32_64(ld8u): | ||
667 | - z_mask = 0xff; | ||
668 | - break; | ||
669 | - CASE_OP_32_64(ld16u): | ||
670 | - z_mask = 0xffff; | ||
671 | - break; | ||
672 | - case INDEX_op_ld32u_i64: | ||
673 | - z_mask = 0xffffffffu; | ||
674 | - break; | ||
675 | - | ||
676 | - CASE_OP_32_64(qemu_ld): | ||
677 | - { | ||
678 | - MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; | ||
679 | - MemOp mop = get_memop(oi); | ||
680 | - if (!(mop & MO_SIGN)) { | ||
681 | - z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; | ||
682 | - } | ||
683 | - } | ||
684 | - break; | ||
685 | - | ||
686 | - CASE_OP_32_64(bswap16): | ||
687 | - z_mask = arg_info(op->args[1])->z_mask; | ||
688 | - if (z_mask <= 0xffff) { | ||
689 | - op->args[2] |= TCG_BSWAP_IZ; | ||
690 | - } | ||
691 | - z_mask = bswap16(z_mask); | ||
692 | - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { | ||
693 | - case TCG_BSWAP_OZ: | ||
694 | - break; | ||
695 | - case TCG_BSWAP_OS: | ||
696 | - z_mask = (int16_t)z_mask; | ||
697 | - break; | ||
698 | - default: /* undefined high bits */ | ||
699 | - z_mask |= MAKE_64BIT_MASK(16, 48); | ||
700 | - break; | ||
701 | - } | ||
702 | - break; | ||
703 | - | ||
704 | - case INDEX_op_bswap32_i64: | ||
705 | - z_mask = arg_info(op->args[1])->z_mask; | ||
706 | - if (z_mask <= 0xffffffffu) { | ||
707 | - op->args[2] |= TCG_BSWAP_IZ; | ||
708 | - } | ||
709 | - z_mask = bswap32(z_mask); | ||
710 | - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { | ||
711 | - case TCG_BSWAP_OZ: | ||
712 | - break; | ||
713 | - case TCG_BSWAP_OS: | ||
714 | - z_mask = (int32_t)z_mask; | ||
715 | - break; | ||
716 | - default: /* undefined high bits */ | ||
717 | - z_mask |= MAKE_64BIT_MASK(32, 32); | ||
718 | - break; | ||
719 | - } | ||
720 | - break; | ||
721 | - | ||
722 | - default: | ||
723 | - break; | ||
724 | - } | ||
725 | - | ||
726 | - /* 32-bit ops generate 32-bit results. For the result is zero test | ||
727 | - below, we can ignore high bits, but for further optimizations we | ||
728 | - need to record that the high bits contain garbage. */ | ||
729 | - partmask = z_mask; | ||
730 | - if (ctx.type == TCG_TYPE_I32) { | ||
731 | - z_mask |= ~(tcg_target_ulong)0xffffffffu; | ||
732 | - partmask &= 0xffffffffu; | ||
733 | - affected &= 0xffffffffu; | ||
734 | - } | ||
735 | - ctx.z_mask = z_mask; | ||
736 | - | ||
737 | - if (partmask == 0) { | ||
738 | - tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | ||
739 | - continue; | ||
740 | - } | ||
741 | - if (affected == 0) { | ||
742 | - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
743 | - continue; | ||
744 | - } | ||
745 | + /* Assume all bits affected, and no bits known zero. */ | ||
746 | + ctx.a_mask = -1; | ||
747 | + ctx.z_mask = -1; | ||
748 | |||
749 | /* | ||
750 | * Process each opcode. | ||
751 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
752 | case INDEX_op_extrh_i64_i32: | ||
753 | done = fold_extu(&ctx, op); | ||
754 | break; | ||
755 | + CASE_OP_32_64(ld8u): | ||
756 | + CASE_OP_32_64(ld16u): | ||
757 | + case INDEX_op_ld32u_i64: | ||
758 | + done = fold_tcg_ld(&ctx, op); | ||
759 | + break; | ||
760 | case INDEX_op_mb: | ||
761 | done = fold_mb(&ctx, op); | ||
762 | break; | ||
763 | -- | 87 | -- |
764 | 2.25.1 | 88 | 2.34.1 |
765 | |||
1 | Adjust the interface to take the OptContext parameter instead | 1 | Convert away from the old interface with the implicit |
---|---|---|---|
2 | of TCGContext or both. | 2 | MemOp argument. In this case we can fold the calls |
3 | using the size bits of MemOp. | ||
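(To make the folding concrete -- a standalone sketch using a GCC/Clang builtin in place of QEMU's ctz32: the low bits of a MemOp encode log2 of the access size, so the separate 1/2/4-byte store helpers collapse into a single call whose size bits are ctz32(size), with MO_TE OR'ed in for target endianness.)

    #include <stdio.h>

    static int ctz32(unsigned v) { return __builtin_ctz(v); }

    int main(void)
    {
        /* size in bytes -> MemOp size bits, as in ctz32(size) | MO_TE */
        for (unsigned size = 1; size <= 4; size <<= 1) {
            printf("size %u -> MO_%u (size bits %d)\n",
                   size, 8 * size, ctz32(size));
        }
        return 0;
    }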
3 | 4 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Reviewed-by: Anton Johansson <anjo@rev.ng> | ||
7 | Message-Id: <20230502135741.1158035-3-richard.henderson@linaro.org> | ||
7 | --- | 8 | --- |
8 | tcg/optimize.c | 67 +++++++++++++++++++++++++------------------------- | 9 | target/cris/translate_v10.c.inc | 18 ++++-------------- |
9 | 1 file changed, 34 insertions(+), 33 deletions(-) | 10 | 1 file changed, 4 insertions(+), 14 deletions(-) |
10 | 11 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 12 | diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc |
12 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 14 | --- a/target/cris/translate_v10.c.inc |
14 | +++ b/tcg/optimize.c | 15 | +++ b/target/cris/translate_v10.c.inc |
15 | @@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo { | 16 | @@ -XXX,XX +XXX,XX @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val, |
16 | } TempOptInfo; | 17 | /* Store only if F flag isn't set */ |
17 | 18 | tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10); | |
18 | typedef struct OptContext { | 19 | tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); |
19 | + TCGContext *tcg; | 20 | - if (size == 1) { |
20 | TCGTempSet temps_used; | 21 | - tcg_gen_qemu_st8(tval, taddr, mem_index); |
21 | } OptContext; | 22 | - } else if (size == 2) { |
22 | 23 | - tcg_gen_qemu_st16(tval, taddr, mem_index); | |
23 | @@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2) | 24 | - } else { |
24 | return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); | 25 | - tcg_gen_qemu_st32(tval, taddr, mem_index); |
25 | } | 26 | - } |
26 | 27 | + | |
27 | -static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | 28 | + tcg_gen_qemu_st_tl(tval, taddr, mem_index, ctz32(size) | MO_TE); |
28 | +static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | 29 | + |
29 | { | 30 | gen_set_label(l1); |
30 | TCGTemp *dst_ts = arg_temp(dst); | 31 | tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */ |
31 | TCGTemp *src_ts = arg_temp(src); | 32 | tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/ |
32 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | 33 | @@ -XXX,XX +XXX,XX @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val, |
33 | TCGOpcode new_op; | ||
34 | |||
35 | if (ts_are_copies(dst_ts, src_ts)) { | ||
36 | - tcg_op_remove(s, op); | ||
37 | + tcg_op_remove(ctx->tcg, op); | ||
38 | return; | 34 | return; |
39 | } | 35 | } |
40 | 36 | ||
41 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | 37 | - if (size == 1) { |
42 | } | 38 | - tcg_gen_qemu_st8(val, addr, mem_index); |
39 | - } else if (size == 2) { | ||
40 | - tcg_gen_qemu_st16(val, addr, mem_index); | ||
41 | - } else { | ||
42 | - tcg_gen_qemu_st32(val, addr, mem_index); | ||
43 | - } | ||
44 | + tcg_gen_qemu_st_tl(val, addr, mem_index, ctz32(size) | MO_TE); | ||
43 | } | 45 | } |
44 | 46 | ||
45 | -static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx, | 47 | |
46 | - TCGOp *op, TCGArg dst, uint64_t val) | ||
47 | +static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, | ||
48 | + TCGArg dst, uint64_t val) | ||
49 | { | ||
50 | const TCGOpDef *def = &tcg_op_defs[op->opc]; | ||
51 | TCGType type; | ||
52 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx, | ||
53 | /* Convert movi to mov with constant temp. */ | ||
54 | tv = tcg_constant_internal(type, val); | ||
55 | init_ts_info(ctx, tv); | ||
56 | - tcg_opt_gen_mov(s, op, dst, temp_arg(tv)); | ||
57 | + tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv)); | ||
58 | } | ||
59 | |||
60 | static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) | ||
61 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
62 | { | ||
63 | int nb_temps, nb_globals, i; | ||
64 | TCGOp *op, *op_next, *prev_mb = NULL; | ||
65 | - OptContext ctx = {}; | ||
66 | + OptContext ctx = { .tcg = s }; | ||
67 | |||
68 | /* Array VALS has an element for each temp. | ||
69 | If this temp holds a constant then its value is kept in VALS' element. | ||
70 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
71 | CASE_OP_32_64(rotr): | ||
72 | if (arg_is_const(op->args[1]) | ||
73 | && arg_info(op->args[1])->val == 0) { | ||
74 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0); | ||
75 | + tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | ||
76 | continue; | ||
77 | } | ||
78 | break; | ||
79 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
80 | if (!arg_is_const(op->args[1]) | ||
81 | && arg_is_const(op->args[2]) | ||
82 | && arg_info(op->args[2])->val == 0) { | ||
83 | - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); | ||
84 | + tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
85 | continue; | ||
86 | } | ||
87 | break; | ||
88 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
89 | if (!arg_is_const(op->args[1]) | ||
90 | && arg_is_const(op->args[2]) | ||
91 | && arg_info(op->args[2])->val == -1) { | ||
92 | - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); | ||
93 | + tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
94 | continue; | ||
95 | } | ||
96 | break; | ||
97 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
98 | |||
99 | if (partmask == 0) { | ||
100 | tcg_debug_assert(nb_oargs == 1); | ||
101 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0); | ||
102 | + tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | ||
103 | continue; | ||
104 | } | ||
105 | if (affected == 0) { | ||
106 | tcg_debug_assert(nb_oargs == 1); | ||
107 | - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); | ||
108 | + tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
109 | continue; | ||
110 | } | ||
111 | |||
112 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
113 | CASE_OP_32_64(mulsh): | ||
114 | if (arg_is_const(op->args[2]) | ||
115 | && arg_info(op->args[2])->val == 0) { | ||
116 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0); | ||
117 | + tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | ||
118 | continue; | ||
119 | } | ||
120 | break; | ||
121 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
122 | CASE_OP_32_64_VEC(or): | ||
123 | CASE_OP_32_64_VEC(and): | ||
124 | if (args_are_copies(op->args[1], op->args[2])) { | ||
125 | - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); | ||
126 | + tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
127 | continue; | ||
128 | } | ||
129 | break; | ||
130 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
131 | CASE_OP_32_64_VEC(sub): | ||
132 | CASE_OP_32_64_VEC(xor): | ||
133 | if (args_are_copies(op->args[1], op->args[2])) { | ||
134 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0); | ||
135 | + tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | ||
136 | continue; | ||
137 | } | ||
138 | break; | ||
139 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
140 | allocator where needed and possible. Also detect copies. */ | ||
141 | switch (opc) { | ||
142 | CASE_OP_32_64_VEC(mov): | ||
143 | - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); | ||
144 | + tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
145 | continue; | ||
146 | |||
147 | case INDEX_op_dup_vec: | ||
148 | if (arg_is_const(op->args[1])) { | ||
149 | tmp = arg_info(op->args[1])->val; | ||
150 | tmp = dup_const(TCGOP_VECE(op), tmp); | ||
151 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
152 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
153 | continue; | ||
154 | } | ||
155 | break; | ||
156 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
157 | case INDEX_op_dup2_vec: | ||
158 | assert(TCG_TARGET_REG_BITS == 32); | ||
159 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
160 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], | ||
161 | + tcg_opt_gen_movi(&ctx, op, op->args[0], | ||
162 | deposit64(arg_info(op->args[1])->val, 32, 32, | ||
163 | arg_info(op->args[2])->val)); | ||
164 | continue; | ||
165 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
166 | case INDEX_op_extrh_i64_i32: | ||
167 | if (arg_is_const(op->args[1])) { | ||
168 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0); | ||
169 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
170 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
171 | continue; | ||
172 | } | ||
173 | break; | ||
174 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
175 | if (arg_is_const(op->args[1])) { | ||
176 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | ||
177 | op->args[2]); | ||
178 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
179 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
180 | continue; | ||
181 | } | ||
182 | break; | ||
183 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
184 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
185 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | ||
186 | arg_info(op->args[2])->val); | ||
187 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
188 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
189 | continue; | ||
190 | } | ||
191 | break; | ||
192 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
193 | TCGArg v = arg_info(op->args[1])->val; | ||
194 | if (v != 0) { | ||
195 | tmp = do_constant_folding(opc, v, 0); | ||
196 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
197 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
198 | } else { | ||
199 | - tcg_opt_gen_mov(s, op, op->args[0], op->args[2]); | ||
200 | + tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]); | ||
201 | } | ||
202 | continue; | ||
203 | } | ||
204 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
205 | tmp = deposit64(arg_info(op->args[1])->val, | ||
206 | op->args[3], op->args[4], | ||
207 | arg_info(op->args[2])->val); | ||
208 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
209 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
210 | continue; | ||
211 | } | ||
212 | break; | ||
213 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
214 | if (arg_is_const(op->args[1])) { | ||
215 | tmp = extract64(arg_info(op->args[1])->val, | ||
216 | op->args[2], op->args[3]); | ||
217 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
218 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
219 | continue; | ||
220 | } | ||
221 | break; | ||
222 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
223 | if (arg_is_const(op->args[1])) { | ||
224 | tmp = sextract64(arg_info(op->args[1])->val, | ||
225 | op->args[2], op->args[3]); | ||
226 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
227 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
228 | continue; | ||
229 | } | ||
230 | break; | ||
231 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
232 | tmp = (int32_t)(((uint32_t)v1 >> shr) | | ||
233 | ((uint32_t)v2 << (32 - shr))); | ||
234 | } | ||
235 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
236 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
237 | continue; | ||
238 | } | ||
239 | break; | ||
240 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
241 | tmp = do_constant_folding_cond(opc, op->args[1], | ||
242 | op->args[2], op->args[3]); | ||
243 | if (tmp != 2) { | ||
244 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
245 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
246 | continue; | ||
247 | } | ||
248 | break; | ||
249 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
250 | tmp = do_constant_folding_cond(opc, op->args[1], | ||
251 | op->args[2], op->args[5]); | ||
252 | if (tmp != 2) { | ||
253 | - tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]); | ||
254 | + tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]); | ||
255 | continue; | ||
256 | } | ||
257 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | ||
258 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
259 | |||
260 | rl = op->args[0]; | ||
261 | rh = op->args[1]; | ||
262 | - tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a); | ||
263 | - tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32)); | ||
264 | + tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a); | ||
265 | + tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32)); | ||
266 | continue; | ||
267 | } | ||
268 | break; | ||
269 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
270 | |||
271 | rl = op->args[0]; | ||
272 | rh = op->args[1]; | ||
273 | - tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r); | ||
274 | - tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32)); | ||
275 | + tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r); | ||
276 | + tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32)); | ||
277 | continue; | ||
278 | } | ||
279 | break; | ||
280 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
281 | op->args[5]); | ||
282 | if (tmp != 2) { | ||
283 | do_setcond_const: | ||
284 | - tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
285 | + tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
286 | continue; | ||
287 | } | ||
288 | if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE) | ||
289 | -- | 48 | -- |
290 | 2.25.1 | 49 | 2.34.1 |
291 | |||
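The change above is mechanical: every helper that used to take both the TCGContext and the OptContext now reaches the former through the latter. A minimal standalone sketch of the pattern follows; the types are stubs and the helper name is hypothetical, illustration only, though the field name tcg matches the ctx->tcg use visible later in this series (tcg_op_remove(ctx->tcg, op) in fold_brcond).

    typedef struct TCGContext TCGContext;  /* opaque stub for this sketch */
    typedef struct TCGOp TCGOp;            /* opaque stub for this sketch */

    typedef struct OptContext {
        TCGContext *tcg;   /* replaces the explicit 's' parameter */
        /* ... per-pass folding state lives here as well ... */
    } OptContext;

    /* Before: tcg_opt_gen_movi(TCGContext *s, OptContext *ctx, ...)
     * After:  one pointer reaches everything the helper needs.      */
    static bool opt_gen_movi_sketch(OptContext *ctx, TCGOp *op)
    {
        (void)ctx->tcg;    /* used wherever 's' was used before */
        (void)op;
        return true;
    }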
1 | For constant shifts, we can simply shift the s_mask. | 1 | Convert away from the old interface with the implicit |
---|---|---|---|
2 | MemOp argument. Importantly, this removes some incorrect | ||
3 | casts generated by idef-parser's gen_load(). | ||
2 | 4 | ||
3 | For variable shifts, we know that sar does not reduce | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | the s_mask, which helps for sequences like | 6 | Tested-by: Taylor Simpson <tsimpson@quicinc.com> |
7 | Reviewed-by: Taylor Simpson <tsimpson@quicinc.com> | ||
8 | Reviewed-by: Anton Johansson <anjo@rev.ng> | ||
9 | Message-Id: <20230502135741.1158035-4-richard.henderson@linaro.org> | ||
10 | --- | ||
11 | target/hexagon/macros.h | 14 ++++----- | ||
12 | target/hexagon/genptr.c | 8 +++--- | ||
13 | target/hexagon/idef-parser/parser-helpers.c | 28 +++++++++--------- | ||
14 | target/hexagon/translate.c | 32 ++++++++++----------- | ||
15 | 4 files changed, 40 insertions(+), 42 deletions(-) | ||
5 | 16 | ||
6 | ext32s_i64 t, in | 17 | diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h |
7 | sar_i64 t, t, v | 18 | index XXXXXXX..XXXXXXX 100644 |
8 | ext32s_i64 out, t | 19 | --- a/target/hexagon/macros.h |
9 | 20 | +++ b/target/hexagon/macros.h | |
10 | allowing the final extend to be eliminated. | 21 | @@ -XXX,XX +XXX,XX @@ |
11 | 22 | #define MEM_LOAD1s(DST, VA) \ | |
12 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 23 | do { \ |
13 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 24 | CHECK_NOSHUF(VA, 1); \ |
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 25 | - tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \ |
15 | --- | 26 | + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_SB); \ |
16 | tcg/optimize.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++--- | 27 | } while (0) |
17 | 1 file changed, 47 insertions(+), 3 deletions(-) | 28 | #define MEM_LOAD1u(DST, VA) \ |
18 | 29 | do { \ | |
19 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 30 | CHECK_NOSHUF(VA, 1); \ |
20 | index XXXXXXX..XXXXXXX 100644 | 31 | - tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \ |
21 | --- a/tcg/optimize.c | 32 | + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_UB); \ |
22 | +++ b/tcg/optimize.c | 33 | } while (0) |
23 | @@ -XXX,XX +XXX,XX @@ static uint64_t smask_from_zmask(uint64_t zmask) | 34 | #define MEM_LOAD2s(DST, VA) \ |
24 | return ~(~0ull >> rep); | 35 | do { \ |
25 | } | 36 | CHECK_NOSHUF(VA, 2); \ |
26 | 37 | - tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \ | |
27 | +/* | 38 | + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESW); \ |
28 | + * Recreate a properly left-aligned smask after manipulation. | 39 | } while (0) |
29 | + * Some bit-shuffling, particularly shifts and rotates, may | 40 | #define MEM_LOAD2u(DST, VA) \ |
30 | + * retain sign bits on the left, but may scatter disconnected | 41 | do { \ |
31 | + * sign bits on the right. Retain only what remains to the left. | 42 | CHECK_NOSHUF(VA, 2); \ |
32 | + */ | 43 | - tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \ |
33 | +static uint64_t smask_from_smask(int64_t smask) | 44 | + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUW); \ |
34 | +{ | 45 | } while (0) |
35 | + /* Only the 1 bits are significant for smask */ | 46 | #define MEM_LOAD4s(DST, VA) \ |
36 | + return smask_from_zmask(~smask); | 47 | do { \ |
37 | +} | 48 | CHECK_NOSHUF(VA, 4); \ |
38 | + | 49 | - tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ |
39 | static inline TempOptInfo *ts_info(TCGTemp *ts) | 50 | + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESL); \ |
40 | { | 51 | } while (0) |
41 | return ts->state_ptr; | 52 | #define MEM_LOAD4u(DST, VA) \ |
42 | @@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op) | 53 | do { \ |
43 | 54 | CHECK_NOSHUF(VA, 4); \ | |
44 | static bool fold_shift(OptContext *ctx, TCGOp *op) | 55 | - tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ |
45 | { | 56 | + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUL); \ |
46 | + uint64_t s_mask, z_mask, sign; | 57 | } while (0) |
47 | + | 58 | #define MEM_LOAD8u(DST, VA) \ |
48 | if (fold_const2(ctx, op) || | 59 | do { \ |
49 | fold_ix_to_i(ctx, op, 0) || | 60 | CHECK_NOSHUF(VA, 8); \ |
50 | fold_xi_to_x(ctx, op, 0)) { | 61 | - tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \ |
51 | return true; | 62 | + tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_TEUQ); \ |
52 | } | 63 | } while (0) |
53 | 64 | ||
54 | + s_mask = arg_info(op->args[1])->s_mask; | 65 | #define MEM_STORE1_FUNC(X) \ |
55 | + z_mask = arg_info(op->args[1])->z_mask; | 66 | diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c |
56 | + | 67 | index XXXXXXX..XXXXXXX 100644 |
57 | if (arg_is_const(op->args[2])) { | 68 | --- a/target/hexagon/genptr.c |
58 | - ctx->z_mask = do_constant_folding(op->opc, ctx->type, | 69 | +++ b/target/hexagon/genptr.c |
59 | - arg_info(op->args[1])->z_mask, | 70 | @@ -XXX,XX +XXX,XX @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src) |
60 | - arg_info(op->args[2])->val); | 71 | |
61 | + int sh = arg_info(op->args[2])->val; | 72 | static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index) |
62 | + | 73 | { |
63 | + ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); | 74 | - tcg_gen_qemu_ld32u(dest, vaddr, mem_index); |
64 | + | 75 | + tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL); |
65 | + s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh); | 76 | tcg_gen_mov_tl(hex_llsc_addr, vaddr); |
66 | + ctx->s_mask = smask_from_smask(s_mask); | 77 | tcg_gen_mov_tl(hex_llsc_val, dest); |
67 | + | 78 | } |
68 | return fold_masks(ctx, op); | 79 | |
69 | } | 80 | static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index) |
70 | + | 81 | { |
71 | + switch (op->opc) { | 82 | - tcg_gen_qemu_ld64(dest, vaddr, mem_index); |
72 | + CASE_OP_32_64(sar): | 83 | + tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ); |
73 | + /* | 84 | tcg_gen_mov_tl(hex_llsc_addr, vaddr); |
74 | + * Arithmetic right shift will not reduce the number of | 85 | tcg_gen_mov_i64(hex_llsc_val_i64, dest); |
75 | + * input sign repetitions. | 86 | } |
76 | + */ | 87 | @@ -XXX,XX +XXX,XX @@ static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA) |
77 | + ctx->s_mask = s_mask; | 88 | { |
78 | + break; | 89 | Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */ |
79 | + CASE_OP_32_64(shr): | 90 | CHECK_NOSHUF(EA, 8); |
80 | + /* | 91 | - tcg_gen_qemu_ld64(frame, EA, ctx->mem_idx); |
81 | + * If the sign bit is known zero, then logical right shift | 92 | + tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ); |
82 | + * will not reduced the number of input sign repetitions. | 93 | } |
83 | + */ | 94 | |
84 | + sign = (s_mask & -s_mask) >> 1; | 95 | static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src) |
85 | + if (!(z_mask & sign)) { | 96 | @@ -XXX,XX +XXX,XX @@ static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src, |
86 | + ctx->s_mask = s_mask; | 97 | tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1)); |
87 | + } | 98 | } |
88 | + break; | 99 | for (int i = 0; i < sizeof(MMVector) / 8; i++) { |
89 | + default: | 100 | - tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx); |
90 | + break; | 101 | + tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ); |
91 | + } | 102 | tcg_gen_addi_tl(src, src, 8); |
92 | + | 103 | tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8); |
93 | return false; | 104 | } |
94 | } | 105 | diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c |
106 | index XXXXXXX..XXXXXXX 100644 | ||
107 | --- a/target/hexagon/idef-parser/parser-helpers.c | ||
108 | +++ b/target/hexagon/idef-parser/parser-helpers.c | ||
109 | @@ -XXX,XX +XXX,XX @@ void gen_load_cancel(Context *c, YYLTYPE *locp) | ||
110 | void gen_load(Context *c, YYLTYPE *locp, HexValue *width, | ||
111 | HexSignedness signedness, HexValue *ea, HexValue *dst) | ||
112 | { | ||
113 | - char size_suffix[4] = {0}; | ||
114 | - const char *sign_suffix; | ||
115 | + unsigned dst_bit_width; | ||
116 | + unsigned src_bit_width; | ||
117 | + | ||
118 | /* Memop width is specified in the load macro */ | ||
119 | assert_signedness(c, locp, signedness); | ||
120 | - sign_suffix = (width->imm.value > 4) | ||
121 | - ? "" | ||
122 | - : ((signedness == UNSIGNED) ? "u" : "s"); | ||
123 | + | ||
124 | /* If dst is a variable, assert that is declared and load the type info */ | ||
125 | if (dst->type == VARID) { | ||
126 | find_variable(c, locp, dst, dst); | ||
127 | } | ||
128 | |||
129 | - snprintf(size_suffix, 4, "%" PRIu64, width->imm.value * 8); | ||
130 | + src_bit_width = width->imm.value * 8; | ||
131 | + dst_bit_width = MAX(dst->bit_width, 32); | ||
132 | + | ||
133 | /* Lookup the effective address EA */ | ||
134 | find_variable(c, locp, ea, ea); | ||
135 | OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n"); | ||
136 | OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n"); | ||
137 | OUT(c, locp, "process_store(ctx, 1);\n"); | ||
138 | OUT(c, locp, "}\n"); | ||
139 | - OUT(c, locp, "tcg_gen_qemu_ld", size_suffix, sign_suffix); | ||
140 | + | ||
141 | + OUT(c, locp, "tcg_gen_qemu_ld_i", &dst_bit_width); | ||
142 | OUT(c, locp, "("); | ||
143 | - if (dst->bit_width > width->imm.value * 8) { | ||
144 | - /* | ||
145 | - * Cast to the correct TCG type if necessary, to avoid implict cast | ||
146 | - * warnings. This is needed when the width of the destination var is | ||
147 | - * larger than the size of the requested load. | ||
148 | - */ | ||
149 | - OUT(c, locp, "(TCGv) "); | ||
150 | + OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx, MO_", &src_bit_width); | ||
151 | + if (signedness == SIGNED) { | ||
152 | + OUT(c, locp, " | MO_SIGN"); | ||
153 | } | ||
154 | - OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx);\n"); | ||
155 | + OUT(c, locp, " | MO_TE);\n"); | ||
156 | } | ||
157 | |||
158 | void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea, | ||
159 | diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c | ||
160 | index XXXXXXX..XXXXXXX 100644 | ||
161 | --- a/target/hexagon/translate.c | ||
162 | +++ b/target/hexagon/translate.c | ||
163 | @@ -XXX,XX +XXX,XX @@ void process_store(DisasContext *ctx, int slot_num) | ||
164 | switch (ctx->store_width[slot_num]) { | ||
165 | case 1: | ||
166 | gen_check_store_width(ctx, slot_num); | ||
167 | - tcg_gen_qemu_st8(hex_store_val32[slot_num], | ||
168 | - hex_store_addr[slot_num], | ||
169 | - ctx->mem_idx); | ||
170 | + tcg_gen_qemu_st_tl(hex_store_val32[slot_num], | ||
171 | + hex_store_addr[slot_num], | ||
172 | + ctx->mem_idx, MO_UB); | ||
173 | break; | ||
174 | case 2: | ||
175 | gen_check_store_width(ctx, slot_num); | ||
176 | - tcg_gen_qemu_st16(hex_store_val32[slot_num], | ||
177 | - hex_store_addr[slot_num], | ||
178 | - ctx->mem_idx); | ||
179 | + tcg_gen_qemu_st_tl(hex_store_val32[slot_num], | ||
180 | + hex_store_addr[slot_num], | ||
181 | + ctx->mem_idx, MO_TEUW); | ||
182 | break; | ||
183 | case 4: | ||
184 | gen_check_store_width(ctx, slot_num); | ||
185 | - tcg_gen_qemu_st32(hex_store_val32[slot_num], | ||
186 | - hex_store_addr[slot_num], | ||
187 | - ctx->mem_idx); | ||
188 | + tcg_gen_qemu_st_tl(hex_store_val32[slot_num], | ||
189 | + hex_store_addr[slot_num], | ||
190 | + ctx->mem_idx, MO_TEUL); | ||
191 | break; | ||
192 | case 8: | ||
193 | gen_check_store_width(ctx, slot_num); | ||
194 | - tcg_gen_qemu_st64(hex_store_val64[slot_num], | ||
195 | - hex_store_addr[slot_num], | ||
196 | - ctx->mem_idx); | ||
197 | + tcg_gen_qemu_st_i64(hex_store_val64[slot_num], | ||
198 | + hex_store_addr[slot_num], | ||
199 | + ctx->mem_idx, MO_TEUQ); | ||
200 | break; | ||
201 | default: | ||
202 | { | ||
203 | @@ -XXX,XX +XXX,XX @@ static void process_dczeroa(DisasContext *ctx) | ||
204 | TCGv_i64 zero = tcg_constant_i64(0); | ||
205 | |||
206 | tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f); | ||
207 | - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); | ||
208 | + tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ); | ||
209 | tcg_gen_addi_tl(addr, addr, 8); | ||
210 | - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); | ||
211 | + tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ); | ||
212 | tcg_gen_addi_tl(addr, addr, 8); | ||
213 | - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); | ||
214 | + tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ); | ||
215 | tcg_gen_addi_tl(addr, addr, 8); | ||
216 | - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); | ||
217 | + tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ); | ||
218 | } | ||
219 | } | ||
95 | 220 | ||
96 | -- | 221 | -- |
97 | 2.25.1 | 222 | 2.34.1 |
98 | |||
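The key claim in the commit message above -- that sar cannot reduce the number of sign-bit repetitions -- is easy to check outside of QEMU. The toy program below is illustration only, not QEMU code, and it assumes the compiler implements >> on a negative signed integer as an arithmetic shift, which gcc and clang both do.

    #include <stdint.h>
    #include <stdio.h>

    /* Count leading bits that replicate the sign bit, sign bit included. */
    static int sign_reps(int64_t v)
    {
        int n = 1;
        while (n < 64 && (((v >> (63 - n)) ^ (v >> 63)) & 1) == 0) {
            n++;
        }
        return n;
    }

    int main(void)
    {
        int64_t in = (int32_t)0x80001234;              /* ext32s: 33 repetitions */
        printf("in:  %d reps\n", sign_reps(in));       /* 33 */
        printf("sar: %d reps\n", sign_reps(in >> 5));  /* 38: grew, never shrank */
        return 0;
    }

This is why fold_shift can keep s_mask unchanged for sar, and for shr whenever z_mask already shows the sign bit to be zero.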
1 | Most of these are handled by creating a fold_const2_commutative function | 1 | Convert away from the old interface with the implicit |
---|---|---|---|
2 | to handle all of the binary operators. The rest were already | 2 | MemOp argument. |
3 | handled on a case-by-case basis in the switch, and have their | ||
4 | own fold function in which to place the call. | ||
5 | 3 | ||
6 | We now have only one major switch on TCGOpcode. | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Reviewed-by: Anton Johansson <anjo@rev.ng> | ||
6 | Message-Id: <20230502135741.1158035-5-richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/m68k/translate.c | 76 ++++++++++++++--------------------------- | ||
9 | 1 file changed, 25 insertions(+), 51 deletions(-) | ||
7 | 10 | ||
8 | Introduce NO_DEST and a block comment for swap_commutative in | 11 | diff --git a/target/m68k/translate.c b/target/m68k/translate.c |
9 | order to make the handling of brcond and movcond opcodes cleaner. | ||
10 | |||
11 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
12 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | --- | ||
14 | tcg/optimize.c | 142 ++++++++++++++++++++++++------------------------- | ||
15 | 1 file changed, 70 insertions(+), 72 deletions(-) | ||
16 | |||
17 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/tcg/optimize.c | 13 | --- a/target/m68k/translate.c |
20 | +++ b/tcg/optimize.c | 14 | +++ b/target/m68k/translate.c |
21 | @@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) | 15 | @@ -XXX,XX +XXX,XX @@ static inline void gen_addr_fault(DisasContext *s) |
22 | return -1; | 16 | static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr, |
17 | int sign, int index) | ||
18 | { | ||
19 | - TCGv tmp; | ||
20 | - tmp = tcg_temp_new_i32(); | ||
21 | - switch(opsize) { | ||
22 | + TCGv tmp = tcg_temp_new_i32(); | ||
23 | + | ||
24 | + switch (opsize) { | ||
25 | case OS_BYTE: | ||
26 | - if (sign) | ||
27 | - tcg_gen_qemu_ld8s(tmp, addr, index); | ||
28 | - else | ||
29 | - tcg_gen_qemu_ld8u(tmp, addr, index); | ||
30 | - break; | ||
31 | case OS_WORD: | ||
32 | - if (sign) | ||
33 | - tcg_gen_qemu_ld16s(tmp, addr, index); | ||
34 | - else | ||
35 | - tcg_gen_qemu_ld16u(tmp, addr, index); | ||
36 | - break; | ||
37 | case OS_LONG: | ||
38 | - tcg_gen_qemu_ld32u(tmp, addr, index); | ||
39 | + tcg_gen_qemu_ld_tl(tmp, addr, index, | ||
40 | + opsize | (sign ? MO_SIGN : 0) | MO_TE); | ||
41 | break; | ||
42 | default: | ||
43 | g_assert_not_reached(); | ||
44 | @@ -XXX,XX +XXX,XX @@ static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr, | ||
45 | static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val, | ||
46 | int index) | ||
47 | { | ||
48 | - switch(opsize) { | ||
49 | + switch (opsize) { | ||
50 | case OS_BYTE: | ||
51 | - tcg_gen_qemu_st8(val, addr, index); | ||
52 | - break; | ||
53 | case OS_WORD: | ||
54 | - tcg_gen_qemu_st16(val, addr, index); | ||
55 | - break; | ||
56 | case OS_LONG: | ||
57 | - tcg_gen_qemu_st32(val, addr, index); | ||
58 | + tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE); | ||
59 | break; | ||
60 | default: | ||
61 | g_assert_not_reached(); | ||
62 | @@ -XXX,XX +XXX,XX @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, | ||
63 | tmp = tcg_temp_new(); | ||
64 | switch (opsize) { | ||
65 | case OS_BYTE: | ||
66 | - tcg_gen_qemu_ld8s(tmp, addr, index); | ||
67 | - gen_helper_exts32(cpu_env, fp, tmp); | ||
68 | - break; | ||
69 | case OS_WORD: | ||
70 | - tcg_gen_qemu_ld16s(tmp, addr, index); | ||
71 | - gen_helper_exts32(cpu_env, fp, tmp); | ||
72 | - break; | ||
73 | - case OS_LONG: | ||
74 | - tcg_gen_qemu_ld32u(tmp, addr, index); | ||
75 | + tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE); | ||
76 | gen_helper_exts32(cpu_env, fp, tmp); | ||
77 | break; | ||
78 | case OS_SINGLE: | ||
79 | - tcg_gen_qemu_ld32u(tmp, addr, index); | ||
80 | + tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL); | ||
81 | gen_helper_extf32(cpu_env, fp, tmp); | ||
82 | break; | ||
83 | case OS_DOUBLE: | ||
84 | - tcg_gen_qemu_ld64(t64, addr, index); | ||
85 | + tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ); | ||
86 | gen_helper_extf64(cpu_env, fp, t64); | ||
87 | break; | ||
88 | case OS_EXTENDED: | ||
89 | @@ -XXX,XX +XXX,XX @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, | ||
90 | gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); | ||
91 | break; | ||
92 | } | ||
93 | - tcg_gen_qemu_ld32u(tmp, addr, index); | ||
94 | + tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL); | ||
95 | tcg_gen_shri_i32(tmp, tmp, 16); | ||
96 | tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper)); | ||
97 | tcg_gen_addi_i32(tmp, addr, 4); | ||
98 | - tcg_gen_qemu_ld64(t64, tmp, index); | ||
99 | + tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ); | ||
100 | tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower)); | ||
101 | break; | ||
102 | case OS_PACKED: | ||
103 | @@ -XXX,XX +XXX,XX @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, | ||
104 | tmp = tcg_temp_new(); | ||
105 | switch (opsize) { | ||
106 | case OS_BYTE: | ||
107 | - gen_helper_reds32(tmp, cpu_env, fp); | ||
108 | - tcg_gen_qemu_st8(tmp, addr, index); | ||
109 | - break; | ||
110 | case OS_WORD: | ||
111 | - gen_helper_reds32(tmp, cpu_env, fp); | ||
112 | - tcg_gen_qemu_st16(tmp, addr, index); | ||
113 | - break; | ||
114 | case OS_LONG: | ||
115 | gen_helper_reds32(tmp, cpu_env, fp); | ||
116 | - tcg_gen_qemu_st32(tmp, addr, index); | ||
117 | + tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE); | ||
118 | break; | ||
119 | case OS_SINGLE: | ||
120 | gen_helper_redf32(tmp, cpu_env, fp); | ||
121 | - tcg_gen_qemu_st32(tmp, addr, index); | ||
122 | + tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL); | ||
123 | break; | ||
124 | case OS_DOUBLE: | ||
125 | gen_helper_redf64(t64, cpu_env, fp); | ||
126 | - tcg_gen_qemu_st64(t64, addr, index); | ||
127 | + tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ); | ||
128 | break; | ||
129 | case OS_EXTENDED: | ||
130 | if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { | ||
131 | @@ -XXX,XX +XXX,XX @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, | ||
132 | } | ||
133 | tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper)); | ||
134 | tcg_gen_shli_i32(tmp, tmp, 16); | ||
135 | - tcg_gen_qemu_st32(tmp, addr, index); | ||
136 | + tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL); | ||
137 | tcg_gen_addi_i32(tmp, addr, 4); | ||
138 | tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower)); | ||
139 | - tcg_gen_qemu_st64(t64, tmp, index); | ||
140 | + tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ); | ||
141 | break; | ||
142 | case OS_PACKED: | ||
143 | /* | ||
144 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(movep) | ||
145 | if (insn & 0x80) { | ||
146 | for ( ; i > 0 ; i--) { | ||
147 | tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8); | ||
148 | - tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s)); | ||
149 | + tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB); | ||
150 | if (i > 1) { | ||
151 | tcg_gen_addi_i32(abuf, abuf, 2); | ||
152 | } | ||
153 | } | ||
154 | } else { | ||
155 | for ( ; i > 0 ; i--) { | ||
156 | - tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s)); | ||
157 | + tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB); | ||
158 | tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8); | ||
159 | if (i > 1) { | ||
160 | tcg_gen_addi_i32(abuf, abuf, 2); | ||
161 | @@ -XXX,XX +XXX,XX @@ static void m68k_copy_line(TCGv dst, TCGv src, int index) | ||
162 | t1 = tcg_temp_new_i64(); | ||
163 | |||
164 | tcg_gen_andi_i32(addr, src, ~15); | ||
165 | - tcg_gen_qemu_ld64(t0, addr, index); | ||
166 | + tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ); | ||
167 | tcg_gen_addi_i32(addr, addr, 8); | ||
168 | - tcg_gen_qemu_ld64(t1, addr, index); | ||
169 | + tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ); | ||
170 | |||
171 | tcg_gen_andi_i32(addr, dst, ~15); | ||
172 | - tcg_gen_qemu_st64(t0, addr, index); | ||
173 | + tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ); | ||
174 | tcg_gen_addi_i32(addr, addr, 8); | ||
175 | - tcg_gen_qemu_st64(t1, addr, index); | ||
176 | + tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ); | ||
23 | } | 177 | } |
24 | 178 | ||
25 | +/** | 179 | DISAS_INSN(move16_reg) |
26 | + * swap_commutative: | 180 | @@ -XXX,XX +XXX,XX @@ static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg) |
27 | + * @dest: TCGArg of the destination argument, or NO_DEST. | 181 | |
28 | + * @p1: first paired argument | 182 | tmp = tcg_temp_new(); |
29 | + * @p2: second paired argument | 183 | gen_load_fcr(s, tmp, reg); |
30 | + * | 184 | - tcg_gen_qemu_st32(tmp, addr, index); |
31 | + * If *@p1 is a constant and *@p2 is not, swap. | 185 | + tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL); |
32 | + * If *@p2 matches @dest, swap. | ||
33 | + * Return true if a swap was performed. | ||
34 | + */ | ||
35 | + | ||
36 | +#define NO_DEST temp_arg(NULL) | ||
37 | + | ||
38 | static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) | ||
39 | { | ||
40 | TCGArg a1 = *p1, a2 = *p2; | ||
41 | @@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op) | ||
42 | return false; | ||
43 | } | 186 | } |
44 | 187 | ||
45 | +static bool fold_const2_commutative(OptContext *ctx, TCGOp *op) | 188 | static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg) |
46 | +{ | 189 | @@ -XXX,XX +XXX,XX @@ static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg) |
47 | + swap_commutative(op->args[0], &op->args[1], &op->args[2]); | 190 | TCGv tmp; |
48 | + return fold_const2(ctx, op); | 191 | |
49 | +} | 192 | tmp = tcg_temp_new(); |
50 | + | 193 | - tcg_gen_qemu_ld32u(tmp, addr, index); |
51 | static bool fold_masks(OptContext *ctx, TCGOp *op) | 194 | + tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL); |
52 | { | 195 | gen_store_fcr(s, tmp, reg); |
53 | uint64_t a_mask = ctx->a_mask; | ||
54 | @@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op) | ||
55 | |||
56 | static bool fold_add(OptContext *ctx, TCGOp *op) | ||
57 | { | ||
58 | - if (fold_const2(ctx, op) || | ||
59 | + if (fold_const2_commutative(ctx, op) || | ||
60 | fold_xi_to_x(ctx, op, 0)) { | ||
61 | return true; | ||
62 | } | ||
63 | @@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) | ||
64 | |||
65 | static bool fold_add2(OptContext *ctx, TCGOp *op) | ||
66 | { | ||
67 | + /* Note that the high and low parts may be independently swapped. */ | ||
68 | + swap_commutative(op->args[0], &op->args[2], &op->args[4]); | ||
69 | + swap_commutative(op->args[1], &op->args[3], &op->args[5]); | ||
70 | + | ||
71 | return fold_addsub2(ctx, op, true); | ||
72 | } | 196 | } |
73 | 197 | ||
74 | @@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op) | ||
75 | { | ||
76 | uint64_t z1, z2; | ||
77 | |||
78 | - if (fold_const2(ctx, op) || | ||
79 | + if (fold_const2_commutative(ctx, op) || | ||
80 | fold_xi_to_i(ctx, op, 0) || | ||
81 | fold_xi_to_x(ctx, op, -1) || | ||
82 | fold_xx_to_x(ctx, op)) { | ||
83 | @@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op) | ||
84 | static bool fold_brcond(OptContext *ctx, TCGOp *op) | ||
85 | { | ||
86 | TCGCond cond = op->args[2]; | ||
87 | - int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond); | ||
88 | + int i; | ||
89 | |||
90 | + if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) { | ||
91 | + op->args[2] = cond = tcg_swap_cond(cond); | ||
92 | + } | ||
93 | + | ||
94 | + i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond); | ||
95 | if (i == 0) { | ||
96 | tcg_op_remove(ctx->tcg, op); | ||
97 | return true; | ||
98 | @@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op) | ||
99 | static bool fold_brcond2(OptContext *ctx, TCGOp *op) | ||
100 | { | ||
101 | TCGCond cond = op->args[4]; | ||
102 | - int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond); | ||
103 | TCGArg label = op->args[5]; | ||
104 | - int inv = 0; | ||
105 | + int i, inv = 0; | ||
106 | |||
107 | + if (swap_commutative2(&op->args[0], &op->args[2])) { | ||
108 | + op->args[4] = cond = tcg_swap_cond(cond); | ||
109 | + } | ||
110 | + | ||
111 | + i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond); | ||
112 | if (i >= 0) { | ||
113 | goto do_brcond_const; | ||
114 | } | ||
115 | @@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op) | ||
116 | |||
117 | static bool fold_eqv(OptContext *ctx, TCGOp *op) | ||
118 | { | ||
119 | - if (fold_const2(ctx, op) || | ||
120 | + if (fold_const2_commutative(ctx, op) || | ||
121 | fold_xi_to_x(ctx, op, -1) || | ||
122 | fold_xi_to_not(ctx, op, 0)) { | ||
123 | return true; | ||
124 | @@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op) | ||
125 | static bool fold_movcond(OptContext *ctx, TCGOp *op) | ||
126 | { | ||
127 | TCGCond cond = op->args[5]; | ||
128 | - int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); | ||
129 | + int i; | ||
130 | |||
131 | + if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { | ||
132 | + op->args[5] = cond = tcg_swap_cond(cond); | ||
133 | + } | ||
134 | + /* | ||
135 | + * Canonicalize the "false" input reg to match the destination reg so | ||
136 | + * that the tcg backend can implement a "move if true" operation. | ||
137 | + */ | ||
138 | + if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { | ||
139 | + op->args[5] = cond = tcg_invert_cond(cond); | ||
140 | + } | ||
141 | + | ||
142 | + i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); | ||
143 | if (i >= 0) { | ||
144 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); | ||
145 | } | ||
146 | @@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op) | ||
147 | |||
148 | static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | ||
149 | { | ||
150 | - if (fold_const2(ctx, op) || | ||
151 | + if (fold_const2_commutative(ctx, op) || | ||
152 | fold_xi_to_i(ctx, op, 0)) { | ||
153 | return true; | ||
154 | } | ||
155 | @@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | ||
156 | |||
157 | static bool fold_multiply2(OptContext *ctx, TCGOp *op) | ||
158 | { | ||
159 | + swap_commutative(op->args[0], &op->args[2], &op->args[3]); | ||
160 | + | ||
161 | if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { | ||
162 | uint64_t a = arg_info(op->args[2])->val; | ||
163 | uint64_t b = arg_info(op->args[3])->val; | ||
164 | @@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op) | ||
165 | |||
166 | static bool fold_nand(OptContext *ctx, TCGOp *op) | ||
167 | { | ||
168 | - if (fold_const2(ctx, op) || | ||
169 | + if (fold_const2_commutative(ctx, op) || | ||
170 | fold_xi_to_not(ctx, op, -1)) { | ||
171 | return true; | ||
172 | } | ||
173 | @@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op) | ||
174 | |||
175 | static bool fold_nor(OptContext *ctx, TCGOp *op) | ||
176 | { | ||
177 | - if (fold_const2(ctx, op) || | ||
178 | + if (fold_const2_commutative(ctx, op) || | ||
179 | fold_xi_to_not(ctx, op, 0)) { | ||
180 | return true; | ||
181 | } | ||
182 | @@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op) | ||
183 | |||
184 | static bool fold_or(OptContext *ctx, TCGOp *op) | ||
185 | { | ||
186 | - if (fold_const2(ctx, op) || | ||
187 | + if (fold_const2_commutative(ctx, op) || | ||
188 | fold_xi_to_x(ctx, op, 0) || | ||
189 | fold_xx_to_x(ctx, op)) { | ||
190 | return true; | ||
191 | @@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op) | ||
192 | static bool fold_setcond(OptContext *ctx, TCGOp *op) | ||
193 | { | ||
194 | TCGCond cond = op->args[3]; | ||
195 | - int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); | ||
196 | + int i; | ||
197 | |||
198 | + if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { | ||
199 | + op->args[3] = cond = tcg_swap_cond(cond); | ||
200 | + } | ||
201 | + | ||
202 | + i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); | ||
203 | if (i >= 0) { | ||
204 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | ||
205 | } | ||
206 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op) | ||
207 | static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
208 | { | ||
209 | TCGCond cond = op->args[5]; | ||
210 | - int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond); | ||
211 | - int inv = 0; | ||
212 | + int i, inv = 0; | ||
213 | |||
214 | + if (swap_commutative2(&op->args[1], &op->args[3])) { | ||
215 | + op->args[5] = cond = tcg_swap_cond(cond); | ||
216 | + } | ||
217 | + | ||
218 | + i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond); | ||
219 | if (i >= 0) { | ||
220 | goto do_setcond_const; | ||
221 | } | ||
222 | @@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) | ||
223 | |||
224 | static bool fold_xor(OptContext *ctx, TCGOp *op) | ||
225 | { | ||
226 | - if (fold_const2(ctx, op) || | ||
227 | + if (fold_const2_commutative(ctx, op) || | ||
228 | fold_xx_to_i(ctx, op, 0) || | ||
229 | fold_xi_to_x(ctx, op, 0) || | ||
230 | fold_xi_to_not(ctx, op, -1)) { | ||
231 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
232 | ctx.type = TCG_TYPE_I32; | ||
233 | } | ||
234 | |||
235 | - /* For commutative operations make constant second argument */ | ||
236 | - switch (opc) { | ||
237 | - CASE_OP_32_64_VEC(add): | ||
238 | - CASE_OP_32_64_VEC(mul): | ||
239 | - CASE_OP_32_64_VEC(and): | ||
240 | - CASE_OP_32_64_VEC(or): | ||
241 | - CASE_OP_32_64_VEC(xor): | ||
242 | - CASE_OP_32_64(eqv): | ||
243 | - CASE_OP_32_64(nand): | ||
244 | - CASE_OP_32_64(nor): | ||
245 | - CASE_OP_32_64(muluh): | ||
246 | - CASE_OP_32_64(mulsh): | ||
247 | - swap_commutative(op->args[0], &op->args[1], &op->args[2]); | ||
248 | - break; | ||
249 | - CASE_OP_32_64(brcond): | ||
250 | - if (swap_commutative(-1, &op->args[0], &op->args[1])) { | ||
251 | - op->args[2] = tcg_swap_cond(op->args[2]); | ||
252 | - } | ||
253 | - break; | ||
254 | - CASE_OP_32_64(setcond): | ||
255 | - if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { | ||
256 | - op->args[3] = tcg_swap_cond(op->args[3]); | ||
257 | - } | ||
258 | - break; | ||
259 | - CASE_OP_32_64(movcond): | ||
260 | - if (swap_commutative(-1, &op->args[1], &op->args[2])) { | ||
261 | - op->args[5] = tcg_swap_cond(op->args[5]); | ||
262 | - } | ||
263 | - /* For movcond, we canonicalize the "false" input reg to match | ||
264 | - the destination reg so that the tcg backend can implement | ||
265 | - a "move if true" operation. */ | ||
266 | - if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { | ||
267 | - op->args[5] = tcg_invert_cond(op->args[5]); | ||
268 | - } | ||
269 | - break; | ||
270 | - CASE_OP_32_64(add2): | ||
271 | - swap_commutative(op->args[0], &op->args[2], &op->args[4]); | ||
272 | - swap_commutative(op->args[1], &op->args[3], &op->args[5]); | ||
273 | - break; | ||
274 | - CASE_OP_32_64(mulu2): | ||
275 | - CASE_OP_32_64(muls2): | ||
276 | - swap_commutative(op->args[0], &op->args[2], &op->args[3]); | ||
277 | - break; | ||
278 | - case INDEX_op_brcond2_i32: | ||
279 | - if (swap_commutative2(&op->args[0], &op->args[2])) { | ||
280 | - op->args[4] = tcg_swap_cond(op->args[4]); | ||
281 | - } | ||
282 | - break; | ||
283 | - case INDEX_op_setcond2_i32: | ||
284 | - if (swap_commutative2(&op->args[1], &op->args[3])) { | ||
285 | - op->args[5] = tcg_swap_cond(op->args[5]); | ||
286 | - } | ||
287 | - break; | ||
288 | - default: | ||
289 | - break; | ||
290 | - } | ||
291 | - | ||
292 | /* Assume all bits affected, and no bits known zero. */ | ||
293 | ctx.a_mask = -1; | ||
294 | ctx.z_mask = -1; | ||
295 | -- | 198 | -- |
296 | 2.25.1 | 199 | 2.34.1 |
297 | |||
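The canonicalization that fold_const2_commutative relies on is small enough to sketch standalone. The snippet below mirrors only the shape of swap_commutative with stubbed types (the real helper works on TCGArg and its TempOptInfo): put a constant in the second operand slot so every later fold tests one side only. For comparisons the caller must also swap the condition, which is why fold_brcond, fold_setcond and friends rewrite the condition argument with tcg_swap_cond when the helper reports a swap.

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        bool is_const;
        uint64_t val;
    } Arg;

    /* Mirror of swap_commutative's core rule: constants go second. */
    static bool swap_commutative_sketch(Arg *p1, Arg *p2)
    {
        if (p1->is_const && !p2->is_const) {
            Arg tmp = *p1;
            *p1 = *p2;
            *p2 = tmp;
            return true;   /* caller may also need to swap a condition */
        }
        return false;
    }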
1 | Recognize the constant function for remainder. | 1 | Convert away from the old interface with the implicit |
---|---|---|---|
2 | MemOp argument. | ||
2 | 3 | ||
3 | Suggested-by: Luis Pires <luis.pires@eldorado.org.br> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Reviewed-by: Anton Johansson <anjo@rev.ng> | ||
6 | Message-Id: <20230502135741.1158035-6-richard.henderson@linaro.org> | ||
6 | --- | 7 | --- |
7 | tcg/optimize.c | 6 +++++- | 8 | target/mips/tcg/translate.c | 8 ++++---- |
8 | 1 file changed, 5 insertions(+), 1 deletion(-) | 9 | target/mips/tcg/nanomips_translate.c.inc | 2 +- |
10 | 2 files changed, 5 insertions(+), 5 deletions(-) | ||
9 | 11 | ||
10 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 12 | diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c |
11 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/tcg/optimize.c | 14 | --- a/target/mips/tcg/translate.c |
13 | +++ b/tcg/optimize.c | 15 | +++ b/target/mips/tcg/translate.c |
14 | @@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op) | 16 | @@ -XXX,XX +XXX,XX @@ FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd)) |
15 | 17 | ||
16 | static bool fold_remainder(OptContext *ctx, TCGOp *op) | 18 | /* load/store instructions. */ |
17 | { | 19 | #ifdef CONFIG_USER_ONLY |
18 | - return fold_const2(ctx, op); | 20 | -#define OP_LD_ATOMIC(insn, fname) \ |
19 | + if (fold_const2(ctx, op) || | 21 | +#define OP_LD_ATOMIC(insn, memop) \ |
20 | + fold_xx_to_i(ctx, op, 0)) { | 22 | static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ |
21 | + return true; | 23 | DisasContext *ctx) \ |
22 | + } | 24 | { \ |
23 | + return false; | 25 | TCGv t0 = tcg_temp_new(); \ |
26 | tcg_gen_mov_tl(t0, arg1); \ | ||
27 | - tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \ | ||
28 | + tcg_gen_qemu_ld_tl(ret, arg1, ctx->mem_idx, memop); \ | ||
29 | tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \ | ||
30 | tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \ | ||
24 | } | 31 | } |
25 | 32 | @@ -XXX,XX +XXX,XX @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ | |
26 | static bool fold_setcond(OptContext *ctx, TCGOp *op) | 33 | gen_helper_##insn(ret, cpu_env, arg1, tcg_constant_i32(mem_idx)); \ |
34 | } | ||
35 | #endif | ||
36 | -OP_LD_ATOMIC(ll, ld32s); | ||
37 | +OP_LD_ATOMIC(ll, MO_TESL); | ||
38 | #if defined(TARGET_MIPS64) | ||
39 | -OP_LD_ATOMIC(lld, ld64); | ||
40 | +OP_LD_ATOMIC(lld, MO_TEUQ); | ||
41 | #endif | ||
42 | #undef OP_LD_ATOMIC | ||
43 | |||
44 | diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc | ||
45 | index XXXXXXX..XXXXXXX 100644 | ||
46 | --- a/target/mips/tcg/nanomips_translate.c.inc | ||
47 | +++ b/target/mips/tcg/nanomips_translate.c.inc | ||
48 | @@ -XXX,XX +XXX,XX @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset, | ||
49 | TCGv tmp2 = tcg_temp_new(); | ||
50 | |||
51 | gen_base_offset_addr(ctx, taddr, base, offset); | ||
52 | - tcg_gen_qemu_ld64(tval, taddr, ctx->mem_idx); | ||
53 | + tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ); | ||
54 | if (cpu_is_bigendian(ctx)) { | ||
55 | tcg_gen_extr_i64_tl(tmp2, tmp1, tval); | ||
56 | } else { | ||
27 | -- | 57 | -- |
28 | 2.25.1 | 58 | 2.34.1 |
29 | |||
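"Constant function" here means rem(x, x) does not depend on x: for any nonzero x, x % x == 0, so fold_xx_to_i can rewrite the op as movi 0. TCG does not define the result of division by zero (frontends guard it), so picking 0 for the x == 0 case is equally valid. A trivial standalone check, illustration only:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (int64_t x = 1; x < 1000; x++) {
            assert(x % x == 0);        /* rem(x, x) == 0 for x != 0 */
            assert((-x) % (-x) == 0);
        }
        return 0;
    }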
1 | Pull the "op r, 0, b => movi r, 0" optimization into a function, | 1 | Convert away from the old interface with the implicit |
---|---|---|---|
2 | and use it in fold_shift. | 2 | MemOp argument. |
3 | 3 | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
6 | Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com> | ||
7 | Message-Id: <20230502135741.1158035-7-richard.henderson@linaro.org> | ||
7 | --- | 8 | --- |
8 | tcg/optimize.c | 28 ++++++++++------------------ | 9 | target/s390x/tcg/translate.c | 152 ++++++++++++++++------------------- |
9 | 1 file changed, 10 insertions(+), 18 deletions(-) | 10 | 1 file changed, 71 insertions(+), 81 deletions(-) |
10 | 11 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 12 | diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c |
12 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 14 | --- a/target/s390x/tcg/translate.c |
14 | +++ b/tcg/optimize.c | 15 | +++ b/target/s390x/tcg/translate.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx) | 16 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o) |
16 | return false; | 17 | { |
17 | } | 18 | int l = get_field(s, l1); |
18 | 19 | TCGv_i32 vl; | |
19 | +/* If the binary operation has first argument @i, fold to @i. */ | 20 | + MemOp mop; |
20 | +static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 21 | |
21 | +{ | 22 | switch (l + 1) { |
22 | + if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { | 23 | case 1: |
23 | + return tcg_opt_gen_movi(ctx, op, op->args[0], i); | 24 | - tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s)); |
24 | + } | 25 | - tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s)); |
25 | + return false; | 26 | - break; |
26 | +} | 27 | case 2: |
28 | - tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s)); | ||
29 | - tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s)); | ||
30 | - break; | ||
31 | case 4: | ||
32 | - tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s)); | ||
33 | - tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s)); | ||
34 | - break; | ||
35 | case 8: | ||
36 | - tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s)); | ||
37 | - tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s)); | ||
38 | - break; | ||
39 | + mop = ctz32(l + 1) | MO_TE; | ||
40 | + tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop); | ||
41 | + tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop); | ||
42 | + gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst); | ||
43 | + return DISAS_NEXT; | ||
44 | default: | ||
45 | vl = tcg_constant_i32(l); | ||
46 | gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2); | ||
47 | set_cc_static(s); | ||
48 | return DISAS_NEXT; | ||
49 | } | ||
50 | - gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst); | ||
51 | - return DISAS_NEXT; | ||
52 | } | ||
53 | |||
54 | static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) | ||
55 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) | ||
56 | TCGv_i32 t2 = tcg_temp_new_i32(); | ||
57 | tcg_gen_extrl_i64_i32(t2, o->in1); | ||
58 | gen_helper_cvd(t1, t2); | ||
59 | - tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s)); | ||
60 | + tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ); | ||
61 | return DISAS_NEXT; | ||
62 | } | ||
63 | |||
64 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) | ||
65 | switch (m3) { | ||
66 | case 0xf: | ||
67 | /* Effectively a 32-bit load. */ | ||
68 | - tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s)); | ||
69 | + tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL); | ||
70 | len = 32; | ||
71 | goto one_insert; | ||
72 | |||
73 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) | ||
74 | case 0x6: | ||
75 | case 0x3: | ||
76 | /* Effectively a 16-bit load. */ | ||
77 | - tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s)); | ||
78 | + tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW); | ||
79 | len = 16; | ||
80 | goto one_insert; | ||
81 | |||
82 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) | ||
83 | case 0x2: | ||
84 | case 0x1: | ||
85 | /* Effectively an 8-bit load. */ | ||
86 | - tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); | ||
87 | + tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB); | ||
88 | len = 8; | ||
89 | goto one_insert; | ||
90 | |||
91 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) | ||
92 | ccm = 0; | ||
93 | while (m3) { | ||
94 | if (m3 & 0x8) { | ||
95 | - tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); | ||
96 | + tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB); | ||
97 | tcg_gen_addi_i64(o->in2, o->in2, 1); | ||
98 | tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8); | ||
99 | ccm |= 0xffull << pos; | ||
100 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_llgt(DisasContext *s, DisasOps *o) | ||
101 | |||
102 | static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o) | ||
103 | { | ||
104 | - tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s)); | ||
105 | + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB); | ||
106 | return DISAS_NEXT; | ||
107 | } | ||
108 | |||
109 | static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o) | ||
110 | { | ||
111 | - tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s)); | ||
112 | + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB); | ||
113 | return DISAS_NEXT; | ||
114 | } | ||
115 | |||
116 | static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o) | ||
117 | { | ||
118 | - tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s)); | ||
119 | + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW); | ||
120 | return DISAS_NEXT; | ||
121 | } | ||
122 | |||
123 | static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o) | ||
124 | { | ||
125 | - tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s)); | ||
126 | + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW); | ||
127 | return DISAS_NEXT; | ||
128 | } | ||
129 | |||
130 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lat(DisasContext *s, DisasOps *o) | ||
131 | static DisasJumpType op_lgat(DisasContext *s, DisasOps *o) | ||
132 | { | ||
133 | TCGLabel *lab = gen_new_label(); | ||
134 | - tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s)); | ||
135 | + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ); | ||
136 | /* The value is stored even in case of trap. */ | ||
137 | tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab); | ||
138 | gen_trap(s); | ||
139 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o) | ||
140 | static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o) | ||
141 | { | ||
142 | TCGLabel *lab = gen_new_label(); | ||
143 | - tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s)); | ||
27 | + | 144 | + |
28 | /* If the binary operation has first argument @i, fold to NOT. */ | 145 | + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL); |
29 | static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i) | 146 | /* The value is stored even in case of trap. */ |
30 | { | 147 | tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab); |
31 | @@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op) | 148 | gen_trap(s); |
32 | static bool fold_shift(OptContext *ctx, TCGOp *op) | 149 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) |
33 | { | 150 | tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), |
34 | if (fold_const2(ctx, op) || | 151 | MO_TEUQ | MO_ALIGN_8); |
35 | + fold_ix_to_i(ctx, op, 0) || | 152 | tcg_gen_addi_i64(o->in2, o->in2, 8); |
36 | fold_xi_to_x(ctx, op, 0)) { | 153 | - tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s)); |
37 | return true; | 154 | + tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ); |
38 | } | 155 | gen_helper_load_psw(cpu_env, t1, t2); |
39 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 156 | return DISAS_NORETURN; |
157 | } | ||
158 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) | ||
159 | /* Only one register to read. */ | ||
160 | t1 = tcg_temp_new_i64(); | ||
161 | if (unlikely(r1 == r3)) { | ||
162 | - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
163 | + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); | ||
164 | store_reg32_i64(r1, t1); | ||
165 | return DISAS_NEXT; | ||
166 | } | ||
167 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) | ||
168 | /* First load the values of the first and last registers to trigger | ||
169 | possible page faults. */ | ||
170 | t2 = tcg_temp_new_i64(); | ||
171 | - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
172 | + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); | ||
173 | tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); | ||
174 | - tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); | ||
175 | + tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL); | ||
176 | store_reg32_i64(r1, t1); | ||
177 | store_reg32_i64(r3, t2); | ||
178 | |||
179 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) | ||
180 | while (r1 != r3) { | ||
181 | r1 = (r1 + 1) & 15; | ||
182 | tcg_gen_add_i64(o->in2, o->in2, t2); | ||
183 | - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
184 | + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); | ||
185 | store_reg32_i64(r1, t1); | ||
186 | } | ||
187 | return DISAS_NEXT; | ||
188 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) | ||
189 | /* Only one register to read. */ | ||
190 | t1 = tcg_temp_new_i64(); | ||
191 | if (unlikely(r1 == r3)) { | ||
192 | - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
193 | + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); | ||
194 | store_reg32h_i64(r1, t1); | ||
195 | return DISAS_NEXT; | ||
196 | } | ||
197 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) | ||
198 | /* First load the values of the first and last registers to trigger | ||
199 | possible page faults. */ | ||
200 | t2 = tcg_temp_new_i64(); | ||
201 | - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
202 | + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); | ||
203 | tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); | ||
204 | - tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); | ||
205 | + tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL); | ||
206 | store_reg32h_i64(r1, t1); | ||
207 | store_reg32h_i64(r3, t2); | ||
208 | |||
209 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) | ||
210 | while (r1 != r3) { | ||
211 | r1 = (r1 + 1) & 15; | ||
212 | tcg_gen_add_i64(o->in2, o->in2, t2); | ||
213 | - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
214 | + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); | ||
215 | store_reg32h_i64(r1, t1); | ||
216 | } | ||
217 | return DISAS_NEXT; | ||
218 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) | ||
219 | |||
220 | /* Only one register to read. */ | ||
221 | if (unlikely(r1 == r3)) { | ||
222 | - tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); | ||
223 | + tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ); | ||
224 | return DISAS_NEXT; | ||
225 | } | ||
226 | |||
227 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) | ||
228 | possible page faults. */ | ||
229 | t1 = tcg_temp_new_i64(); | ||
230 | t2 = tcg_temp_new_i64(); | ||
231 | - tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s)); | ||
232 | + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ); | ||
233 | tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15)); | ||
234 | - tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s)); | ||
235 | + tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ); | ||
236 | tcg_gen_mov_i64(regs[r1], t1); | ||
237 | |||
238 | /* Only two registers to read. */ | ||
239 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) | ||
240 | while (r1 != r3) { | ||
241 | r1 = (r1 + 1) & 15; | ||
242 | tcg_gen_add_i64(o->in2, o->in2, t1); | ||
243 | - tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); | ||
244 | + tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ); | ||
245 | } | ||
246 | return DISAS_NEXT; | ||
247 | } | ||
248 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o) | ||
249 | a = get_address(s, 0, get_field(s, b2), get_field(s, d2)); | ||
250 | switch (s->insn->data) { | ||
251 | case 1: /* STOCG */ | ||
252 | - tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s)); | ||
253 | + tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ); | ||
254 | break; | ||
255 | case 0: /* STOC */ | ||
256 | - tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s)); | ||
257 | + tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL); | ||
258 | break; | ||
259 | case 2: /* STOCFH */ | ||
260 | h = tcg_temp_new_i64(); | ||
261 | tcg_gen_shri_i64(h, regs[r1], 32); | ||
262 | - tcg_gen_qemu_st32(h, a, get_mem_index(s)); | ||
263 | + tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL); | ||
264 | break; | ||
265 | default: | ||
266 | g_assert_not_reached(); | ||
267 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) | ||
268 | gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0); | ||
269 | |||
270 | /* load the third operand into r3 before modifying anything */ | ||
271 | - tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s)); | ||
272 | + tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ); | ||
273 | |||
274 | /* subtract CPU timer from first operand and store in GR0 */ | ||
275 | gen_helper_stpt(tmp, cpu_env); | ||
276 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) | ||
277 | tcg_gen_shri_i64(c1, c1, 8); | ||
278 | tcg_gen_ori_i64(c2, c2, 0x10000); | ||
279 | tcg_gen_or_i64(c2, c2, todpr); | ||
280 | - tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s)); | ||
281 | + tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ); | ||
282 | tcg_gen_addi_i64(o->in2, o->in2, 8); | ||
283 | - tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s)); | ||
284 | + tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ); | ||
285 | /* ??? We don't implement clock states. */ | ||
286 | gen_op_movi_cc(s, 0); | ||
287 | return DISAS_NEXT; | ||
288 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) | ||
289 | restart, we'll have the wrong SYSTEM MASK in place. */ | ||
290 | t = tcg_temp_new_i64(); | ||
291 | tcg_gen_shri_i64(t, psw_mask, 56); | ||
292 | - tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s)); | ||
293 | + tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB); | ||
294 | |||
295 | if (s->fields.op == 0xac) { | ||
296 | tcg_gen_andi_i64(psw_mask, psw_mask, | ||
297 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stfle(DisasContext *s, DisasOps *o) | ||
298 | |||
299 | static DisasJumpType op_st8(DisasContext *s, DisasOps *o) | ||
300 | { | ||
301 | - tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s)); | ||
302 | + tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB); | ||
303 | return DISAS_NEXT; | ||
304 | } | ||
305 | |||
306 | static DisasJumpType op_st16(DisasContext *s, DisasOps *o) | ||
307 | { | ||
308 | - tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s)); | ||
309 | + tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW); | ||
310 | return DISAS_NEXT; | ||
311 | } | ||
312 | |||
313 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) | ||
314 | case 0xf: | ||
315 | /* Effectively a 32-bit store. */ | ||
316 | tcg_gen_shri_i64(tmp, o->in1, pos); | ||
317 | - tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s)); | ||
318 | + tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL); | ||
319 | break; | ||
320 | |||
321 | case 0xc: | ||
322 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) | ||
323 | case 0x3: | ||
324 | /* Effectively a 16-bit store. */ | ||
325 | tcg_gen_shri_i64(tmp, o->in1, pos); | ||
326 | - tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s)); | ||
327 | + tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW); | ||
328 | break; | ||
329 | |||
330 | case 0x8: | ||
331 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) | ||
332 | case 0x1: | ||
333 | /* Effectively an 8-bit store. */ | ||
334 | tcg_gen_shri_i64(tmp, o->in1, pos); | ||
335 | - tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s)); | ||
336 | + tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB); | ||
337 | break; | ||
338 | |||
339 | default: | ||
340 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) | ||
341 | while (m3) { | ||
342 | if (m3 & 0x8) { | ||
343 | tcg_gen_shri_i64(tmp, o->in1, pos); | ||
344 | - tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s)); | ||
345 | + tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB); | ||
346 | tcg_gen_addi_i64(o->in2, o->in2, 1); | ||
347 | } | ||
348 | m3 = (m3 << 1) & 0xf; | ||
349 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o) | ||
350 | TCGv_i64 tsize = tcg_constant_i64(size); | ||
351 | |||
352 | while (1) { | ||
353 | - if (size == 8) { | ||
354 | - tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s)); | ||
355 | - } else { | ||
356 | - tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s)); | ||
357 | - } | ||
358 | + tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s), | ||
359 | + size == 8 ? MO_TEUQ : MO_TEUL); | ||
360 | if (r1 == r3) { | ||
40 | break; | 361 | break; |
41 | } | 362 | } |
42 | 363 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) | |
43 | - /* Simplify expressions for "shift/rot r, 0, a => movi r, 0", | 364 | |
44 | - and "sub r, 0, a => neg r, a" case. */ | 365 | while (1) { |
45 | - switch (opc) { | 366 | tcg_gen_shl_i64(t, regs[r1], t32); |
46 | - CASE_OP_32_64(shl): | 367 | - tcg_gen_qemu_st32(t, o->in2, get_mem_index(s)); |
47 | - CASE_OP_32_64(shr): | 368 | + tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL); |
48 | - CASE_OP_32_64(sar): | 369 | if (r1 == r3) { |
49 | - CASE_OP_32_64(rotl): | 370 | break; |
50 | - CASE_OP_32_64(rotr): | 371 | } |
51 | - if (arg_is_const(op->args[1]) | 372 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o) |
52 | - && arg_info(op->args[1])->val == 0) { | 373 | |
53 | - tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | 374 | l++; |
54 | - continue; | 375 | while (l >= 8) { |
55 | - } | 376 | - tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s)); |
56 | - break; | 377 | + tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ); |
57 | - default: | 378 | l -= 8; |
58 | - break; | 379 | if (l > 0) { |
59 | - } | 380 | tcg_gen_addi_i64(o->addr1, o->addr1, 8); |
60 | - | 381 | } |
61 | /* Simplify using known-zero bits. Currently only ops with a single | 382 | } |
62 | output argument is supported. */ | 383 | if (l >= 4) { |
63 | z_mask = -1; | 384 | - tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s)); |
385 | + tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL); | ||
386 | l -= 4; | ||
387 | if (l > 0) { | ||
388 | tcg_gen_addi_i64(o->addr1, o->addr1, 4); | ||
389 | } | ||
390 | } | ||
391 | if (l >= 2) { | ||
392 | - tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s)); | ||
393 | + tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW); | ||
394 | l -= 2; | ||
395 | if (l > 0) { | ||
396 | tcg_gen_addi_i64(o->addr1, o->addr1, 2); | ||
397 | } | ||
398 | } | ||
399 | if (l) { | ||
400 | - tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s)); | ||
401 | + tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB); | ||
402 | } | ||
403 | gen_op_movi_cc(s, 0); | ||
404 | return DISAS_NEXT; | ||
405 | @@ -XXX,XX +XXX,XX @@ static void wout_cond_e1e2(DisasContext *s, DisasOps *o) | ||
406 | |||
407 | static void wout_m1_8(DisasContext *s, DisasOps *o) | ||
408 | { | ||
409 | - tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s)); | ||
410 | + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB); | ||
411 | } | ||
412 | #define SPEC_wout_m1_8 0 | ||
413 | |||
414 | static void wout_m1_16(DisasContext *s, DisasOps *o) | ||
415 | { | ||
416 | - tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s)); | ||
417 | + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW); | ||
418 | } | ||
419 | #define SPEC_wout_m1_16 0 | ||
420 | |||
421 | @@ -XXX,XX +XXX,XX @@ static void wout_m1_16a(DisasContext *s, DisasOps *o) | ||
422 | |||
423 | static void wout_m1_32(DisasContext *s, DisasOps *o) | ||
424 | { | ||
425 | - tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s)); | ||
426 | + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL); | ||
427 | } | ||
428 | #define SPEC_wout_m1_32 0 | ||
429 | |||
430 | @@ -XXX,XX +XXX,XX @@ static void wout_m1_32a(DisasContext *s, DisasOps *o) | ||
431 | |||
432 | static void wout_m1_64(DisasContext *s, DisasOps *o) | ||
433 | { | ||
434 | - tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s)); | ||
435 | + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ); | ||
436 | } | ||
437 | #define SPEC_wout_m1_64 0 | ||
438 | |||
439 | @@ -XXX,XX +XXX,XX @@ static void wout_m1_64a(DisasContext *s, DisasOps *o) | ||
440 | |||
441 | static void wout_m2_32(DisasContext *s, DisasOps *o) | ||
442 | { | ||
443 | - tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s)); | ||
444 | + tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL); | ||
445 | } | ||
446 | #define SPEC_wout_m2_32 0 | ||
447 | |||
448 | @@ -XXX,XX +XXX,XX @@ static void in1_m1_8u(DisasContext *s, DisasOps *o) | ||
449 | { | ||
450 | in1_la1(s, o); | ||
451 | o->in1 = tcg_temp_new_i64(); | ||
452 | - tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s)); | ||
453 | + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB); | ||
454 | } | ||
455 | #define SPEC_in1_m1_8u 0 | ||
456 | |||
457 | @@ -XXX,XX +XXX,XX @@ static void in1_m1_16s(DisasContext *s, DisasOps *o) | ||
458 | { | ||
459 | in1_la1(s, o); | ||
460 | o->in1 = tcg_temp_new_i64(); | ||
461 | - tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s)); | ||
462 | + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW); | ||
463 | } | ||
464 | #define SPEC_in1_m1_16s 0 | ||
465 | |||
466 | @@ -XXX,XX +XXX,XX @@ static void in1_m1_16u(DisasContext *s, DisasOps *o) | ||
467 | { | ||
468 | in1_la1(s, o); | ||
469 | o->in1 = tcg_temp_new_i64(); | ||
470 | - tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s)); | ||
471 | + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW); | ||
472 | } | ||
473 | #define SPEC_in1_m1_16u 0 | ||
474 | |||
475 | @@ -XXX,XX +XXX,XX @@ static void in1_m1_32s(DisasContext *s, DisasOps *o) | ||
476 | { | ||
477 | in1_la1(s, o); | ||
478 | o->in1 = tcg_temp_new_i64(); | ||
479 | - tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s)); | ||
480 | + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL); | ||
481 | } | ||
482 | #define SPEC_in1_m1_32s 0 | ||
483 | |||
484 | @@ -XXX,XX +XXX,XX @@ static void in1_m1_32u(DisasContext *s, DisasOps *o) | ||
485 | { | ||
486 | in1_la1(s, o); | ||
487 | o->in1 = tcg_temp_new_i64(); | ||
488 | - tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s)); | ||
489 | + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL); | ||
490 | } | ||
491 | #define SPEC_in1_m1_32u 0 | ||
492 | |||
493 | @@ -XXX,XX +XXX,XX @@ static void in1_m1_64(DisasContext *s, DisasOps *o) | ||
494 | { | ||
495 | in1_la1(s, o); | ||
496 | o->in1 = tcg_temp_new_i64(); | ||
497 | - tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s)); | ||
498 | + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ); | ||
499 | } | ||
500 | #define SPEC_in1_m1_64 0 | ||
501 | |||
502 | @@ -XXX,XX +XXX,XX @@ static void in2_sh(DisasContext *s, DisasOps *o) | ||
503 | static void in2_m2_8u(DisasContext *s, DisasOps *o) | ||
504 | { | ||
505 | in2_a2(s, o); | ||
506 | - tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s)); | ||
507 | + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB); | ||
508 | } | ||
509 | #define SPEC_in2_m2_8u 0 | ||
510 | |||
511 | static void in2_m2_16s(DisasContext *s, DisasOps *o) | ||
512 | { | ||
513 | in2_a2(s, o); | ||
514 | - tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s)); | ||
515 | + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW); | ||
516 | } | ||
517 | #define SPEC_in2_m2_16s 0 | ||
518 | |||
519 | static void in2_m2_16u(DisasContext *s, DisasOps *o) | ||
520 | { | ||
521 | in2_a2(s, o); | ||
522 | - tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); | ||
523 | + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW); | ||
524 | } | ||
525 | #define SPEC_in2_m2_16u 0 | ||
526 | |||
527 | static void in2_m2_32s(DisasContext *s, DisasOps *o) | ||
528 | { | ||
529 | in2_a2(s, o); | ||
530 | - tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); | ||
531 | + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL); | ||
532 | } | ||
533 | #define SPEC_in2_m2_32s 0 | ||
534 | |||
535 | static void in2_m2_32u(DisasContext *s, DisasOps *o) | ||
536 | { | ||
537 | in2_a2(s, o); | ||
538 | - tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); | ||
539 | + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL); | ||
540 | } | ||
541 | #define SPEC_in2_m2_32u 0 | ||
542 | |||
543 | @@ -XXX,XX +XXX,XX @@ static void in2_m2_32ua(DisasContext *s, DisasOps *o) | ||
544 | static void in2_m2_64(DisasContext *s, DisasOps *o) | ||
545 | { | ||
546 | in2_a2(s, o); | ||
547 | - tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); | ||
548 | + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ); | ||
549 | } | ||
550 | #define SPEC_in2_m2_64 0 | ||
551 | |||
552 | static void in2_m2_64w(DisasContext *s, DisasOps *o) | ||
553 | { | ||
554 | in2_a2(s, o); | ||
555 | - tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); | ||
556 | + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ); | ||
557 | gen_addi_and_wrap_i64(s, o->in2, o->in2, 0); | ||
558 | } | ||
559 | #define SPEC_in2_m2_64w 0 | ||
560 | @@ -XXX,XX +XXX,XX @@ static void in2_m2_64a(DisasContext *s, DisasOps *o) | ||
561 | static void in2_mri2_16s(DisasContext *s, DisasOps *o) | ||
562 | { | ||
563 | o->in2 = tcg_temp_new_i64(); | ||
564 | - tcg_gen_qemu_ld16s(o->in2, gen_ri2(s), get_mem_index(s)); | ||
565 | + tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW); | ||
566 | } | ||
567 | #define SPEC_in2_mri2_16s 0 | ||
568 | |||
569 | static void in2_mri2_16u(DisasContext *s, DisasOps *o) | ||
570 | { | ||
571 | o->in2 = tcg_temp_new_i64(); | ||
572 | - tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s)); | ||
573 | + tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW); | ||
574 | } | ||
575 | #define SPEC_in2_mri2_16u 0 | ||
576 | |||
64 | -- | 577 | -- |
65 | 2.25.1 | 578 | 2.34.1 |
66 | |||
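A note on the s390x hunks above: the rewrite is purely mechanical. The equivalences below are taken from the compatibility wrappers that a later patch in this series deletes, so nothing here is new API; r, v, a and i are placeholder names for result/value, address and mmu index.

    /* Legacy helper               ==  explicit-MemOp replacement            */
    /* tcg_gen_qemu_ld8u(r, a, i)  ==  tcg_gen_qemu_ld_tl(r, a, i, MO_UB)    */
    /* tcg_gen_qemu_ld16s(r, a, i) ==  tcg_gen_qemu_ld_tl(r, a, i, MO_TESW)  */
    /* tcg_gen_qemu_ld32u(r, a, i) ==  tcg_gen_qemu_ld_tl(r, a, i, MO_TEUL)  */
    /* tcg_gen_qemu_ld64(r, a, i)  ==  tcg_gen_qemu_ld_i64(r, a, i, MO_TEUQ) */
    /* tcg_gen_qemu_st8(v, a, i)   ==  tcg_gen_qemu_st_tl(v, a, i, MO_UB)    */
    /* tcg_gen_qemu_st16(v, a, i)  ==  tcg_gen_qemu_st_tl(v, a, i, MO_TEUW)  */
    /* tcg_gen_qemu_st32(v, a, i)  ==  tcg_gen_qemu_st_tl(v, a, i, MO_TEUL)  */
    /* tcg_gen_qemu_st64(v, a, i)  ==  tcg_gen_qemu_st_i64(v, a, i, MO_TEUQ) */

On a 64-bit target such as s390x, TCGv is TCGv_i64, which is why these hunks call the _i64 entry points directly. The op_xc hunk is the one spot using host-order MO_UQ/MO_UL rather than the target-endian forms, apparently safe because the pattern stored on that path reads the same in either byte order.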
1 | Recognize the identity function for division. | 1 | Convert away from the old interface with the implicit |
---|---|---|---|
2 | MemOp argument. | ||
2 | 3 | ||
3 | Suggested-by: Luis Pires <luis.pires@eldorado.org.br> | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Reviewed-by: Anton Johansson <anjo@rev.ng> | ||
6 | Message-Id: <20230502135741.1158035-8-richard.henderson@linaro.org> | ||
7 | --- | 7 | --- |
8 | tcg/optimize.c | 6 +++++- | 8 | target/sparc/translate.c | 43 ++++++++++++++++++++++++++-------------- |
9 | 1 file changed, 5 insertions(+), 1 deletion(-) | 9 | 1 file changed, 28 insertions(+), 15 deletions(-) |
10 | 10 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/target/sparc/translate.c b/target/sparc/translate.c |
12 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 13 | --- a/target/sparc/translate.c |
14 | +++ b/tcg/optimize.c | 14 | +++ b/target/sparc/translate.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op) | 15 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) |
16 | 16 | switch (xop) { | |
17 | static bool fold_divide(OptContext *ctx, TCGOp *op) | 17 | case 0x0: /* ld, V9 lduw, load unsigned word */ |
18 | { | 18 | gen_address_mask(dc, cpu_addr); |
19 | - return fold_const2(ctx, op); | 19 | - tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx); |
20 | + if (fold_const2(ctx, op) || | 20 | + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, |
21 | + fold_xi_to_x(ctx, op, 1)) { | 21 | + dc->mem_idx, MO_TEUL); |
22 | + return true; | 22 | break; |
23 | + } | 23 | case 0x1: /* ldub, load unsigned byte */ |
24 | + return false; | 24 | gen_address_mask(dc, cpu_addr); |
25 | } | 25 | - tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx); |
26 | 26 | + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | |
27 | static bool fold_dup(OptContext *ctx, TCGOp *op) | 27 | + dc->mem_idx, MO_UB); |
28 | break; | ||
29 | case 0x2: /* lduh, load unsigned halfword */ | ||
30 | gen_address_mask(dc, cpu_addr); | ||
31 | - tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx); | ||
32 | + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
33 | + dc->mem_idx, MO_TEUW); | ||
34 | break; | ||
35 | case 0x3: /* ldd, load double word */ | ||
36 | if (rd & 1) | ||
37 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
38 | |||
39 | gen_address_mask(dc, cpu_addr); | ||
40 | t64 = tcg_temp_new_i64(); | ||
41 | - tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx); | ||
42 | + tcg_gen_qemu_ld_i64(t64, cpu_addr, | ||
43 | + dc->mem_idx, MO_TEUQ); | ||
44 | tcg_gen_trunc_i64_tl(cpu_val, t64); | ||
45 | tcg_gen_ext32u_tl(cpu_val, cpu_val); | ||
46 | gen_store_gpr(dc, rd + 1, cpu_val); | ||
47 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
48 | break; | ||
49 | case 0x9: /* ldsb, load signed byte */ | ||
50 | gen_address_mask(dc, cpu_addr); | ||
51 | - tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx); | ||
52 | + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB); | ||
53 | break; | ||
54 | case 0xa: /* ldsh, load signed halfword */ | ||
55 | gen_address_mask(dc, cpu_addr); | ||
56 | - tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx); | ||
57 | + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
58 | + dc->mem_idx, MO_TESW); | ||
59 | break; | ||
60 | case 0xd: /* ldstub */ | ||
61 | gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx); | ||
62 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
63 | #ifdef TARGET_SPARC64 | ||
64 | case 0x08: /* V9 ldsw */ | ||
65 | gen_address_mask(dc, cpu_addr); | ||
66 | - tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx); | ||
67 | + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
68 | + dc->mem_idx, MO_TESL); | ||
69 | break; | ||
70 | case 0x0b: /* V9 ldx */ | ||
71 | gen_address_mask(dc, cpu_addr); | ||
72 | - tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx); | ||
73 | + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
74 | + dc->mem_idx, MO_TEUQ); | ||
75 | break; | ||
76 | case 0x18: /* V9 ldswa */ | ||
77 | gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL); | ||
78 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
79 | switch (xop) { | ||
80 | case 0x4: /* st, store word */ | ||
81 | gen_address_mask(dc, cpu_addr); | ||
82 | - tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx); | ||
83 | + tcg_gen_qemu_st_tl(cpu_val, cpu_addr, | ||
84 | + dc->mem_idx, MO_TEUL); | ||
85 | break; | ||
86 | case 0x5: /* stb, store byte */ | ||
87 | gen_address_mask(dc, cpu_addr); | ||
88 | - tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx); | ||
89 | + tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB); | ||
90 | break; | ||
91 | case 0x6: /* sth, store halfword */ | ||
92 | gen_address_mask(dc, cpu_addr); | ||
93 | - tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx); | ||
94 | + tcg_gen_qemu_st_tl(cpu_val, cpu_addr, | ||
95 | + dc->mem_idx, MO_TEUW); | ||
96 | break; | ||
97 | case 0x7: /* std, store double word */ | ||
98 | if (rd & 1) | ||
99 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
100 | lo = gen_load_gpr(dc, rd + 1); | ||
101 | t64 = tcg_temp_new_i64(); | ||
102 | tcg_gen_concat_tl_i64(t64, lo, cpu_val); | ||
103 | - tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx); | ||
104 | + tcg_gen_qemu_st_i64(t64, cpu_addr, | ||
105 | + dc->mem_idx, MO_TEUQ); | ||
106 | } | ||
107 | break; | ||
108 | #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) | ||
109 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
110 | #ifdef TARGET_SPARC64 | ||
111 | case 0x0e: /* V9 stx */ | ||
112 | gen_address_mask(dc, cpu_addr); | ||
113 | - tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx); | ||
114 | + tcg_gen_qemu_st_tl(cpu_val, cpu_addr, | ||
115 | + dc->mem_idx, MO_TEUQ); | ||
116 | break; | ||
117 | case 0x1e: /* V9 stxa */ | ||
118 | gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); | ||
119 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
120 | #ifdef TARGET_SPARC64 | ||
121 | gen_address_mask(dc, cpu_addr); | ||
122 | if (rd == 1) { | ||
123 | - tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx); | ||
124 | + tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, | ||
125 | + dc->mem_idx, MO_TEUQ); | ||
126 | break; | ||
127 | } | ||
128 | #endif | ||
129 | - tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx); | ||
130 | + tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, | ||
131 | + dc->mem_idx, MO_TEUL); | ||
132 | } | ||
133 | break; | ||
134 | case 0x26: | ||
28 | -- | 135 | -- |
29 | 2.25.1 | 136 | 2.34.1 |
30 | |||
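The fold_divide change above rests on nothing deeper than x / 1 == x. A throwaway host-side check of the identity that fold_xi_to_x() now applies at translate time (plain C, independent of QEMU):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t u[] = { 0, 1, 42, UINT64_MAX };
        int64_t  s[] = { INT64_MIN, -1, 0, INT64_MAX };

        for (int i = 0; i < 4; i++) {
            assert(u[i] / 1 == u[i]);   /* divu r, a, 1 => mov r, a */
            assert(s[i] / 1 == s[i]);   /* div  r, a, 1 => mov r, a */
        }
        return 0;
    }

Note the identity holds even for INT64_MIN, where division by -1 (not folded here) would overflow.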
1 | Rename to fold_addsub2. | 1 | Convert away from the old interface with the implicit |
---|---|---|---|
2 | Use Int128 to implement the wider operation. | 2 | MemOp argument. |
3 | 3 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Reviewed-by: Max Filippov <jcmvbkbc@gmail.com> | ||
6 | Message-Id: <20230502135741.1158035-9-richard.henderson@linaro.org> | ||
8 | --- | 7 | --- |
9 | tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++---------------- | 8 | target/xtensa/translate.c | 4 ++-- |
10 | 1 file changed, 44 insertions(+), 21 deletions(-) | 9 | 1 file changed, 2 insertions(+), 2 deletions(-) |
11 | 10 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c |
13 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 13 | --- a/target/xtensa/translate.c |
15 | +++ b/tcg/optimize.c | 14 | +++ b/target/xtensa/translate.c |
16 | @@ -XXX,XX +XXX,XX @@ | 15 | @@ -XXX,XX +XXX,XX @@ static void translate_dcache(DisasContext *dc, const OpcodeArg arg[], |
17 | */ | 16 | TCGv_i32 res = tcg_temp_new_i32(); |
18 | 17 | ||
19 | #include "qemu/osdep.h" | 18 | tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm); |
20 | +#include "qemu/int128.h" | 19 | - tcg_gen_qemu_ld8u(res, addr, dc->cring); |
21 | #include "tcg/tcg-op.h" | 20 | + tcg_gen_qemu_ld_i32(res, addr, dc->cring, MO_UB); |
22 | #include "tcg-internal.h" | ||
23 | |||
24 | @@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op) | ||
25 | return false; | ||
26 | } | 21 | } |
27 | 22 | ||
28 | -static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add) | 23 | static void translate_depbits(DisasContext *dc, const OpcodeArg arg[], |
29 | +static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) | 24 | @@ -XXX,XX +XXX,XX @@ static void translate_l32r(DisasContext *dc, const OpcodeArg arg[], |
30 | { | 25 | } else { |
31 | if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) && | 26 | tmp = tcg_constant_i32(arg[1].imm); |
32 | arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { | ||
33 | - uint32_t al = arg_info(op->args[2])->val; | ||
34 | - uint32_t ah = arg_info(op->args[3])->val; | ||
35 | - uint32_t bl = arg_info(op->args[4])->val; | ||
36 | - uint32_t bh = arg_info(op->args[5])->val; | ||
37 | - uint64_t a = ((uint64_t)ah << 32) | al; | ||
38 | - uint64_t b = ((uint64_t)bh << 32) | bl; | ||
39 | + uint64_t al = arg_info(op->args[2])->val; | ||
40 | + uint64_t ah = arg_info(op->args[3])->val; | ||
41 | + uint64_t bl = arg_info(op->args[4])->val; | ||
42 | + uint64_t bh = arg_info(op->args[5])->val; | ||
43 | TCGArg rl, rh; | ||
44 | - TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32); | ||
45 | + TCGOp *op2; | ||
46 | |||
47 | - if (add) { | ||
48 | - a += b; | ||
49 | + if (ctx->type == TCG_TYPE_I32) { | ||
50 | + uint64_t a = deposit64(al, 32, 32, ah); | ||
51 | + uint64_t b = deposit64(bl, 32, 32, bh); | ||
52 | + | ||
53 | + if (add) { | ||
54 | + a += b; | ||
55 | + } else { | ||
56 | + a -= b; | ||
57 | + } | ||
58 | + | ||
59 | + al = sextract64(a, 0, 32); | ||
60 | + ah = sextract64(a, 32, 32); | ||
61 | } else { | ||
62 | - a -= b; | ||
63 | + Int128 a = int128_make128(al, ah); | ||
64 | + Int128 b = int128_make128(bl, bh); | ||
65 | + | ||
66 | + if (add) { | ||
67 | + a = int128_add(a, b); | ||
68 | + } else { | ||
69 | + a = int128_sub(a, b); | ||
70 | + } | ||
71 | + | ||
72 | + al = int128_getlo(a); | ||
73 | + ah = int128_gethi(a); | ||
74 | } | ||
75 | |||
76 | rl = op->args[0]; | ||
77 | rh = op->args[1]; | ||
78 | - tcg_opt_gen_movi(ctx, op, rl, (int32_t)a); | ||
79 | - tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32)); | ||
80 | + | ||
81 | + /* The proper opcode is supplied by tcg_opt_gen_mov. */ | ||
82 | + op2 = tcg_op_insert_before(ctx->tcg, op, 0); | ||
83 | + | ||
84 | + tcg_opt_gen_movi(ctx, op, rl, al); | ||
85 | + tcg_opt_gen_movi(ctx, op2, rh, ah); | ||
86 | return true; | ||
87 | } | 27 | } |
88 | return false; | 28 | - tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring); |
29 | + tcg_gen_qemu_ld_i32(arg[0].out, tmp, dc->cring, MO_TEUL); | ||
89 | } | 30 | } |
90 | 31 | ||
91 | -static bool fold_add2_i32(OptContext *ctx, TCGOp *op) | 32 | static void translate_loop(DisasContext *dc, const OpcodeArg arg[], |
92 | +static bool fold_add2(OptContext *ctx, TCGOp *op) | ||
93 | { | ||
94 | - return fold_addsub2_i32(ctx, op, true); | ||
95 | + return fold_addsub2(ctx, op, true); | ||
96 | } | ||
97 | |||
98 | static bool fold_and(OptContext *ctx, TCGOp *op) | ||
99 | @@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op) | ||
100 | return false; | ||
101 | } | ||
102 | |||
103 | -static bool fold_sub2_i32(OptContext *ctx, TCGOp *op) | ||
104 | +static bool fold_sub2(OptContext *ctx, TCGOp *op) | ||
105 | { | ||
106 | - return fold_addsub2_i32(ctx, op, false); | ||
107 | + return fold_addsub2(ctx, op, false); | ||
108 | } | ||
109 | |||
110 | static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) | ||
111 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
112 | CASE_OP_32_64_VEC(add): | ||
113 | done = fold_add(&ctx, op); | ||
114 | break; | ||
115 | - case INDEX_op_add2_i32: | ||
116 | - done = fold_add2_i32(&ctx, op); | ||
117 | + CASE_OP_32_64(add2): | ||
118 | + done = fold_add2(&ctx, op); | ||
119 | break; | ||
120 | CASE_OP_32_64_VEC(and): | ||
121 | done = fold_and(&ctx, op); | ||
122 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
123 | CASE_OP_32_64_VEC(sub): | ||
124 | done = fold_sub(&ctx, op); | ||
125 | break; | ||
126 | - case INDEX_op_sub2_i32: | ||
127 | - done = fold_sub2_i32(&ctx, op); | ||
128 | + CASE_OP_32_64(sub2): | ||
129 | + done = fold_sub2(&ctx, op); | ||
130 | break; | ||
131 | CASE_OP_32_64_VEC(xor): | ||
132 | done = fold_xor(&ctx, op); | ||
133 | -- | 33 | -- |
134 | 2.25.1 | 34 | 2.34.1 |
135 | |||
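The move to Int128 above is what lets one fold serve both the 32-bit and 64-bit double-word opcodes. A standalone sketch of the two-halves-plus-carry arithmetic that int128_add() reduces to on hosts without a native 128-bit type (local helper, not the qemu/int128.h implementation):

    #include <assert.h>
    #include <stdint.h>

    static void add128(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh,
                       uint64_t *rl, uint64_t *rh)
    {
        *rl = al + bl;
        *rh = ah + bh + (*rl < al);   /* unsigned wraparound => carry of 1 */
    }

    int main(void)
    {
        uint64_t rl, rh;

        add128(UINT64_MAX, 0, 1, 0, &rl, &rh);
        assert(rl == 0 && rh == 1);   /* carry crossed the 64-bit boundary */
        return 0;
    }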
1 | There was no real reason for calls to have separate code here. | 1 | Remove the old interfaces with the implicit MemOp argument. |
---|---|---|---|
2 | Unify init for calls vs non-calls using the call path, which | ||
3 | handles TCG_CALL_DUMMY_ARG. | ||
4 | 2 | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
7 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | Acked-by: David Hildenbrand <david@redhat.com> | ||
5 | Message-Id: <20230502135741.1158035-10-richard.henderson@linaro.org> | ||
9 | --- | 6 | --- |
10 | tcg/optimize.c | 25 +++++++++++-------------- | 7 | include/tcg/tcg-op.h | 55 -------------------------------------------- |
11 | 1 file changed, 11 insertions(+), 14 deletions(-) | 8 | 1 file changed, 55 deletions(-) |
12 | 9 | ||
13 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h |
14 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/tcg/optimize.c | 12 | --- a/include/tcg/tcg-op.h |
16 | +++ b/tcg/optimize.c | 13 | +++ b/include/tcg/tcg-op.h |
17 | @@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts) | 14 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp); |
18 | } | 15 | void tcg_gen_qemu_ld_i128(TCGv_i128, TCGv, TCGArg, MemOp); |
19 | } | 16 | void tcg_gen_qemu_st_i128(TCGv_i128, TCGv, TCGArg, MemOp); |
20 | 17 | ||
21 | -static void init_arg_info(OptContext *ctx, TCGArg arg) | 18 | -static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index) |
22 | -{ | 19 | -{ |
23 | - init_ts_info(ctx, arg_temp(arg)); | 20 | - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB); |
24 | -} | 21 | -} |
25 | - | 22 | - |
26 | static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) | 23 | -static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index) |
27 | { | 24 | -{ |
28 | TCGTemp *i, *g, *l; | 25 | - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB); |
29 | @@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2) | 26 | -} |
30 | return false; | 27 | - |
31 | } | 28 | -static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index) |
32 | 29 | -{ | |
33 | +static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args) | 30 | - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW); |
34 | +{ | 31 | -} |
35 | + for (int i = 0; i < nb_args; i++) { | 32 | - |
36 | + TCGTemp *ts = arg_temp(op->args[i]); | 33 | -static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index) |
37 | + if (ts) { | 34 | -{ |
38 | + init_ts_info(ctx, ts); | 35 | - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW); |
39 | + } | 36 | -} |
40 | + } | 37 | - |
41 | +} | 38 | -static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index) |
42 | + | 39 | -{ |
43 | /* Propagate constants and copies, fold constant expressions. */ | 40 | - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL); |
44 | void tcg_optimize(TCGContext *s) | 41 | -} |
45 | { | 42 | - |
46 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 43 | -static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index) |
47 | if (opc == INDEX_op_call) { | 44 | -{ |
48 | nb_oargs = TCGOP_CALLO(op); | 45 | - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL); |
49 | nb_iargs = TCGOP_CALLI(op); | 46 | -} |
50 | - for (i = 0; i < nb_oargs + nb_iargs; i++) { | 47 | - |
51 | - TCGTemp *ts = arg_temp(op->args[i]); | 48 | -static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index) |
52 | - if (ts) { | 49 | -{ |
53 | - init_ts_info(&ctx, ts); | 50 | - tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ); |
54 | - } | 51 | -} |
55 | - } | 52 | - |
56 | } else { | 53 | -static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index) |
57 | nb_oargs = def->nb_oargs; | 54 | -{ |
58 | nb_iargs = def->nb_iargs; | 55 | - tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB); |
59 | - for (i = 0; i < nb_oargs + nb_iargs; i++) { | 56 | -} |
60 | - init_arg_info(&ctx, op->args[i]); | 57 | - |
61 | - } | 58 | -static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index) |
62 | } | 59 | -{ |
63 | + init_arguments(&ctx, op, nb_oargs + nb_iargs); | 60 | - tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW); |
64 | 61 | -} | |
65 | /* Do copy propagation */ | 62 | - |
66 | for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { | 63 | -static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index) |
64 | -{ | ||
65 | - tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL); | ||
66 | -} | ||
67 | - | ||
68 | -static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) | ||
69 | -{ | ||
70 | - tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ); | ||
71 | -} | ||
72 | - | ||
73 | void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, | ||
74 | TCGArg, MemOp); | ||
75 | void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64, | ||
67 | -- | 76 | -- |
68 | 2.25.1 | 77 | 2.34.1 |
69 | |||
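Why the unified loop above is safe for calls: per the commit message, the call path already handles TCG_CALL_DUMMY_ARG, and such a slot shows up as a NULL temp, which the if (ts) test in init_arguments() skips. A reduced standalone illustration of that filtering pattern (Temp and the names here are invented for the sketch, not QEMU types):

    #include <stdio.h>

    typedef struct Temp { const char *name; } Temp;

    int main(void)
    {
        Temp a = { "oarg0" }, b = { "iarg1" };
        Temp *args[] = { &a, NULL /* dummy argument slot */, &b };

        for (int i = 0; i < 3; i++) {
            Temp *ts = args[i];
            if (ts) {                 /* same filter as init_arguments() */
                printf("init %s\n", ts->name);
            }
        }
        return 0;
    }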
1 | Recognize the identity function for low-part multiply. | ||
---|---|---|---|
2 | |||
3 | Suggested-by: Luis Pires <luis.pires@eldorado.org.br> | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 2 | --- |
8 | tcg/optimize.c | 3 ++- | 3 | target/alpha/translate.c | 2 +- |
9 | 1 file changed, 2 insertions(+), 1 deletion(-) | 4 | 1 file changed, 1 insertion(+), 1 deletion(-) |
10 | 5 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 6 | diff --git a/target/alpha/translate.c b/target/alpha/translate.c |
12 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 8 | --- a/target/alpha/translate.c |
14 | +++ b/tcg/optimize.c | 9 | +++ b/target/alpha/translate.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op) | 10 | @@ -XXX,XX +XXX,XX @@ struct DisasContext { |
16 | static bool fold_mul(OptContext *ctx, TCGOp *op) | 11 | #ifdef CONFIG_USER_ONLY |
17 | { | 12 | #define UNALIGN(C) (C)->unalign |
18 | if (fold_const2(ctx, op) || | 13 | #else |
19 | - fold_xi_to_i(ctx, op, 0)) { | 14 | -#define UNALIGN(C) 0 |
20 | + fold_xi_to_i(ctx, op, 0) || | 15 | +#define UNALIGN(C) MO_ALIGN |
21 | + fold_xi_to_x(ctx, op, 1)) { | 16 | #endif |
22 | return true; | 17 | |
23 | } | 18 | /* Target-specific return values from translate_one, indicating the |
24 | return false; | ||
25 | -- | 19 | -- |
26 | 2.25.1 | 20 | 2.34.1 |
27 | |||
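The alpha hunk above is the crux of dropping TARGET_ALIGNED_ONLY: alignment stops being a build-time property of the whole target and becomes part of each access's MemOp. A sketch of how the macro composes into an access; the macro bodies are the ones from the diff, while ctx, addr and val are placeholders:

    #ifdef CONFIG_USER_ONLY
    #define UNALIGN(C)  (C)->unalign   /* follow the runtime unaligned-access setting */
    #else
    #define UNALIGN(C)  MO_ALIGN       /* system mode: always request the alignment check */
    #endif

    /* A target-endian 64-bit load that now faults on misalignment in
     * system mode, with no TARGET_ALIGNED_ONLY involved: */
    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, MO_TEUQ | UNALIGN(ctx));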
1 | Even though there is only one user, place this more complex | 1 | Mark all memory operations that are not already marked with UNALIGN. |
---|---|---|---|
2 | conversion into its own helper. | ||
3 | 2 | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 4 | --- |
7 | tcg/optimize.c | 89 ++++++++++++++++++++++++++------------------------ | 5 | target/alpha/translate.c | 36 ++++++++++++++++++++---------------- |
8 | 1 file changed, 47 insertions(+), 42 deletions(-) | 6 | 1 file changed, 20 insertions(+), 16 deletions(-) |
9 | 7 | ||
10 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 8 | diff --git a/target/alpha/translate.c b/target/alpha/translate.c |
11 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/tcg/optimize.c | 10 | --- a/target/alpha/translate.c |
13 | +++ b/tcg/optimize.c | 11 | +++ b/target/alpha/translate.c |
14 | @@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op) | 12 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) |
15 | 13 | switch ((insn >> 12) & 0xF) { | |
16 | static bool fold_neg(OptContext *ctx, TCGOp *op) | 14 | case 0x0: |
17 | { | 15 | /* Longword physical access (hw_ldl/p) */ |
18 | - return fold_const1(ctx, op); | 16 | - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL); |
19 | + if (fold_const1(ctx, op)) { | 17 | + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); |
20 | + return true; | 18 | break; |
21 | + } | 19 | case 0x1: |
22 | + /* | 20 | /* Quadword physical access (hw_ldq/p) */ |
23 | + * Because of fold_sub_to_neg, we want to always return true, | 21 | - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ); |
24 | + * via finish_folding. | 22 | + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); |
25 | + */ | 23 | break; |
26 | + finish_folding(ctx, op); | 24 | case 0x2: |
27 | + return true; | 25 | /* Longword physical access with lock (hw_ldl_l/p) */ |
28 | } | 26 | - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL); |
29 | 27 | + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); | |
30 | static bool fold_nor(OptContext *ctx, TCGOp *op) | 28 | tcg_gen_mov_i64(cpu_lock_addr, addr); |
31 | @@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op) | 29 | tcg_gen_mov_i64(cpu_lock_value, va); |
32 | return fold_const2(ctx, op); | 30 | break; |
33 | } | 31 | case 0x3: |
34 | 32 | /* Quadword physical access with lock (hw_ldq_l/p) */ | |
35 | +static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) | 33 | - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ); |
36 | +{ | 34 | + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); |
37 | + TCGOpcode neg_op; | 35 | tcg_gen_mov_i64(cpu_lock_addr, addr); |
38 | + bool have_neg; | 36 | tcg_gen_mov_i64(cpu_lock_value, va); |
39 | + | 37 | break; |
40 | + if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) { | 38 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) |
41 | + return false; | 39 | goto invalid_opc; |
42 | + } | 40 | case 0xA: |
43 | + | 41 | /* Longword virtual access with protection check (hw_ldl/w) */ |
44 | + switch (ctx->type) { | 42 | - tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL); |
45 | + case TCG_TYPE_I32: | 43 | + tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, |
46 | + neg_op = INDEX_op_neg_i32; | 44 | + MO_LESL | MO_ALIGN); |
47 | + have_neg = TCG_TARGET_HAS_neg_i32; | 45 | break; |
48 | + break; | 46 | case 0xB: |
49 | + case TCG_TYPE_I64: | 47 | /* Quadword virtual access with protection check (hw_ldq/w) */ |
50 | + neg_op = INDEX_op_neg_i64; | 48 | - tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ); |
51 | + have_neg = TCG_TARGET_HAS_neg_i64; | 49 | + tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, |
52 | + break; | 50 | + MO_LEUQ | MO_ALIGN); |
53 | + case TCG_TYPE_V64: | 51 | break; |
54 | + case TCG_TYPE_V128: | 52 | case 0xC: |
55 | + case TCG_TYPE_V256: | 53 | /* Longword virtual access with alt access mode (hw_ldl/a)*/ |
56 | + neg_op = INDEX_op_neg_vec; | 54 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) |
57 | + have_neg = (TCG_TARGET_HAS_neg_vec && | 55 | case 0xE: |
58 | + tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0); | 56 | /* Longword virtual access with alternate access mode and |
59 | + break; | 57 | protection checks (hw_ldl/wa) */ |
60 | + default: | 58 | - tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL); |
61 | + g_assert_not_reached(); | 59 | + tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, |
62 | + } | 60 | + MO_LESL | MO_ALIGN); |
63 | + if (have_neg) { | 61 | break; |
64 | + op->opc = neg_op; | 62 | case 0xF: |
65 | + op->args[1] = op->args[2]; | 63 | /* Quadword virtual access with alternate access mode and |
66 | + return fold_neg(ctx, op); | 64 | protection checks (hw_ldq/wa) */ |
67 | + } | 65 | - tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ); |
68 | + return false; | 66 | + tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, |
69 | +} | 67 | + MO_LEUQ | MO_ALIGN); |
70 | + | 68 | break; |
71 | static bool fold_sub(OptContext *ctx, TCGOp *op) | ||
72 | { | ||
73 | if (fold_const2(ctx, op) || | ||
74 | - fold_xx_to_i(ctx, op, 0)) { | ||
75 | + fold_xx_to_i(ctx, op, 0) || | ||
76 | + fold_sub_to_neg(ctx, op)) { | ||
77 | return true; | ||
78 | } | ||
79 | return false; | ||
80 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
81 | continue; | ||
82 | } | 69 | } |
83 | break; | 70 | break; |
84 | - CASE_OP_32_64_VEC(sub): | 71 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) |
85 | - { | 72 | vb = load_gpr(ctx, rb); |
86 | - TCGOpcode neg_op; | 73 | tmp = tcg_temp_new(); |
87 | - bool have_neg; | 74 | tcg_gen_addi_i64(tmp, vb, disp12); |
88 | - | 75 | - tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL); |
89 | - if (arg_is_const(op->args[2])) { | 76 | + tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); |
90 | - /* Proceed with possible constant folding. */ | 77 | break; |
91 | - break; | 78 | case 0x1: |
92 | - } | 79 | /* Quadword physical access */ |
93 | - switch (ctx.type) { | 80 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) |
94 | - case TCG_TYPE_I32: | 81 | vb = load_gpr(ctx, rb); |
95 | - neg_op = INDEX_op_neg_i32; | 82 | tmp = tcg_temp_new(); |
96 | - have_neg = TCG_TARGET_HAS_neg_i32; | 83 | tcg_gen_addi_i64(tmp, vb, disp12); |
97 | - break; | 84 | - tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ); |
98 | - case TCG_TYPE_I64: | 85 | + tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); |
99 | - neg_op = INDEX_op_neg_i64; | 86 | break; |
100 | - have_neg = TCG_TARGET_HAS_neg_i64; | 87 | case 0x2: |
101 | - break; | 88 | /* Longword physical access with lock */ |
102 | - case TCG_TYPE_V64: | 89 | ret = gen_store_conditional(ctx, ra, rb, disp12, |
103 | - case TCG_TYPE_V128: | 90 | - MMU_PHYS_IDX, MO_LESL); |
104 | - case TCG_TYPE_V256: | 91 | + MMU_PHYS_IDX, MO_LESL | MO_ALIGN); |
105 | - neg_op = INDEX_op_neg_vec; | 92 | break; |
106 | - have_neg = tcg_can_emit_vec_op(neg_op, ctx.type, | 93 | case 0x3: |
107 | - TCGOP_VECE(op)) > 0; | 94 | /* Quadword physical access with lock */ |
108 | - break; | 95 | ret = gen_store_conditional(ctx, ra, rb, disp12, |
109 | - default: | 96 | - MMU_PHYS_IDX, MO_LEUQ); |
110 | - g_assert_not_reached(); | 97 | + MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); |
111 | - } | 98 | break; |
112 | - if (!have_neg) { | 99 | case 0x4: |
113 | - break; | 100 | /* Longword virtual access */ |
114 | - } | 101 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) |
115 | - if (arg_is_const(op->args[1]) | 102 | break; |
116 | - && arg_info(op->args[1])->val == 0) { | 103 | case 0x2A: |
117 | - op->opc = neg_op; | 104 | /* LDL_L */ |
118 | - reset_temp(op->args[0]); | 105 | - gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1); |
119 | - op->args[1] = op->args[2]; | 106 | + gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1); |
120 | - continue; | 107 | break; |
121 | - } | 108 | case 0x2B: |
122 | - } | 109 | /* LDQ_L */ |
123 | - break; | 110 | - gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1); |
124 | default: | 111 | + gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1); |
125 | break; | 112 | break; |
126 | } | 113 | case 0x2C: |
114 | /* STL */ | ||
115 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) | ||
116 | case 0x2E: | ||
117 | /* STL_C */ | ||
118 | ret = gen_store_conditional(ctx, ra, rb, disp16, | ||
119 | - ctx->mem_idx, MO_LESL); | ||
120 | + ctx->mem_idx, MO_LESL | MO_ALIGN); | ||
121 | break; | ||
122 | case 0x2F: | ||
123 | /* STQ_C */ | ||
124 | ret = gen_store_conditional(ctx, ra, rb, disp16, | ||
125 | - ctx->mem_idx, MO_LEUQ); | ||
126 | + ctx->mem_idx, MO_LEUQ | MO_ALIGN); | ||
127 | break; | ||
128 | case 0x30: | ||
129 | /* BR */ | ||
127 | -- | 130 | -- |
128 | 2.25.1 | 131 | 2.34.1 |
129 | |||
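fold_sub_to_neg above leans on the two's-complement identity 0 - a == -a at every operand width; the rewrite to a neg opcode is then legal whenever the backend advertises one. A host-side check, outside QEMU, done in unsigned arithmetic so the wraparound case is well defined:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t v[] = { INT32_MIN, -7, 0, 7, INT32_MAX };

        for (int i = 0; i < 5; i++) {
            uint32_t a = (uint32_t)v[i];
            /* Identical bits either way, including the INT32_MIN wraparound. */
            assert(0u - a == -a);
        }
        return 0;
    }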
1 | Recognize the constant function for or-complement. | ||
---|---|---|---|
2 | |||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 2 | --- |
8 | tcg/optimize.c | 1 + | 3 | configs/targets/alpha-linux-user.mak | 1 - |
9 | 1 file changed, 1 insertion(+) | 4 | configs/targets/alpha-softmmu.mak | 1 - |
5 | 2 files changed, 2 deletions(-) | ||
10 | 6 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 7 | diff --git a/configs/targets/alpha-linux-user.mak b/configs/targets/alpha-linux-user.mak |
12 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 9 | --- a/configs/targets/alpha-linux-user.mak |
14 | +++ b/tcg/optimize.c | 10 | +++ b/configs/targets/alpha-linux-user.mak |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op) | 11 | @@ -XXX,XX +XXX,XX @@ |
16 | static bool fold_orc(OptContext *ctx, TCGOp *op) | 12 | TARGET_ARCH=alpha |
17 | { | 13 | TARGET_SYSTBL_ABI=common |
18 | if (fold_const2(ctx, op) || | 14 | TARGET_SYSTBL=syscall.tbl |
19 | + fold_xx_to_i(ctx, op, -1) || | 15 | -TARGET_ALIGNED_ONLY=y |
20 | fold_xi_to_x(ctx, op, -1) || | 16 | diff --git a/configs/targets/alpha-softmmu.mak b/configs/targets/alpha-softmmu.mak |
21 | fold_ix_to_not(ctx, op, 0)) { | 17 | index XXXXXXX..XXXXXXX 100644 |
22 | return true; | 18 | --- a/configs/targets/alpha-softmmu.mak |
19 | +++ b/configs/targets/alpha-softmmu.mak | ||
20 | @@ -XXX,XX +XXX,XX @@ | ||
21 | TARGET_ARCH=alpha | ||
22 | -TARGET_ALIGNED_ONLY=y | ||
23 | TARGET_SUPPORTS_MTTCG=y | ||
23 | -- | 24 | -- |
24 | 2.25.1 | 25 | 2.34.1 |
25 | |||
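The new orc fold above: or-complement of a value with itself sets every bit, since each bit position contributes either the original bit or its complement. A quick host-side check of the identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t v[] = { 0, 1, 0xdeadbeefull, UINT64_MAX };

        for (int i = 0; i < 4; i++) {
            /* orc(x, x) == x | ~x == all-ones, so the op folds to -1. */
            assert((v[i] | ~v[i]) == UINT64_MAX);
        }
        return 0;
    }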
1 | Rename to fold_multiply2, and handle muls2_i32, mulu2_i64, | ||
---|---|---|---|
2 | and muls2_i64. | ||
3 | |||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 2 | --- |
8 | tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++--------- | 3 | target/hppa/translate.c | 2 +- |
9 | 1 file changed, 35 insertions(+), 9 deletions(-) | 4 | 1 file changed, 1 insertion(+), 1 deletion(-) |
10 | 5 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 6 | diff --git a/target/hppa/translate.c b/target/hppa/translate.c |
12 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 8 | --- a/target/hppa/translate.c |
14 | +++ b/tcg/optimize.c | 9 | +++ b/target/hppa/translate.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | 10 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { |
16 | return false; | 11 | #ifdef CONFIG_USER_ONLY |
17 | } | 12 | #define UNALIGN(C) (C)->unalign |
18 | 13 | #else | |
19 | -static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op) | 14 | -#define UNALIGN(C) 0 |
20 | +static bool fold_multiply2(OptContext *ctx, TCGOp *op) | 15 | +#define UNALIGN(C) MO_ALIGN |
21 | { | 16 | #endif |
22 | if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { | 17 | |
23 | - uint32_t a = arg_info(op->args[2])->val; | 18 | /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */ |
24 | - uint32_t b = arg_info(op->args[3])->val; | ||
25 | - uint64_t r = (uint64_t)a * b; | ||
26 | + uint64_t a = arg_info(op->args[2])->val; | ||
27 | + uint64_t b = arg_info(op->args[3])->val; | ||
28 | + uint64_t h, l; | ||
29 | TCGArg rl, rh; | ||
30 | - TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32); | ||
31 | + TCGOp *op2; | ||
32 | + | ||
33 | + switch (op->opc) { | ||
34 | + case INDEX_op_mulu2_i32: | ||
35 | + l = (uint64_t)(uint32_t)a * (uint32_t)b; | ||
36 | + h = (int32_t)(l >> 32); | ||
37 | + l = (int32_t)l; | ||
38 | + break; | ||
39 | + case INDEX_op_muls2_i32: | ||
40 | + l = (int64_t)(int32_t)a * (int32_t)b; | ||
41 | + h = l >> 32; | ||
42 | + l = (int32_t)l; | ||
43 | + break; | ||
44 | + case INDEX_op_mulu2_i64: | ||
45 | + mulu64(&l, &h, a, b); | ||
46 | + break; | ||
47 | + case INDEX_op_muls2_i64: | ||
48 | + muls64(&l, &h, a, b); | ||
49 | + break; | ||
50 | + default: | ||
51 | + g_assert_not_reached(); | ||
52 | + } | ||
53 | |||
54 | rl = op->args[0]; | ||
55 | rh = op->args[1]; | ||
56 | - tcg_opt_gen_movi(ctx, op, rl, (int32_t)r); | ||
57 | - tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32)); | ||
58 | + | ||
59 | + /* The proper opcode is supplied by tcg_opt_gen_mov. */ | ||
60 | + op2 = tcg_op_insert_before(ctx->tcg, op, 0); | ||
61 | + | ||
62 | + tcg_opt_gen_movi(ctx, op, rl, l); | ||
63 | + tcg_opt_gen_movi(ctx, op2, rh, h); | ||
64 | return true; | ||
65 | } | ||
66 | return false; | ||
67 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
68 | CASE_OP_32_64(muluh): | ||
69 | done = fold_mul_highpart(&ctx, op); | ||
70 | break; | ||
71 | - case INDEX_op_mulu2_i32: | ||
72 | - done = fold_mulu2_i32(&ctx, op); | ||
73 | + CASE_OP_32_64(muls2): | ||
74 | + CASE_OP_32_64(mulu2): | ||
75 | + done = fold_multiply2(&ctx, op); | ||
76 | break; | ||
77 | CASE_OP_32_64(nand): | ||
78 | done = fold_nand(&ctx, op); | ||
79 | -- | 19 | -- |
80 | 2.25.1 | 20 | 2.34.1 |
81 | |||
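What fold_multiply2 above computes for the 32-bit opcodes is the full double-width product, split into low and high words exactly as in the hunk. A standalone check of that splitting arithmetic, using the worst-case unsigned operands:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t a = 0xffffffffu, b = 0xffffffffu;

        /* mulu2_i32: 32x32 -> 64-bit unsigned product as two halves. */
        uint64_t p  = (uint64_t)a * b;
        uint32_t lo = (uint32_t)p;
        uint32_t hi = (uint32_t)(p >> 32);

        assert(p  == 0xfffffffe00000001ull);
        assert(lo == 0x00000001u && hi == 0xfffffffeu);
        return 0;
    }

The 64-bit cases go through mulu64()/muls64() in the hunk, which perform the same computation at 64x64 -> 128 width.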
1 | Pull the "op r, a, i => mov r, a" optimization into a function, | ||
---|---|---|---|
2 | and use them in the outer-most logical operations. | ||
3 | |||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 2 | --- |
7 | tcg/optimize.c | 61 +++++++++++++++++++++----------------------------- | 3 | configs/targets/hppa-linux-user.mak | 1 - |
8 | 1 file changed, 26 insertions(+), 35 deletions(-) | 4 | configs/targets/hppa-softmmu.mak | 1 - |
5 | 2 files changed, 2 deletions(-) | ||
9 | 6 | ||
10 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 7 | diff --git a/configs/targets/hppa-linux-user.mak b/configs/targets/hppa-linux-user.mak |
11 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/tcg/optimize.c | 9 | --- a/configs/targets/hppa-linux-user.mak |
13 | +++ b/tcg/optimize.c | 10 | +++ b/configs/targets/hppa-linux-user.mak |
14 | @@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 11 | @@ -XXX,XX +XXX,XX @@ |
15 | return false; | 12 | TARGET_ARCH=hppa |
16 | } | 13 | TARGET_SYSTBL_ABI=common,32 |
17 | 14 | TARGET_SYSTBL=syscall.tbl | |
18 | +/* If the binary operation has second argument @i, fold to identity. */ | 15 | -TARGET_ALIGNED_ONLY=y |
19 | +static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i) | 16 | TARGET_BIG_ENDIAN=y |
20 | +{ | 17 | diff --git a/configs/targets/hppa-softmmu.mak b/configs/targets/hppa-softmmu.mak |
21 | + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { | 18 | index XXXXXXX..XXXXXXX 100644 |
22 | + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); | 19 | --- a/configs/targets/hppa-softmmu.mak |
23 | + } | 20 | +++ b/configs/targets/hppa-softmmu.mak |
24 | + return false; | 21 | @@ -XXX,XX +XXX,XX @@ |
25 | +} | 22 | TARGET_ARCH=hppa |
26 | + | 23 | -TARGET_ALIGNED_ONLY=y |
27 | /* If the binary operation has second argument @i, fold to NOT. */ | 24 | TARGET_BIG_ENDIAN=y |
28 | static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i) | 25 | TARGET_SUPPORTS_MTTCG=y |
29 | { | ||
30 | @@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op) | ||
31 | |||
32 | static bool fold_add(OptContext *ctx, TCGOp *op) | ||
33 | { | ||
34 | - return fold_const2(ctx, op); | ||
35 | + if (fold_const2(ctx, op) || | ||
36 | + fold_xi_to_x(ctx, op, 0)) { | ||
37 | + return true; | ||
38 | + } | ||
39 | + return false; | ||
40 | } | ||
41 | |||
42 | static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add) | ||
43 | @@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op) | ||
44 | { | ||
45 | if (fold_const2(ctx, op) || | ||
46 | fold_xi_to_i(ctx, op, 0) || | ||
47 | + fold_xi_to_x(ctx, op, -1) || | ||
48 | fold_xx_to_x(ctx, op)) { | ||
49 | return true; | ||
50 | } | ||
51 | @@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op) | ||
52 | { | ||
53 | if (fold_const2(ctx, op) || | ||
54 | fold_xx_to_i(ctx, op, 0) || | ||
55 | + fold_xi_to_x(ctx, op, 0) || | ||
56 | fold_ix_to_not(ctx, op, -1)) { | ||
57 | return true; | ||
58 | } | ||
59 | @@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op) | ||
60 | static bool fold_eqv(OptContext *ctx, TCGOp *op) | ||
61 | { | ||
62 | if (fold_const2(ctx, op) || | ||
63 | + fold_xi_to_x(ctx, op, -1) || | ||
64 | fold_xi_to_not(ctx, op, 0)) { | ||
65 | return true; | ||
66 | } | ||
67 | @@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op) | ||
68 | static bool fold_or(OptContext *ctx, TCGOp *op) | ||
69 | { | ||
70 | if (fold_const2(ctx, op) || | ||
71 | + fold_xi_to_x(ctx, op, 0) || | ||
72 | fold_xx_to_x(ctx, op)) { | ||
73 | return true; | ||
74 | } | ||
75 | @@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op) | ||
76 | static bool fold_orc(OptContext *ctx, TCGOp *op) | ||
77 | { | ||
78 | if (fold_const2(ctx, op) || | ||
79 | + fold_xi_to_x(ctx, op, -1) || | ||
80 | fold_ix_to_not(ctx, op, 0)) { | ||
81 | return true; | ||
82 | } | ||
83 | @@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op) | ||
84 | |||
85 | static bool fold_shift(OptContext *ctx, TCGOp *op) | ||
86 | { | ||
87 | - return fold_const2(ctx, op); | ||
88 | + if (fold_const2(ctx, op) || | ||
89 | + fold_xi_to_x(ctx, op, 0)) { | ||
90 | + return true; | ||
91 | + } | ||
92 | + return false; | ||
93 | } | ||
94 | |||
95 | static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) | ||
96 | @@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op) | ||
97 | { | ||
98 | if (fold_const2(ctx, op) || | ||
99 | fold_xx_to_i(ctx, op, 0) || | ||
100 | + fold_xi_to_x(ctx, op, 0) || | ||
101 | fold_sub_to_neg(ctx, op)) { | ||
102 | return true; | ||
103 | } | ||
104 | @@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op) | ||
105 | { | ||
106 | if (fold_const2(ctx, op) || | ||
107 | fold_xx_to_i(ctx, op, 0) || | ||
108 | + fold_xi_to_x(ctx, op, 0) || | ||
109 | fold_xi_to_not(ctx, op, -1)) { | ||
110 | return true; | ||
111 | } | ||
112 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
113 | break; | ||
114 | } | ||
115 | |||
116 | - /* Simplify expression for "op r, a, const => mov r, a" cases */ | ||
117 | - switch (opc) { | ||
118 | - CASE_OP_32_64_VEC(add): | ||
119 | - CASE_OP_32_64_VEC(sub): | ||
120 | - CASE_OP_32_64_VEC(or): | ||
121 | - CASE_OP_32_64_VEC(xor): | ||
122 | - CASE_OP_32_64_VEC(andc): | ||
123 | - CASE_OP_32_64(shl): | ||
124 | - CASE_OP_32_64(shr): | ||
125 | - CASE_OP_32_64(sar): | ||
126 | - CASE_OP_32_64(rotl): | ||
127 | - CASE_OP_32_64(rotr): | ||
128 | - if (!arg_is_const(op->args[1]) | ||
129 | - && arg_is_const(op->args[2]) | ||
130 | - && arg_info(op->args[2])->val == 0) { | ||
131 | - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
132 | - continue; | ||
133 | - } | ||
134 | - break; | ||
135 | - CASE_OP_32_64_VEC(and): | ||
136 | - CASE_OP_32_64_VEC(orc): | ||
137 | - CASE_OP_32_64(eqv): | ||
138 | - if (!arg_is_const(op->args[1]) | ||
139 | - && arg_is_const(op->args[2]) | ||
140 | - && arg_info(op->args[2])->val == -1) { | ||
141 | - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
142 | - continue; | ||
143 | - } | ||
144 | - break; | ||
145 | - default: | ||
146 | - break; | ||
147 | - } | ||
148 | - | ||
149 | /* Simplify using known-zero bits. Currently only ops with a single | ||
150 | output argument is supported. */ | ||
151 | z_mask = -1; | ||
152 | -- | 26 | -- |
153 | 2.25.1 | 27 | 2.34.1 |
154 | |||
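fold_xi_to_x above centralizes one rule: when the second operand is the identity element i for the operation, the op degenerates to a move. The identities wired up by this patch, checked in plain C:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x  = 0x0123456789abcdefull;
        uint64_t m1 = ~(uint64_t)0;             /* all-ones, i.e. -1 */

        assert((x + 0) == x);                   /* add  r, a, 0  */
        assert((x - 0) == x);                   /* sub  r, a, 0  */
        assert((x | 0) == x);                   /* or   r, a, 0  */
        assert((x ^ 0) == x);                   /* xor  r, a, 0  */
        assert((x << 0) == x && (x >> 0) == x); /* shl/shr r, a, 0 */
        assert((x & m1) == x);                  /* and  r, a, -1 */
        assert((x & ~(uint64_t)0) == x);        /* andc r, a, 0: a & ~0    */
        assert((x | ~m1) == x);                 /* orc  r, a, -1: a | ~(-1) */
        assert(~(x ^ m1) == x);                 /* eqv  r, a, -1: ~(a ^ -1) */
        return 0;
    }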
1 | Split out the conditional conversion from a more complex logical | 1 | Acked-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> |
---|---|---|---|
2 | operation to a simple NOT. Create a couple more helpers to make | ||
3 | this easy for the outer-most logical operations. | ||
4 | |||
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 3 | --- |
8 | tcg/optimize.c | 158 +++++++++++++++++++++++++++---------------------- | 4 | target/sparc/translate.c | 66 +++++++++++++++++++++------------------- |
9 | 1 file changed, 86 insertions(+), 72 deletions(-) | 5 | 1 file changed, 34 insertions(+), 32 deletions(-) |
10 | 6 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 7 | diff --git a/target/sparc/translate.c b/target/sparc/translate.c |
12 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 9 | --- a/target/sparc/translate.c |
14 | +++ b/tcg/optimize.c | 10 | +++ b/target/sparc/translate.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op) | 11 | @@ -XXX,XX +XXX,XX @@ static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, |
16 | return false; | 12 | TCGv addr, int mmu_idx, MemOp memop) |
13 | { | ||
14 | gen_address_mask(dc, addr); | ||
15 | - tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop); | ||
16 | + tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN); | ||
17 | } | 17 | } |
18 | 18 | ||
19 | +/* | 19 | static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx) |
20 | + * Convert @op to NOT, if NOT is supported by the host. | 20 | @@ -XXX,XX +XXX,XX @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, |
21 | + * Return true f the conversion is successful, which will still | 21 | break; |
22 | + * indicate that the processing is complete. | 22 | case GET_ASI_DIRECT: |
23 | + */ | 23 | gen_address_mask(dc, addr); |
24 | +static bool fold_not(OptContext *ctx, TCGOp *op); | 24 | - tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop); |
25 | +static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx) | 25 | + tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN); |
26 | +{ | 26 | break; |
27 | + TCGOpcode not_op; | 27 | default: |
28 | + bool have_not; | 28 | { |
29 | + | 29 | TCGv_i32 r_asi = tcg_constant_i32(da.asi); |
30 | + switch (ctx->type) { | 30 | - TCGv_i32 r_mop = tcg_constant_i32(memop); |
31 | + case TCG_TYPE_I32: | 31 | + TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); |
32 | + not_op = INDEX_op_not_i32; | 32 | |
33 | + have_not = TCG_TARGET_HAS_not_i32; | 33 | save_state(dc); |
34 | + break; | 34 | #ifdef TARGET_SPARC64 |
35 | + case TCG_TYPE_I64: | 35 | @@ -XXX,XX +XXX,XX @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, |
36 | + not_op = INDEX_op_not_i64; | 36 | /* fall through */ |
37 | + have_not = TCG_TARGET_HAS_not_i64; | 37 | case GET_ASI_DIRECT: |
38 | + break; | 38 | gen_address_mask(dc, addr); |
39 | + case TCG_TYPE_V64: | 39 | - tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop); |
40 | + case TCG_TYPE_V128: | 40 | + tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN); |
41 | + case TCG_TYPE_V256: | 41 | break; |
42 | + not_op = INDEX_op_not_vec; | 42 | #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) |
43 | + have_not = TCG_TARGET_HAS_not_vec; | 43 | case GET_ASI_BCOPY: |
44 | + break; | 44 | @@ -XXX,XX +XXX,XX @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, |
45 | + default: | 45 | default: |
46 | + g_assert_not_reached(); | 46 | { |
47 | + } | 47 | TCGv_i32 r_asi = tcg_constant_i32(da.asi); |
48 | + if (have_not) { | 48 | - TCGv_i32 r_mop = tcg_constant_i32(memop & MO_SIZE); |
49 | + op->opc = not_op; | 49 | + TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); |
50 | + op->args[1] = op->args[idx]; | 50 | |
51 | + return fold_not(ctx, op); | 51 | save_state(dc); |
52 | + } | 52 | #ifdef TARGET_SPARC64 |
53 | + return false; | 53 | @@ -XXX,XX +XXX,XX @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, |
54 | +} | 54 | case GET_ASI_DIRECT: |
55 | + | 55 | oldv = tcg_temp_new(); |
56 | +/* If the binary operation has first argument @i, fold to NOT. */ | 56 | tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), |
57 | +static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i) | 57 | - da.mem_idx, da.memop); |
58 | +{ | 58 | + da.mem_idx, da.memop | MO_ALIGN); |
59 | + if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { | 59 | gen_store_gpr(dc, rd, oldv); |
60 | + return fold_to_not(ctx, op, 2); | 60 | break; |
61 | + } | 61 | default: |
62 | + return false; | 62 | @@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, |
63 | +} | 63 | switch (size) { |
64 | + | 64 | case 4: |
65 | /* If the binary operation has second argument @i, fold to @i. */ | 65 | d32 = gen_dest_fpr_F(dc); |
66 | static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 66 | - tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop); |
67 | { | 67 | + tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); |
68 | @@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 68 | gen_store_fpr_F(dc, rd, d32); |
69 | return false; | 69 | break; |
70 | } | 70 | case 8: |
71 | 71 | @@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, | |
72 | +/* If the binary operation has second argument @i, fold to NOT. */ | 72 | /* Valid for lddfa only. */ |
73 | +static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i) | 73 | if (size == 8) { |
74 | +{ | 74 | gen_address_mask(dc, addr); |
75 | + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { | 75 | - tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop); |
76 | + return fold_to_not(ctx, op, 1); | 76 | + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, |
77 | + } | 77 | + da.memop | MO_ALIGN); |
78 | + return false; | 78 | } else { |
79 | +} | 79 | gen_exception(dc, TT_ILL_INSN); |
80 | + | 80 | } |
81 | /* If the binary operation has both arguments equal, fold to @i. */ | 81 | @@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, |
82 | static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 82 | default: |
83 | { | 83 | { |
84 | @@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op) | 84 | TCGv_i32 r_asi = tcg_constant_i32(da.asi); |
85 | static bool fold_andc(OptContext *ctx, TCGOp *op) | 85 | - TCGv_i32 r_mop = tcg_constant_i32(da.memop); |
86 | { | 86 | + TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN); |
87 | if (fold_const2(ctx, op) || | 87 | |
88 | - fold_xx_to_i(ctx, op, 0)) { | 88 | save_state(dc); |
89 | + fold_xx_to_i(ctx, op, 0) || | 89 | /* According to the table in the UA2011 manual, the only |
90 | + fold_ix_to_not(ctx, op, -1)) { | 90 | @@ -XXX,XX +XXX,XX @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, |
91 | return true; | 91 | switch (size) { |
92 | } | 92 | case 4: |
93 | return false; | 93 | d32 = gen_load_fpr_F(dc, rd); |
94 | @@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op) | 94 | - tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop); |
95 | 95 | + tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); | |
96 | static bool fold_eqv(OptContext *ctx, TCGOp *op) | 96 | break; |
97 | { | 97 | case 8: |
98 | - return fold_const2(ctx, op); | 98 | tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, |
99 | + if (fold_const2(ctx, op) || | 99 | @@ -XXX,XX +XXX,XX @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, |
100 | + fold_xi_to_not(ctx, op, 0)) { | 100 | /* Valid for stdfa only. */ |
101 | + return true; | 101 | if (size == 8) { |
102 | + } | 102 | gen_address_mask(dc, addr); |
103 | + return false; | 103 | - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop); |
104 | } | 104 | + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, |
105 | 105 | + da.memop | MO_ALIGN); | |
106 | static bool fold_extract(OptContext *ctx, TCGOp *op) | 106 | } else { |
107 | @@ -XXX,XX +XXX,XX @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op) | 107 | gen_exception(dc, TT_ILL_INSN); |
108 | 108 | } | |
109 | static bool fold_nand(OptContext *ctx, TCGOp *op) | 109 | @@ -XXX,XX +XXX,XX @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) |
110 | { | 110 | TCGv_i64 tmp = tcg_temp_new_i64(); |
111 | - return fold_const2(ctx, op); | 111 | |
112 | + if (fold_const2(ctx, op) || | 112 | gen_address_mask(dc, addr); |
113 | + fold_xi_to_not(ctx, op, -1)) { | 113 | - tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop); |
114 | + return true; | 114 | + tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN); |
115 | + } | 115 | |
116 | + return false; | 116 | /* Note that LE ldda acts as if each 32-bit register |
117 | } | 117 | result is byte swapped. Having just performed one |
118 | 118 | @@ -XXX,XX +XXX,XX @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, | |
119 | static bool fold_neg(OptContext *ctx, TCGOp *op) | 119 | tcg_gen_concat32_i64(t64, hi, lo); |
120 | @@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op) | ||
121 | |||
122 | static bool fold_nor(OptContext *ctx, TCGOp *op) | ||
123 | { | ||
124 | - return fold_const2(ctx, op); | ||
125 | + if (fold_const2(ctx, op) || | ||
126 | + fold_xi_to_not(ctx, op, 0)) { | ||
127 | + return true; | ||
128 | + } | ||
129 | + return false; | ||
130 | } | ||
131 | |||
132 | static bool fold_not(OptContext *ctx, TCGOp *op) | ||
133 | { | ||
134 | - return fold_const1(ctx, op); | ||
135 | + if (fold_const1(ctx, op)) { | ||
136 | + return true; | ||
137 | + } | ||
138 | + | ||
139 | + /* Because of fold_to_not, we want to always return true, via finish. */ | ||
140 | + finish_folding(ctx, op); | ||
141 | + return true; | ||
142 | } | ||
143 | |||
144 | static bool fold_or(OptContext *ctx, TCGOp *op) | ||
145 | @@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op) | ||
146 | |||
147 | static bool fold_orc(OptContext *ctx, TCGOp *op) | ||
148 | { | ||
149 | - return fold_const2(ctx, op); | ||
150 | + if (fold_const2(ctx, op) || | ||
151 | + fold_ix_to_not(ctx, op, 0)) { | ||
152 | + return true; | ||
153 | + } | ||
154 | + return false; | ||
155 | } | ||
156 | |||
157 | static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) | ||
158 | @@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op) | ||
159 | static bool fold_xor(OptContext *ctx, TCGOp *op) | ||
160 | { | ||
161 | if (fold_const2(ctx, op) || | ||
162 | - fold_xx_to_i(ctx, op, 0)) { | ||
163 | + fold_xx_to_i(ctx, op, 0) || | ||
164 | + fold_xi_to_not(ctx, op, -1)) { | ||
165 | return true; | ||
166 | } | ||
167 | return false; | ||
168 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
169 | } | ||
170 | } | 120 | } |
171 | break; | 121 | gen_address_mask(dc, addr); |
172 | - CASE_OP_32_64_VEC(xor): | 122 | - tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop); |
173 | - CASE_OP_32_64(nand): | 123 | + tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); |
174 | - if (!arg_is_const(op->args[1]) | ||
175 | - && arg_is_const(op->args[2]) | ||
176 | - && arg_info(op->args[2])->val == -1) { | ||
177 | - i = 1; | ||
178 | - goto try_not; | ||
179 | - } | ||
180 | - break; | ||
181 | - CASE_OP_32_64(nor): | ||
182 | - if (!arg_is_const(op->args[1]) | ||
183 | - && arg_is_const(op->args[2]) | ||
184 | - && arg_info(op->args[2])->val == 0) { | ||
185 | - i = 1; | ||
186 | - goto try_not; | ||
187 | - } | ||
188 | - break; | ||
189 | - CASE_OP_32_64_VEC(andc): | ||
190 | - if (!arg_is_const(op->args[2]) | ||
191 | - && arg_is_const(op->args[1]) | ||
192 | - && arg_info(op->args[1])->val == -1) { | ||
193 | - i = 2; | ||
194 | - goto try_not; | ||
195 | - } | ||
196 | - break; | ||
197 | - CASE_OP_32_64_VEC(orc): | ||
198 | - CASE_OP_32_64(eqv): | ||
199 | - if (!arg_is_const(op->args[2]) | ||
200 | - && arg_is_const(op->args[1]) | ||
201 | - && arg_info(op->args[1])->val == 0) { | ||
202 | - i = 2; | ||
203 | - goto try_not; | ||
204 | - } | ||
205 | - break; | ||
206 | - try_not: | ||
207 | - { | ||
208 | - TCGOpcode not_op; | ||
209 | - bool have_not; | ||
210 | - | ||
211 | - switch (ctx.type) { | ||
212 | - case TCG_TYPE_I32: | ||
213 | - not_op = INDEX_op_not_i32; | ||
214 | - have_not = TCG_TARGET_HAS_not_i32; | ||
215 | - break; | ||
216 | - case TCG_TYPE_I64: | ||
217 | - not_op = INDEX_op_not_i64; | ||
218 | - have_not = TCG_TARGET_HAS_not_i64; | ||
219 | - break; | ||
220 | - case TCG_TYPE_V64: | ||
221 | - case TCG_TYPE_V128: | ||
222 | - case TCG_TYPE_V256: | ||
223 | - not_op = INDEX_op_not_vec; | ||
224 | - have_not = TCG_TARGET_HAS_not_vec; | ||
225 | - break; | ||
226 | - default: | ||
227 | - g_assert_not_reached(); | ||
228 | - } | ||
229 | - if (!have_not) { | ||
230 | - break; | ||
231 | - } | ||
232 | - op->opc = not_op; | ||
233 | - reset_temp(op->args[0]); | ||
234 | - op->args[1] = op->args[i]; | ||
235 | - continue; | ||
236 | - } | ||
237 | default: | ||
238 | break; | ||
239 | } | 124 | } |
125 | break; | ||
126 | |||
127 | @@ -XXX,XX +XXX,XX @@ static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, | ||
128 | case GET_ASI_DIRECT: | ||
129 | oldv = tcg_temp_new(); | ||
130 | tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), | ||
131 | - da.mem_idx, da.memop); | ||
132 | + da.mem_idx, da.memop | MO_ALIGN); | ||
133 | gen_store_gpr(dc, rd, oldv); | ||
134 | break; | ||
135 | default: | ||
136 | @@ -XXX,XX +XXX,XX @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) | ||
137 | return; | ||
138 | case GET_ASI_DIRECT: | ||
139 | gen_address_mask(dc, addr); | ||
140 | - tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop); | ||
141 | + tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); | ||
142 | break; | ||
143 | default: | ||
144 | { | ||
145 | @@ -XXX,XX +XXX,XX @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, | ||
146 | break; | ||
147 | case GET_ASI_DIRECT: | ||
148 | gen_address_mask(dc, addr); | ||
149 | - tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop); | ||
150 | + tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); | ||
151 | break; | ||
152 | case GET_ASI_BFILL: | ||
153 | /* Store 32 bytes of T64 to ADDR. */ | ||
154 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
155 | case 0x0: /* ld, V9 lduw, load unsigned word */ | ||
156 | gen_address_mask(dc, cpu_addr); | ||
157 | tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
158 | - dc->mem_idx, MO_TEUL); | ||
159 | + dc->mem_idx, MO_TEUL | MO_ALIGN); | ||
160 | break; | ||
161 | case 0x1: /* ldub, load unsigned byte */ | ||
162 | gen_address_mask(dc, cpu_addr); | ||
163 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
164 | case 0x2: /* lduh, load unsigned halfword */ | ||
165 | gen_address_mask(dc, cpu_addr); | ||
166 | tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
167 | - dc->mem_idx, MO_TEUW); | ||
168 | + dc->mem_idx, MO_TEUW | MO_ALIGN); | ||
169 | break; | ||
170 | case 0x3: /* ldd, load double word */ | ||
171 | if (rd & 1) | ||
172 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
173 | gen_address_mask(dc, cpu_addr); | ||
174 | t64 = tcg_temp_new_i64(); | ||
175 | tcg_gen_qemu_ld_i64(t64, cpu_addr, | ||
176 | - dc->mem_idx, MO_TEUQ); | ||
177 | + dc->mem_idx, MO_TEUQ | MO_ALIGN); | ||
178 | tcg_gen_trunc_i64_tl(cpu_val, t64); | ||
179 | tcg_gen_ext32u_tl(cpu_val, cpu_val); | ||
180 | gen_store_gpr(dc, rd + 1, cpu_val); | ||
181 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
182 | case 0xa: /* ldsh, load signed halfword */ | ||
183 | gen_address_mask(dc, cpu_addr); | ||
184 | tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
185 | - dc->mem_idx, MO_TESW); | ||
186 | + dc->mem_idx, MO_TESW | MO_ALIGN); | ||
187 | break; | ||
188 | case 0xd: /* ldstub */ | ||
189 | gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx); | ||
190 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
191 | case 0x08: /* V9 ldsw */ | ||
192 | gen_address_mask(dc, cpu_addr); | ||
193 | tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
194 | - dc->mem_idx, MO_TESL); | ||
195 | + dc->mem_idx, MO_TESL | MO_ALIGN); | ||
196 | break; | ||
197 | case 0x0b: /* V9 ldx */ | ||
198 | gen_address_mask(dc, cpu_addr); | ||
199 | tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, | ||
200 | - dc->mem_idx, MO_TEUQ); | ||
201 | + dc->mem_idx, MO_TEUQ | MO_ALIGN); | ||
202 | break; | ||
203 | case 0x18: /* V9 ldswa */ | ||
204 | gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL); | ||
205 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
206 | gen_address_mask(dc, cpu_addr); | ||
207 | cpu_dst_32 = gen_dest_fpr_F(dc); | ||
208 | tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, | ||
209 | - dc->mem_idx, MO_TEUL); | ||
210 | + dc->mem_idx, MO_TEUL | MO_ALIGN); | ||
211 | gen_store_fpr_F(dc, rd, cpu_dst_32); | ||
212 | break; | ||
213 | case 0x21: /* ldfsr, V9 ldxfsr */ | ||
214 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
215 | if (rd == 1) { | ||
216 | TCGv_i64 t64 = tcg_temp_new_i64(); | ||
217 | tcg_gen_qemu_ld_i64(t64, cpu_addr, | ||
218 | - dc->mem_idx, MO_TEUQ); | ||
219 | + dc->mem_idx, MO_TEUQ | MO_ALIGN); | ||
220 | gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64); | ||
221 | break; | ||
222 | } | ||
223 | #endif | ||
224 | cpu_dst_32 = tcg_temp_new_i32(); | ||
225 | tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, | ||
226 | - dc->mem_idx, MO_TEUL); | ||
227 | + dc->mem_idx, MO_TEUL | MO_ALIGN); | ||
228 | gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32); | ||
229 | break; | ||
230 | case 0x22: /* ldqf, load quad fpreg */ | ||
231 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
232 | case 0x4: /* st, store word */ | ||
233 | gen_address_mask(dc, cpu_addr); | ||
234 | tcg_gen_qemu_st_tl(cpu_val, cpu_addr, | ||
235 | - dc->mem_idx, MO_TEUL); | ||
236 | + dc->mem_idx, MO_TEUL | MO_ALIGN); | ||
237 | break; | ||
238 | case 0x5: /* stb, store byte */ | ||
239 | gen_address_mask(dc, cpu_addr); | ||
240 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
241 | case 0x6: /* sth, store halfword */ | ||
242 | gen_address_mask(dc, cpu_addr); | ||
243 | tcg_gen_qemu_st_tl(cpu_val, cpu_addr, | ||
244 | - dc->mem_idx, MO_TEUW); | ||
245 | + dc->mem_idx, MO_TEUW | MO_ALIGN); | ||
246 | break; | ||
247 | case 0x7: /* std, store double word */ | ||
248 | if (rd & 1) | ||
249 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
250 | t64 = tcg_temp_new_i64(); | ||
251 | tcg_gen_concat_tl_i64(t64, lo, cpu_val); | ||
252 | tcg_gen_qemu_st_i64(t64, cpu_addr, | ||
253 | - dc->mem_idx, MO_TEUQ); | ||
254 | + dc->mem_idx, MO_TEUQ | MO_ALIGN); | ||
255 | } | ||
256 | break; | ||
257 | #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) | ||
258 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
259 | case 0x0e: /* V9 stx */ | ||
260 | gen_address_mask(dc, cpu_addr); | ||
261 | tcg_gen_qemu_st_tl(cpu_val, cpu_addr, | ||
262 | - dc->mem_idx, MO_TEUQ); | ||
263 | + dc->mem_idx, MO_TEUQ | MO_ALIGN); | ||
264 | break; | ||
265 | case 0x1e: /* V9 stxa */ | ||
266 | gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); | ||
267 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
268 | gen_address_mask(dc, cpu_addr); | ||
269 | cpu_src1_32 = gen_load_fpr_F(dc, rd); | ||
270 | tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr, | ||
271 | - dc->mem_idx, MO_TEUL); | ||
272 | + dc->mem_idx, MO_TEUL | MO_ALIGN); | ||
273 | break; | ||
274 | case 0x25: /* stfsr, V9 stxfsr */ | ||
275 | { | ||
276 | @@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) | ||
277 | gen_address_mask(dc, cpu_addr); | ||
278 | if (rd == 1) { | ||
279 | tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, | ||
280 | - dc->mem_idx, MO_TEUQ); | ||
281 | + dc->mem_idx, MO_TEUQ | MO_ALIGN); | ||
282 | break; | ||
283 | } | ||
284 | #endif | ||
285 | tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, | ||
286 | - dc->mem_idx, MO_TEUL); | ||
287 | + dc->mem_idx, MO_TEUL | MO_ALIGN); | ||
288 | } | ||
289 | break; | ||
290 | case 0x26: | ||
240 | -- | 291 | -- |
241 | 2.25.1 | 292 | 2.34.1 |
242 | |||
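The folding rewrite above leans on a small set of algebraic identities: a
two-operand operation with one argument pinned to 0 or -1 degenerates into
NOT of the other argument.  A minimal standalone check of the six cases
(plain C for illustration, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0x1234567890abcdefULL;

        assert((x ^ ~0ULL) == ~x);      /* xor(x, -1)  -> not(x) */
        assert(~(x & ~0ULL) == ~x);     /* nand(x, -1) -> not(x) */
        assert(~(x | 0) == ~x);         /* nor(x, 0)   -> not(x) */
        assert(~(x ^ 0) == ~x);         /* eqv(x, 0)   -> not(x) */
        assert((~0ULL & ~x) == ~x);     /* andc(-1, x) -> not(x) */
        assert((0 | ~x) == ~x);         /* orc(0, x)   -> not(x) */
        return 0;
    }

These are exactly the shapes that fold_xi_to_not (constant in the second
operand) and fold_ix_to_not (constant in the first) recognize before
handing the rewritten op to fold_not.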
1 | This is the final entry in the main switch that was in a | 1 | This passes the memop, given as an argument to |
---|---|---|---|
2 | different form. After this, we have the option to convert | 2 | helper_ld_asi, on to the ultimate load primitive. |
3 | the switch into a function dispatch table. | ||
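
For illustration, the dispatch table mentioned above could look roughly
like the sketch below.  This is only a sketch with toy opcode and handler
names, not TCG's; the series itself keeps the switch:

    #include <stdbool.h>
    #include <stdio.h>

    enum { OP_ADD, OP_MOV, OP_COUNT };          /* toy opcode space */

    typedef bool (*fold_fn)(long *args);

    static bool fold_add(long *a) { a[0] = a[1] + a[2]; return true; }
    static bool fold_mov(long *a) { a[0] = a[1]; return true; }

    static const fold_fn fold_table[OP_COUNT] = {
        [OP_ADD] = fold_add,
        [OP_MOV] = fold_mov,
    };

    int main(void)
    {
        long args[3] = { 0, 2, 3 };
        /* One indexed call replaces the big switch. */
        bool done = fold_table[OP_ADD] && fold_table[OP_ADD](args);
        printf("done=%d result=%ld\n", done, args[0]);
        return 0;
    }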
4 | 3 | ||
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 4 | Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> |
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 6 | --- |
9 | tcg/optimize.c | 27 ++++++++++++++------------- | 7 | target/sparc/ldst_helper.c | 10 ++++++---- |
10 | 1 file changed, 14 insertions(+), 13 deletions(-) | 8 | 1 file changed, 6 insertions(+), 4 deletions(-) |
11 | 9 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c |
13 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 12 | --- a/target/sparc/ldst_helper.c |
15 | +++ b/tcg/optimize.c | 13 | +++ b/target/sparc/ldst_helper.c |
16 | @@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op) | 14 | @@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, |
17 | return true; | 15 | #if defined(DEBUG_MXCC) || defined(DEBUG_ASI) |
18 | } | 16 | uint32_t last_addr = addr; |
19 | 17 | #endif | |
20 | +static bool fold_mov(OptContext *ctx, TCGOp *op) | 18 | + MemOpIdx oi; |
21 | +{ | 19 | |
22 | + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); | 20 | do_check_align(env, addr, size - 1, GETPC()); |
23 | +} | 21 | switch (asi) { |
24 | + | 22 | @@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, |
25 | static bool fold_movcond(OptContext *ctx, TCGOp *op) | 23 | case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */ |
26 | { | 24 | break; |
27 | TCGOpcode opc = op->opc; | 25 | case ASI_KERNELTXT: /* Supervisor code access */ |
28 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 26 | + oi = make_memop_idx(memop, cpu_mmu_index(env, true)); |
27 | switch (size) { | ||
28 | case 1: | ||
29 | - ret = cpu_ldub_code(env, addr); | ||
30 | + ret = cpu_ldb_code_mmu(env, addr, oi, GETPC()); | ||
31 | break; | ||
32 | case 2: | ||
33 | - ret = cpu_lduw_code(env, addr); | ||
34 | + ret = cpu_ldw_code_mmu(env, addr, oi, GETPC()); | ||
35 | break; | ||
36 | default: | ||
37 | case 4: | ||
38 | - ret = cpu_ldl_code(env, addr); | ||
39 | + ret = cpu_ldl_code_mmu(env, addr, oi, GETPC()); | ||
40 | break; | ||
41 | case 8: | ||
42 | - ret = cpu_ldq_code(env, addr); | ||
43 | + ret = cpu_ldq_code_mmu(env, addr, oi, GETPC()); | ||
29 | break; | 44 | break; |
30 | } | 45 | } |
31 | 46 | break; | |
32 | - /* Propagate constants through copy operations and do constant | ||
33 | - folding. Constants will be substituted to arguments by register | ||
34 | - allocator where needed and possible. Also detect copies. */ | ||
35 | + /* | ||
36 | + * Process each opcode. | ||
37 | + * Sorted alphabetically by opcode as much as possible. | ||
38 | + */ | ||
39 | switch (opc) { | ||
40 | - CASE_OP_32_64_VEC(mov): | ||
41 | - done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
42 | - break; | ||
43 | - | ||
44 | - default: | ||
45 | - break; | ||
46 | - | ||
47 | - /* ---------------------------------------------------------- */ | ||
48 | - /* Sorted alphabetically by opcode as much as possible. */ | ||
49 | - | ||
50 | CASE_OP_32_64_VEC(add): | ||
51 | done = fold_add(&ctx, op); | ||
52 | break; | ||
53 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
54 | case INDEX_op_mb: | ||
55 | done = fold_mb(&ctx, op); | ||
56 | break; | ||
57 | + CASE_OP_32_64_VEC(mov): | ||
58 | + done = fold_mov(&ctx, op); | ||
59 | + break; | ||
60 | CASE_OP_32_64(movcond): | ||
61 | done = fold_movcond(&ctx, op); | ||
62 | break; | ||
63 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
64 | CASE_OP_32_64_VEC(xor): | ||
65 | done = fold_xor(&ctx, op); | ||
66 | break; | ||
67 | + default: | ||
68 | + break; | ||
69 | } | ||
70 | |||
71 | if (!done) { | ||
72 | -- | 47 | -- |
73 | 2.25.1 | 48 | 2.34.1 |
74 | |||
1 | Compute the type of the operation early. | 1 | Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> |
---|---|---|---|
2 | |||
3 | There are at least 4 places that used a def->flags ladder | ||
4 | to determine the type of the operation being optimized. | ||
5 | |||
6 | There were two places that assumed !TCG_OPF_64BIT means | ||
7 | TCG_TYPE_I32, and so could potentially compute incorrect | ||
8 | results for vector operations. | ||
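
The 32-bit case is where a wrong type classification bites hardest:
do_constant_folding() computes in 64 bits and then re-extends the result
for TCG_TYPE_I32.  A standalone illustration of that re-extension step
(plain C, mirroring the res = (int32_t)res line in the patch):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t x = 0x7fffffff, y = 1;
        uint64_t res = x + y;            /* raw 64-bit sum: 0x80000000 */
        uint64_t res32 = (int32_t)res;   /* I32 view, sign-extended */
        printf("%#" PRIx64 " %#" PRIx64 "\n", res, res32);
        /* prints 0x80000000 0xffffffff80000000 */
        return 0;
    }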
9 | |||
10 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 3 | --- |
13 | tcg/optimize.c | 149 +++++++++++++++++++++++++++++-------------------- | 4 | configs/targets/sparc-linux-user.mak | 1 - |
14 | 1 file changed, 89 insertions(+), 60 deletions(-) | 5 | configs/targets/sparc-softmmu.mak | 1 - |
6 | configs/targets/sparc32plus-linux-user.mak | 1 - | ||
7 | configs/targets/sparc64-linux-user.mak | 1 - | ||
8 | configs/targets/sparc64-softmmu.mak | 1 - | ||
9 | 5 files changed, 5 deletions(-) | ||
15 | 10 | ||
16 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/configs/targets/sparc-linux-user.mak b/configs/targets/sparc-linux-user.mak |
17 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/tcg/optimize.c | 13 | --- a/configs/targets/sparc-linux-user.mak |
19 | +++ b/tcg/optimize.c | 14 | +++ b/configs/targets/sparc-linux-user.mak |
20 | @@ -XXX,XX +XXX,XX @@ typedef struct OptContext { | 15 | @@ -XXX,XX +XXX,XX @@ |
21 | 16 | TARGET_ARCH=sparc | |
22 | /* In flight values from optimization. */ | 17 | TARGET_SYSTBL_ABI=common,32 |
23 | uint64_t z_mask; | 18 | TARGET_SYSTBL=syscall.tbl |
24 | + TCGType type; | 19 | -TARGET_ALIGNED_ONLY=y |
25 | } OptContext; | 20 | TARGET_BIG_ENDIAN=y |
26 | 21 | diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak | |
27 | static inline TempOptInfo *ts_info(TCGTemp *ts) | 22 | index XXXXXXX..XXXXXXX 100644 |
28 | @@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | 23 | --- a/configs/targets/sparc-softmmu.mak |
29 | { | 24 | +++ b/configs/targets/sparc-softmmu.mak |
30 | TCGTemp *dst_ts = arg_temp(dst); | 25 | @@ -XXX,XX +XXX,XX @@ |
31 | TCGTemp *src_ts = arg_temp(src); | 26 | TARGET_ARCH=sparc |
32 | - const TCGOpDef *def; | 27 | -TARGET_ALIGNED_ONLY=y |
33 | TempOptInfo *di; | 28 | TARGET_BIG_ENDIAN=y |
34 | TempOptInfo *si; | 29 | diff --git a/configs/targets/sparc32plus-linux-user.mak b/configs/targets/sparc32plus-linux-user.mak |
35 | uint64_t z_mask; | 30 | index XXXXXXX..XXXXXXX 100644 |
36 | @@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | 31 | --- a/configs/targets/sparc32plus-linux-user.mak |
37 | reset_ts(dst_ts); | 32 | +++ b/configs/targets/sparc32plus-linux-user.mak |
38 | di = ts_info(dst_ts); | 33 | @@ -XXX,XX +XXX,XX @@ TARGET_BASE_ARCH=sparc |
39 | si = ts_info(src_ts); | 34 | TARGET_ABI_DIR=sparc |
40 | - def = &tcg_op_defs[op->opc]; | 35 | TARGET_SYSTBL_ABI=common,32 |
41 | - if (def->flags & TCG_OPF_VECTOR) { | 36 | TARGET_SYSTBL=syscall.tbl |
42 | - new_op = INDEX_op_mov_vec; | 37 | -TARGET_ALIGNED_ONLY=y |
43 | - } else if (def->flags & TCG_OPF_64BIT) { | 38 | TARGET_BIG_ENDIAN=y |
44 | - new_op = INDEX_op_mov_i64; | 39 | diff --git a/configs/targets/sparc64-linux-user.mak b/configs/targets/sparc64-linux-user.mak |
45 | - } else { | 40 | index XXXXXXX..XXXXXXX 100644 |
46 | + | 41 | --- a/configs/targets/sparc64-linux-user.mak |
47 | + switch (ctx->type) { | 42 | +++ b/configs/targets/sparc64-linux-user.mak |
48 | + case TCG_TYPE_I32: | 43 | @@ -XXX,XX +XXX,XX @@ TARGET_BASE_ARCH=sparc |
49 | new_op = INDEX_op_mov_i32; | 44 | TARGET_ABI_DIR=sparc |
50 | + break; | 45 | TARGET_SYSTBL_ABI=common,64 |
51 | + case TCG_TYPE_I64: | 46 | TARGET_SYSTBL=syscall.tbl |
52 | + new_op = INDEX_op_mov_i64; | 47 | -TARGET_ALIGNED_ONLY=y |
53 | + break; | 48 | TARGET_BIG_ENDIAN=y |
54 | + case TCG_TYPE_V64: | 49 | diff --git a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak |
55 | + case TCG_TYPE_V128: | 50 | index XXXXXXX..XXXXXXX 100644 |
56 | + case TCG_TYPE_V256: | 51 | --- a/configs/targets/sparc64-softmmu.mak |
57 | + /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ | 52 | +++ b/configs/targets/sparc64-softmmu.mak |
58 | + new_op = INDEX_op_mov_vec; | 53 | @@ -XXX,XX +XXX,XX @@ |
59 | + break; | 54 | TARGET_ARCH=sparc64 |
60 | + default: | 55 | TARGET_BASE_ARCH=sparc |
61 | + g_assert_not_reached(); | 56 | -TARGET_ALIGNED_ONLY=y |
62 | } | 57 | TARGET_BIG_ENDIAN=y |
63 | op->opc = new_op; | ||
64 | - /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ | ||
65 | op->args[0] = dst; | ||
66 | op->args[1] = src; | ||
67 | |||
68 | @@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | ||
69 | static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, | ||
70 | TCGArg dst, uint64_t val) | ||
71 | { | ||
72 | - const TCGOpDef *def = &tcg_op_defs[op->opc]; | ||
73 | - TCGType type; | ||
74 | - TCGTemp *tv; | ||
75 | - | ||
76 | - if (def->flags & TCG_OPF_VECTOR) { | ||
77 | - type = TCGOP_VECL(op) + TCG_TYPE_V64; | ||
78 | - } else if (def->flags & TCG_OPF_64BIT) { | ||
79 | - type = TCG_TYPE_I64; | ||
80 | - } else { | ||
81 | - type = TCG_TYPE_I32; | ||
82 | - } | ||
83 | - | ||
84 | /* Convert movi to mov with constant temp. */ | ||
85 | - tv = tcg_constant_internal(type, val); | ||
86 | + TCGTemp *tv = tcg_constant_internal(ctx->type, val); | ||
87 | + | ||
88 | init_ts_info(ctx, tv); | ||
89 | return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv)); | ||
90 | } | ||
91 | @@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) | ||
92 | } | ||
93 | } | ||
94 | |||
95 | -static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y) | ||
96 | +static uint64_t do_constant_folding(TCGOpcode op, TCGType type, | ||
97 | + uint64_t x, uint64_t y) | ||
98 | { | ||
99 | - const TCGOpDef *def = &tcg_op_defs[op]; | ||
100 | uint64_t res = do_constant_folding_2(op, x, y); | ||
101 | - if (!(def->flags & TCG_OPF_64BIT)) { | ||
102 | + if (type == TCG_TYPE_I32) { | ||
103 | res = (int32_t)res; | ||
104 | } | ||
105 | return res; | ||
106 | @@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c) | ||
107 | * Return -1 if the condition can't be simplified, | ||
108 | * and the result of the condition (0 or 1) if it can. | ||
109 | */ | ||
110 | -static int do_constant_folding_cond(TCGOpcode op, TCGArg x, | ||
111 | +static int do_constant_folding_cond(TCGType type, TCGArg x, | ||
112 | TCGArg y, TCGCond c) | ||
113 | { | ||
114 | uint64_t xv = arg_info(x)->val; | ||
115 | uint64_t yv = arg_info(y)->val; | ||
116 | |||
117 | if (arg_is_const(x) && arg_is_const(y)) { | ||
118 | - const TCGOpDef *def = &tcg_op_defs[op]; | ||
119 | - tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR)); | ||
120 | - if (def->flags & TCG_OPF_64BIT) { | ||
121 | - return do_constant_folding_cond_64(xv, yv, c); | ||
122 | - } else { | ||
123 | + switch (type) { | ||
124 | + case TCG_TYPE_I32: | ||
125 | return do_constant_folding_cond_32(xv, yv, c); | ||
126 | + case TCG_TYPE_I64: | ||
127 | + return do_constant_folding_cond_64(xv, yv, c); | ||
128 | + default: | ||
129 | + /* Only scalar comparisons are optimizable */ | ||
130 | + return -1; | ||
131 | } | ||
132 | } else if (args_are_copies(x, y)) { | ||
133 | return do_constant_folding_cond_eq(c); | ||
134 | @@ -XXX,XX +XXX,XX @@ static bool fold_const1(OptContext *ctx, TCGOp *op) | ||
135 | uint64_t t; | ||
136 | |||
137 | t = arg_info(op->args[1])->val; | ||
138 | - t = do_constant_folding(op->opc, t, 0); | ||
139 | + t = do_constant_folding(op->opc, ctx->type, t, 0); | ||
140 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
141 | } | ||
142 | return false; | ||
143 | @@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op) | ||
144 | uint64_t t1 = arg_info(op->args[1])->val; | ||
145 | uint64_t t2 = arg_info(op->args[2])->val; | ||
146 | |||
147 | - t1 = do_constant_folding(op->opc, t1, t2); | ||
148 | + t1 = do_constant_folding(op->opc, ctx->type, t1, t2); | ||
149 | return tcg_opt_gen_movi(ctx, op, op->args[0], t1); | ||
150 | } | ||
151 | return false; | ||
152 | @@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op) | ||
153 | static bool fold_brcond(OptContext *ctx, TCGOp *op) | ||
154 | { | ||
155 | TCGCond cond = op->args[2]; | ||
156 | - int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond); | ||
157 | + int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond); | ||
158 | |||
159 | if (i == 0) { | ||
160 | tcg_op_remove(ctx->tcg, op); | ||
161 | @@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) | ||
162 | * Simplify EQ/NE comparisons where one of the pairs | ||
163 | * can be simplified. | ||
164 | */ | ||
165 | - i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0], | ||
166 | + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0], | ||
167 | op->args[2], cond); | ||
168 | switch (i ^ inv) { | ||
169 | case 0: | ||
170 | @@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) | ||
171 | goto do_brcond_high; | ||
172 | } | ||
173 | |||
174 | - i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1], | ||
175 | + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1], | ||
176 | op->args[3], cond); | ||
177 | switch (i ^ inv) { | ||
178 | case 0: | ||
179 | @@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op) | ||
180 | if (arg_is_const(op->args[1])) { | ||
181 | uint64_t t = arg_info(op->args[1])->val; | ||
182 | |||
183 | - t = do_constant_folding(op->opc, t, op->args[2]); | ||
184 | + t = do_constant_folding(op->opc, ctx->type, t, op->args[2]); | ||
185 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
186 | } | ||
187 | return false; | ||
188 | @@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op) | ||
189 | uint64_t t = arg_info(op->args[1])->val; | ||
190 | |||
191 | if (t != 0) { | ||
192 | - t = do_constant_folding(op->opc, t, 0); | ||
193 | + t = do_constant_folding(op->opc, ctx->type, t, 0); | ||
194 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
195 | } | ||
196 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); | ||
197 | @@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op) | ||
198 | |||
199 | static bool fold_movcond(OptContext *ctx, TCGOp *op) | ||
200 | { | ||
201 | - TCGOpcode opc = op->opc; | ||
202 | TCGCond cond = op->args[5]; | ||
203 | - int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond); | ||
204 | + int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); | ||
205 | |||
206 | if (i >= 0) { | ||
207 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); | ||
208 | @@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op) | ||
209 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | ||
210 | uint64_t tv = arg_info(op->args[3])->val; | ||
211 | uint64_t fv = arg_info(op->args[4])->val; | ||
212 | + TCGOpcode opc; | ||
213 | |||
214 | - opc = (opc == INDEX_op_movcond_i32 | ||
215 | - ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64); | ||
216 | + switch (ctx->type) { | ||
217 | + case TCG_TYPE_I32: | ||
218 | + opc = INDEX_op_setcond_i32; | ||
219 | + break; | ||
220 | + case TCG_TYPE_I64: | ||
221 | + opc = INDEX_op_setcond_i64; | ||
222 | + break; | ||
223 | + default: | ||
224 | + g_assert_not_reached(); | ||
225 | + } | ||
226 | |||
227 | if (tv == 1 && fv == 0) { | ||
228 | op->opc = opc; | ||
229 | @@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op) | ||
230 | static bool fold_setcond(OptContext *ctx, TCGOp *op) | ||
231 | { | ||
232 | TCGCond cond = op->args[3]; | ||
233 | - int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond); | ||
234 | + int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); | ||
235 | |||
236 | if (i >= 0) { | ||
237 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | ||
238 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
239 | * Simplify EQ/NE comparisons where one of the pairs | ||
240 | * can be simplified. | ||
241 | */ | ||
242 | - i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1], | ||
243 | + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1], | ||
244 | op->args[3], cond); | ||
245 | switch (i ^ inv) { | ||
246 | case 0: | ||
247 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
248 | goto do_setcond_high; | ||
249 | } | ||
250 | |||
251 | - i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2], | ||
252 | + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2], | ||
253 | op->args[4], cond); | ||
254 | switch (i ^ inv) { | ||
255 | case 0: | ||
256 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
257 | init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs); | ||
258 | copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); | ||
259 | |||
260 | + /* Pre-compute the type of the operation. */ | ||
261 | + if (def->flags & TCG_OPF_VECTOR) { | ||
262 | + ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op); | ||
263 | + } else if (def->flags & TCG_OPF_64BIT) { | ||
264 | + ctx.type = TCG_TYPE_I64; | ||
265 | + } else { | ||
266 | + ctx.type = TCG_TYPE_I32; | ||
267 | + } | ||
268 | + | ||
269 | /* For commutative operations make constant second argument */ | ||
270 | switch (opc) { | ||
271 | CASE_OP_32_64_VEC(add): | ||
272 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
273 | /* Proceed with possible constant folding. */ | ||
274 | break; | ||
275 | } | ||
276 | - if (opc == INDEX_op_sub_i32) { | ||
277 | + switch (ctx.type) { | ||
278 | + case TCG_TYPE_I32: | ||
279 | neg_op = INDEX_op_neg_i32; | ||
280 | have_neg = TCG_TARGET_HAS_neg_i32; | ||
281 | - } else if (opc == INDEX_op_sub_i64) { | ||
282 | + break; | ||
283 | + case TCG_TYPE_I64: | ||
284 | neg_op = INDEX_op_neg_i64; | ||
285 | have_neg = TCG_TARGET_HAS_neg_i64; | ||
286 | - } else if (TCG_TARGET_HAS_neg_vec) { | ||
287 | - TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64; | ||
288 | - unsigned vece = TCGOP_VECE(op); | ||
289 | - neg_op = INDEX_op_neg_vec; | ||
290 | - have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0; | ||
291 | - } else { | ||
292 | break; | ||
293 | + case TCG_TYPE_V64: | ||
294 | + case TCG_TYPE_V128: | ||
295 | + case TCG_TYPE_V256: | ||
296 | + neg_op = INDEX_op_neg_vec; | ||
297 | + have_neg = tcg_can_emit_vec_op(neg_op, ctx.type, | ||
298 | + TCGOP_VECE(op)) > 0; | ||
299 | + break; | ||
300 | + default: | ||
301 | + g_assert_not_reached(); | ||
302 | } | ||
303 | if (!have_neg) { | ||
304 | break; | ||
305 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
306 | TCGOpcode not_op; | ||
307 | bool have_not; | ||
308 | |||
309 | - if (def->flags & TCG_OPF_VECTOR) { | ||
310 | - not_op = INDEX_op_not_vec; | ||
311 | - have_not = TCG_TARGET_HAS_not_vec; | ||
312 | - } else if (def->flags & TCG_OPF_64BIT) { | ||
313 | - not_op = INDEX_op_not_i64; | ||
314 | - have_not = TCG_TARGET_HAS_not_i64; | ||
315 | - } else { | ||
316 | + switch (ctx.type) { | ||
317 | + case TCG_TYPE_I32: | ||
318 | not_op = INDEX_op_not_i32; | ||
319 | have_not = TCG_TARGET_HAS_not_i32; | ||
320 | + break; | ||
321 | + case TCG_TYPE_I64: | ||
322 | + not_op = INDEX_op_not_i64; | ||
323 | + have_not = TCG_TARGET_HAS_not_i64; | ||
324 | + break; | ||
325 | + case TCG_TYPE_V64: | ||
326 | + case TCG_TYPE_V128: | ||
327 | + case TCG_TYPE_V256: | ||
328 | + not_op = INDEX_op_not_vec; | ||
329 | + have_not = TCG_TARGET_HAS_not_vec; | ||
330 | + break; | ||
331 | + default: | ||
332 | + g_assert_not_reached(); | ||
333 | } | ||
334 | if (!have_not) { | ||
335 | break; | ||
336 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
337 | below, we can ignore high bits, but for further optimizations we | ||
338 | need to record that the high bits contain garbage. */ | ||
339 | partmask = z_mask; | ||
340 | - if (!(def->flags & TCG_OPF_64BIT)) { | ||
341 | + if (ctx.type == TCG_TYPE_I32) { | ||
342 | z_mask |= ~(tcg_target_ulong)0xffffffffu; | ||
343 | partmask &= 0xffffffffu; | ||
344 | affected &= 0xffffffffu; | ||
345 | -- | 58 | -- |
346 | 2.25.1 | 59 | 2.34.1 |
347 | |||
1 | Return -1 instead of 2 for failure, so that we can | 1 | Interpret the variable argument placement in the caller. Pass data_type |
---|---|---|---|
2 | use comparisons against 0 for all cases. | 2 | instead of is64 -- there are several places where we already convert back |
3 | 3 | from bool to type. Clean things up by using type throughout. | |
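
The convention adopted here is the usual tri-state one: -1 means "can't
simplify", 0 or 1 is the known value of the condition, so every caller
can test with plain sign and zero comparisons.  In miniature (illustrative
names, not the QEMU functions):

    #include <stdio.h>

    /* Return -1 if unknown, else the condition's value (0 or 1). */
    static int fold_cond(int known, int value)
    {
        return known ? (value != 0) : -1;
    }

    int main(void)
    {
        int i = fold_cond(1, 42);
        if (i == 0) {
            printf("always false\n");
        } else if (i > 0) {
            printf("always true\n");
        } else {                  /* i < 0: leave the operation alone */
            printf("not simplified\n");
        }
        return 0;
    }
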
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 4 | |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 7 | --- |
8 | tcg/optimize.c | 145 +++++++++++++++++++++++++------------------------ | 8 | tcg/i386/tcg-target.c.inc | 111 +++++++++++++++++--------------------- |
9 | 1 file changed, 74 insertions(+), 71 deletions(-) | 9 | 1 file changed, 50 insertions(+), 61 deletions(-) |
10 | 10 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc |
12 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 13 | --- a/tcg/i386/tcg-target.c.inc |
14 | +++ b/tcg/optimize.c | 14 | +++ b/tcg/i386/tcg-target.c.inc |
15 | @@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c) | 15 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, |
16 | * Record the context of a call to the out of line helper code for the slow path | ||
17 | * for a load or store, so that we can later generate the correct helper code | ||
18 | */ | ||
19 | -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, | ||
20 | - MemOpIdx oi, | ||
21 | +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, | ||
22 | + TCGType type, MemOpIdx oi, | ||
23 | TCGReg datalo, TCGReg datahi, | ||
24 | TCGReg addrlo, TCGReg addrhi, | ||
25 | tcg_insn_unit *raddr, | ||
26 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, | ||
27 | |||
28 | label->is_ld = is_ld; | ||
29 | label->oi = oi; | ||
30 | - label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32; | ||
31 | + label->type = type; | ||
32 | label->datalo_reg = datalo; | ||
33 | label->datahi_reg = datahi; | ||
34 | label->addrlo_reg = addrlo; | ||
35 | @@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void) | ||
36 | |||
37 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
38 | TCGReg base, int index, intptr_t ofs, | ||
39 | - int seg, bool is64, MemOp memop) | ||
40 | + int seg, TCGType type, MemOp memop) | ||
41 | { | ||
42 | - TCGType type = is64 ? TCG_TYPE_I64 : TCG_TYPE_I32; | ||
43 | bool use_movbe = false; | ||
44 | - int rexw = is64 * P_REXW; | ||
45 | + int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW); | ||
46 | int movop = OPC_MOVL_GvEv; | ||
47 | |||
48 | /* Do big-endian loads with movbe. */ | ||
49 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
16 | } | 50 | } |
17 | } | 51 | } |
18 | 52 | ||
19 | -/* Return 2 if the condition can't be simplified, and the result | 53 | -/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and |
20 | - of the condition (0 or 1) if it can */ | 54 | - EAX. It will be useful once fixed registers globals are less |
21 | -static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x, | 55 | - common. */ |
22 | - TCGArg y, TCGCond c) | 56 | -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) |
23 | +/* | 57 | +static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, |
24 | + * Return -1 if the condition can't be simplified, | 58 | + TCGReg addrlo, TCGReg addrhi, |
25 | + * and the result of the condition (0 or 1) if it can. | 59 | + MemOpIdx oi, TCGType data_type) |
26 | + */ | ||
27 | +static int do_constant_folding_cond(TCGOpcode op, TCGArg x, | ||
28 | + TCGArg y, TCGCond c) | ||
29 | { | 60 | { |
30 | uint64_t xv = arg_info(x)->val; | 61 | - TCGReg datalo, datahi, addrlo; |
31 | uint64_t yv = arg_info(y)->val; | 62 | - TCGReg addrhi __attribute__((unused)); |
32 | @@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x, | 63 | - MemOpIdx oi; |
33 | case TCG_COND_GEU: | 64 | - MemOp opc; |
34 | return 1; | 65 | + MemOp opc = get_memop(oi); |
35 | default: | 66 | + |
36 | - return 2; | 67 | #if defined(CONFIG_SOFTMMU) |
37 | + return -1; | 68 | - int mem_index; |
38 | } | 69 | tcg_insn_unit *label_ptr[2]; |
39 | } | 70 | -#else |
40 | - return 2; | 71 | - unsigned a_bits; |
41 | + return -1; | 72 | -#endif |
73 | |||
74 | - datalo = *args++; | ||
75 | - datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); | ||
76 | - addrlo = *args++; | ||
77 | - addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); | ||
78 | - oi = *args++; | ||
79 | - opc = get_memop(oi); | ||
80 | - | ||
81 | -#if defined(CONFIG_SOFTMMU) | ||
82 | - mem_index = get_mmuidx(oi); | ||
83 | - | ||
84 | - tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc, | ||
85 | + tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc, | ||
86 | label_ptr, offsetof(CPUTLBEntry, addr_read)); | ||
87 | |||
88 | /* TLB Hit. */ | ||
89 | - tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc); | ||
90 | + tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, | ||
91 | + -1, 0, 0, data_type, opc); | ||
92 | |||
93 | /* Record the current context of a load into ldst label */ | ||
94 | - add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi, | ||
95 | - s->code_ptr, label_ptr); | ||
96 | + add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi, | ||
97 | + addrlo, addrhi, s->code_ptr, label_ptr); | ||
98 | #else | ||
99 | - a_bits = get_alignment_bits(opc); | ||
100 | + unsigned a_bits = get_alignment_bits(opc); | ||
101 | if (a_bits) { | ||
102 | tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
103 | } | ||
104 | |||
105 | tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index, | ||
106 | x86_guest_base_offset, x86_guest_base_seg, | ||
107 | - is64, opc); | ||
108 | + data_type, opc); | ||
109 | #endif | ||
42 | } | 110 | } |
43 | 111 | ||
44 | -/* Return 2 if the condition can't be simplified, and the result | 112 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, |
45 | - of the condition (0 or 1) if it can */ | 113 | } |
46 | -static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) | 114 | } |
47 | +/* | 115 | |
48 | + * Return -1 if the condition can't be simplified, | 116 | -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) |
49 | + * and the result of the condition (0 or 1) if it can. | 117 | +static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, |
50 | + */ | 118 | + TCGReg addrlo, TCGReg addrhi, |
51 | +static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) | 119 | + MemOpIdx oi, TCGType data_type) |
52 | { | 120 | { |
53 | TCGArg al = p1[0], ah = p1[1]; | 121 | - TCGReg datalo, datahi, addrlo; |
54 | TCGArg bl = p2[0], bh = p2[1]; | 122 | - TCGReg addrhi __attribute__((unused)); |
55 | @@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) | 123 | - MemOpIdx oi; |
56 | if (args_are_copies(al, bl) && args_are_copies(ah, bh)) { | 124 | - MemOp opc; |
57 | return do_constant_folding_cond_eq(c); | 125 | + MemOp opc = get_memop(oi); |
58 | } | 126 | + |
59 | - return 2; | 127 | #if defined(CONFIG_SOFTMMU) |
60 | + return -1; | 128 | - int mem_index; |
61 | } | 129 | tcg_insn_unit *label_ptr[2]; |
62 | 130 | -#else | |
63 | static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) | 131 | - unsigned a_bits; |
64 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 132 | -#endif |
65 | break; | 133 | |
66 | 134 | - datalo = *args++; | |
67 | CASE_OP_32_64(setcond): | 135 | - datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); |
68 | - tmp = do_constant_folding_cond(opc, op->args[1], | 136 | - addrlo = *args++; |
69 | - op->args[2], op->args[3]); | 137 | - addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); |
70 | - if (tmp != 2) { | 138 | - oi = *args++; |
71 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | 139 | - opc = get_memop(oi); |
72 | + i = do_constant_folding_cond(opc, op->args[1], | 140 | - |
73 | + op->args[2], op->args[3]); | 141 | -#if defined(CONFIG_SOFTMMU) |
74 | + if (i >= 0) { | 142 | - mem_index = get_mmuidx(oi); |
75 | + tcg_opt_gen_movi(&ctx, op, op->args[0], i); | 143 | - |
76 | continue; | 144 | - tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc, |
77 | } | 145 | + tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc, |
78 | break; | 146 | label_ptr, offsetof(CPUTLBEntry, addr_write)); |
79 | 147 | ||
80 | CASE_OP_32_64(brcond): | 148 | /* TLB Hit. */ |
81 | - tmp = do_constant_folding_cond(opc, op->args[0], | 149 | tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc); |
82 | - op->args[1], op->args[2]); | 150 | |
83 | - switch (tmp) { | 151 | /* Record the current context of a store into ldst label */ |
84 | - case 0: | 152 | - add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi, |
85 | + i = do_constant_folding_cond(opc, op->args[0], | 153 | - s->code_ptr, label_ptr); |
86 | + op->args[1], op->args[2]); | 154 | + add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi, |
87 | + if (i == 0) { | 155 | + addrlo, addrhi, s->code_ptr, label_ptr); |
88 | tcg_op_remove(s, op); | 156 | #else |
89 | continue; | 157 | - a_bits = get_alignment_bits(opc); |
90 | - case 1: | 158 | + unsigned a_bits = get_alignment_bits(opc); |
91 | + } else if (i > 0) { | 159 | if (a_bits) { |
92 | memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | 160 | tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); |
93 | op->opc = opc = INDEX_op_br; | 161 | } |
94 | op->args[0] = op->args[3]; | 162 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, |
95 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 163 | break; |
96 | break; | 164 | |
97 | 165 | case INDEX_op_qemu_ld_i32: | |
98 | CASE_OP_32_64(movcond): | 166 | - tcg_out_qemu_ld(s, args, 0); |
99 | - tmp = do_constant_folding_cond(opc, op->args[1], | 167 | + if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { |
100 | - op->args[2], op->args[5]); | 168 | + tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); |
101 | - if (tmp != 2) { | 169 | + } else { |
102 | - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]); | 170 | + tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32); |
103 | + i = do_constant_folding_cond(opc, op->args[1], | 171 | + } |
104 | + op->args[2], op->args[5]); | 172 | break; |
105 | + if (i >= 0) { | 173 | case INDEX_op_qemu_ld_i64: |
106 | + tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]); | 174 | - tcg_out_qemu_ld(s, args, 1); |
107 | continue; | 175 | + if (TCG_TARGET_REG_BITS == 64) { |
108 | } | 176 | + tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); |
109 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | 177 | + } else if (TARGET_LONG_BITS == 32) { |
110 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 178 | + tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64); |
111 | break; | 179 | + } else { |
112 | 180 | + tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); | |
113 | case INDEX_op_brcond2_i32: | 181 | + } |
114 | - tmp = do_constant_folding_cond2(&op->args[0], &op->args[2], | 182 | break; |
115 | - op->args[4]); | 183 | case INDEX_op_qemu_st_i32: |
116 | - if (tmp == 0) { | 184 | case INDEX_op_qemu_st8_i32: |
117 | + i = do_constant_folding_cond2(&op->args[0], &op->args[2], | 185 | - tcg_out_qemu_st(s, args, 0); |
118 | + op->args[4]); | 186 | + if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { |
119 | + if (i == 0) { | 187 | + tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); |
120 | do_brcond_false: | 188 | + } else { |
121 | tcg_op_remove(s, op); | 189 | + tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32); |
122 | continue; | 190 | + } |
123 | } | 191 | break; |
124 | - if (tmp == 1) { | 192 | case INDEX_op_qemu_st_i64: |
125 | + if (i > 0) { | 193 | - tcg_out_qemu_st(s, args, 1); |
126 | do_brcond_true: | 194 | + if (TCG_TARGET_REG_BITS == 64) { |
127 | op->opc = opc = INDEX_op_br; | 195 | + tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); |
128 | op->args[0] = op->args[5]; | 196 | + } else if (TARGET_LONG_BITS == 32) { |
129 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 197 | + tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64); |
130 | if (op->args[4] == TCG_COND_EQ) { | 198 | + } else { |
131 | /* Simplify EQ comparisons where one of the pairs | 199 | + tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); |
132 | can be simplified. */ | 200 | + } |
133 | - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, | 201 | break; |
134 | - op->args[0], op->args[2], | 202 | |
135 | - TCG_COND_EQ); | 203 | OP_32_64(mulu2): |
136 | - if (tmp == 0) { | ||
137 | + i = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
138 | + op->args[0], op->args[2], | ||
139 | + TCG_COND_EQ); | ||
140 | + if (i == 0) { | ||
141 | goto do_brcond_false; | ||
142 | - } else if (tmp == 1) { | ||
143 | + } else if (i > 0) { | ||
144 | goto do_brcond_high; | ||
145 | } | ||
146 | - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
147 | - op->args[1], op->args[3], | ||
148 | - TCG_COND_EQ); | ||
149 | - if (tmp == 0) { | ||
150 | + i = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
151 | + op->args[1], op->args[3], | ||
152 | + TCG_COND_EQ); | ||
153 | + if (i == 0) { | ||
154 | goto do_brcond_false; | ||
155 | - } else if (tmp != 1) { | ||
156 | + } else if (i < 0) { | ||
157 | break; | ||
158 | } | ||
159 | do_brcond_low: | ||
160 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
161 | if (op->args[4] == TCG_COND_NE) { | ||
162 | /* Simplify NE comparisons where one of the pairs | ||
163 | can be simplified. */ | ||
164 | - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
165 | - op->args[0], op->args[2], | ||
166 | - TCG_COND_NE); | ||
167 | - if (tmp == 0) { | ||
168 | + i = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
169 | + op->args[0], op->args[2], | ||
170 | + TCG_COND_NE); | ||
171 | + if (i == 0) { | ||
172 | goto do_brcond_high; | ||
173 | - } else if (tmp == 1) { | ||
174 | + } else if (i > 0) { | ||
175 | goto do_brcond_true; | ||
176 | } | ||
177 | - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
178 | - op->args[1], op->args[3], | ||
179 | - TCG_COND_NE); | ||
180 | - if (tmp == 0) { | ||
181 | + i = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
182 | + op->args[1], op->args[3], | ||
183 | + TCG_COND_NE); | ||
184 | + if (i == 0) { | ||
185 | goto do_brcond_low; | ||
186 | - } else if (tmp == 1) { | ||
187 | + } else if (i > 0) { | ||
188 | goto do_brcond_true; | ||
189 | } | ||
190 | } | ||
191 | break; | ||
192 | |||
193 | case INDEX_op_setcond2_i32: | ||
194 | - tmp = do_constant_folding_cond2(&op->args[1], &op->args[3], | ||
195 | - op->args[5]); | ||
196 | - if (tmp != 2) { | ||
197 | + i = do_constant_folding_cond2(&op->args[1], &op->args[3], | ||
198 | + op->args[5]); | ||
199 | + if (i >= 0) { | ||
200 | do_setcond_const: | ||
201 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
202 | + tcg_opt_gen_movi(&ctx, op, op->args[0], i); | ||
203 | continue; | ||
204 | } | ||
205 | if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE) | ||
206 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
207 | if (op->args[5] == TCG_COND_EQ) { | ||
208 | /* Simplify EQ comparisons where one of the pairs | ||
209 | can be simplified. */ | ||
210 | - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
211 | - op->args[1], op->args[3], | ||
212 | - TCG_COND_EQ); | ||
213 | - if (tmp == 0) { | ||
214 | + i = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
215 | + op->args[1], op->args[3], | ||
216 | + TCG_COND_EQ); | ||
217 | + if (i == 0) { | ||
218 | goto do_setcond_const; | ||
219 | - } else if (tmp == 1) { | ||
220 | + } else if (i > 0) { | ||
221 | goto do_setcond_high; | ||
222 | } | ||
223 | - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
224 | - op->args[2], op->args[4], | ||
225 | - TCG_COND_EQ); | ||
226 | - if (tmp == 0) { | ||
227 | + i = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
228 | + op->args[2], op->args[4], | ||
229 | + TCG_COND_EQ); | ||
230 | + if (i == 0) { | ||
231 | goto do_setcond_high; | ||
232 | - } else if (tmp != 1) { | ||
233 | + } else if (i < 0) { | ||
234 | break; | ||
235 | } | ||
236 | do_setcond_low: | ||
237 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
238 | if (op->args[5] == TCG_COND_NE) { | ||
239 | /* Simplify NE comparisons where one of the pairs | ||
240 | can be simplified. */ | ||
241 | - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
242 | - op->args[1], op->args[3], | ||
243 | - TCG_COND_NE); | ||
244 | - if (tmp == 0) { | ||
245 | + i = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
246 | + op->args[1], op->args[3], | ||
247 | + TCG_COND_NE); | ||
248 | + if (i == 0) { | ||
249 | goto do_setcond_high; | ||
250 | - } else if (tmp == 1) { | ||
251 | + } else if (i > 0) { | ||
252 | goto do_setcond_const; | ||
253 | } | ||
254 | - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
255 | - op->args[2], op->args[4], | ||
256 | - TCG_COND_NE); | ||
257 | - if (tmp == 0) { | ||
258 | + i = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
259 | + op->args[2], op->args[4], | ||
260 | + TCG_COND_NE); | ||
261 | + if (i == 0) { | ||
262 | goto do_setcond_low; | ||
263 | - } else if (tmp == 1) { | ||
264 | + } else if (i > 0) { | ||
265 | goto do_setcond_const; | ||
266 | } | ||
267 | } | ||
268 | -- | 204 | -- |
269 | 2.25.1 | 205 | 2.34.1 |
270 | 206 | ||
1 | Break the final cleanup clause out of the main switch | 1 | Test for both base and index; use datahi as a temporary, overwritten |
---|---|---|---|
2 | statement. When fully folding an opcode to mov/movi, | 2 | by the final load. Always perform the loads in ascending order, so |
3 | use "continue" to process the next opcode, else break | 3 | that any (user-only) fault sees the correct address. |
4 | to fall into the final cleanup. | ||
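
The control flow this sets up, in miniature (toy code, not the optimizer
itself): fully handled cases continue straight to the next iteration,
while everything else breaks out of the switch and runs the shared
cleanup at the bottom of the loop.

    #include <stdio.h>

    int main(void)
    {
        for (int op = 0; op < 3; op++) {
            switch (op) {
            case 1:                     /* fully folded to mov/movi */
                printf("op %d folded\n", op);
                continue;               /* next opcode, skip cleanup */
            default:
                break;                  /* fall into the final cleanup */
            }
            printf("op %d cleanup\n", op);  /* the final cleanup clause */
        }
        return 0;
    }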
5 | 4 | ||
6 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
7 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 6 | --- |
11 | tcg/optimize.c | 190 ++++++++++++++++++++++++------------------------- | 7 | tcg/i386/tcg-target.c.inc | 31 +++++++++++++++---------------- |
12 | 1 file changed, 94 insertions(+), 96 deletions(-) | 8 | 1 file changed, 15 insertions(+), 16 deletions(-) |
13 | 9 | ||
14 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc |
15 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/tcg/optimize.c | 12 | --- a/tcg/i386/tcg-target.c.inc |
17 | +++ b/tcg/optimize.c | 13 | +++ b/tcg/i386/tcg-target.c.inc |
18 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 14 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, |
19 | switch (opc) { | 15 | if (TCG_TARGET_REG_BITS == 64) { |
20 | CASE_OP_32_64_VEC(mov): | 16 | tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, |
21 | tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); | 17 | base, index, 0, ofs); |
22 | - break; | ||
23 | + continue; | ||
24 | |||
25 | case INDEX_op_dup_vec: | ||
26 | if (arg_is_const(op->args[1])) { | ||
27 | tmp = arg_info(op->args[1])->val; | ||
28 | tmp = dup_const(TCGOP_VECE(op), tmp); | ||
29 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
30 | - break; | ||
31 | + continue; | ||
32 | } | ||
33 | - goto do_default; | ||
34 | + break; | ||
35 | |||
36 | case INDEX_op_dup2_vec: | ||
37 | assert(TCG_TARGET_REG_BITS == 32); | ||
38 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
39 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], | ||
40 | deposit64(arg_info(op->args[1])->val, 32, 32, | ||
41 | arg_info(op->args[2])->val)); | ||
42 | - break; | ||
43 | + continue; | ||
44 | } else if (args_are_copies(op->args[1], op->args[2])) { | ||
45 | op->opc = INDEX_op_dup_vec; | ||
46 | TCGOP_VECE(op) = MO_32; | ||
47 | nb_iargs = 1; | ||
48 | } | ||
49 | - goto do_default; | ||
50 | + break; | ||
51 | |||
52 | CASE_OP_32_64(not): | ||
53 | CASE_OP_32_64(neg): | ||
54 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
55 | if (arg_is_const(op->args[1])) { | ||
56 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0); | ||
57 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
58 | - break; | ||
59 | + continue; | ||
60 | } | ||
61 | - goto do_default; | ||
62 | + break; | ||
63 | |||
64 | CASE_OP_32_64(bswap16): | ||
65 | CASE_OP_32_64(bswap32): | ||
66 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
67 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | ||
68 | op->args[2]); | ||
69 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
70 | - break; | ||
71 | + continue; | ||
72 | } | ||
73 | - goto do_default; | ||
74 | + break; | ||
75 | |||
76 | CASE_OP_32_64(add): | ||
77 | CASE_OP_32_64(sub): | ||
78 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
79 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | ||
80 | arg_info(op->args[2])->val); | ||
81 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
82 | - break; | ||
83 | + continue; | ||
84 | } | ||
85 | - goto do_default; | ||
86 | + break; | ||
87 | |||
88 | CASE_OP_32_64(clz): | ||
89 | CASE_OP_32_64(ctz): | ||
90 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
91 | } else { | ||
92 | tcg_opt_gen_mov(s, op, op->args[0], op->args[2]); | ||
93 | } | ||
94 | - break; | ||
95 | + continue; | ||
96 | } | ||
97 | - goto do_default; | ||
98 | + break; | ||
99 | |||
100 | CASE_OP_32_64(deposit): | ||
101 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
102 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
103 | op->args[3], op->args[4], | ||
104 | arg_info(op->args[2])->val); | ||
105 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
106 | - break; | ||
107 | + continue; | ||
108 | } | ||
109 | - goto do_default; | ||
110 | + break; | ||
111 | |||
112 | CASE_OP_32_64(extract): | ||
113 | if (arg_is_const(op->args[1])) { | ||
114 | tmp = extract64(arg_info(op->args[1])->val, | ||
115 | op->args[2], op->args[3]); | ||
116 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
117 | - break; | ||
118 | + continue; | ||
119 | } | ||
120 | - goto do_default; | ||
121 | + break; | ||
122 | |||
123 | CASE_OP_32_64(sextract): | ||
124 | if (arg_is_const(op->args[1])) { | ||
125 | tmp = sextract64(arg_info(op->args[1])->val, | ||
126 | op->args[2], op->args[3]); | ||
127 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
128 | - break; | ||
129 | + continue; | ||
130 | } | ||
131 | - goto do_default; | ||
132 | + break; | ||
133 | |||
134 | CASE_OP_32_64(extract2): | ||
135 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
136 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
137 | ((uint32_t)v2 << (32 - shr))); | ||
138 | } | ||
139 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
140 | - break; | ||
141 | + continue; | ||
142 | } | ||
143 | - goto do_default; | ||
144 | + break; | ||
145 | |||
146 | CASE_OP_32_64(setcond): | ||
147 | tmp = do_constant_folding_cond(opc, op->args[1], | ||
148 | op->args[2], op->args[3]); | ||
149 | if (tmp != 2) { | ||
150 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
151 | - break; | ||
152 | + continue; | ||
153 | } | ||
154 | - goto do_default; | ||
155 | + break; | ||
156 | |||
157 | CASE_OP_32_64(brcond): | ||
158 | tmp = do_constant_folding_cond(opc, op->args[0], | ||
159 | op->args[1], op->args[2]); | ||
160 | - if (tmp != 2) { | ||
161 | - if (tmp) { | ||
162 | - memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
163 | - op->opc = INDEX_op_br; | ||
164 | - op->args[0] = op->args[3]; | ||
165 | - } else { | ||
166 | - tcg_op_remove(s, op); | ||
167 | - } | ||
168 | + switch (tmp) { | ||
169 | + case 0: | ||
170 | + tcg_op_remove(s, op); | ||
171 | + continue; | ||
172 | + case 1: | ||
173 | + memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
174 | + op->opc = opc = INDEX_op_br; | ||
175 | + op->args[0] = op->args[3]; | ||
176 | break; | ||
177 | } | ||
178 | - goto do_default; | ||
179 | + break; | ||
180 | |||
181 | CASE_OP_32_64(movcond): | ||
182 | tmp = do_constant_folding_cond(opc, op->args[1], | ||
183 | op->args[2], op->args[5]); | ||
184 | if (tmp != 2) { | ||
185 | tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]); | ||
186 | - break; | ||
187 | + continue; | ||
188 | } | ||
189 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | ||
190 | uint64_t tv = arg_info(op->args[3])->val; | ||
191 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
192 | if (fv == 1 && tv == 0) { | ||
193 | cond = tcg_invert_cond(cond); | ||
194 | } else if (!(tv == 1 && fv == 0)) { | ||
195 | - goto do_default; | ||
196 | + break; | ||
197 | } | ||
198 | op->args[3] = cond; | ||
199 | op->opc = opc = (opc == INDEX_op_movcond_i32 | ||
200 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
201 | : INDEX_op_setcond_i64); | ||
202 | nb_iargs = 2; | ||
203 | } | ||
204 | - goto do_default; | ||
205 | + break; | ||
206 | |||
207 | case INDEX_op_add2_i32: | ||
208 | case INDEX_op_sub2_i32: | ||
209 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
210 | rh = op->args[1]; | ||
211 | tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a); | ||
212 | tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32)); | ||
213 | - break; | ||
214 | + continue; | ||
215 | } | ||
216 | - goto do_default; | ||
217 | + break; | ||
218 | |||
219 | case INDEX_op_mulu2_i32: | ||
220 | if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { | ||
221 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
222 | rh = op->args[1]; | ||
223 | tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r); | ||
224 | tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32)); | ||
225 | - break; | ||
226 | + continue; | ||
227 | } | ||
228 | - goto do_default; | ||
229 | + break; | ||
230 | |||
231 | case INDEX_op_brcond2_i32: | ||
232 | tmp = do_constant_folding_cond2(&op->args[0], &op->args[2], | ||
233 | op->args[4]); | ||
234 | - if (tmp != 2) { | ||
235 | - if (tmp) { | ||
236 | - do_brcond_true: | ||
237 | - memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
238 | - op->opc = INDEX_op_br; | ||
239 | - op->args[0] = op->args[5]; | ||
240 | - } else { | ||
241 | + if (tmp == 0) { | ||
242 | do_brcond_false: | ||
243 | - tcg_op_remove(s, op); | ||
244 | - } | ||
245 | - } else if ((op->args[4] == TCG_COND_LT | ||
246 | - || op->args[4] == TCG_COND_GE) | ||
247 | - && arg_is_const(op->args[2]) | ||
248 | - && arg_info(op->args[2])->val == 0 | ||
249 | - && arg_is_const(op->args[3]) | ||
250 | - && arg_info(op->args[3])->val == 0) { | ||
251 | + tcg_op_remove(s, op); | ||
252 | + continue; | ||
253 | + } | ||
254 | + if (tmp == 1) { | ||
255 | + do_brcond_true: | ||
256 | + op->opc = opc = INDEX_op_br; | ||
257 | + op->args[0] = op->args[5]; | ||
258 | + break; | ||
259 | + } | ||
260 | + if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE) | ||
261 | + && arg_is_const(op->args[2]) | ||
262 | + && arg_info(op->args[2])->val == 0 | ||
263 | + && arg_is_const(op->args[3]) | ||
264 | + && arg_info(op->args[3])->val == 0) { | ||
265 | /* Simplify LT/GE comparisons vs zero to a single compare | ||
266 | vs the high word of the input. */ | ||
267 | do_brcond_high: | ||
268 | - memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
269 | - op->opc = INDEX_op_brcond_i32; | ||
270 | + op->opc = opc = INDEX_op_brcond_i32; | ||
271 | op->args[0] = op->args[1]; | ||
272 | op->args[1] = op->args[3]; | ||
273 | op->args[2] = op->args[4]; | ||
274 | op->args[3] = op->args[5]; | ||
275 | - } else if (op->args[4] == TCG_COND_EQ) { | ||
276 | + break; | ||
277 | + } | ||
278 | + if (op->args[4] == TCG_COND_EQ) { | ||
279 | /* Simplify EQ comparisons where one of the pairs | ||
280 | can be simplified. */ | ||
281 | tmp = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
282 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
283 | if (tmp == 0) { | ||
284 | goto do_brcond_false; | ||
285 | } else if (tmp != 1) { | ||
286 | - goto do_default; | ||
287 | + break; | ||
288 | } | ||
289 | do_brcond_low: | ||
290 | memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
291 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
292 | op->args[1] = op->args[2]; | ||
293 | op->args[2] = op->args[4]; | ||
294 | op->args[3] = op->args[5]; | ||
295 | - } else if (op->args[4] == TCG_COND_NE) { | ||
296 | + break; | ||
297 | + } | ||
298 | + if (op->args[4] == TCG_COND_NE) { | ||
299 | /* Simplify NE comparisons where one of the pairs | ||
300 | can be simplified. */ | ||
301 | tmp = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
302 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
303 | } else if (tmp == 1) { | ||
304 | goto do_brcond_true; | ||
305 | } | ||
306 | - goto do_default; | ||
307 | - } else { | ||
308 | - goto do_default; | ||
309 | } | ||
310 | break; | ||
311 | |||
312 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
313 | if (tmp != 2) { | ||
314 | do_setcond_const: | ||
315 | tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
316 | - } else if ((op->args[5] == TCG_COND_LT | ||
317 | - || op->args[5] == TCG_COND_GE) | ||
318 | - && arg_is_const(op->args[3]) | ||
319 | - && arg_info(op->args[3])->val == 0 | ||
320 | - && arg_is_const(op->args[4]) | ||
321 | - && arg_info(op->args[4])->val == 0) { | ||
322 | + continue; | ||
323 | + } | ||
324 | + if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE) | ||
325 | + && arg_is_const(op->args[3]) | ||
326 | + && arg_info(op->args[3])->val == 0 | ||
327 | + && arg_is_const(op->args[4]) | ||
328 | + && arg_info(op->args[4])->val == 0) { | ||
329 | /* Simplify LT/GE comparisons vs zero to a single compare | ||
330 | vs the high word of the input. */ | ||
331 | do_setcond_high: | ||
332 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
333 | op->args[1] = op->args[2]; | ||
334 | op->args[2] = op->args[4]; | ||
335 | op->args[3] = op->args[5]; | ||
336 | - } else if (op->args[5] == TCG_COND_EQ) { | ||
337 | + break; | ||
338 | + } | ||
339 | + if (op->args[5] == TCG_COND_EQ) { | ||
340 | /* Simplify EQ comparisons where one of the pairs | ||
341 | can be simplified. */ | ||
342 | tmp = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
343 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
344 | if (tmp == 0) { | ||
345 | goto do_setcond_high; | ||
346 | } else if (tmp != 1) { | ||
347 | - goto do_default; | ||
348 | + break; | ||
349 | } | ||
350 | do_setcond_low: | ||
351 | reset_temp(op->args[0]); | ||
352 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
353 | op->opc = INDEX_op_setcond_i32; | ||
354 | op->args[2] = op->args[3]; | ||
355 | op->args[3] = op->args[5]; | ||
356 | - } else if (op->args[5] == TCG_COND_NE) { | ||
357 | + break; | ||
358 | + } | ||
359 | + if (op->args[5] == TCG_COND_NE) { | ||
360 | /* Simplify NE comparisons where one of the pairs | ||
361 | can be simplified. */ | ||
362 | tmp = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
363 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
364 | } else if (tmp == 1) { | ||
365 | goto do_setcond_const; | ||
366 | } | ||
367 | - goto do_default; | ||
368 | - } else { | ||
369 | - goto do_default; | ||
370 | } | ||
371 | break; | ||
372 | |||
373 | - case INDEX_op_call: | ||
374 | - if (!(tcg_call_flags(op) | ||
375 | + default: | ||
376 | + break; | 18 | + break; |
377 | + } | 19 | + } |
378 | + | 20 | + if (use_movbe) { |
379 | + /* Some of the folding above can change opc. */ | 21 | + TCGReg t = datalo; |
380 | + opc = op->opc; | 22 | + datalo = datahi; |
381 | + def = &tcg_op_defs[opc]; | 23 | + datahi = t; |
382 | + if (def->flags & TCG_OPF_BB_END) { | 24 | + } |
383 | + memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | 25 | + if (base == datalo || index == datalo) { |
384 | + } else { | 26 | + tcg_out_modrm_sib_offset(s, OPC_LEA, datahi, base, index, 0, ofs); |
385 | + if (opc == INDEX_op_call && | 27 | + tcg_out_modrm_offset(s, movop + seg, datalo, datahi, 0); |
386 | + !(tcg_call_flags(op) | 28 | + tcg_out_modrm_offset(s, movop + seg, datahi, datahi, 4); |
387 | & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { | 29 | } else { |
388 | for (i = 0; i < nb_globals; i++) { | 30 | - if (use_movbe) { |
389 | if (test_bit(i, ctx.temps_used.l)) { | 31 | - TCGReg t = datalo; |
390 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 32 | - datalo = datahi; |
391 | } | 33 | - datahi = t; |
392 | } | 34 | - } |
393 | } | 35 | - if (base != datalo) { |
394 | - goto do_reset_output; | 36 | - tcg_out_modrm_sib_offset(s, movop + seg, datalo, |
395 | 37 | - base, index, 0, ofs); | |
396 | - default: | 38 | - tcg_out_modrm_sib_offset(s, movop + seg, datahi, |
397 | - do_default: | 39 | - base, index, 0, ofs + 4); |
398 | - /* Default case: we know nothing about operation (or were unable | ||
399 | - to compute the operation result) so no propagation is done. | ||
400 | - We trash everything if the operation is the end of a basic | ||
401 | - block, otherwise we only trash the output args. "z_mask" is | ||
402 | - the non-zero bits mask for the first output arg. */ | ||
403 | - if (def->flags & TCG_OPF_BB_END) { | ||
404 | - memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
405 | - } else { | 40 | - } else { |
406 | - do_reset_output: | 41 | - tcg_out_modrm_sib_offset(s, movop + seg, datahi, |
407 | - for (i = 0; i < nb_oargs; i++) { | 42 | - base, index, 0, ofs + 4); |
408 | - reset_temp(op->args[i]); | 43 | - tcg_out_modrm_sib_offset(s, movop + seg, datalo, |
409 | - /* Save the corresponding known-zero bits mask for the | 44 | - base, index, 0, ofs); |
410 | - first output argument (only one supported so far). */ | 45 | - } |
411 | - if (i == 0) { | 46 | + tcg_out_modrm_sib_offset(s, movop + seg, datalo, |
412 | - arg_info(op->args[i])->z_mask = z_mask; | 47 | + base, index, 0, ofs); |
413 | - } | 48 | + tcg_out_modrm_sib_offset(s, movop + seg, datahi, |
414 | + for (i = 0; i < nb_oargs; i++) { | 49 | + base, index, 0, ofs + 4); |
415 | + reset_temp(op->args[i]); | ||
416 | + /* Save the corresponding known-zero bits mask for the | ||
417 | + first output argument (only one supported so far). */ | ||
418 | + if (i == 0) { | ||
419 | + arg_info(op->args[i])->z_mask = z_mask; | ||
420 | } | ||
421 | } | ||
422 | - break; | ||
423 | } | 50 | } |
424 | 51 | break; | |
425 | /* Eliminate duplicate and redundant fence instructions. */ | 52 | default: |
426 | -- | 53 | -- |
427 | 2.25.1 | 54 | 2.34.1 |
428 | |||
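The load ordering described in the i386 patch above is easiest to see with the overlap case written out. A toy C model of the two-part load follows; the regs array stands in for the register file, mem_read32 for the emitted mov, and the names are illustrative rather than QEMU APIs:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[8];                           /* toy register file */
    static uint8_t guest_ram[16] = { 1, 0, 0, 0, 2, 0, 0, 0 };

    static uint32_t mem_read32(uint32_t addr)
    {
        return guest_ram[addr] | guest_ram[addr + 1] << 8 |
               (uint32_t)guest_ram[addr + 2] << 16 |
               (uint32_t)guest_ram[addr + 3] << 24;
    }

    /*
     * Load 64 bits into regs[datalo]:regs[datahi].  If the address
     * register aliases datalo, the first load would clobber the address,
     * so stash the address in datahi first (LEA in the real code); datahi
     * is overwritten by the final load anyway.  Loads stay in ascending
     * order so a fault is reported for the low, i.e. first, address.
     */
    static void load_pair(int datalo, int datahi, int base)
    {
        if (base == datalo) {
            regs[datahi] = regs[base];
            regs[datalo] = mem_read32(regs[datahi]);
            regs[datahi] = mem_read32(regs[datahi] + 4);
        } else {
            regs[datalo] = mem_read32(regs[base]);
            regs[datahi] = mem_read32(regs[base] + 4);
        }
    }

    int main(void)
    {
        regs[2] = 0;                /* address lives in the datalo register */
        load_pair(2, 3, 2);
        printf("lo=%u hi=%u\n", (unsigned)regs[2], (unsigned)regs[3]);
        return 0;                   /* prints lo=1 hi=2 */
    }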
1 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 1 | Collect the 4 potential parts of the host address into a struct. |
---|---|---|---|
2 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 2 | Reorg tcg_out_qemu_{ld,st}_direct to use it. |
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 3 | Reorg guest_base handling to use it. |
4 | |||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 7 | --- |
6 | tcg/optimize.c | 9 ++++++--- | 8 | tcg/i386/tcg-target.c.inc | 165 +++++++++++++++++++++----------------- |
7 | 1 file changed, 6 insertions(+), 3 deletions(-) | 9 | 1 file changed, 90 insertions(+), 75 deletions(-) |
8 | 10 | ||
9 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc |
10 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
11 | --- a/tcg/optimize.c | 13 | --- a/tcg/i386/tcg-target.c.inc |
12 | +++ b/tcg/optimize.c | 14 | +++ b/tcg/i386/tcg-target.c.inc |
13 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 15 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n) |
14 | uint64_t z_mask, partmask, affected, tmp; | 16 | tcg_out8(s, 0x90); |
15 | TCGOpcode opc = op->opc; | 17 | } |
16 | const TCGOpDef *def; | 18 | |
17 | + bool done = false; | 19 | +typedef struct { |
18 | 20 | + TCGReg base; | |
19 | /* Calls are special. */ | 21 | + int index; |
20 | if (opc == INDEX_op_call) { | 22 | + int ofs; |
21 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 23 | + int seg; |
22 | allocator where needed and possible. Also detect copies. */ | 24 | +} HostAddress; |
23 | switch (opc) { | 25 | + |
24 | CASE_OP_32_64_VEC(mov): | 26 | #if defined(CONFIG_SOFTMMU) |
25 | - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | 27 | /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, |
26 | - continue; | 28 | * int mmu_idx, uintptr_t ra) |
27 | + done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | 29 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
28 | + break; | 30 | return tcg_out_fail_alignment(s, l); |
29 | 31 | } | |
30 | case INDEX_op_dup_vec: | 32 | |
31 | if (arg_is_const(op->args[1])) { | 33 | -#if TCG_TARGET_REG_BITS == 32 |
32 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 34 | -# define x86_guest_base_seg 0 |
35 | -# define x86_guest_base_index -1 | ||
36 | -# define x86_guest_base_offset guest_base | ||
37 | -#else | ||
38 | -static int x86_guest_base_seg; | ||
39 | -static int x86_guest_base_index = -1; | ||
40 | -static int32_t x86_guest_base_offset; | ||
41 | -# if defined(__x86_64__) && defined(__linux__) | ||
42 | -# include <asm/prctl.h> | ||
43 | -# include <sys/prctl.h> | ||
44 | +static HostAddress x86_guest_base = { | ||
45 | + .index = -1 | ||
46 | +}; | ||
47 | + | ||
48 | +#if defined(__x86_64__) && defined(__linux__) | ||
49 | +# include <asm/prctl.h> | ||
50 | +# include <sys/prctl.h> | ||
51 | int arch_prctl(int code, unsigned long addr); | ||
52 | static inline int setup_guest_base_seg(void) | ||
53 | { | ||
54 | @@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void) | ||
55 | } | ||
56 | return 0; | ||
57 | } | ||
58 | -# elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__) | ||
59 | -# include <machine/sysarch.h> | ||
60 | +#elif defined(__x86_64__) && \ | ||
61 | + (defined (__FreeBSD__) || defined (__FreeBSD_kernel__)) | ||
62 | +# include <machine/sysarch.h> | ||
63 | static inline int setup_guest_base_seg(void) | ||
64 | { | ||
65 | if (sysarch(AMD64_SET_GSBASE, &guest_base) == 0) { | ||
66 | @@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void) | ||
67 | } | ||
68 | return 0; | ||
69 | } | ||
70 | -# else | ||
71 | +#else | ||
72 | static inline int setup_guest_base_seg(void) | ||
73 | { | ||
74 | return 0; | ||
75 | } | ||
76 | -# endif | ||
77 | -#endif | ||
78 | +#endif /* setup_guest_base_seg */ | ||
79 | #endif /* SOFTMMU */ | ||
80 | |||
81 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
82 | - TCGReg base, int index, intptr_t ofs, | ||
83 | - int seg, TCGType type, MemOp memop) | ||
84 | + HostAddress h, TCGType type, MemOp memop) | ||
85 | { | ||
86 | bool use_movbe = false; | ||
87 | int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW); | ||
88 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
89 | |||
90 | switch (memop & MO_SSIZE) { | ||
91 | case MO_UB: | ||
92 | - tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo, | ||
93 | - base, index, 0, ofs); | ||
94 | + tcg_out_modrm_sib_offset(s, OPC_MOVZBL + h.seg, datalo, | ||
95 | + h.base, h.index, 0, h.ofs); | ||
96 | break; | ||
97 | case MO_SB: | ||
98 | - tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo, | ||
99 | - base, index, 0, ofs); | ||
100 | + tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + h.seg, datalo, | ||
101 | + h.base, h.index, 0, h.ofs); | ||
102 | break; | ||
103 | case MO_UW: | ||
104 | if (use_movbe) { | ||
105 | /* There is no extending movbe; only low 16-bits are modified. */ | ||
106 | - if (datalo != base && datalo != index) { | ||
107 | + if (datalo != h.base && datalo != h.index) { | ||
108 | /* XOR breaks dependency chains. */ | ||
109 | tgen_arithr(s, ARITH_XOR, datalo, datalo); | ||
110 | - tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, | ||
111 | - datalo, base, index, 0, ofs); | ||
112 | + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg, | ||
113 | + datalo, h.base, h.index, 0, h.ofs); | ||
114 | } else { | ||
115 | - tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, | ||
116 | - datalo, base, index, 0, ofs); | ||
117 | + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg, | ||
118 | + datalo, h.base, h.index, 0, h.ofs); | ||
119 | tcg_out_ext16u(s, datalo, datalo); | ||
120 | } | ||
121 | } else { | ||
122 | - tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo, | ||
123 | - base, index, 0, ofs); | ||
124 | + tcg_out_modrm_sib_offset(s, OPC_MOVZWL + h.seg, datalo, | ||
125 | + h.base, h.index, 0, h.ofs); | ||
126 | } | ||
127 | break; | ||
128 | case MO_SW: | ||
129 | if (use_movbe) { | ||
130 | - tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, | ||
131 | - datalo, base, index, 0, ofs); | ||
132 | + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg, | ||
133 | + datalo, h.base, h.index, 0, h.ofs); | ||
134 | tcg_out_ext16s(s, type, datalo, datalo); | ||
135 | } else { | ||
136 | - tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg, | ||
137 | - datalo, base, index, 0, ofs); | ||
138 | + tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + h.seg, | ||
139 | + datalo, h.base, h.index, 0, h.ofs); | ||
140 | } | ||
141 | break; | ||
142 | case MO_UL: | ||
143 | - tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); | ||
144 | + tcg_out_modrm_sib_offset(s, movop + h.seg, datalo, | ||
145 | + h.base, h.index, 0, h.ofs); | ||
146 | break; | ||
147 | #if TCG_TARGET_REG_BITS == 64 | ||
148 | case MO_SL: | ||
149 | if (use_movbe) { | ||
150 | - tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + seg, datalo, | ||
151 | - base, index, 0, ofs); | ||
152 | + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + h.seg, datalo, | ||
153 | + h.base, h.index, 0, h.ofs); | ||
154 | tcg_out_ext32s(s, datalo, datalo); | ||
155 | } else { | ||
156 | - tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo, | ||
157 | - base, index, 0, ofs); | ||
158 | + tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + h.seg, datalo, | ||
159 | + h.base, h.index, 0, h.ofs); | ||
160 | } | ||
161 | break; | ||
162 | #endif | ||
163 | case MO_UQ: | ||
164 | if (TCG_TARGET_REG_BITS == 64) { | ||
165 | - tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, | ||
166 | - base, index, 0, ofs); | ||
167 | + tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo, | ||
168 | + h.base, h.index, 0, h.ofs); | ||
33 | break; | 169 | break; |
34 | } | 170 | } |
35 | 171 | if (use_movbe) { | |
36 | - finish_folding(&ctx, op); | 172 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, |
37 | + if (!done) { | 173 | datalo = datahi; |
38 | + finish_folding(&ctx, op); | 174 | datahi = t; |
39 | + } | 175 | } |
40 | 176 | - if (base == datalo || index == datalo) { | |
41 | /* Eliminate duplicate and redundant fence instructions. */ | 177 | - tcg_out_modrm_sib_offset(s, OPC_LEA, datahi, base, index, 0, ofs); |
42 | if (ctx.prev_mb) { | 178 | - tcg_out_modrm_offset(s, movop + seg, datalo, datahi, 0); |
179 | - tcg_out_modrm_offset(s, movop + seg, datahi, datahi, 4); | ||
180 | + if (h.base == datalo || h.index == datalo) { | ||
181 | + tcg_out_modrm_sib_offset(s, OPC_LEA, datahi, | ||
182 | + h.base, h.index, 0, h.ofs); | ||
183 | + tcg_out_modrm_offset(s, movop + h.seg, datalo, datahi, 0); | ||
184 | + tcg_out_modrm_offset(s, movop + h.seg, datahi, datahi, 4); | ||
185 | } else { | ||
186 | - tcg_out_modrm_sib_offset(s, movop + seg, datalo, | ||
187 | - base, index, 0, ofs); | ||
188 | - tcg_out_modrm_sib_offset(s, movop + seg, datahi, | ||
189 | - base, index, 0, ofs + 4); | ||
190 | + tcg_out_modrm_sib_offset(s, movop + h.seg, datalo, | ||
191 | + h.base, h.index, 0, h.ofs); | ||
192 | + tcg_out_modrm_sib_offset(s, movop + h.seg, datahi, | ||
193 | + h.base, h.index, 0, h.ofs + 4); | ||
194 | } | ||
195 | break; | ||
196 | default: | ||
197 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
198 | MemOpIdx oi, TCGType data_type) | ||
199 | { | ||
200 | MemOp opc = get_memop(oi); | ||
201 | + HostAddress h; | ||
202 | |||
203 | #if defined(CONFIG_SOFTMMU) | ||
204 | tcg_insn_unit *label_ptr[2]; | ||
205 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
206 | label_ptr, offsetof(CPUTLBEntry, addr_read)); | ||
207 | |||
208 | /* TLB Hit. */ | ||
209 | - tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, | ||
210 | - -1, 0, 0, data_type, opc); | ||
211 | + h.base = TCG_REG_L1; | ||
212 | + h.index = -1; | ||
213 | + h.ofs = 0; | ||
214 | + h.seg = 0; | ||
215 | + tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, opc); | ||
216 | |||
217 | /* Record the current context of a load into ldst label */ | ||
218 | add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi, | ||
219 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
220 | tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
221 | } | ||
222 | |||
223 | - tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index, | ||
224 | - x86_guest_base_offset, x86_guest_base_seg, | ||
225 | - data_type, opc); | ||
226 | + h = x86_guest_base; | ||
227 | + h.base = addrlo; | ||
228 | + tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, opc); | ||
229 | #endif | ||
230 | } | ||
231 | |||
232 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
233 | - TCGReg base, int index, intptr_t ofs, | ||
234 | - int seg, MemOp memop) | ||
235 | + HostAddress h, MemOp memop) | ||
236 | { | ||
237 | bool use_movbe = false; | ||
238 | int movop = OPC_MOVL_EvGv; | ||
239 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
240 | case MO_8: | ||
241 | /* This is handled with constraints on INDEX_op_qemu_st8_i32. */ | ||
242 | tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4); | ||
243 | - tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg, | ||
244 | - datalo, base, index, 0, ofs); | ||
245 | + tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg, | ||
246 | + datalo, h.base, h.index, 0, h.ofs); | ||
247 | break; | ||
248 | case MO_16: | ||
249 | - tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo, | ||
250 | - base, index, 0, ofs); | ||
251 | + tcg_out_modrm_sib_offset(s, movop + P_DATA16 + h.seg, datalo, | ||
252 | + h.base, h.index, 0, h.ofs); | ||
253 | break; | ||
254 | case MO_32: | ||
255 | - tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); | ||
256 | + tcg_out_modrm_sib_offset(s, movop + h.seg, datalo, | ||
257 | + h.base, h.index, 0, h.ofs); | ||
258 | break; | ||
259 | case MO_64: | ||
260 | if (TCG_TARGET_REG_BITS == 64) { | ||
261 | - tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, | ||
262 | - base, index, 0, ofs); | ||
263 | + tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo, | ||
264 | + h.base, h.index, 0, h.ofs); | ||
265 | } else { | ||
266 | if (use_movbe) { | ||
267 | TCGReg t = datalo; | ||
268 | datalo = datahi; | ||
269 | datahi = t; | ||
270 | } | ||
271 | - tcg_out_modrm_sib_offset(s, movop + seg, datalo, | ||
272 | - base, index, 0, ofs); | ||
273 | - tcg_out_modrm_sib_offset(s, movop + seg, datahi, | ||
274 | - base, index, 0, ofs + 4); | ||
275 | + tcg_out_modrm_sib_offset(s, movop + h.seg, datalo, | ||
276 | + h.base, h.index, 0, h.ofs); | ||
277 | + tcg_out_modrm_sib_offset(s, movop + h.seg, datahi, | ||
278 | + h.base, h.index, 0, h.ofs + 4); | ||
279 | } | ||
280 | break; | ||
281 | default: | ||
282 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
283 | MemOpIdx oi, TCGType data_type) | ||
284 | { | ||
285 | MemOp opc = get_memop(oi); | ||
286 | + HostAddress h; | ||
287 | |||
288 | #if defined(CONFIG_SOFTMMU) | ||
289 | tcg_insn_unit *label_ptr[2]; | ||
290 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
291 | label_ptr, offsetof(CPUTLBEntry, addr_write)); | ||
292 | |||
293 | /* TLB Hit. */ | ||
294 | - tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc); | ||
295 | + h.base = TCG_REG_L1; | ||
296 | + h.index = -1; | ||
297 | + h.ofs = 0; | ||
298 | + h.seg = 0; | ||
299 | + tcg_out_qemu_st_direct(s, datalo, datahi, h, opc); | ||
300 | |||
301 | /* Record the current context of a store into ldst label */ | ||
302 | add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi, | ||
303 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
304 | tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); | ||
305 | } | ||
306 | |||
307 | - tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index, | ||
308 | - x86_guest_base_offset, x86_guest_base_seg, opc); | ||
309 | + h = x86_guest_base; | ||
310 | + h.base = addrlo; | ||
311 | + | ||
312 | + tcg_out_qemu_st_direct(s, datalo, datahi, h, opc); | ||
313 | #endif | ||
314 | } | ||
315 | |||
316 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
317 | (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4 | ||
318 | + stack_addend); | ||
319 | #else | ||
320 | -# if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64 | ||
321 | +# if !defined(CONFIG_SOFTMMU) | ||
322 | if (guest_base) { | ||
323 | int seg = setup_guest_base_seg(); | ||
324 | if (seg != 0) { | ||
325 | - x86_guest_base_seg = seg; | ||
326 | + x86_guest_base.seg = seg; | ||
327 | } else if (guest_base == (int32_t)guest_base) { | ||
328 | - x86_guest_base_offset = guest_base; | ||
329 | + x86_guest_base.ofs = guest_base; | ||
330 | } else { | ||
331 | /* Choose R12 because, as a base, it requires a SIB byte. */ | ||
332 | - x86_guest_base_index = TCG_REG_R12; | ||
333 | - tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base); | ||
334 | - tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index); | ||
335 | + x86_guest_base.index = TCG_REG_R12; | ||
336 | + tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base); | ||
337 | + tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index); | ||
338 | } | ||
339 | } | ||
340 | # endif | ||
43 | -- | 341 | -- |
44 | 2.25.1 | 342 | 2.34.1 |
45 | 343 | ||
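The HostAddress refactoring above amounts to passing the addressing mode around as one value instead of four loose parameters. A self-contained sketch of the pattern, with simplified field types (the real struct is local to tcg/i386/tcg-target.c.inc):

    #include <stdio.h>

    /* The four components of an x86 SIB addressing mode, kept together. */
    typedef struct {
        int base;       /* base register */
        int index;      /* index register, or -1 for none */
        int ofs;        /* displacement */
        int seg;        /* segment-override prefix, or 0 */
    } HostAddress;

    static void emit_ld(HostAddress h)
    {
        printf("load from %d(base r%d, index r%d), seg %d\n",
               h.ofs, h.base, h.index, h.seg);
    }

    int main(void)
    {
        /* Softmmu TLB-hit case: base register, no index, no offset. */
        HostAddress h = { .base = 1, .index = -1, .ofs = 0, .seg = 0 };
        emit_ld(h);

        /* User-only case: start from precomputed guest_base parts and
           fill in the address register, as the patch does.  The offset
           value here is illustrative. */
        HostAddress g = { .index = -1, .ofs = 0x1000 };
        g.base = 5;
        emit_ld(g);
        return 0;
    }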
1 | The result is either 0 or 1, which means that we have | 1 | Use TCG_REG_L[01] constants directly. |
---|---|---|---|
2 | a 2-bit signed result, and thus 62 bits of sign. | ||
3 | For clarity, use the smask_from_zmask function. | ||
4 | 2 | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
6 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 5 | --- |
9 | tcg/optimize.c | 2 ++ | 6 | tcg/i386/tcg-target.c.inc | 32 ++++++++++++++++---------------- |
10 | 1 file changed, 2 insertions(+) | 7 | 1 file changed, 16 insertions(+), 16 deletions(-) |
11 | 8 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 9 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc |
13 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 11 | --- a/tcg/i386/tcg-target.c.inc |
15 | +++ b/tcg/optimize.c | 12 | +++ b/tcg/i386/tcg-target.c.inc |
16 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op) | 13 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, |
14 | int mem_index, MemOp opc, | ||
15 | tcg_insn_unit **label_ptr, int which) | ||
16 | { | ||
17 | - const TCGReg r0 = TCG_REG_L0; | ||
18 | - const TCGReg r1 = TCG_REG_L1; | ||
19 | TCGType ttype = TCG_TYPE_I32; | ||
20 | TCGType tlbtype = TCG_TYPE_I32; | ||
21 | int trexw = 0, hrexw = 0, tlbrexw = 0; | ||
22 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
23 | } | ||
17 | } | 24 | } |
18 | 25 | ||
19 | ctx->z_mask = 1; | 26 | - tcg_out_mov(s, tlbtype, r0, addrlo); |
20 | + ctx->s_mask = smask_from_zmask(1); | 27 | - tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0, |
21 | return false; | 28 | + tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo); |
29 | + tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0, | ||
30 | TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); | ||
31 | |||
32 | - tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0, | ||
33 | + tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0, | ||
34 | TLB_MASK_TABLE_OFS(mem_index) + | ||
35 | offsetof(CPUTLBDescFast, mask)); | ||
36 | |||
37 | - tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0, | ||
38 | + tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0, | ||
39 | TLB_MASK_TABLE_OFS(mem_index) + | ||
40 | offsetof(CPUTLBDescFast, table)); | ||
41 | |||
42 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
43 | copy the address and mask. For lesser alignments, check that we don't | ||
44 | cross pages for the complete access. */ | ||
45 | if (a_bits >= s_bits) { | ||
46 | - tcg_out_mov(s, ttype, r1, addrlo); | ||
47 | + tcg_out_mov(s, ttype, TCG_REG_L1, addrlo); | ||
48 | } else { | ||
49 | - tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask); | ||
50 | + tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1, | ||
51 | + addrlo, s_mask - a_mask); | ||
52 | } | ||
53 | tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask; | ||
54 | - tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0); | ||
55 | + tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0); | ||
56 | |||
57 | - /* cmp 0(r0), r1 */ | ||
58 | - tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which); | ||
59 | + /* cmp 0(TCG_REG_L0), TCG_REG_L1 */ | ||
60 | + tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, | ||
61 | + TCG_REG_L1, TCG_REG_L0, which); | ||
62 | |||
63 | /* Prepare for both the fast path add of the tlb addend, and the slow | ||
64 | path function argument setup. */ | ||
65 | - tcg_out_mov(s, ttype, r1, addrlo); | ||
66 | + tcg_out_mov(s, ttype, TCG_REG_L1, addrlo); | ||
67 | |||
68 | /* jne slow_path */ | ||
69 | tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); | ||
70 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
71 | s->code_ptr += 4; | ||
72 | |||
73 | if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { | ||
74 | - /* cmp 4(r0), addrhi */ | ||
75 | - tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4); | ||
76 | + /* cmp 4(TCG_REG_L0), addrhi */ | ||
77 | + tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, which + 4); | ||
78 | |||
79 | /* jne slow_path */ | ||
80 | tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); | ||
81 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
82 | |||
83 | /* TLB Hit. */ | ||
84 | |||
85 | - /* add addend(r0), r1 */ | ||
86 | - tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0, | ||
87 | + /* add addend(TCG_REG_L0), TCG_REG_L1 */ | ||
88 | + tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L1, TCG_REG_L0, | ||
89 | offsetof(CPUTLBEntry, addend)); | ||
22 | } | 90 | } |
23 | 91 | ||
24 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
25 | } | ||
26 | |||
27 | ctx->z_mask = 1; | ||
28 | + ctx->s_mask = smask_from_zmask(1); | ||
29 | return false; | ||
30 | |||
31 | do_setcond_const: | ||
32 | -- | 92 | -- |
33 | 2.25.1 | 93 | 2.34.1 |
34 | 94 | ||
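The "62 bits of sign" arithmetic above can be checked in a few lines. This is a sketch of the idea behind smask_from_zmask, not a copy of the QEMU function; it assumes GCC/Clang builtins and a nonzero zmask:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * The leading zeros of zmask are the high bits known to be zero;
     * all but the last of those are guaranteed repetitions of the sign
     * bit.  A setcond result has zmask == 1: 63 leading zeros, hence
     * 62 bits of sign.
     */
    static uint64_t smask_from_zmask(uint64_t zmask)
    {
        int rep = __builtin_clzll(zmask);   /* zmask must be nonzero */
        if (rep == 0) {
            return 0;                       /* msb unknown: no sign info */
        }
        return ~(~0ull >> (rep - 1));
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)smask_from_zmask(1));
        return 0;   /* prints 0xfffffffffffffffc: top 62 bits are sign */
    }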
1 | Copy z_mask into OptContext, for writeback to the | 1 | Split out a helper for choosing testb vs testl. |
---|---|---|---|
2 | first output within the new function. | ||
3 | 2 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 5 | --- |
8 | tcg/optimize.c | 49 +++++++++++++++++++++++++++++++++---------------- | 6 | tcg/i386/tcg-target.c.inc | 30 ++++++++++++++++++------------ |
9 | 1 file changed, 33 insertions(+), 16 deletions(-) | 7 | 1 file changed, 18 insertions(+), 12 deletions(-) |
10 | 8 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 9 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc |
12 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 11 | --- a/tcg/i386/tcg-target.c.inc |
14 | +++ b/tcg/optimize.c | 12 | +++ b/tcg/i386/tcg-target.c.inc |
15 | @@ -XXX,XX +XXX,XX @@ typedef struct OptContext { | 13 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n) |
16 | TCGContext *tcg; | 14 | tcg_out8(s, 0x90); |
17 | TCGOp *prev_mb; | ||
18 | TCGTempSet temps_used; | ||
19 | + | ||
20 | + /* In flight values from optimization. */ | ||
21 | + uint64_t z_mask; | ||
22 | } OptContext; | ||
23 | |||
24 | static inline TempOptInfo *ts_info(TCGTemp *ts) | ||
25 | @@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op, | ||
26 | } | ||
27 | } | 15 | } |
28 | 16 | ||
29 | +static void finish_folding(OptContext *ctx, TCGOp *op) | 17 | +/* Test register R vs immediate bits I, setting Z flag for EQ/NE. */ |
18 | +static void __attribute__((unused)) | ||
19 | +tcg_out_testi(TCGContext *s, TCGReg r, uint32_t i) | ||
30 | +{ | 20 | +{ |
31 | + const TCGOpDef *def = &tcg_op_defs[op->opc]; | ||
32 | + int i, nb_oargs; | ||
33 | + | ||
34 | + /* | 21 | + /* |
35 | + * For an opcode that ends a BB, reset all temp data. | 22 | + * This is used for testing alignment, so we can usually use testb. |
36 | + * We do no cross-BB optimization. | 23 | + * For i686, we have to use testl for %esi/%edi. |
37 | + */ | 24 | + */ |
38 | + if (def->flags & TCG_OPF_BB_END) { | 25 | + if (i <= 0xff && (TCG_TARGET_REG_BITS == 64 || r < 4)) { |
39 | + memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); | 26 | + tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, r); |
40 | + ctx->prev_mb = NULL; | 27 | + tcg_out8(s, i); |
41 | + return; | 28 | + } else { |
42 | + } | 29 | + tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, r); |
43 | + | 30 | + tcg_out32(s, i); |
44 | + nb_oargs = def->nb_oargs; | ||
45 | + for (i = 0; i < nb_oargs; i++) { | ||
46 | + reset_temp(op->args[i]); | ||
47 | + /* | ||
48 | + * Save the corresponding known-zero bits mask for the | ||
49 | + * first output argument (only one supported so far). | ||
50 | + */ | ||
51 | + if (i == 0) { | ||
52 | + arg_info(op->args[i])->z_mask = ctx->z_mask; | ||
53 | + } | ||
54 | + } | 31 | + } |
55 | +} | 32 | +} |
56 | + | 33 | + |
57 | static bool fold_call(OptContext *ctx, TCGOp *op) | 34 | typedef struct { |
58 | { | 35 | TCGReg base; |
59 | TCGContext *s = ctx->tcg; | 36 | int index; |
60 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 37 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, |
61 | partmask &= 0xffffffffu; | 38 | unsigned a_mask = (1 << a_bits) - 1; |
62 | affected &= 0xffffffffu; | 39 | TCGLabelQemuLdst *label; |
63 | } | 40 | |
64 | + ctx.z_mask = z_mask; | 41 | - /* |
65 | 42 | - * We are expecting a_bits to max out at 7, so we can usually use testb. | |
66 | if (partmask == 0) { | 43 | - * For i686, we have to use testl for %esi/%edi. |
67 | tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | 44 | - */ |
68 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 45 | - if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) { |
69 | break; | 46 | - tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo); |
70 | } | 47 | - tcg_out8(s, a_mask); |
71 | 48 | - } else { | |
72 | - /* Some of the folding above can change opc. */ | 49 | - tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo); |
73 | - opc = op->opc; | 50 | - tcg_out32(s, a_mask); |
74 | - def = &tcg_op_defs[opc]; | 51 | - } |
75 | - if (def->flags & TCG_OPF_BB_END) { | 52 | - |
76 | - memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | 53 | + tcg_out_testi(s, addrlo, a_mask); |
77 | - } else { | 54 | /* jne slow_path */ |
78 | - int nb_oargs = def->nb_oargs; | 55 | tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); |
79 | - for (i = 0; i < nb_oargs; i++) { | 56 | |
80 | - reset_temp(op->args[i]); | ||
81 | - /* Save the corresponding known-zero bits mask for the | ||
82 | - first output argument (only one supported so far). */ | ||
83 | - if (i == 0) { | ||
84 | - arg_info(op->args[i])->z_mask = z_mask; | ||
85 | - } | ||
86 | - } | ||
87 | - } | ||
88 | + finish_folding(&ctx, op); | ||
89 | |||
90 | /* Eliminate duplicate and redundant fence instructions. */ | ||
91 | if (ctx.prev_mb) { | ||
92 | -- | 57 | -- |
93 | 2.25.1 | 58 | 2.34.1 |
94 | 59 | ||
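The testb/testl split above reflects an ISA quirk: on 32-bit x86 only registers 0-3 (%eax..%ebx) have byte-addressable forms, while x86-64 can byte-address any register thanks to the REX prefix. A toy model of the same decision, printing mnemonics instead of emitting real encodings:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the patch's condition: a byte-sized test needs an 8-bit
       immediate and, on i686, a byte-addressable register (0-3). */
    static void emit_testi(int rex_available, int r, uint32_t i)
    {
        if (i <= 0xff && (rex_available || r < 4)) {
            printf("testb $0x%x, r%d\n", i, r);
        } else {
            printf("testl $0x%x, r%d\n", i, r);
        }
    }

    int main(void)
    {
        emit_testi(0, 0, 0x7);   /* i686 %eax: testb is fine    */
        emit_testi(0, 6, 0x7);   /* i686 %esi: must use testl   */
        emit_testi(1, 6, 0x7);   /* x86-64 %sil via REX: testb  */
        return 0;
    }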
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 1 | Rename the 'ext' parameter to 'data_type' to make the use clearer; |
---|---|---|---|
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 2 | pass it to tcg_out_qemu_st as well to even out the interfaces. |
3 | Rename the 'otype' local 'addr_type' to make the use clearer. | ||
4 | |||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | --- | 7 | --- |
5 | tcg/optimize.c | 48 ++++++++++++++++++++++++++++++------------------ | 8 | tcg/aarch64/tcg-target.c.inc | 36 +++++++++++++++++------------------- |
6 | 1 file changed, 30 insertions(+), 18 deletions(-) | 9 | 1 file changed, 17 insertions(+), 19 deletions(-) |
7 | 10 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc |
9 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
10 | --- a/tcg/optimize.c | 13 | --- a/tcg/aarch64/tcg-target.c.inc |
11 | +++ b/tcg/optimize.c | 14 | +++ b/tcg/aarch64/tcg-target.c.inc |
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op) | 15 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, |
13 | return fold_const2(ctx, op); | ||
14 | } | 16 | } |
15 | 17 | ||
16 | +static bool fold_extract(OptContext *ctx, TCGOp *op) | 18 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
17 | +{ | 19 | - MemOpIdx oi, TCGType ext) |
18 | + if (arg_is_const(op->args[1])) { | 20 | + MemOpIdx oi, TCGType data_type) |
19 | + uint64_t t; | ||
20 | + | ||
21 | + t = arg_info(op->args[1])->val; | ||
22 | + t = extract64(t, op->args[2], op->args[3]); | ||
23 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
24 | + } | ||
25 | + return false; | ||
26 | +} | ||
27 | + | ||
28 | static bool fold_extract2(OptContext *ctx, TCGOp *op) | ||
29 | { | 21 | { |
30 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | 22 | MemOp memop = get_memop(oi); |
31 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) | 23 | - const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; |
32 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | 24 | + TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; |
25 | |||
26 | /* Byte swapping is left to middle-end expansion. */ | ||
27 | tcg_debug_assert((memop & MO_BSWAP) == 0); | ||
28 | |||
29 | #ifdef CONFIG_SOFTMMU | ||
30 | - unsigned mem_index = get_mmuidx(oi); | ||
31 | tcg_insn_unit *label_ptr; | ||
32 | |||
33 | - tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1); | ||
34 | - tcg_out_qemu_ld_direct(s, memop, ext, data_reg, | ||
35 | - TCG_REG_X1, otype, addr_reg); | ||
36 | - add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg, | ||
37 | + tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 1); | ||
38 | + tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, | ||
39 | + TCG_REG_X1, addr_type, addr_reg); | ||
40 | + add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg, | ||
41 | s->code_ptr, label_ptr); | ||
42 | #else /* !CONFIG_SOFTMMU */ | ||
43 | unsigned a_bits = get_alignment_bits(memop); | ||
44 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
45 | tcg_out_test_alignment(s, true, addr_reg, a_bits); | ||
46 | } | ||
47 | if (USE_GUEST_BASE) { | ||
48 | - tcg_out_qemu_ld_direct(s, memop, ext, data_reg, | ||
49 | - TCG_REG_GUEST_BASE, otype, addr_reg); | ||
50 | + tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, | ||
51 | + TCG_REG_GUEST_BASE, addr_type, addr_reg); | ||
52 | } else { | ||
53 | - tcg_out_qemu_ld_direct(s, memop, ext, data_reg, | ||
54 | + tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, | ||
55 | addr_reg, TCG_TYPE_I64, TCG_REG_XZR); | ||
56 | } | ||
57 | #endif /* CONFIG_SOFTMMU */ | ||
33 | } | 58 | } |
34 | 59 | ||
35 | +static bool fold_sextract(OptContext *ctx, TCGOp *op) | 60 | static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
36 | +{ | 61 | - MemOpIdx oi) |
37 | + if (arg_is_const(op->args[1])) { | 62 | + MemOpIdx oi, TCGType data_type) |
38 | + uint64_t t; | ||
39 | + | ||
40 | + t = arg_info(op->args[1])->val; | ||
41 | + t = sextract64(t, op->args[2], op->args[3]); | ||
42 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
43 | + } | ||
44 | + return false; | ||
45 | +} | ||
46 | + | ||
47 | static bool fold_shift(OptContext *ctx, TCGOp *op) | ||
48 | { | 63 | { |
49 | return fold_const2(ctx, op); | 64 | MemOp memop = get_memop(oi); |
50 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 65 | - const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; |
51 | } | 66 | + TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; |
52 | break; | 67 | |
53 | 68 | /* Byte swapping is left to middle-end expansion. */ | |
54 | - CASE_OP_32_64(extract): | 69 | tcg_debug_assert((memop & MO_BSWAP) == 0); |
55 | - if (arg_is_const(op->args[1])) { | 70 | |
56 | - tmp = extract64(arg_info(op->args[1])->val, | 71 | #ifdef CONFIG_SOFTMMU |
57 | - op->args[2], op->args[3]); | 72 | - unsigned mem_index = get_mmuidx(oi); |
58 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | 73 | tcg_insn_unit *label_ptr; |
59 | - continue; | 74 | |
60 | - } | 75 | - tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0); |
61 | - break; | 76 | + tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 0); |
62 | - | 77 | tcg_out_qemu_st_direct(s, memop, data_reg, |
63 | - CASE_OP_32_64(sextract): | 78 | - TCG_REG_X1, otype, addr_reg); |
64 | - if (arg_is_const(op->args[1])) { | 79 | - add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64, |
65 | - tmp = sextract64(arg_info(op->args[1])->val, | 80 | - data_reg, addr_reg, s->code_ptr, label_ptr); |
66 | - op->args[2], op->args[3]); | 81 | + TCG_REG_X1, addr_type, addr_reg); |
67 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | 82 | + add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg, |
68 | - continue; | 83 | + s->code_ptr, label_ptr); |
69 | - } | 84 | #else /* !CONFIG_SOFTMMU */ |
70 | - break; | 85 | unsigned a_bits = get_alignment_bits(memop); |
71 | - | 86 | if (a_bits) { |
72 | default: | 87 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
73 | break; | 88 | } |
74 | 89 | if (USE_GUEST_BASE) { | |
75 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 90 | tcg_out_qemu_st_direct(s, memop, data_reg, |
76 | CASE_OP_32_64(eqv): | 91 | - TCG_REG_GUEST_BASE, otype, addr_reg); |
77 | done = fold_eqv(&ctx, op); | 92 | + TCG_REG_GUEST_BASE, addr_type, addr_reg); |
78 | break; | 93 | } else { |
79 | + CASE_OP_32_64(extract): | 94 | tcg_out_qemu_st_direct(s, memop, data_reg, |
80 | + done = fold_extract(&ctx, op); | 95 | addr_reg, TCG_TYPE_I64, TCG_REG_XZR); |
81 | + break; | 96 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, |
82 | CASE_OP_32_64(extract2): | 97 | break; |
83 | done = fold_extract2(&ctx, op); | 98 | case INDEX_op_qemu_st_i32: |
84 | break; | 99 | case INDEX_op_qemu_st_i64: |
85 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 100 | - tcg_out_qemu_st(s, REG0(0), a1, a2); |
86 | case INDEX_op_setcond2_i32: | 101 | + tcg_out_qemu_st(s, REG0(0), a1, a2, ext); |
87 | done = fold_setcond2(&ctx, op); | 102 | break; |
88 | break; | 103 | |
89 | + CASE_OP_32_64(sextract): | 104 | case INDEX_op_bswap64_i64: |
90 | + done = fold_sextract(&ctx, op); | ||
91 | + break; | ||
92 | CASE_OP_32_64_VEC(sub): | ||
93 | done = fold_sub(&ctx, op); | ||
94 | break; | ||
95 | -- | 105 | -- |
96 | 2.25.1 | 106 | 2.34.1 |
97 | 107 | ||
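The new fold_extract()/fold_sextract() paths above collapse an extract of a known constant into a movi. Simplified stand-ins for QEMU's extract64/sextract64 (without the range asserts, and assuming arithmetic right shift, as QEMU does) show the arithmetic being folded:

    #include <stdint.h>
    #include <stdio.h>

    /* The LEN-bit field of V starting at bit START, zero-extended. */
    static uint64_t extract64(uint64_t v, int start, int len)
    {
        return (v >> start) & (~0ull >> (64 - len));
    }

    /* The same field, sign-extended: shift it to the top of the word,
       then arithmetic-shift it back down. */
    static int64_t sextract64(uint64_t v, int start, int len)
    {
        return (int64_t)(v << (64 - start - len)) >> (64 - len);
    }

    int main(void)
    {
        /* extract of constant 0xabcd at bit 4, length 8: movi 0xbc */
        printf("%#llx\n", (unsigned long long)extract64(0xabcd, 4, 8));
        /* sextract of constant 0xf0 at bit 4, length 4: movi -1 */
        printf("%lld\n", (long long)sextract64(0xf0, 4, 4));
        return 0;
    }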
1 | Pull the "op r, a, a => movi r, 0" optimization into a function, | 1 | Collect the 3 potential parts of the host address into a struct. |
---|---|---|---|
2 | and use it in the outer opcode fold functions. | 2 | Reorg tcg_out_qemu_{ld,st}_direct to use it. |
3 | 3 | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 6 | --- |
8 | tcg/optimize.c | 41 ++++++++++++++++++++++++----------------- | 7 | tcg/aarch64/tcg-target.c.inc | 86 +++++++++++++++++++++++++----------- |
9 | 1 file changed, 24 insertions(+), 17 deletions(-) | 8 | 1 file changed, 59 insertions(+), 27 deletions(-) |
10 | 9 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc |
12 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 12 | --- a/tcg/aarch64/tcg-target.c.inc |
14 | +++ b/tcg/optimize.c | 13 | +++ b/tcg/aarch64/tcg-target.c.inc |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op) | 14 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) |
16 | return false; | 15 | tcg_out_insn(s, 3406, ADR, rd, offset); |
17 | } | 16 | } |
18 | 17 | ||
19 | +/* If the binary operation has both arguments equal, fold to @i. */ | 18 | +typedef struct { |
20 | +static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 19 | + TCGReg base; |
21 | +{ | 20 | + TCGReg index; |
22 | + if (args_are_copies(op->args[1], op->args[2])) { | 21 | + TCGType index_ext; |
23 | + return tcg_opt_gen_movi(ctx, op, op->args[0], i); | 22 | +} HostAddress; |
24 | + } | ||
25 | + return false; | ||
26 | +} | ||
27 | + | 23 | + |
28 | /* | 24 | #ifdef CONFIG_SOFTMMU |
29 | * These outermost fold_<op> functions are sorted alphabetically. | 25 | /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, |
30 | */ | 26 | * MemOpIdx oi, uintptr_t ra) |
31 | @@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op) | 27 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
32 | 28 | #endif /* CONFIG_SOFTMMU */ | |
33 | static bool fold_andc(OptContext *ctx, TCGOp *op) | 29 | |
30 | static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, | ||
31 | - TCGReg data_r, TCGReg addr_r, | ||
32 | - TCGType otype, TCGReg off_r) | ||
33 | + TCGReg data_r, HostAddress h) | ||
34 | { | 34 | { |
35 | - return fold_const2(ctx, op); | 35 | switch (memop & MO_SSIZE) { |
36 | + if (fold_const2(ctx, op) || | 36 | case MO_UB: |
37 | + fold_xx_to_i(ctx, op, 0)) { | 37 | - tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r); |
38 | + return true; | 38 | + tcg_out_ldst_r(s, I3312_LDRB, data_r, h.base, h.index_ext, h.index); |
39 | + } | 39 | break; |
40 | + return false; | 40 | case MO_SB: |
41 | tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW, | ||
42 | - data_r, addr_r, otype, off_r); | ||
43 | + data_r, h.base, h.index_ext, h.index); | ||
44 | break; | ||
45 | case MO_UW: | ||
46 | - tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r); | ||
47 | + tcg_out_ldst_r(s, I3312_LDRH, data_r, h.base, h.index_ext, h.index); | ||
48 | break; | ||
49 | case MO_SW: | ||
50 | tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW), | ||
51 | - data_r, addr_r, otype, off_r); | ||
52 | + data_r, h.base, h.index_ext, h.index); | ||
53 | break; | ||
54 | case MO_UL: | ||
55 | - tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r); | ||
56 | + tcg_out_ldst_r(s, I3312_LDRW, data_r, h.base, h.index_ext, h.index); | ||
57 | break; | ||
58 | case MO_SL: | ||
59 | - tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r); | ||
60 | + tcg_out_ldst_r(s, I3312_LDRSWX, data_r, h.base, h.index_ext, h.index); | ||
61 | break; | ||
62 | case MO_UQ: | ||
63 | - tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r); | ||
64 | + tcg_out_ldst_r(s, I3312_LDRX, data_r, h.base, h.index_ext, h.index); | ||
65 | break; | ||
66 | default: | ||
67 | g_assert_not_reached(); | ||
68 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, | ||
41 | } | 69 | } |
42 | 70 | ||
43 | static bool fold_brcond(OptContext *ctx, TCGOp *op) | 71 | static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, |
44 | @@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op) | 72 | - TCGReg data_r, TCGReg addr_r, |
45 | 73 | - TCGType otype, TCGReg off_r) | |
46 | static bool fold_sub(OptContext *ctx, TCGOp *op) | 74 | + TCGReg data_r, HostAddress h) |
47 | { | 75 | { |
48 | - return fold_const2(ctx, op); | 76 | switch (memop & MO_SIZE) { |
49 | + if (fold_const2(ctx, op) || | 77 | case MO_8: |
50 | + fold_xx_to_i(ctx, op, 0)) { | 78 | - tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r); |
51 | + return true; | 79 | + tcg_out_ldst_r(s, I3312_STRB, data_r, h.base, h.index_ext, h.index); |
52 | + } | 80 | break; |
53 | + return false; | 81 | case MO_16: |
82 | - tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r); | ||
83 | + tcg_out_ldst_r(s, I3312_STRH, data_r, h.base, h.index_ext, h.index); | ||
84 | break; | ||
85 | case MO_32: | ||
86 | - tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r); | ||
87 | + tcg_out_ldst_r(s, I3312_STRW, data_r, h.base, h.index_ext, h.index); | ||
88 | break; | ||
89 | case MO_64: | ||
90 | - tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r); | ||
91 | + tcg_out_ldst_r(s, I3312_STRX, data_r, h.base, h.index_ext, h.index); | ||
92 | break; | ||
93 | default: | ||
94 | g_assert_not_reached(); | ||
95 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
96 | { | ||
97 | MemOp memop = get_memop(oi); | ||
98 | TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; | ||
99 | + HostAddress h; | ||
100 | |||
101 | /* Byte swapping is left to middle-end expansion. */ | ||
102 | tcg_debug_assert((memop & MO_BSWAP) == 0); | ||
103 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
104 | tcg_insn_unit *label_ptr; | ||
105 | |||
106 | tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 1); | ||
107 | - tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, | ||
108 | - TCG_REG_X1, addr_type, addr_reg); | ||
109 | + | ||
110 | + h = (HostAddress){ | ||
111 | + .base = TCG_REG_X1, | ||
112 | + .index = addr_reg, | ||
113 | + .index_ext = addr_type | ||
114 | + }; | ||
115 | + tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h); | ||
116 | + | ||
117 | add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg, | ||
118 | s->code_ptr, label_ptr); | ||
119 | #else /* !CONFIG_SOFTMMU */ | ||
120 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
121 | tcg_out_test_alignment(s, true, addr_reg, a_bits); | ||
122 | } | ||
123 | if (USE_GUEST_BASE) { | ||
124 | - tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, | ||
125 | - TCG_REG_GUEST_BASE, addr_type, addr_reg); | ||
126 | + h = (HostAddress){ | ||
127 | + .base = TCG_REG_GUEST_BASE, | ||
128 | + .index = addr_reg, | ||
129 | + .index_ext = addr_type | ||
130 | + }; | ||
131 | } else { | ||
132 | - tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, | ||
133 | - addr_reg, TCG_TYPE_I64, TCG_REG_XZR); | ||
134 | + h = (HostAddress){ | ||
135 | + .base = addr_reg, | ||
136 | + .index = TCG_REG_XZR, | ||
137 | + .index_ext = TCG_TYPE_I64 | ||
138 | + }; | ||
139 | } | ||
140 | + tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h); | ||
141 | #endif /* CONFIG_SOFTMMU */ | ||
54 | } | 142 | } |
55 | 143 | ||
56 | static bool fold_sub2_i32(OptContext *ctx, TCGOp *op) | 144 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
57 | @@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op) | ||
58 | |||
59 | static bool fold_xor(OptContext *ctx, TCGOp *op) | ||
60 | { | 145 | { |
61 | - return fold_const2(ctx, op); | 146 | MemOp memop = get_memop(oi); |
62 | + if (fold_const2(ctx, op) || | 147 | TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; |
63 | + fold_xx_to_i(ctx, op, 0)) { | 148 | + HostAddress h; |
64 | + return true; | 149 | |
65 | + } | 150 | /* Byte swapping is left to middle-end expansion. */ |
66 | + return false; | 151 | tcg_debug_assert((memop & MO_BSWAP) == 0); |
152 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
153 | tcg_insn_unit *label_ptr; | ||
154 | |||
155 | tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 0); | ||
156 | - tcg_out_qemu_st_direct(s, memop, data_reg, | ||
157 | - TCG_REG_X1, addr_type, addr_reg); | ||
158 | + | ||
159 | + h = (HostAddress){ | ||
160 | + .base = TCG_REG_X1, | ||
161 | + .index = addr_reg, | ||
162 | + .index_ext = addr_type | ||
163 | + }; | ||
164 | + tcg_out_qemu_st_direct(s, memop, data_reg, h); | ||
165 | + | ||
166 | add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg, | ||
167 | s->code_ptr, label_ptr); | ||
168 | #else /* !CONFIG_SOFTMMU */ | ||
169 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
170 | tcg_out_test_alignment(s, false, addr_reg, a_bits); | ||
171 | } | ||
172 | if (USE_GUEST_BASE) { | ||
173 | - tcg_out_qemu_st_direct(s, memop, data_reg, | ||
174 | - TCG_REG_GUEST_BASE, addr_type, addr_reg); | ||
175 | + h = (HostAddress){ | ||
176 | + .base = TCG_REG_GUEST_BASE, | ||
177 | + .index = addr_reg, | ||
178 | + .index_ext = addr_type | ||
179 | + }; | ||
180 | } else { | ||
181 | - tcg_out_qemu_st_direct(s, memop, data_reg, | ||
182 | - addr_reg, TCG_TYPE_I64, TCG_REG_XZR); | ||
183 | + h = (HostAddress){ | ||
184 | + .base = addr_reg, | ||
185 | + .index = TCG_REG_XZR, | ||
186 | + .index_ext = TCG_TYPE_I64 | ||
187 | + }; | ||
188 | } | ||
189 | + tcg_out_qemu_st_direct(s, memop, data_reg, h); | ||
190 | #endif /* CONFIG_SOFTMMU */ | ||
67 | } | 191 | } |
68 | 192 | ||
69 | /* Propagate constants and copies, fold constant expressions. */ | ||
70 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
71 | break; | ||
72 | } | ||
73 | |||
74 | - /* Simplify expression for "op r, a, a => movi r, 0" cases */ | ||
75 | - switch (opc) { | ||
76 | - CASE_OP_32_64_VEC(andc): | ||
77 | - CASE_OP_32_64_VEC(sub): | ||
78 | - CASE_OP_32_64_VEC(xor): | ||
79 | - if (args_are_copies(op->args[1], op->args[2])) { | ||
80 | - tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | ||
81 | - continue; | ||
82 | - } | ||
83 | - break; | ||
84 | - default: | ||
85 | - break; | ||
86 | - } | ||
87 | - | ||
88 | /* | ||
89 | * Process each opcode. | ||
90 | * Sorted alphabetically by opcode as much as possible. | ||
91 | -- | 193 | -- |
92 | 2.25.1 | 194 | 2.34.1 |
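A remark on the aarch64 hunks above: in the user-only, no-guest-base case the roles invert, with the guest address serving as the base and the index parked on XZR (register 31, which reads as zero in these encodings), so tcg_out_qemu_ld_direct needs no separate no-index path. The initializer, taken from the patch:

    h = (HostAddress){
        .base = addr_reg,
        .index = TCG_REG_XZR,     /* always-zero register */
        .index_ext = TCG_TYPE_I64
    };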
93 | 195 | ||
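Likewise in the optimize hunks above, fold_sub() and fold_xor() now chain fold_xx_to_i(ctx, op, 0), replacing the open-coded "op r, a, a => movi r, 0" switch that the same patch deletes. A minimal sketch of what such a helper is assumed to look like, reconstructed from that deleted switch (args_are_copies() and tcg_opt_gen_movi() are the routines visible in it):

    /* Sketch only, not part of the patch: fold "op r, a, a" to
     * "movi r, i" when both inputs are copies of the same value,
     * covering x - x == 0 and x ^ x == 0. */
    static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
    {
        if (args_are_copies(op->args[1], op->args[2])) {
            return tcg_opt_gen_movi(ctx, op, op->args[0], i);
        }
        return false;
    }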
1 | From: Luis Pires <luis.pires@eldorado.org.br> | 1 | Interpret the variable argument placement in the caller. |
---|---|---|---|
2 | Pass data_type instead of is_64. We need to set this in | ||
3 | TCGLabelQemuLdst, so plumb this all the way through from tcg_out_op. | ||
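For concreteness, each converted call site in tcg_out_op now names the value type explicitly instead of passing a bool; the shape is (mirroring the hunk further down, where -1 stands for "no register"):

    case INDEX_op_qemu_ld_i32:
        if (TARGET_LONG_BITS == 32) {
            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
                            args[2], TCG_TYPE_I32);
        } else {
            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
                            args[3], TCG_TYPE_I32);
        }
        break;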
2 | 4 | ||
3 | In preparation for changing the divu128/divs128 implementations | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | to allow for quotients larger than 64 bits, move the div-by-zero | ||
5 | and overflow checks to the callers. | ||
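With the checks moved out, every caller must now reject division by zero and quotient overflow itself, as the target/ppc hunks below do. A sketch of the resulting unsigned call-site pattern (variable names illustrative only):

    if (unlikely(divisor == 0 || hi >= divisor)) {
        overflow = 1;   /* div-by-zero, or quotient exceeds 64 bits */
        lo = 0;         /* result is undefined */
    } else {
        divu128(&lo, &hi, divisor);  /* quotient in lo, remainder in hi */
    }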
6 | |||
7 | Signed-off-by: Luis Pires <luis.pires@eldorado.org.br> | ||
8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Message-Id: <20211025191154.350831-2-luis.pires@eldorado.org.br> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 7 | --- |
12 | include/hw/clock.h | 5 +++-- | 8 | tcg/arm/tcg-target.c.inc | 113 +++++++++++++++++++-------------------- |
13 | include/qemu/host-utils.h | 34 ++++++++++++--------------------- | 9 | 1 file changed, 56 insertions(+), 57 deletions(-) |
14 | target/ppc/int_helper.c | 14 +++++++++----- | ||
15 | util/host-utils.c | 40 ++++++++++++++++++--------------------- | ||
16 | 4 files changed, 42 insertions(+), 51 deletions(-) | ||
17 | 10 | ||
18 | diff --git a/include/hw/clock.h b/include/hw/clock.h | 11 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
19 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/include/hw/clock.h | 13 | --- a/tcg/arm/tcg-target.c.inc |
21 | +++ b/include/hw/clock.h | 14 | +++ b/tcg/arm/tcg-target.c.inc |
22 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns) | 15 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, |
23 | return 0; | 16 | /* Record the context of a call to the out of line helper code for the slow |
24 | } | 17 | path for a load or store, so that we can later generate the correct |
25 | /* | 18 | helper code. */ |
26 | - * Ignore divu128() return value as we've caught div-by-zero and don't | 19 | -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, |
27 | - * need different behaviour for overflow. | 20 | - TCGReg datalo, TCGReg datahi, TCGReg addrlo, |
28 | + * BUG: when CONFIG_INT128 is not defined, the current implementation of | 21 | - TCGReg addrhi, tcg_insn_unit *raddr, |
29 | + * divu128 does not return a valid truncated quotient, so the result will | 22 | +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, |
30 | + * be wrong. | 23 | + MemOpIdx oi, TCGType type, |
31 | */ | 24 | + TCGReg datalo, TCGReg datahi, |
32 | divu128(&lo, &hi, clk->period); | 25 | + TCGReg addrlo, TCGReg addrhi, |
33 | return lo; | 26 | + tcg_insn_unit *raddr, |
34 | diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h | 27 | tcg_insn_unit *label_ptr) |
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/include/qemu/host-utils.h | ||
37 | +++ b/include/qemu/host-utils.h | ||
38 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) | ||
39 | return (__int128_t)a * b / c; | ||
40 | } | ||
41 | |||
42 | -static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) | ||
43 | +static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) | ||
44 | { | 28 | { |
45 | - if (divisor == 0) { | 29 | TCGLabelQemuLdst *label = new_ldst_label(s); |
46 | - return 1; | 30 | |
47 | - } else { | 31 | label->is_ld = is_ld; |
48 | - __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; | 32 | label->oi = oi; |
49 | - __uint128_t result = dividend / divisor; | 33 | + label->type = type; |
50 | - *plow = result; | 34 | label->datalo_reg = datalo; |
51 | - *phigh = dividend % divisor; | 35 | label->datahi_reg = datahi; |
52 | - return result > UINT64_MAX; | 36 | label->addrlo_reg = addrlo; |
53 | - } | 37 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, |
54 | + __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; | ||
55 | + __uint128_t result = dividend / divisor; | ||
56 | + *plow = result; | ||
57 | + *phigh = dividend % divisor; | ||
58 | } | ||
59 | |||
60 | -static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) | ||
61 | +static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor) | ||
62 | { | ||
63 | - if (divisor == 0) { | ||
64 | - return 1; | ||
65 | - } else { | ||
66 | - __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow; | ||
67 | - __int128_t result = dividend / divisor; | ||
68 | - *plow = result; | ||
69 | - *phigh = dividend % divisor; | ||
70 | - return result != *plow; | ||
71 | - } | ||
72 | + __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow; | ||
73 | + __int128_t result = dividend / divisor; | ||
74 | + *plow = result; | ||
75 | + *phigh = dividend % divisor; | ||
76 | } | ||
77 | #else | ||
78 | void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b); | ||
79 | void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b); | ||
80 | -int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); | ||
81 | -int divs128(int64_t *plow, int64_t *phigh, int64_t divisor); | ||
82 | +void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); | ||
83 | +void divs128(int64_t *plow, int64_t *phigh, int64_t divisor); | ||
84 | |||
85 | static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) | ||
86 | { | ||
87 | diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c | ||
88 | index XXXXXXX..XXXXXXX 100644 | ||
89 | --- a/target/ppc/int_helper.c | ||
90 | +++ b/target/ppc/int_helper.c | ||
91 | @@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) | ||
92 | uint64_t rt = 0; | ||
93 | int overflow = 0; | ||
94 | |||
95 | - overflow = divu128(&rt, &ra, rb); | ||
96 | - | ||
97 | - if (unlikely(overflow)) { | ||
98 | + if (unlikely(rb == 0 || ra >= rb)) { | ||
99 | + overflow = 1; | ||
100 | rt = 0; /* Undefined */ | ||
101 | + } else { | ||
102 | + divu128(&rt, &ra, rb); | ||
103 | } | ||
104 | |||
105 | if (oe) { | ||
106 | @@ -XXX,XX +XXX,XX @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) | ||
107 | int64_t rt = 0; | ||
108 | int64_t ra = (int64_t)rau; | ||
109 | int64_t rb = (int64_t)rbu; | ||
110 | - int overflow = divs128(&rt, &ra, rb); | ||
111 | + int overflow = 0; | ||
112 | |||
113 | - if (unlikely(overflow)) { | ||
114 | + if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) { | ||
115 | + overflow = 1; | ||
116 | rt = 0; /* Undefined */ | ||
117 | + } else { | ||
118 | + divs128(&rt, &ra, rb); | ||
119 | } | ||
120 | |||
121 | if (oe) { | ||
122 | diff --git a/util/host-utils.c b/util/host-utils.c | ||
123 | index XXXXXXX..XXXXXXX 100644 | ||
124 | --- a/util/host-utils.c | ||
125 | +++ b/util/host-utils.c | ||
126 | @@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) | ||
127 | *phigh = rh; | ||
128 | } | ||
129 | |||
130 | -/* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */ | ||
131 | -/* quotient exceeds 64 bits). Otherwise returns quotient via plow and */ | ||
132 | -/* remainder via phigh. */ | ||
133 | -int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) | ||
134 | +/* | ||
135 | + * Unsigned 128-by-64 division. Returns quotient via plow and | ||
136 | + * remainder via phigh. | ||
137 | + * The result must fit in 64 bits (plow) - otherwise, the result | ||
138 | + * is undefined. | ||
139 | + * This function will cause a division by zero if passed a zero divisor. | ||
140 | + */ | ||
141 | +void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) | ||
142 | { | ||
143 | uint64_t dhi = *phigh; | ||
144 | uint64_t dlo = *plow; | ||
145 | unsigned i; | ||
146 | uint64_t carry = 0; | ||
147 | |||
148 | - if (divisor == 0) { | ||
149 | - return 1; | ||
150 | - } else if (dhi == 0) { | ||
151 | + if (divisor == 0 || dhi == 0) { | ||
152 | *plow = dlo / divisor; | ||
153 | *phigh = dlo % divisor; | ||
154 | - return 0; | ||
155 | - } else if (dhi >= divisor) { | ||
156 | - return 1; | ||
157 | } else { | ||
158 | |||
159 | for (i = 0; i < 64; i++) { | ||
160 | @@ -XXX,XX +XXX,XX @@ int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) | ||
161 | |||
162 | *plow = dlo; | ||
163 | *phigh = dhi; | ||
164 | - return 0; | ||
165 | } | ||
166 | } | ||
167 | |||
168 | -int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) | ||
169 | +/* | ||
170 | + * Signed 128-by-64 division. Returns quotient via plow and | ||
171 | + * remainder via phigh. | ||
172 | + * The result must fit in 64 bits (plow) - otherwise, the result | ||
173 | + * is undefined. | ||
174 | + * This function will cause a division by zero if passed a zero divisor. | ||
175 | + */ | ||
176 | +void divs128(int64_t *plow, int64_t *phigh, int64_t divisor) | ||
177 | { | ||
178 | int sgn_dvdnd = *phigh < 0; | ||
179 | int sgn_divsr = divisor < 0; | ||
180 | - int overflow = 0; | ||
181 | |||
182 | if (sgn_dvdnd) { | ||
183 | *plow = ~(*plow); | ||
184 | @@ -XXX,XX +XXX,XX @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) | ||
185 | divisor = 0 - divisor; | ||
186 | } | ||
187 | |||
188 | - overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor); | ||
189 | + divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor); | ||
190 | |||
191 | if (sgn_dvdnd ^ sgn_divsr) { | ||
192 | *plow = 0 - *plow; | ||
193 | } | ||
194 | - | ||
195 | - if (!overflow) { | ||
196 | - if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) { | ||
197 | - overflow = 1; | ||
198 | - } | ||
199 | - } | ||
200 | - | ||
201 | - return overflow; | ||
202 | } | 38 | } |
203 | #endif | 39 | #endif |
204 | 40 | ||
41 | -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
42 | +static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
43 | + TCGReg addrlo, TCGReg addrhi, | ||
44 | + MemOpIdx oi, TCGType data_type) | ||
45 | { | ||
46 | - TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); | ||
47 | - MemOpIdx oi; | ||
48 | - MemOp opc; | ||
49 | -#ifdef CONFIG_SOFTMMU | ||
50 | - int mem_index; | ||
51 | - TCGReg addend; | ||
52 | - tcg_insn_unit *label_ptr; | ||
53 | -#else | ||
54 | - unsigned a_bits; | ||
55 | -#endif | ||
56 | - | ||
57 | - datalo = *args++; | ||
58 | - datahi = (is64 ? *args++ : 0); | ||
59 | - addrlo = *args++; | ||
60 | - addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); | ||
61 | - oi = *args++; | ||
62 | - opc = get_memop(oi); | ||
63 | + MemOp opc = get_memop(oi); | ||
64 | |||
65 | #ifdef CONFIG_SOFTMMU | ||
66 | - mem_index = get_mmuidx(oi); | ||
67 | - addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1); | ||
68 | + TCGReg addend= tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1); | ||
69 | |||
70 | - /* This a conditional BL only to load a pointer within this opcode into LR | ||
71 | - for the slow path. We will not be using the value for a tail call. */ | ||
72 | - label_ptr = s->code_ptr; | ||
73 | + /* | ||
74 | + * This is a conditional BL only to load a pointer within this opcode into | ||
75 | + * LR for the slow path. We will not be using the value for a tail call. | ||
76 | + */ | ||
77 | + tcg_insn_unit *label_ptr = s->code_ptr; | ||
78 | tcg_out_bl_imm(s, COND_NE, 0); | ||
79 | |||
80 | tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true); | ||
81 | |||
82 | - add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, | ||
83 | - s->code_ptr, label_ptr); | ||
84 | + add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi, | ||
85 | + addrlo, addrhi, s->code_ptr, label_ptr); | ||
86 | #else /* !CONFIG_SOFTMMU */ | ||
87 | - a_bits = get_alignment_bits(opc); | ||
88 | + unsigned a_bits = get_alignment_bits(opc); | ||
89 | if (a_bits) { | ||
90 | tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
91 | } | ||
92 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, | ||
93 | } | ||
94 | #endif | ||
95 | |||
96 | -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
97 | +static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
98 | + TCGReg addrlo, TCGReg addrhi, | ||
99 | + MemOpIdx oi, TCGType data_type) | ||
100 | { | ||
101 | - TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); | ||
102 | - MemOpIdx oi; | ||
103 | - MemOp opc; | ||
104 | -#ifdef CONFIG_SOFTMMU | ||
105 | - int mem_index; | ||
106 | - TCGReg addend; | ||
107 | - tcg_insn_unit *label_ptr; | ||
108 | -#else | ||
109 | - unsigned a_bits; | ||
110 | -#endif | ||
111 | - | ||
112 | - datalo = *args++; | ||
113 | - datahi = (is64 ? *args++ : 0); | ||
114 | - addrlo = *args++; | ||
115 | - addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); | ||
116 | - oi = *args++; | ||
117 | - opc = get_memop(oi); | ||
118 | + MemOp opc = get_memop(oi); | ||
119 | |||
120 | #ifdef CONFIG_SOFTMMU | ||
121 | - mem_index = get_mmuidx(oi); | ||
122 | - addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0); | ||
123 | + TCGReg addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0); | ||
124 | |||
125 | tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, | ||
126 | addrlo, addend, true); | ||
127 | |||
128 | /* The conditional call must come last, as we're going to return here. */ | ||
129 | - label_ptr = s->code_ptr; | ||
130 | + tcg_insn_unit *label_ptr = s->code_ptr; | ||
131 | tcg_out_bl_imm(s, COND_NE, 0); | ||
132 | |||
133 | - add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, | ||
134 | - s->code_ptr, label_ptr); | ||
135 | + add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi, | ||
136 | + addrlo, addrhi, s->code_ptr, label_ptr); | ||
137 | #else /* !CONFIG_SOFTMMU */ | ||
138 | - a_bits = get_alignment_bits(opc); | ||
139 | + unsigned a_bits = get_alignment_bits(opc); | ||
140 | if (a_bits) { | ||
141 | tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); | ||
142 | } | ||
143 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | ||
144 | break; | ||
145 | |||
146 | case INDEX_op_qemu_ld_i32: | ||
147 | - tcg_out_qemu_ld(s, args, 0); | ||
148 | + if (TARGET_LONG_BITS == 32) { | ||
149 | + tcg_out_qemu_ld(s, args[0], -1, args[1], -1, | ||
150 | + args[2], TCG_TYPE_I32); | ||
151 | + } else { | ||
152 | + tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], | ||
153 | + args[3], TCG_TYPE_I32); | ||
154 | + } | ||
155 | break; | ||
156 | case INDEX_op_qemu_ld_i64: | ||
157 | - tcg_out_qemu_ld(s, args, 1); | ||
158 | + if (TARGET_LONG_BITS == 32) { | ||
159 | + tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, | ||
160 | + args[3], TCG_TYPE_I64); | ||
161 | + } else { | ||
162 | + tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], | ||
163 | + args[4], TCG_TYPE_I64); | ||
164 | + } | ||
165 | break; | ||
166 | case INDEX_op_qemu_st_i32: | ||
167 | - tcg_out_qemu_st(s, args, 0); | ||
168 | + if (TARGET_LONG_BITS == 32) { | ||
169 | + tcg_out_qemu_st(s, args[0], -1, args[1], -1, | ||
170 | + args[2], TCG_TYPE_I32); | ||
171 | + } else { | ||
172 | + tcg_out_qemu_st(s, args[0], -1, args[1], args[2], | ||
173 | + args[3], TCG_TYPE_I32); | ||
174 | + } | ||
175 | break; | ||
176 | case INDEX_op_qemu_st_i64: | ||
177 | - tcg_out_qemu_st(s, args, 1); | ||
178 | + if (TARGET_LONG_BITS == 32) { | ||
179 | + tcg_out_qemu_st(s, args[0], args[1], args[2], -1, | ||
180 | + args[3], TCG_TYPE_I64); | ||
181 | + } else { | ||
182 | + tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], | ||
183 | + args[4], TCG_TYPE_I64); | ||
184 | + } | ||
185 | break; | ||
186 | |||
187 | case INDEX_op_bswap16_i32: | ||
205 | -- | 188 | -- |
206 | 2.25.1 | 189 | 2.34.1 |
207 | 190 | ||
1 | Split out a whole bunch of placeholder functions, which are | 1 | Collect the parts of the host address, and condition, into a struct. |
---|---|---|---|
2 | currently identical. That won't last as more code gets moved. | 2 | Merge tcg_out_qemu_*_{index,direct} and use it. |
3 | 3 | ||
4 | Use CASE_OP_32_64_VEC for some logical operators that previously | ||
5 | missed the addition of vectors. | ||
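(For reference, assuming the usual definition of these case macros at the top of tcg/optimize.c, CASE_OP_32_64_VEC(sub) expands to the 32-bit, 64-bit and vector variants of the opcode:

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_sub_vec:

so routing sub, xor and the like through this macro is what picks up the vector forms.)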
6 | |||
7 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
8 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 5 | --- |
11 | tcg/optimize.c | 271 +++++++++++++++++++++++++++++++++++++++---------- | 6 | tcg/arm/tcg-target.c.inc | 248 ++++++++++++++++++--------------------- |
12 | 1 file changed, 219 insertions(+), 52 deletions(-) | 7 | 1 file changed, 115 insertions(+), 133 deletions(-) |
13 | 8 | ||
14 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 9 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
15 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/tcg/optimize.c | 11 | --- a/tcg/arm/tcg-target.c.inc |
17 | +++ b/tcg/optimize.c | 12 | +++ b/tcg/arm/tcg-target.c.inc |
18 | @@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op) | 13 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn, |
14 | tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf); | ||
15 | } | ||
16 | |||
17 | +typedef struct { | ||
18 | + ARMCond cond; | ||
19 | + TCGReg base; | ||
20 | + int index; | ||
21 | + bool index_scratch; | ||
22 | +} HostAddress; | ||
23 | + | ||
24 | #ifdef CONFIG_SOFTMMU | ||
25 | /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, | ||
26 | * int mmu_idx, uintptr_t ra) | ||
27 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
28 | } | ||
29 | #endif /* SOFTMMU */ | ||
30 | |||
31 | -static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, | ||
32 | - TCGReg datalo, TCGReg datahi, | ||
33 | - TCGReg addrlo, TCGReg addend, | ||
34 | - bool scratch_addend) | ||
35 | +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, | ||
36 | + TCGReg datahi, HostAddress h) | ||
37 | { | ||
38 | + TCGReg base; | ||
39 | + | ||
40 | /* Byte swapping is left to middle-end expansion. */ | ||
41 | tcg_debug_assert((opc & MO_BSWAP) == 0); | ||
42 | |||
43 | switch (opc & MO_SSIZE) { | ||
44 | case MO_UB: | ||
45 | - tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend); | ||
46 | + if (h.index < 0) { | ||
47 | + tcg_out_ld8_12(s, h.cond, datalo, h.base, 0); | ||
48 | + } else { | ||
49 | + tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index); | ||
50 | + } | ||
51 | break; | ||
52 | case MO_SB: | ||
53 | - tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend); | ||
54 | + if (h.index < 0) { | ||
55 | + tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0); | ||
56 | + } else { | ||
57 | + tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index); | ||
58 | + } | ||
59 | break; | ||
60 | case MO_UW: | ||
61 | - tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend); | ||
62 | + if (h.index < 0) { | ||
63 | + tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0); | ||
64 | + } else { | ||
65 | + tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index); | ||
66 | + } | ||
67 | break; | ||
68 | case MO_SW: | ||
69 | - tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend); | ||
70 | + if (h.index < 0) { | ||
71 | + tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0); | ||
72 | + } else { | ||
73 | + tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index); | ||
74 | + } | ||
75 | break; | ||
76 | case MO_UL: | ||
77 | - tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend); | ||
78 | + if (h.index < 0) { | ||
79 | + tcg_out_ld32_12(s, h.cond, datalo, h.base, 0); | ||
80 | + } else { | ||
81 | + tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index); | ||
82 | + } | ||
83 | break; | ||
84 | case MO_UQ: | ||
85 | /* We used pair allocation for datalo, so already should be aligned. */ | ||
86 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, | ||
87 | tcg_debug_assert(datahi == datalo + 1); | ||
88 | /* LDRD requires alignment; double-check that. */ | ||
89 | if (get_alignment_bits(opc) >= MO_64) { | ||
90 | + if (h.index < 0) { | ||
91 | + tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0); | ||
92 | + break; | ||
93 | + } | ||
94 | /* | ||
95 | * Rm (the second address op) must not overlap Rt or Rt + 1. | ||
96 | * Since datalo is aligned, we can simplify the test via alignment. | ||
97 | * Flip the two address arguments if that works. | ||
98 | */ | ||
99 | - if ((addend & ~1) != datalo) { | ||
100 | - tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); | ||
101 | + if ((h.index & ~1) != datalo) { | ||
102 | + tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index); | ||
103 | break; | ||
104 | } | ||
105 | - if ((addrlo & ~1) != datalo) { | ||
106 | - tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo); | ||
107 | + if ((h.base & ~1) != datalo) { | ||
108 | + tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base); | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | - if (scratch_addend) { | ||
113 | - tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo); | ||
114 | - tcg_out_ld32_12(s, COND_AL, datahi, addend, 4); | ||
115 | + if (h.index < 0) { | ||
116 | + base = h.base; | ||
117 | + if (datalo == h.base) { | ||
118 | + tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base); | ||
119 | + base = TCG_REG_TMP; | ||
120 | + } | ||
121 | + } else if (h.index_scratch) { | ||
122 | + tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base); | ||
123 | + tcg_out_ld32_12(s, h.cond, datahi, h.index, 4); | ||
124 | + break; | ||
125 | } else { | ||
126 | - tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP, | ||
127 | - addend, addrlo, SHIFT_IMM_LSL(0)); | ||
128 | - tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0); | ||
129 | - tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4); | ||
130 | + tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, | ||
131 | + h.base, h.index, SHIFT_IMM_LSL(0)); | ||
132 | + base = TCG_REG_TMP; | ||
133 | } | ||
134 | + tcg_out_ld32_12(s, h.cond, datalo, base, 0); | ||
135 | + tcg_out_ld32_12(s, h.cond, datahi, base, 4); | ||
136 | break; | ||
137 | default: | ||
138 | g_assert_not_reached(); | ||
19 | } | 139 | } |
20 | } | 140 | } |
21 | 141 | ||
22 | +/* | 142 | -#ifndef CONFIG_SOFTMMU |
23 | + * The fold_* functions return true when processing is complete, | 143 | -static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, |
24 | + * usually by folding the operation to a constant or to a copy, | 144 | - TCGReg datahi, TCGReg addrlo) |
25 | + * and calling tcg_opt_gen_{mov,movi}. They may do other things, | 145 | -{ |
26 | + * like collect information about the value produced, for use in | 146 | - /* Byte swapping is left to middle-end expansion. */ |
27 | + * optimizing a subsequent operation. | 147 | - tcg_debug_assert((opc & MO_BSWAP) == 0); |
28 | + * | 148 | - |
29 | + * These first fold_* functions are all helpers, used by other | 149 | - switch (opc & MO_SSIZE) { |
30 | + * folders for more specific operations. | 150 | - case MO_UB: |
31 | + */ | 151 | - tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0); |
32 | + | 152 | - break; |
33 | +static bool fold_const1(OptContext *ctx, TCGOp *op) | 153 | - case MO_SB: |
34 | +{ | 154 | - tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0); |
35 | + if (arg_is_const(op->args[1])) { | 155 | - break; |
36 | + uint64_t t; | 156 | - case MO_UW: |
37 | + | 157 | - tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0); |
38 | + t = arg_info(op->args[1])->val; | 158 | - break; |
39 | + t = do_constant_folding(op->opc, t, 0); | 159 | - case MO_SW: |
40 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t); | 160 | - tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0); |
41 | + } | 161 | - break; |
42 | + return false; | 162 | - case MO_UL: |
43 | +} | 163 | - tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); |
44 | + | 164 | - break; |
45 | +static bool fold_const2(OptContext *ctx, TCGOp *op) | 165 | - case MO_UQ: |
46 | +{ | 166 | - /* We used pair allocation for datalo, so already should be aligned. */ |
47 | + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | 167 | - tcg_debug_assert((datalo & 1) == 0); |
48 | + uint64_t t1 = arg_info(op->args[1])->val; | 168 | - tcg_debug_assert(datahi == datalo + 1); |
49 | + uint64_t t2 = arg_info(op->args[2])->val; | 169 | - /* LDRD requires alignment; double-check that. */ |
50 | + | 170 | - if (get_alignment_bits(opc) >= MO_64) { |
51 | + t1 = do_constant_folding(op->opc, t1, t2); | 171 | - tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0); |
52 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t1); | 172 | - } else if (datalo == addrlo) { |
53 | + } | 173 | - tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4); |
54 | + return false; | 174 | - tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); |
55 | +} | 175 | - } else { |
56 | + | 176 | - tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); |
57 | +/* | 177 | - tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4); |
58 | + * These outermost fold_<op> functions are sorted alphabetically. | 178 | - } |
59 | + */ | 179 | - break; |
60 | + | 180 | - default: |
61 | +static bool fold_add(OptContext *ctx, TCGOp *op) | 181 | - g_assert_not_reached(); |
62 | +{ | 182 | - } |
63 | + return fold_const2(ctx, op); | 183 | -} |
64 | +} | 184 | -#endif |
65 | + | 185 | - |
66 | +static bool fold_and(OptContext *ctx, TCGOp *op) | 186 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, |
67 | +{ | 187 | TCGReg addrlo, TCGReg addrhi, |
68 | + return fold_const2(ctx, op); | 188 | MemOpIdx oi, TCGType data_type) |
69 | +} | ||
70 | + | ||
71 | +static bool fold_andc(OptContext *ctx, TCGOp *op) | ||
72 | +{ | ||
73 | + return fold_const2(ctx, op); | ||
74 | +} | ||
75 | + | ||
76 | static bool fold_call(OptContext *ctx, TCGOp *op) | ||
77 | { | 189 | { |
78 | TCGContext *s = ctx->tcg; | 190 | MemOp opc = get_memop(oi); |
79 | @@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op) | 191 | + HostAddress h; |
80 | return true; | 192 | |
81 | } | 193 | #ifdef CONFIG_SOFTMMU |
82 | 194 | - TCGReg addend= tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1); | |
83 | +static bool fold_ctpop(OptContext *ctx, TCGOp *op) | 195 | + h.cond = COND_AL; |
84 | +{ | 196 | + h.base = addrlo; |
85 | + return fold_const1(ctx, op); | 197 | + h.index_scratch = true; |
86 | +} | 198 | + h.index = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1); |
87 | + | 199 | |
88 | +static bool fold_divide(OptContext *ctx, TCGOp *op) | 200 | /* |
89 | +{ | 201 | * This a conditional BL only to load a pointer within this opcode into |
90 | + return fold_const2(ctx, op); | 202 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, |
91 | +} | 203 | tcg_insn_unit *label_ptr = s->code_ptr; |
92 | + | 204 | tcg_out_bl_imm(s, COND_NE, 0); |
93 | +static bool fold_eqv(OptContext *ctx, TCGOp *op) | 205 | |
94 | +{ | 206 | - tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true); |
95 | + return fold_const2(ctx, op); | 207 | + tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); |
96 | +} | 208 | |
97 | + | 209 | add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi, |
98 | +static bool fold_exts(OptContext *ctx, TCGOp *op) | 210 | addrlo, addrhi, s->code_ptr, label_ptr); |
99 | +{ | 211 | -#else /* !CONFIG_SOFTMMU */ |
100 | + return fold_const1(ctx, op); | 212 | +#else |
101 | +} | 213 | unsigned a_bits = get_alignment_bits(opc); |
102 | + | 214 | if (a_bits) { |
103 | +static bool fold_extu(OptContext *ctx, TCGOp *op) | 215 | tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); |
104 | +{ | 216 | } |
105 | + return fold_const1(ctx, op); | 217 | - if (guest_base) { |
106 | +} | 218 | - tcg_out_qemu_ld_index(s, opc, datalo, datahi, |
107 | + | 219 | - addrlo, TCG_REG_GUEST_BASE, false); |
108 | static bool fold_mb(OptContext *ctx, TCGOp *op) | 220 | - } else { |
221 | - tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); | ||
222 | - } | ||
223 | + | ||
224 | + h.cond = COND_AL; | ||
225 | + h.base = addrlo; | ||
226 | + h.index = guest_base ? TCG_REG_GUEST_BASE : -1; | ||
227 | + h.index_scratch = false; | ||
228 | + tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); | ||
229 | #endif | ||
230 | } | ||
231 | |||
232 | -static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, | ||
233 | - TCGReg datalo, TCGReg datahi, | ||
234 | - TCGReg addrlo, TCGReg addend, | ||
235 | - bool scratch_addend) | ||
236 | -{ | ||
237 | - /* Byte swapping is left to middle-end expansion. */ | ||
238 | - tcg_debug_assert((opc & MO_BSWAP) == 0); | ||
239 | - | ||
240 | - switch (opc & MO_SIZE) { | ||
241 | - case MO_8: | ||
242 | - tcg_out_st8_r(s, cond, datalo, addrlo, addend); | ||
243 | - break; | ||
244 | - case MO_16: | ||
245 | - tcg_out_st16_r(s, cond, datalo, addrlo, addend); | ||
246 | - break; | ||
247 | - case MO_32: | ||
248 | - tcg_out_st32_r(s, cond, datalo, addrlo, addend); | ||
249 | - break; | ||
250 | - case MO_64: | ||
251 | - /* We used pair allocation for datalo, so already should be aligned. */ | ||
252 | - tcg_debug_assert((datalo & 1) == 0); | ||
253 | - tcg_debug_assert(datahi == datalo + 1); | ||
254 | - /* STRD requires alignment; double-check that. */ | ||
255 | - if (get_alignment_bits(opc) >= MO_64) { | ||
256 | - tcg_out_strd_r(s, cond, datalo, addrlo, addend); | ||
257 | - } else if (scratch_addend) { | ||
258 | - tcg_out_st32_rwb(s, cond, datalo, addend, addrlo); | ||
259 | - tcg_out_st32_12(s, cond, datahi, addend, 4); | ||
260 | - } else { | ||
261 | - tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP, | ||
262 | - addend, addrlo, SHIFT_IMM_LSL(0)); | ||
263 | - tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0); | ||
264 | - tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4); | ||
265 | - } | ||
266 | - break; | ||
267 | - default: | ||
268 | - g_assert_not_reached(); | ||
269 | - } | ||
270 | -} | ||
271 | - | ||
272 | -#ifndef CONFIG_SOFTMMU | ||
273 | static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, | ||
274 | - TCGReg datahi, TCGReg addrlo) | ||
275 | + TCGReg datahi, HostAddress h) | ||
109 | { | 276 | { |
110 | /* Eliminate duplicate and redundant fence instructions. */ | 277 | /* Byte swapping is left to middle-end expansion. */ |
111 | @@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op) | 278 | tcg_debug_assert((opc & MO_BSWAP) == 0); |
112 | return true; | 279 | |
113 | } | 280 | switch (opc & MO_SIZE) { |
114 | 281 | case MO_8: | |
115 | +static bool fold_mul(OptContext *ctx, TCGOp *op) | 282 | - tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0); |
116 | +{ | 283 | + if (h.index < 0) { |
117 | + return fold_const2(ctx, op); | 284 | + tcg_out_st8_12(s, h.cond, datalo, h.base, 0); |
118 | +} | 285 | + } else { |
119 | + | 286 | + tcg_out_st8_r(s, h.cond, datalo, h.base, h.index); |
120 | +static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | 287 | + } |
121 | +{ | 288 | break; |
122 | + return fold_const2(ctx, op); | 289 | case MO_16: |
123 | +} | 290 | - tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0); |
124 | + | 291 | + if (h.index < 0) { |
125 | +static bool fold_nand(OptContext *ctx, TCGOp *op) | 292 | + tcg_out_st16_8(s, h.cond, datalo, h.base, 0); |
126 | +{ | 293 | + } else { |
127 | + return fold_const2(ctx, op); | 294 | + tcg_out_st16_r(s, h.cond, datalo, h.base, h.index); |
128 | +} | 295 | + } |
129 | + | 296 | break; |
130 | +static bool fold_neg(OptContext *ctx, TCGOp *op) | 297 | case MO_32: |
131 | +{ | 298 | - tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); |
132 | + return fold_const1(ctx, op); | 299 | + if (h.index < 0) { |
133 | +} | 300 | + tcg_out_st32_12(s, h.cond, datalo, h.base, 0); |
134 | + | 301 | + } else { |
135 | +static bool fold_nor(OptContext *ctx, TCGOp *op) | 302 | + tcg_out_st32_r(s, h.cond, datalo, h.base, h.index); |
136 | +{ | 303 | + } |
137 | + return fold_const2(ctx, op); | 304 | break; |
138 | +} | 305 | case MO_64: |
139 | + | 306 | /* We used pair allocation for datalo, so already should be aligned. */ |
140 | +static bool fold_not(OptContext *ctx, TCGOp *op) | 307 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, |
141 | +{ | 308 | tcg_debug_assert(datahi == datalo + 1); |
142 | + return fold_const1(ctx, op); | 309 | /* STRD requires alignment; double-check that. */ |
143 | +} | 310 | if (get_alignment_bits(opc) >= MO_64) { |
144 | + | 311 | - tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); |
145 | +static bool fold_or(OptContext *ctx, TCGOp *op) | 312 | + if (h.index < 0) { |
146 | +{ | 313 | + tcg_out_strd_8(s, h.cond, datalo, h.base, 0); |
147 | + return fold_const2(ctx, op); | 314 | + } else { |
148 | +} | 315 | + tcg_out_strd_r(s, h.cond, datalo, h.base, h.index); |
149 | + | 316 | + } |
150 | +static bool fold_orc(OptContext *ctx, TCGOp *op) | 317 | + } else if (h.index_scratch) { |
151 | +{ | 318 | + tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base); |
152 | + return fold_const2(ctx, op); | 319 | + tcg_out_st32_12(s, h.cond, datahi, h.index, 4); |
153 | +} | 320 | } else { |
154 | + | 321 | - tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); |
155 | static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) | 322 | - tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4); |
323 | + tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, | ||
324 | + h.base, h.index, SHIFT_IMM_LSL(0)); | ||
325 | + tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0); | ||
326 | + tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4); | ||
327 | } | ||
328 | break; | ||
329 | default: | ||
330 | g_assert_not_reached(); | ||
331 | } | ||
332 | } | ||
333 | -#endif | ||
334 | |||
335 | static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
336 | TCGReg addrlo, TCGReg addrhi, | ||
337 | MemOpIdx oi, TCGType data_type) | ||
156 | { | 338 | { |
157 | /* Opcodes that touch guest memory stop the mb optimization. */ | 339 | MemOp opc = get_memop(oi); |
158 | @@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op) | 340 | + HostAddress h; |
159 | return false; | 341 | |
160 | } | 342 | #ifdef CONFIG_SOFTMMU |
161 | 343 | - TCGReg addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0); | |
162 | +static bool fold_remainder(OptContext *ctx, TCGOp *op) | 344 | - |
163 | +{ | 345 | - tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, |
164 | + return fold_const2(ctx, op); | 346 | - addrlo, addend, true); |
165 | +} | 347 | + h.cond = COND_EQ; |
166 | + | 348 | + h.base = addrlo; |
167 | +static bool fold_shift(OptContext *ctx, TCGOp *op) | 349 | + h.index_scratch = true; |
168 | +{ | 350 | + h.index = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0); |
169 | + return fold_const2(ctx, op); | 351 | + tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); |
170 | +} | 352 | |
171 | + | 353 | /* The conditional call must come last, as we're going to return here. */ |
172 | +static bool fold_sub(OptContext *ctx, TCGOp *op) | 354 | tcg_insn_unit *label_ptr = s->code_ptr; |
173 | +{ | 355 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, |
174 | + return fold_const2(ctx, op); | 356 | |
175 | +} | 357 | add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi, |
176 | + | 358 | addrlo, addrhi, s->code_ptr, label_ptr); |
177 | +static bool fold_xor(OptContext *ctx, TCGOp *op) | 359 | -#else /* !CONFIG_SOFTMMU */ |
178 | +{ | 360 | +#else |
179 | + return fold_const2(ctx, op); | 361 | unsigned a_bits = get_alignment_bits(opc); |
180 | +} | 362 | + |
181 | + | 363 | + h.cond = COND_AL; |
182 | /* Propagate constants and copies, fold constant expressions. */ | 364 | if (a_bits) { |
183 | void tcg_optimize(TCGContext *s) | 365 | tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); |
184 | { | 366 | + h.cond = COND_EQ; |
185 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 367 | } |
186 | } | 368 | - if (guest_base) { |
187 | break; | 369 | - tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, |
188 | 370 | - addrlo, TCG_REG_GUEST_BASE, false); | |
189 | - CASE_OP_32_64(not): | 371 | - } else { |
190 | - CASE_OP_32_64(neg): | 372 | - tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo); |
191 | - CASE_OP_32_64(ext8s): | 373 | - } |
192 | - CASE_OP_32_64(ext8u): | 374 | + |
193 | - CASE_OP_32_64(ext16s): | 375 | + h.base = addrlo; |
194 | - CASE_OP_32_64(ext16u): | 376 | + h.index = guest_base ? TCG_REG_GUEST_BASE : -1; |
195 | - CASE_OP_32_64(ctpop): | 377 | + h.index_scratch = false; |
196 | - case INDEX_op_ext32s_i64: | 378 | + tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); |
197 | - case INDEX_op_ext32u_i64: | 379 | #endif |
198 | - case INDEX_op_ext_i32_i64: | 380 | } |
199 | - case INDEX_op_extu_i32_i64: | ||
200 | - case INDEX_op_extrl_i64_i32: | ||
201 | - case INDEX_op_extrh_i64_i32: | ||
202 | - if (arg_is_const(op->args[1])) { | ||
203 | - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0); | ||
204 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
205 | - continue; | ||
206 | - } | ||
207 | - break; | ||
208 | - | ||
209 | CASE_OP_32_64(bswap16): | ||
210 | CASE_OP_32_64(bswap32): | ||
211 | case INDEX_op_bswap64_i64: | ||
212 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
213 | } | ||
214 | break; | ||
215 | |||
216 | - CASE_OP_32_64(add): | ||
217 | - CASE_OP_32_64(sub): | ||
218 | - CASE_OP_32_64(mul): | ||
219 | - CASE_OP_32_64(or): | ||
220 | - CASE_OP_32_64(and): | ||
221 | - CASE_OP_32_64(xor): | ||
222 | - CASE_OP_32_64(shl): | ||
223 | - CASE_OP_32_64(shr): | ||
224 | - CASE_OP_32_64(sar): | ||
225 | - CASE_OP_32_64(rotl): | ||
226 | - CASE_OP_32_64(rotr): | ||
227 | - CASE_OP_32_64(andc): | ||
228 | - CASE_OP_32_64(orc): | ||
229 | - CASE_OP_32_64(eqv): | ||
230 | - CASE_OP_32_64(nand): | ||
231 | - CASE_OP_32_64(nor): | ||
232 | - CASE_OP_32_64(muluh): | ||
233 | - CASE_OP_32_64(mulsh): | ||
234 | - CASE_OP_32_64(div): | ||
235 | - CASE_OP_32_64(divu): | ||
236 | - CASE_OP_32_64(rem): | ||
237 | - CASE_OP_32_64(remu): | ||
238 | - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
239 | - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | ||
240 | - arg_info(op->args[2])->val); | ||
241 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
242 | - continue; | ||
243 | - } | ||
244 | - break; | ||
245 | - | ||
246 | CASE_OP_32_64(clz): | ||
247 | CASE_OP_32_64(ctz): | ||
248 | if (arg_is_const(op->args[1])) { | ||
249 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
250 | } | ||
251 | break; | ||
252 | |||
253 | + default: | ||
254 | + break; | ||
255 | + | ||
256 | + /* ---------------------------------------------------------- */ | ||
257 | + /* Sorted alphabetically by opcode as much as possible. */ | ||
258 | + | ||
259 | + CASE_OP_32_64_VEC(add): | ||
260 | + done = fold_add(&ctx, op); | ||
261 | + break; | ||
262 | + CASE_OP_32_64_VEC(and): | ||
263 | + done = fold_and(&ctx, op); | ||
264 | + break; | ||
265 | + CASE_OP_32_64_VEC(andc): | ||
266 | + done = fold_andc(&ctx, op); | ||
267 | + break; | ||
268 | + CASE_OP_32_64(ctpop): | ||
269 | + done = fold_ctpop(&ctx, op); | ||
270 | + break; | ||
271 | + CASE_OP_32_64(div): | ||
272 | + CASE_OP_32_64(divu): | ||
273 | + done = fold_divide(&ctx, op); | ||
274 | + break; | ||
275 | + CASE_OP_32_64(eqv): | ||
276 | + done = fold_eqv(&ctx, op); | ||
277 | + break; | ||
278 | + CASE_OP_32_64(ext8s): | ||
279 | + CASE_OP_32_64(ext16s): | ||
280 | + case INDEX_op_ext32s_i64: | ||
281 | + case INDEX_op_ext_i32_i64: | ||
282 | + done = fold_exts(&ctx, op); | ||
283 | + break; | ||
284 | + CASE_OP_32_64(ext8u): | ||
285 | + CASE_OP_32_64(ext16u): | ||
286 | + case INDEX_op_ext32u_i64: | ||
287 | + case INDEX_op_extu_i32_i64: | ||
288 | + case INDEX_op_extrl_i64_i32: | ||
289 | + case INDEX_op_extrh_i64_i32: | ||
290 | + done = fold_extu(&ctx, op); | ||
291 | + break; | ||
292 | case INDEX_op_mb: | ||
293 | done = fold_mb(&ctx, op); | ||
294 | break; | ||
295 | + CASE_OP_32_64(mul): | ||
296 | + done = fold_mul(&ctx, op); | ||
297 | + break; | ||
298 | + CASE_OP_32_64(mulsh): | ||
299 | + CASE_OP_32_64(muluh): | ||
300 | + done = fold_mul_highpart(&ctx, op); | ||
301 | + break; | ||
302 | + CASE_OP_32_64(nand): | ||
303 | + done = fold_nand(&ctx, op); | ||
304 | + break; | ||
305 | + CASE_OP_32_64(neg): | ||
306 | + done = fold_neg(&ctx, op); | ||
307 | + break; | ||
308 | + CASE_OP_32_64(nor): | ||
309 | + done = fold_nor(&ctx, op); | ||
310 | + break; | ||
311 | + CASE_OP_32_64_VEC(not): | ||
312 | + done = fold_not(&ctx, op); | ||
313 | + break; | ||
314 | + CASE_OP_32_64_VEC(or): | ||
315 | + done = fold_or(&ctx, op); | ||
316 | + break; | ||
317 | + CASE_OP_32_64_VEC(orc): | ||
318 | + done = fold_orc(&ctx, op); | ||
319 | + break; | ||
320 | case INDEX_op_qemu_ld_i32: | ||
321 | case INDEX_op_qemu_ld_i64: | ||
322 | done = fold_qemu_ld(&ctx, op); | ||
323 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
324 | case INDEX_op_qemu_st_i64: | ||
325 | done = fold_qemu_st(&ctx, op); | ||
326 | break; | ||
327 | - | ||
328 | - default: | ||
329 | + CASE_OP_32_64(rem): | ||
330 | + CASE_OP_32_64(remu): | ||
331 | + done = fold_remainder(&ctx, op); | ||
332 | + break; | ||
333 | + CASE_OP_32_64(rotl): | ||
334 | + CASE_OP_32_64(rotr): | ||
335 | + CASE_OP_32_64(sar): | ||
336 | + CASE_OP_32_64(shl): | ||
337 | + CASE_OP_32_64(shr): | ||
338 | + done = fold_shift(&ctx, op); | ||
339 | + break; | ||
340 | + CASE_OP_32_64_VEC(sub): | ||
341 | + done = fold_sub(&ctx, op); | ||
342 | + break; | ||
343 | + CASE_OP_32_64_VEC(xor): | ||
344 | + done = fold_xor(&ctx, op); | ||
345 | break; | ||
346 | } | ||
347 | 381 | ||
348 | -- | 382 | -- |
349 | 2.25.1 | 383 | 2.34.1 |
350 | |||
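One detail worth noting in the arm patch above: carrying the condition inside HostAddress lets the user-only store path predicate the access on the alignment check instead of branching around it, as in this excerpt from the patch:

    h.cond = COND_AL;
    if (a_bits) {
        tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
        h.cond = COND_EQ;   /* store issues only if the check passed */
    }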
1 | Provide what will become a larger context for splitting | 1 | Interpret the variable argument placement in the caller. Shift some |
---|---|---|---|
2 | the very large tcg_optimize function. | 2 | code around slightly to share more between softmmu and user-only. |
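The new struct on the left is deliberately minimal; the point is extensibility, since later patches grow it without churning every helper signature again (the fold_call() helper shown earlier in this view already reads a ctx->tcg field). In miniature:

    typedef struct OptContext {
        TCGTempSet temps_used;
        /* Later patches add more state here, e.g. a TCGContext
         * pointer; an assumption based on ctx->tcg appearing
         * elsewhere in this series. */
    } OptContext;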
3 | 3 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 6 | --- |
9 | tcg/optimize.c | 77 ++++++++++++++++++++++++++------------------------ | 7 | tcg/loongarch64/tcg-target.c.inc | 100 +++++++++++++------------------ |
10 | 1 file changed, 40 insertions(+), 37 deletions(-) | 8 | 1 file changed, 42 insertions(+), 58 deletions(-) |
11 | 9 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc |
13 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 12 | --- a/tcg/loongarch64/tcg-target.c.inc |
15 | +++ b/tcg/optimize.c | 13 | +++ b/tcg/loongarch64/tcg-target.c.inc |
16 | @@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo { | 14 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj, |
17 | uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */ | ||
18 | } TempOptInfo; | ||
19 | |||
20 | +typedef struct OptContext { | ||
21 | + TCGTempSet temps_used; | ||
22 | +} OptContext; | ||
23 | + | ||
24 | static inline TempOptInfo *ts_info(TCGTemp *ts) | ||
25 | { | ||
26 | return ts->state_ptr; | ||
27 | @@ -XXX,XX +XXX,XX @@ static void reset_temp(TCGArg arg) | ||
28 | } | ||
29 | |||
30 | /* Initialize and activate a temporary. */ | ||
31 | -static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts) | ||
32 | +static void init_ts_info(OptContext *ctx, TCGTemp *ts) | ||
33 | { | ||
34 | size_t idx = temp_idx(ts); | ||
35 | TempOptInfo *ti; | ||
36 | |||
37 | - if (test_bit(idx, temps_used->l)) { | ||
38 | + if (test_bit(idx, ctx->temps_used.l)) { | ||
39 | return; | ||
40 | } | ||
41 | - set_bit(idx, temps_used->l); | ||
42 | + set_bit(idx, ctx->temps_used.l); | ||
43 | |||
44 | ti = ts->state_ptr; | ||
45 | if (ti == NULL) { | ||
46 | @@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts) | ||
47 | } | 15 | } |
48 | } | 16 | } |
49 | 17 | ||
50 | -static void init_arg_info(TCGTempSet *temps_used, TCGArg arg) | 18 | -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type) |
51 | +static void init_arg_info(OptContext *ctx, TCGArg arg) | 19 | +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
20 | + MemOpIdx oi, TCGType data_type) | ||
52 | { | 21 | { |
53 | - init_ts_info(temps_used, arg_temp(arg)); | 22 | - TCGReg addr_regl; |
54 | + init_ts_info(ctx, arg_temp(arg)); | 23 | - TCGReg data_regl; |
24 | - MemOpIdx oi; | ||
25 | - MemOp opc; | ||
26 | -#if defined(CONFIG_SOFTMMU) | ||
27 | + MemOp opc = get_memop(oi); | ||
28 | + TCGReg base, index; | ||
29 | + | ||
30 | +#ifdef CONFIG_SOFTMMU | ||
31 | tcg_insn_unit *label_ptr[1]; | ||
32 | -#else | ||
33 | - unsigned a_bits; | ||
34 | -#endif | ||
35 | - TCGReg base; | ||
36 | |||
37 | - data_regl = *args++; | ||
38 | - addr_regl = *args++; | ||
39 | - oi = *args++; | ||
40 | - opc = get_memop(oi); | ||
41 | - | ||
42 | -#if defined(CONFIG_SOFTMMU) | ||
43 | - tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1); | ||
44 | - base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); | ||
45 | - tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type); | ||
46 | - add_qemu_ldst_label(s, 1, oi, type, | ||
47 | - data_regl, addr_regl, | ||
48 | - s->code_ptr, label_ptr); | ||
49 | + tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1); | ||
50 | + index = TCG_REG_TMP2; | ||
51 | #else | ||
52 | - a_bits = get_alignment_bits(opc); | ||
53 | + unsigned a_bits = get_alignment_bits(opc); | ||
54 | if (a_bits) { | ||
55 | - tcg_out_test_alignment(s, true, addr_regl, a_bits); | ||
56 | + tcg_out_test_alignment(s, true, addr_reg, a_bits); | ||
57 | } | ||
58 | - base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); | ||
59 | - TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
60 | - tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type); | ||
61 | + index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
62 | +#endif | ||
63 | + | ||
64 | + base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0); | ||
65 | + tcg_out_qemu_ld_indexed(s, data_reg, base, index, opc, data_type); | ||
66 | + | ||
67 | +#ifdef CONFIG_SOFTMMU | ||
68 | + add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg, | ||
69 | + s->code_ptr, label_ptr); | ||
70 | #endif | ||
55 | } | 71 | } |
56 | 72 | ||
57 | static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) | 73 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data, |
58 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | ||
59 | } | 74 | } |
60 | } | 75 | } |
61 | 76 | ||
62 | -static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used, | 77 | -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType type) |
63 | +static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx, | 78 | +static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
64 | TCGOp *op, TCGArg dst, uint64_t val) | 79 | + MemOpIdx oi, TCGType data_type) |
65 | { | 80 | { |
66 | const TCGOpDef *def = &tcg_op_defs[op->opc]; | 81 | - TCGReg addr_regl; |
67 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used, | 82 | - TCGReg data_regl; |
68 | 83 | - MemOpIdx oi; | |
69 | /* Convert movi to mov with constant temp. */ | 84 | - MemOp opc; |
70 | tv = tcg_constant_internal(type, val); | 85 | -#if defined(CONFIG_SOFTMMU) |
71 | - init_ts_info(temps_used, tv); | 86 | + MemOp opc = get_memop(oi); |
72 | + init_ts_info(ctx, tv); | 87 | + TCGReg base, index; |
73 | tcg_opt_gen_mov(s, op, dst, temp_arg(tv)); | 88 | + |
89 | +#ifdef CONFIG_SOFTMMU | ||
90 | tcg_insn_unit *label_ptr[1]; | ||
91 | -#else | ||
92 | - unsigned a_bits; | ||
93 | -#endif | ||
94 | - TCGReg base; | ||
95 | |||
96 | - data_regl = *args++; | ||
97 | - addr_regl = *args++; | ||
98 | - oi = *args++; | ||
99 | - opc = get_memop(oi); | ||
100 | - | ||
101 | -#if defined(CONFIG_SOFTMMU) | ||
102 | - tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0); | ||
103 | - base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); | ||
104 | - tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc); | ||
105 | - add_qemu_ldst_label(s, 0, oi, type, | ||
106 | - data_regl, addr_regl, | ||
107 | - s->code_ptr, label_ptr); | ||
108 | + tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0); | ||
109 | + index = TCG_REG_TMP2; | ||
110 | #else | ||
111 | - a_bits = get_alignment_bits(opc); | ||
112 | + unsigned a_bits = get_alignment_bits(opc); | ||
113 | if (a_bits) { | ||
114 | - tcg_out_test_alignment(s, false, addr_regl, a_bits); | ||
115 | + tcg_out_test_alignment(s, false, addr_reg, a_bits); | ||
116 | } | ||
117 | - base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); | ||
118 | - TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
119 | - tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc); | ||
120 | + index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
121 | +#endif | ||
122 | + | ||
123 | + base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0); | ||
124 | + tcg_out_qemu_st_indexed(s, data_reg, base, index, opc); | ||
125 | + | ||
126 | +#ifdef CONFIG_SOFTMMU | ||
127 | + add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg, | ||
128 | + s->code_ptr, label_ptr); | ||
129 | #endif | ||
74 | } | 130 | } |
75 | 131 | ||
76 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 132 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, |
77 | { | 133 | break; |
78 | int nb_temps, nb_globals, i; | 134 | |
79 | TCGOp *op, *op_next, *prev_mb = NULL; | 135 | case INDEX_op_qemu_ld_i32: |
80 | - TCGTempSet temps_used; | 136 | - tcg_out_qemu_ld(s, args, TCG_TYPE_I32); |
81 | + OptContext ctx = {}; | 137 | + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); |
82 | 138 | break; | |
83 | /* Array VALS has an element for each temp. | 139 | case INDEX_op_qemu_ld_i64: |
84 | If this temp holds a constant then its value is kept in VALS' element. | 140 | - tcg_out_qemu_ld(s, args, TCG_TYPE_I64); |
85 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 141 | + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); |
86 | nb_temps = s->nb_temps; | 142 | break; |
87 | nb_globals = s->nb_globals; | 143 | case INDEX_op_qemu_st_i32: |
88 | 144 | - tcg_out_qemu_st(s, args, TCG_TYPE_I32); | |
89 | - memset(&temps_used, 0, sizeof(temps_used)); | 145 | + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); |
90 | for (i = 0; i < nb_temps; ++i) { | 146 | break; |
91 | s->temps[i].state_ptr = NULL; | 147 | case INDEX_op_qemu_st_i64: |
92 | } | 148 | - tcg_out_qemu_st(s, args, TCG_TYPE_I64); |
93 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 149 | + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); |
94 | for (i = 0; i < nb_oargs + nb_iargs; i++) { | 150 | break; |
95 | TCGTemp *ts = arg_temp(op->args[i]); | 151 | |
96 | if (ts) { | 152 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ |
97 | - init_ts_info(&temps_used, ts); | ||
98 | + init_ts_info(&ctx, ts); | ||
99 | } | ||
100 | } | ||
101 | } else { | ||
102 | nb_oargs = def->nb_oargs; | ||
103 | nb_iargs = def->nb_iargs; | ||
104 | for (i = 0; i < nb_oargs + nb_iargs; i++) { | ||
105 | - init_arg_info(&temps_used, op->args[i]); | ||
106 | + init_arg_info(&ctx, op->args[i]); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
111 | CASE_OP_32_64(rotr): | ||
112 | if (arg_is_const(op->args[1]) | ||
113 | && arg_info(op->args[1])->val == 0) { | ||
114 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); | ||
115 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0); | ||
116 | continue; | ||
117 | } | ||
118 | break; | ||
119 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
120 | |||
121 | if (partmask == 0) { | ||
122 | tcg_debug_assert(nb_oargs == 1); | ||
123 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); | ||
124 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0); | ||
125 | continue; | ||
126 | } | ||
127 | if (affected == 0) { | ||
128 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
129 | CASE_OP_32_64(mulsh): | ||
130 | if (arg_is_const(op->args[2]) | ||
131 | && arg_info(op->args[2])->val == 0) { | ||
132 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); | ||
133 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0); | ||
134 | continue; | ||
135 | } | ||
136 | break; | ||
137 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
138 | CASE_OP_32_64_VEC(sub): | ||
139 | CASE_OP_32_64_VEC(xor): | ||
140 | if (args_are_copies(op->args[1], op->args[2])) { | ||
141 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); | ||
142 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0); | ||
143 | continue; | ||
144 | } | ||
145 | break; | ||
146 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
147 | if (arg_is_const(op->args[1])) { | ||
148 | tmp = arg_info(op->args[1])->val; | ||
149 | tmp = dup_const(TCGOP_VECE(op), tmp); | ||
150 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
151 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
152 | break; | ||
153 | } | ||
154 | goto do_default; | ||
155 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
156 | case INDEX_op_dup2_vec: | ||
157 | assert(TCG_TARGET_REG_BITS == 32); | ||
158 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
159 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], | ||
160 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], | ||
161 | deposit64(arg_info(op->args[1])->val, 32, 32, | ||
162 | arg_info(op->args[2])->val)); | ||
163 | break; | ||
164 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
165 | case INDEX_op_extrh_i64_i32: | ||
166 | if (arg_is_const(op->args[1])) { | ||
167 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0); | ||
168 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
169 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
170 | break; | ||
171 | } | ||
172 | goto do_default; | ||
173 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
174 | if (arg_is_const(op->args[1])) { | ||
175 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | ||
176 | op->args[2]); | ||
177 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
178 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
179 | break; | ||
180 | } | ||
181 | goto do_default; | ||
182 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
183 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
184 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | ||
185 | arg_info(op->args[2])->val); | ||
186 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
187 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
188 | break; | ||
189 | } | ||
190 | goto do_default; | ||
191 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
192 | TCGArg v = arg_info(op->args[1])->val; | ||
193 | if (v != 0) { | ||
194 | tmp = do_constant_folding(opc, v, 0); | ||
195 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
196 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
197 | } else { | ||
198 | tcg_opt_gen_mov(s, op, op->args[0], op->args[2]); | ||
199 | } | ||
200 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
201 | tmp = deposit64(arg_info(op->args[1])->val, | ||
202 | op->args[3], op->args[4], | ||
203 | arg_info(op->args[2])->val); | ||
204 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
205 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
206 | break; | ||
207 | } | ||
208 | goto do_default; | ||
209 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
210 | if (arg_is_const(op->args[1])) { | ||
211 | tmp = extract64(arg_info(op->args[1])->val, | ||
212 | op->args[2], op->args[3]); | ||
213 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
214 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
215 | break; | ||
216 | } | ||
217 | goto do_default; | ||
218 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
219 | if (arg_is_const(op->args[1])) { | ||
220 | tmp = sextract64(arg_info(op->args[1])->val, | ||
221 | op->args[2], op->args[3]); | ||
222 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
223 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
224 | break; | ||
225 | } | ||
226 | goto do_default; | ||
227 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
228 | tmp = (int32_t)(((uint32_t)v1 >> shr) | | ||
229 | ((uint32_t)v2 << (32 - shr))); | ||
230 | } | ||
231 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
232 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
233 | break; | ||
234 | } | ||
235 | goto do_default; | ||
236 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
237 | tmp = do_constant_folding_cond(opc, op->args[1], | ||
238 | op->args[2], op->args[3]); | ||
239 | if (tmp != 2) { | ||
240 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
241 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
242 | break; | ||
243 | } | ||
244 | goto do_default; | ||
245 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
246 | op->args[1], op->args[2]); | ||
247 | if (tmp != 2) { | ||
248 | if (tmp) { | ||
249 | - memset(&temps_used, 0, sizeof(temps_used)); | ||
250 | + memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
251 | op->opc = INDEX_op_br; | ||
252 | op->args[0] = op->args[3]; | ||
253 | } else { | ||
254 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
255 | |||
256 | rl = op->args[0]; | ||
257 | rh = op->args[1]; | ||
258 | - tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a); | ||
259 | - tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32)); | ||
260 | + tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a); | ||
261 | + tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32)); | ||
262 | break; | ||
263 | } | ||
264 | goto do_default; | ||
265 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
266 | |||
267 | rl = op->args[0]; | ||
268 | rh = op->args[1]; | ||
269 | - tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r); | ||
270 | - tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32)); | ||
271 | + tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r); | ||
272 | + tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32)); | ||
273 | break; | ||
274 | } | ||
275 | goto do_default; | ||
276 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
277 | if (tmp != 2) { | ||
278 | if (tmp) { | ||
279 | do_brcond_true: | ||
280 | - memset(&temps_used, 0, sizeof(temps_used)); | ||
281 | + memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
282 | op->opc = INDEX_op_br; | ||
283 | op->args[0] = op->args[5]; | ||
284 | } else { | ||
285 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
286 | /* Simplify LT/GE comparisons vs zero to a single compare | ||
287 | vs the high word of the input. */ | ||
288 | do_brcond_high: | ||
289 | - memset(&temps_used, 0, sizeof(temps_used)); | ||
290 | + memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
291 | op->opc = INDEX_op_brcond_i32; | ||
292 | op->args[0] = op->args[1]; | ||
293 | op->args[1] = op->args[3]; | ||
294 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
295 | goto do_default; | ||
296 | } | ||
297 | do_brcond_low: | ||
298 | - memset(&temps_used, 0, sizeof(temps_used)); | ||
299 | + memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
300 | op->opc = INDEX_op_brcond_i32; | ||
301 | op->args[1] = op->args[2]; | ||
302 | op->args[2] = op->args[4]; | ||
303 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
304 | op->args[5]); | ||
305 | if (tmp != 2) { | ||
306 | do_setcond_const: | ||
307 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
308 | + tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp); | ||
309 | } else if ((op->args[5] == TCG_COND_LT | ||
310 | || op->args[5] == TCG_COND_GE) | ||
311 | && arg_is_const(op->args[3]) | ||
312 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
313 | if (!(tcg_call_flags(op) | ||
314 | & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { | ||
315 | for (i = 0; i < nb_globals; i++) { | ||
316 | - if (test_bit(i, temps_used.l)) { | ||
317 | + if (test_bit(i, ctx.temps_used.l)) { | ||
318 | reset_ts(&s->temps[i]); | ||
319 | } | ||
320 | } | ||
321 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
322 | block, otherwise we only trash the output args. "z_mask" is | ||
323 | the non-zero bits mask for the first output arg. */ | ||
324 | if (def->flags & TCG_OPF_BB_END) { | ||
325 | - memset(&temps_used, 0, sizeof(temps_used)); | ||
326 | + memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
327 | } else { | ||
328 | do_reset_output: | ||
329 | for (i = 0; i < nb_oargs; i++) { | ||
330 | -- | 153 | -- |
331 | 2.25.1 | 154 | 2.34.1 |
332 | 155 | ||
333 | 156 | ||
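
The OptContext threaded through tcg_optimize() in the hunks above is, at this
point in the series, just a wrapper around what used to be a function-local
bitmap. A sketch of its shape as introduced earlier in the series (the field
comment is added here):

    typedef struct OptContext {
        TCGTempSet temps_used;  /* temps whose TempOptInfo is initialized */
    } OptContext;

Later patches grow this into the real pass context (ctx->z_mask and friends
appear below), which is why the conversion away from a bare TCGTempSet is
done first.
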
1 | The results are generally 6-bit unsigned values, though | 1 | Collect the 2 parts of the host address into a struct. |
---|---|---|---|
2 | the count-leading and count-trailing-zeros ops may produce any | 2 | Reorg tcg_out_qemu_{ld,st}_direct to use it. |
3 | value for a zero input. | ||
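
For the sign propagation added below, the sign mask is derived from the
known-zeros mask roughly as follows; a self-contained sketch of the idea
behind smask_from_zmask(), not the exact tree contents:

    #include <stdint.h>

    /*
     * Bits set in the result are guaranteed copies of the sign bit.
     * A bit-count result with z_mask == 0x3f (a 6-bit value) has its
     * top 57 bits known to repeat the (zero) sign bit, so a later
     * sign- or zero-extension of the result can be elided.
     */
    static uint64_t smask_from_zmask(uint64_t z_mask)
    {
        /* If the msb may be set, nothing is known about the sign bits. */
        int rep = z_mask ? __builtin_clzll(z_mask) : 64;
        return rep ? ~(~0ull >> (rep - 1)) : 0;
    }
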
4 | 3 | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
6 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 6 | --- |
9 | tcg/optimize.c | 3 ++- | 7 | tcg/loongarch64/tcg-target.c.inc | 55 +++++++++++++++++--------------- |
10 | 1 file changed, 2 insertions(+), 1 deletion(-) | 8 | 1 file changed, 30 insertions(+), 25 deletions(-) |
11 | 9 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc |
13 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 12 | --- a/tcg/loongarch64/tcg-target.c.inc |
15 | +++ b/tcg/optimize.c | 13 | +++ b/tcg/loongarch64/tcg-target.c.inc |
16 | @@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op) | 14 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s, |
17 | g_assert_not_reached(); | 15 | return addr; |
18 | } | ||
19 | ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask; | ||
20 | - | ||
21 | + ctx->s_mask = smask_from_zmask(ctx->z_mask); | ||
22 | return false; | ||
23 | } | 16 | } |
24 | 17 | ||
25 | @@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op) | 18 | -static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj, |
19 | - TCGReg rk, MemOp opc, TCGType type) | ||
20 | +typedef struct { | ||
21 | + TCGReg base; | ||
22 | + TCGReg index; | ||
23 | +} HostAddress; | ||
24 | + | ||
25 | +static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, | ||
26 | + TCGReg rd, HostAddress h) | ||
27 | { | ||
28 | /* Byte swapping is left to middle-end expansion. */ | ||
29 | tcg_debug_assert((opc & MO_BSWAP) == 0); | ||
30 | |||
31 | switch (opc & MO_SSIZE) { | ||
32 | case MO_UB: | ||
33 | - tcg_out_opc_ldx_bu(s, rd, rj, rk); | ||
34 | + tcg_out_opc_ldx_bu(s, rd, h.base, h.index); | ||
35 | break; | ||
36 | case MO_SB: | ||
37 | - tcg_out_opc_ldx_b(s, rd, rj, rk); | ||
38 | + tcg_out_opc_ldx_b(s, rd, h.base, h.index); | ||
39 | break; | ||
40 | case MO_UW: | ||
41 | - tcg_out_opc_ldx_hu(s, rd, rj, rk); | ||
42 | + tcg_out_opc_ldx_hu(s, rd, h.base, h.index); | ||
43 | break; | ||
44 | case MO_SW: | ||
45 | - tcg_out_opc_ldx_h(s, rd, rj, rk); | ||
46 | + tcg_out_opc_ldx_h(s, rd, h.base, h.index); | ||
47 | break; | ||
48 | case MO_UL: | ||
49 | if (type == TCG_TYPE_I64) { | ||
50 | - tcg_out_opc_ldx_wu(s, rd, rj, rk); | ||
51 | + tcg_out_opc_ldx_wu(s, rd, h.base, h.index); | ||
52 | break; | ||
53 | } | ||
54 | /* fallthrough */ | ||
55 | case MO_SL: | ||
56 | - tcg_out_opc_ldx_w(s, rd, rj, rk); | ||
57 | + tcg_out_opc_ldx_w(s, rd, h.base, h.index); | ||
58 | break; | ||
59 | case MO_UQ: | ||
60 | - tcg_out_opc_ldx_d(s, rd, rj, rk); | ||
61 | + tcg_out_opc_ldx_d(s, rd, h.base, h.index); | ||
62 | break; | ||
26 | default: | 63 | default: |
27 | g_assert_not_reached(); | 64 | g_assert_not_reached(); |
65 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
66 | MemOpIdx oi, TCGType data_type) | ||
67 | { | ||
68 | MemOp opc = get_memop(oi); | ||
69 | - TCGReg base, index; | ||
70 | + HostAddress h; | ||
71 | |||
72 | #ifdef CONFIG_SOFTMMU | ||
73 | tcg_insn_unit *label_ptr[1]; | ||
74 | |||
75 | tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1); | ||
76 | - index = TCG_REG_TMP2; | ||
77 | + h.index = TCG_REG_TMP2; | ||
78 | #else | ||
79 | unsigned a_bits = get_alignment_bits(opc); | ||
80 | if (a_bits) { | ||
81 | tcg_out_test_alignment(s, true, addr_reg, a_bits); | ||
28 | } | 82 | } |
29 | + ctx->s_mask = smask_from_zmask(ctx->z_mask); | 83 | - index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; |
30 | return false; | 84 | + h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; |
85 | #endif | ||
86 | |||
87 | - base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0); | ||
88 | - tcg_out_qemu_ld_indexed(s, data_reg, base, index, opc, data_type); | ||
89 | + h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0); | ||
90 | + tcg_out_qemu_ld_indexed(s, opc, data_type, data_reg, h); | ||
91 | |||
92 | #ifdef CONFIG_SOFTMMU | ||
93 | add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg, | ||
94 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
95 | #endif | ||
31 | } | 96 | } |
32 | 97 | ||
98 | -static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data, | ||
99 | - TCGReg rj, TCGReg rk, MemOp opc) | ||
100 | +static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, | ||
101 | + TCGReg rd, HostAddress h) | ||
102 | { | ||
103 | /* Byte swapping is left to middle-end expansion. */ | ||
104 | tcg_debug_assert((opc & MO_BSWAP) == 0); | ||
105 | |||
106 | switch (opc & MO_SIZE) { | ||
107 | case MO_8: | ||
108 | - tcg_out_opc_stx_b(s, data, rj, rk); | ||
109 | + tcg_out_opc_stx_b(s, rd, h.base, h.index); | ||
110 | break; | ||
111 | case MO_16: | ||
112 | - tcg_out_opc_stx_h(s, data, rj, rk); | ||
113 | + tcg_out_opc_stx_h(s, rd, h.base, h.index); | ||
114 | break; | ||
115 | case MO_32: | ||
116 | - tcg_out_opc_stx_w(s, data, rj, rk); | ||
117 | + tcg_out_opc_stx_w(s, rd, h.base, h.index); | ||
118 | break; | ||
119 | case MO_64: | ||
120 | - tcg_out_opc_stx_d(s, data, rj, rk); | ||
121 | + tcg_out_opc_stx_d(s, rd, h.base, h.index); | ||
122 | break; | ||
123 | default: | ||
124 | g_assert_not_reached(); | ||
125 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
126 | MemOpIdx oi, TCGType data_type) | ||
127 | { | ||
128 | MemOp opc = get_memop(oi); | ||
129 | - TCGReg base, index; | ||
130 | + HostAddress h; | ||
131 | |||
132 | #ifdef CONFIG_SOFTMMU | ||
133 | tcg_insn_unit *label_ptr[1]; | ||
134 | |||
135 | tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0); | ||
136 | - index = TCG_REG_TMP2; | ||
137 | + h.index = TCG_REG_TMP2; | ||
138 | #else | ||
139 | unsigned a_bits = get_alignment_bits(opc); | ||
140 | if (a_bits) { | ||
141 | tcg_out_test_alignment(s, false, addr_reg, a_bits); | ||
142 | } | ||
143 | - index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
144 | + h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
145 | #endif | ||
146 | |||
147 | - base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0); | ||
148 | - tcg_out_qemu_st_indexed(s, data_reg, base, index, opc); | ||
149 | + h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0); | ||
150 | + tcg_out_qemu_st_indexed(s, opc, data_reg, h); | ||
151 | |||
152 | #ifdef CONFIG_SOFTMMU | ||
153 | add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg, | ||
33 | -- | 154 | -- |
34 | 2.25.1 | 155 | 2.34.1 |
35 | 156 | ||
36 | 157 | ||
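
Condensing the loongarch64 patch above: the two registers every indexed
load/store needs now travel together, and the softmmu and user-only paths
differ only in how they produce them. The resulting shape, with the
register roles as in the patch:

    typedef struct {
        TCGReg base;    /* the (possibly zero-extended) guest address */
        TCGReg index;   /* TLB result (softmmu) or guest_base/zero (user) */
    } HostAddress;

    /* In tcg_out_qemu_ld(): */
    HostAddress h;
    #ifdef CONFIG_SOFTMMU
        h.index = TCG_REG_TMP2;         /* set up by tcg_out_tlb_load() */
    #else
        h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    #endif
    h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
    tcg_out_qemu_ld_indexed(s, opc, data_type, data_reg, h);
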
1 | This will allow callers to tail-call these functions | 1 | Interpret the variable argument placement in the caller. There are |
2 | and return true, indicating that processing is complete. | 2 | several places where we already convert back from bool to type. |
2 | and return true indicating processing complete. | 2 | several places where we already convert back from bool to type. |
3 | Clean things up by using type throughout. | ||
3 | 4 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 7 | --- |
9 | tcg/optimize.c | 9 +++++---- | 8 | tcg/mips/tcg-target.c.inc | 186 +++++++++++++++++++------------------- |
10 | 1 file changed, 5 insertions(+), 4 deletions(-) | 9 | 1 file changed, 95 insertions(+), 91 deletions(-) |
11 | 10 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc |
13 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 13 | --- a/tcg/mips/tcg-target.c.inc |
15 | +++ b/tcg/optimize.c | 14 | +++ b/tcg/mips/tcg-target.c.inc |
16 | @@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2) | 15 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
17 | return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); | 16 | #endif /* SOFTMMU */ |
17 | |||
18 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
19 | - TCGReg base, MemOp opc, bool is_64) | ||
20 | + TCGReg base, MemOp opc, TCGType type) | ||
21 | { | ||
22 | switch (opc & (MO_SSIZE | MO_BSWAP)) { | ||
23 | case MO_UB: | ||
24 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
25 | tcg_out_opc_imm(s, OPC_LH, lo, base, 0); | ||
26 | break; | ||
27 | case MO_UL | MO_BSWAP: | ||
28 | - if (TCG_TARGET_REG_BITS == 64 && is_64) { | ||
29 | + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) { | ||
30 | if (use_mips32r2_instructions) { | ||
31 | tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); | ||
32 | tcg_out_bswap32(s, lo, lo, TCG_BSWAP_IZ | TCG_BSWAP_OZ); | ||
33 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
34 | } | ||
35 | break; | ||
36 | case MO_UL: | ||
37 | - if (TCG_TARGET_REG_BITS == 64 && is_64) { | ||
38 | + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) { | ||
39 | tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); | ||
40 | break; | ||
41 | } | ||
42 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
18 | } | 43 | } |
19 | 44 | ||
20 | -static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | 45 | static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, |
21 | +static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | 46 | - TCGReg base, MemOp opc, bool is_64) |
47 | + TCGReg base, MemOp opc, TCGType type) | ||
22 | { | 48 | { |
23 | TCGTemp *dst_ts = arg_temp(dst); | 49 | const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR; |
24 | TCGTemp *src_ts = arg_temp(src); | 50 | const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL; |
25 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | 51 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, |
26 | 52 | case MO_UL: | |
27 | if (ts_are_copies(dst_ts, src_ts)) { | 53 | tcg_out_opc_imm(s, lw1, lo, base, 0); |
28 | tcg_op_remove(ctx->tcg, op); | 54 | tcg_out_opc_imm(s, lw2, lo, base, 3); |
29 | - return; | 55 | - if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) { |
30 | + return true; | 56 | + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn) { |
31 | } | 57 | tcg_out_ext32u(s, lo, lo); |
32 | 58 | } | |
33 | reset_ts(dst_ts); | 59 | break; |
34 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | 60 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, |
35 | di->is_const = si->is_const; | 61 | tcg_out_opc_imm(s, lw1, lo, base, 0); |
36 | di->val = si->val; | 62 | tcg_out_opc_imm(s, lw2, lo, base, 3); |
37 | } | 63 | tcg_out_bswap32(s, lo, lo, |
38 | + return true; | 64 | - TCG_TARGET_REG_BITS == 64 && is_64 |
65 | + TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 | ||
66 | ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0); | ||
67 | } else { | ||
68 | const tcg_insn_unit *subr = | ||
69 | - (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn | ||
70 | + (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn | ||
71 | ? bswap32u_addr : bswap32_addr); | ||
72 | |||
73 | tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0); | ||
74 | tcg_out_bswap_subr(s, subr); | ||
75 | /* delay slot */ | ||
76 | tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3); | ||
77 | - tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3); | ||
78 | + tcg_out_mov(s, type, lo, TCG_TMP3); | ||
79 | } | ||
80 | break; | ||
81 | |||
82 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, | ||
83 | } | ||
39 | } | 84 | } |
40 | 85 | ||
41 | -static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, | 86 | -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) |
42 | +static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, | 87 | +static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, |
43 | TCGArg dst, uint64_t val) | 88 | + TCGReg addrlo, TCGReg addrhi, |
89 | + MemOpIdx oi, TCGType data_type) | ||
44 | { | 90 | { |
45 | const TCGOpDef *def = &tcg_op_defs[op->opc]; | 91 | - TCGReg addr_regl, addr_regh __attribute__((unused)); |
46 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, | 92 | - TCGReg data_regl, data_regh; |
47 | /* Convert movi to mov with constant temp. */ | 93 | - MemOpIdx oi; |
48 | tv = tcg_constant_internal(type, val); | 94 | - MemOp opc; |
49 | init_ts_info(ctx, tv); | 95 | -#if defined(CONFIG_SOFTMMU) |
50 | - tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv)); | 96 | - tcg_insn_unit *label_ptr[2]; |
51 | + return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv)); | 97 | -#else |
98 | -#endif | ||
99 | - unsigned a_bits, s_bits; | ||
100 | - TCGReg base = TCG_REG_A0; | ||
101 | - | ||
102 | - data_regl = *args++; | ||
103 | - data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); | ||
104 | - addr_regl = *args++; | ||
105 | - addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); | ||
106 | - oi = *args++; | ||
107 | - opc = get_memop(oi); | ||
108 | - a_bits = get_alignment_bits(opc); | ||
109 | - s_bits = opc & MO_SIZE; | ||
110 | + MemOp opc = get_memop(oi); | ||
111 | + unsigned a_bits = get_alignment_bits(opc); | ||
112 | + unsigned s_bits = opc & MO_SIZE; | ||
113 | + TCGReg base; | ||
114 | |||
115 | /* | ||
116 | * R6 removes the left/right instructions but requires the | ||
117 | * system to support misaligned memory accesses. | ||
118 | */ | ||
119 | #if defined(CONFIG_SOFTMMU) | ||
120 | - tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1); | ||
121 | + tcg_insn_unit *label_ptr[2]; | ||
122 | + | ||
123 | + base = TCG_REG_A0; | ||
124 | + tcg_out_tlb_load(s, base, addrlo, addrhi, oi, label_ptr, 1); | ||
125 | if (use_mips32r6_instructions || a_bits >= s_bits) { | ||
126 | - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); | ||
127 | + tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type); | ||
128 | } else { | ||
129 | - tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64); | ||
130 | + tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type); | ||
131 | } | ||
132 | - add_qemu_ldst_label(s, 1, oi, | ||
133 | - (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), | ||
134 | - data_regl, data_regh, addr_regl, addr_regh, | ||
135 | - s->code_ptr, label_ptr); | ||
136 | + add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi, | ||
137 | + addrlo, addrhi, s->code_ptr, label_ptr); | ||
138 | #else | ||
139 | + base = addrlo; | ||
140 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { | ||
141 | - tcg_out_ext32u(s, base, addr_regl); | ||
142 | - addr_regl = base; | ||
143 | + tcg_out_ext32u(s, TCG_REG_A0, base); | ||
144 | + base = TCG_REG_A0; | ||
145 | } | ||
146 | - if (guest_base == 0 && data_regl != addr_regl) { | ||
147 | - base = addr_regl; | ||
148 | - } else if (guest_base == (int16_t)guest_base) { | ||
149 | - tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base); | ||
150 | - } else { | ||
151 | - tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); | ||
152 | + if (guest_base) { | ||
153 | + if (guest_base == (int16_t)guest_base) { | ||
154 | + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base); | ||
155 | + } else { | ||
156 | + tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base, | ||
157 | + TCG_GUEST_BASE_REG); | ||
158 | + } | ||
159 | + base = TCG_REG_A0; | ||
160 | } | ||
161 | if (use_mips32r6_instructions) { | ||
162 | if (a_bits) { | ||
163 | - tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); | ||
164 | + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
165 | } | ||
166 | - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); | ||
167 | + tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type); | ||
168 | } else { | ||
169 | if (a_bits && a_bits != s_bits) { | ||
170 | - tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); | ||
171 | + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
172 | } | ||
173 | if (a_bits >= s_bits) { | ||
174 | - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); | ||
175 | + tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type); | ||
176 | } else { | ||
177 | - tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64); | ||
178 | + tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type); | ||
179 | } | ||
180 | } | ||
181 | #endif | ||
182 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi, | ||
183 | g_assert_not_reached(); | ||
184 | } | ||
52 | } | 185 | } |
53 | 186 | -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | |
54 | static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) | 187 | -{ |
188 | - TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
189 | - TCGReg data_regl, data_regh; | ||
190 | - MemOpIdx oi; | ||
191 | - MemOp opc; | ||
192 | -#if defined(CONFIG_SOFTMMU) | ||
193 | - tcg_insn_unit *label_ptr[2]; | ||
194 | -#endif | ||
195 | - unsigned a_bits, s_bits; | ||
196 | - TCGReg base = TCG_REG_A0; | ||
197 | |||
198 | - data_regl = *args++; | ||
199 | - data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); | ||
200 | - addr_regl = *args++; | ||
201 | - addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); | ||
202 | - oi = *args++; | ||
203 | - opc = get_memop(oi); | ||
204 | - a_bits = get_alignment_bits(opc); | ||
205 | - s_bits = opc & MO_SIZE; | ||
206 | +static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
207 | + TCGReg addrlo, TCGReg addrhi, | ||
208 | + MemOpIdx oi, TCGType data_type) | ||
209 | +{ | ||
210 | + MemOp opc = get_memop(oi); | ||
211 | + unsigned a_bits = get_alignment_bits(opc); | ||
212 | + unsigned s_bits = opc & MO_SIZE; | ||
213 | + TCGReg base; | ||
214 | |||
215 | /* | ||
216 | * R6 removes the left/right instructions but requires the | ||
217 | * system to support misaligned memory accesses. | ||
218 | */ | ||
219 | #if defined(CONFIG_SOFTMMU) | ||
220 | - tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0); | ||
221 | + tcg_insn_unit *label_ptr[2]; | ||
222 | + | ||
223 | + base = TCG_REG_A0; | ||
224 | + tcg_out_tlb_load(s, base, addrlo, addrhi, oi, label_ptr, 0); | ||
225 | if (use_mips32r6_instructions || a_bits >= s_bits) { | ||
226 | - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
227 | + tcg_out_qemu_st_direct(s, datalo, datahi, base, opc); | ||
228 | } else { | ||
229 | - tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc); | ||
230 | + tcg_out_qemu_st_unalign(s, datalo, datahi, base, opc); | ||
231 | } | ||
232 | - add_qemu_ldst_label(s, 0, oi, | ||
233 | - (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), | ||
234 | - data_regl, data_regh, addr_regl, addr_regh, | ||
235 | - s->code_ptr, label_ptr); | ||
236 | + add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi, | ||
237 | + addrlo, addrhi, s->code_ptr, label_ptr); | ||
238 | #else | ||
239 | + base = addrlo; | ||
240 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { | ||
241 | - tcg_out_ext32u(s, base, addr_regl); | ||
242 | - addr_regl = base; | ||
243 | + tcg_out_ext32u(s, TCG_REG_A0, base); | ||
244 | + base = TCG_REG_A0; | ||
245 | } | ||
246 | - if (guest_base == 0) { | ||
247 | - base = addr_regl; | ||
248 | - } else if (guest_base == (int16_t)guest_base) { | ||
249 | - tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base); | ||
250 | - } else { | ||
251 | - tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); | ||
252 | + if (guest_base) { | ||
253 | + if (guest_base == (int16_t)guest_base) { | ||
254 | + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base); | ||
255 | + } else { | ||
256 | + tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base, | ||
257 | + TCG_GUEST_BASE_REG); | ||
258 | + } | ||
259 | + base = TCG_REG_A0; | ||
260 | } | ||
261 | if (use_mips32r6_instructions) { | ||
262 | if (a_bits) { | ||
263 | - tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); | ||
264 | + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
265 | } | ||
266 | - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
267 | + tcg_out_qemu_st_direct(s, datalo, datahi, base, opc); | ||
268 | } else { | ||
269 | if (a_bits && a_bits != s_bits) { | ||
270 | - tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); | ||
271 | + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
272 | } | ||
273 | if (a_bits >= s_bits) { | ||
274 | - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
275 | + tcg_out_qemu_st_direct(s, datalo, datahi, base, opc); | ||
276 | } else { | ||
277 | - tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc); | ||
278 | + tcg_out_qemu_st_unalign(s, datalo, datahi, base, opc); | ||
279 | } | ||
280 | } | ||
281 | #endif | ||
282 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | ||
283 | break; | ||
284 | |||
285 | case INDEX_op_qemu_ld_i32: | ||
286 | - tcg_out_qemu_ld(s, args, false); | ||
287 | + if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { | ||
288 | + tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); | ||
289 | + } else { | ||
290 | + tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32); | ||
291 | + } | ||
292 | break; | ||
293 | case INDEX_op_qemu_ld_i64: | ||
294 | - tcg_out_qemu_ld(s, args, true); | ||
295 | + if (TCG_TARGET_REG_BITS == 64) { | ||
296 | + tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); | ||
297 | + } else if (TARGET_LONG_BITS == 32) { | ||
298 | + tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64); | ||
299 | + } else { | ||
300 | + tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); | ||
301 | + } | ||
302 | break; | ||
303 | case INDEX_op_qemu_st_i32: | ||
304 | - tcg_out_qemu_st(s, args, false); | ||
305 | + if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { | ||
306 | + tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); | ||
307 | + } else { | ||
308 | + tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32); | ||
309 | + } | ||
310 | break; | ||
311 | case INDEX_op_qemu_st_i64: | ||
312 | - tcg_out_qemu_st(s, args, true); | ||
313 | + if (TCG_TARGET_REG_BITS == 64) { | ||
314 | + tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); | ||
315 | + } else if (TARGET_LONG_BITS == 32) { | ||
316 | + tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64); | ||
317 | + } else { | ||
318 | + tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); | ||
319 | + } | ||
320 | break; | ||
321 | |||
322 | case INDEX_op_add2_i32: | ||
55 | -- | 323 | -- |
56 | 2.25.1 | 324 | 2.34.1 |
57 | 325 | ||
58 | 326 | ||
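
To make the new dispatch in the mips tcg_out_op() above easier to follow,
here is the operand layout it is interpreting, spelled out as a comment
(a summary of the cases in the patch, not new code):

    /*
     * qemu_ld_i64 operands as seen by tcg_out_op() on a 32-bit host
     * with a 64-bit guest address:
     *
     *   args[0] = datalo    args[1] = datahi    (value in two registers)
     *   args[2] = addrlo    args[3] = addrhi    (address in two registers)
     *   args[4] = oi                            (MemOpIdx)
     *
     * On a 64-bit host the hi halves disappear and oi shifts down, which
     * is why the dispatch varies with TCG_TARGET_REG_BITS and
     * TARGET_LONG_BITS.
     */
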
1 | Pull the "op r, a, 0 => movi r, 0" optimization into a function, | 1 | Interpret the variable argument placement in the caller. Pass data_type |
---|---|---|---|
2 | and use it in the outer opcode fold functions. | 2 | instead of is64 -- there are several places where we already convert back |
3 | 3 | from bool to type. Clean things up by using type throughout. | |
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 4 | |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
6 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 8 | --- |
8 | tcg/optimize.c | 38 ++++++++++++++++++++------------------ | 9 | tcg/ppc/tcg-target.c.inc | 110 +++++++++++++++++++++------------------ |
9 | 1 file changed, 20 insertions(+), 18 deletions(-) | 10 | 1 file changed, 59 insertions(+), 51 deletions(-) |
10 | 11 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 12 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
12 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 14 | --- a/tcg/ppc/tcg-target.c.inc |
14 | +++ b/tcg/optimize.c | 15 | +++ b/tcg/ppc/tcg-target.c.inc |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op) | 16 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, |
16 | return false; | 17 | /* Record the context of a call to the out of line helper code for the slow |
18 | path for a load or store, so that we can later generate the correct | ||
19 | helper code. */ | ||
20 | -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, | ||
21 | +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, | ||
22 | + TCGType type, MemOpIdx oi, | ||
23 | TCGReg datalo_reg, TCGReg datahi_reg, | ||
24 | TCGReg addrlo_reg, TCGReg addrhi_reg, | ||
25 | tcg_insn_unit *raddr, tcg_insn_unit *lptr) | ||
26 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, | ||
27 | TCGLabelQemuLdst *label = new_ldst_label(s); | ||
28 | |||
29 | label->is_ld = is_ld; | ||
30 | + label->type = type; | ||
31 | label->oi = oi; | ||
32 | label->datalo_reg = datalo_reg; | ||
33 | label->datahi_reg = datahi_reg; | ||
34 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
35 | |||
36 | #endif /* SOFTMMU */ | ||
37 | |||
38 | -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
39 | +static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
40 | + TCGReg addrlo, TCGReg addrhi, | ||
41 | + MemOpIdx oi, TCGType data_type) | ||
42 | { | ||
43 | - TCGReg datalo, datahi, addrlo, rbase; | ||
44 | - TCGReg addrhi __attribute__((unused)); | ||
45 | - MemOpIdx oi; | ||
46 | - MemOp opc, s_bits; | ||
47 | + MemOp opc = get_memop(oi); | ||
48 | + MemOp s_bits = opc & MO_SIZE; | ||
49 | + TCGReg rbase; | ||
50 | + | ||
51 | #ifdef CONFIG_SOFTMMU | ||
52 | - int mem_index; | ||
53 | tcg_insn_unit *label_ptr; | ||
54 | -#else | ||
55 | - unsigned a_bits; | ||
56 | -#endif | ||
57 | |||
58 | - datalo = *args++; | ||
59 | - datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); | ||
60 | - addrlo = *args++; | ||
61 | - addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); | ||
62 | - oi = *args++; | ||
63 | - opc = get_memop(oi); | ||
64 | - s_bits = opc & MO_SIZE; | ||
65 | - | ||
66 | -#ifdef CONFIG_SOFTMMU | ||
67 | - mem_index = get_mmuidx(oi); | ||
68 | - addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true); | ||
69 | + addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true); | ||
70 | |||
71 | /* Load a pointer into the current opcode w/conditional branch-link. */ | ||
72 | label_ptr = s->code_ptr; | ||
73 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
74 | |||
75 | rbase = TCG_REG_R3; | ||
76 | #else /* !CONFIG_SOFTMMU */ | ||
77 | - a_bits = get_alignment_bits(opc); | ||
78 | + unsigned a_bits = get_alignment_bits(opc); | ||
79 | if (a_bits) { | ||
80 | tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
81 | } | ||
82 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
83 | } | ||
84 | |||
85 | #ifdef CONFIG_SOFTMMU | ||
86 | - add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, | ||
87 | - s->code_ptr, label_ptr); | ||
88 | + add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi, | ||
89 | + addrlo, addrhi, s->code_ptr, label_ptr); | ||
90 | #endif | ||
17 | } | 91 | } |
18 | 92 | ||
19 | +/* If the binary operation has second argument @i, fold to @i. */ | 93 | -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) |
20 | +static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 94 | +static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, |
21 | +{ | 95 | + TCGReg addrlo, TCGReg addrhi, |
22 | + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { | 96 | + MemOpIdx oi, TCGType data_type) |
23 | + return tcg_opt_gen_movi(ctx, op, op->args[0], i); | 97 | { |
24 | + } | 98 | - TCGReg datalo, datahi, addrlo, rbase; |
25 | + return false; | 99 | - TCGReg addrhi __attribute__((unused)); |
26 | +} | 100 | - MemOpIdx oi; |
101 | - MemOp opc, s_bits; | ||
102 | + MemOp opc = get_memop(oi); | ||
103 | + MemOp s_bits = opc & MO_SIZE; | ||
104 | + TCGReg rbase; | ||
27 | + | 105 | + |
28 | /* If the binary operation has both arguments equal, fold to @i. */ | 106 | #ifdef CONFIG_SOFTMMU |
29 | static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 107 | - int mem_index; |
30 | { | 108 | tcg_insn_unit *label_ptr; |
31 | @@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op) | 109 | -#else |
32 | static bool fold_and(OptContext *ctx, TCGOp *op) | 110 | - unsigned a_bits; |
33 | { | 111 | -#endif |
34 | if (fold_const2(ctx, op) || | 112 | |
35 | + fold_xi_to_i(ctx, op, 0) || | 113 | - datalo = *args++; |
36 | fold_xx_to_x(ctx, op)) { | 114 | - datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); |
37 | return true; | 115 | - addrlo = *args++; |
38 | } | 116 | - addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); |
39 | @@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op) | 117 | - oi = *args++; |
40 | 118 | - opc = get_memop(oi); | |
41 | static bool fold_mul(OptContext *ctx, TCGOp *op) | 119 | - s_bits = opc & MO_SIZE; |
42 | { | 120 | - |
43 | - return fold_const2(ctx, op); | 121 | -#ifdef CONFIG_SOFTMMU |
44 | + if (fold_const2(ctx, op) || | 122 | - mem_index = get_mmuidx(oi); |
45 | + fold_xi_to_i(ctx, op, 0)) { | 123 | - addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false); |
46 | + return true; | 124 | + addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false); |
47 | + } | 125 | |
48 | + return false; | 126 | /* Load a pointer into the current opcode w/conditional branch-link. */ |
127 | label_ptr = s->code_ptr; | ||
128 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
129 | |||
130 | rbase = TCG_REG_R3; | ||
131 | #else /* !CONFIG_SOFTMMU */ | ||
132 | - a_bits = get_alignment_bits(opc); | ||
133 | + unsigned a_bits = get_alignment_bits(opc); | ||
134 | if (a_bits) { | ||
135 | tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); | ||
136 | } | ||
137 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
138 | } | ||
139 | |||
140 | #ifdef CONFIG_SOFTMMU | ||
141 | - add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, | ||
142 | - s->code_ptr, label_ptr); | ||
143 | + add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi, | ||
144 | + addrlo, addrhi, s->code_ptr, label_ptr); | ||
145 | #endif | ||
49 | } | 146 | } |
50 | 147 | ||
51 | static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | 148 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, |
52 | { | 149 | break; |
53 | - return fold_const2(ctx, op); | 150 | |
54 | + if (fold_const2(ctx, op) || | 151 | case INDEX_op_qemu_ld_i32: |
55 | + fold_xi_to_i(ctx, op, 0)) { | 152 | - tcg_out_qemu_ld(s, args, false); |
56 | + return true; | 153 | + if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { |
57 | + } | 154 | + tcg_out_qemu_ld(s, args[0], -1, args[1], -1, |
58 | + return false; | 155 | + args[2], TCG_TYPE_I32); |
59 | } | 156 | + } else { |
60 | 157 | + tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], | |
61 | static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op) | 158 | + args[3], TCG_TYPE_I32); |
62 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 159 | + } |
63 | continue; | 160 | break; |
64 | } | 161 | case INDEX_op_qemu_ld_i64: |
65 | 162 | - tcg_out_qemu_ld(s, args, true); | |
66 | - /* Simplify expression for "op r, a, 0 => movi r, 0" cases */ | 163 | + if (TCG_TARGET_REG_BITS == 64) { |
67 | - switch (opc) { | 164 | + tcg_out_qemu_ld(s, args[0], -1, args[1], -1, |
68 | - CASE_OP_32_64_VEC(and): | 165 | + args[2], TCG_TYPE_I64); |
69 | - CASE_OP_32_64_VEC(mul): | 166 | + } else if (TARGET_LONG_BITS == 32) { |
70 | - CASE_OP_32_64(muluh): | 167 | + tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, |
71 | - CASE_OP_32_64(mulsh): | 168 | + args[3], TCG_TYPE_I64); |
72 | - if (arg_is_const(op->args[2]) | 169 | + } else { |
73 | - && arg_info(op->args[2])->val == 0) { | 170 | + tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], |
74 | - tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | 171 | + args[4], TCG_TYPE_I64); |
75 | - continue; | 172 | + } |
76 | - } | 173 | break; |
77 | - break; | 174 | case INDEX_op_qemu_st_i32: |
78 | - default: | 175 | - tcg_out_qemu_st(s, args, false); |
79 | - break; | 176 | + if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { |
80 | - } | 177 | + tcg_out_qemu_st(s, args[0], -1, args[1], -1, |
81 | - | 178 | + args[2], TCG_TYPE_I32); |
82 | /* | 179 | + } else { |
83 | * Process each opcode. | 180 | + tcg_out_qemu_st(s, args[0], -1, args[1], args[2], |
84 | * Sorted alphabetically by opcode as much as possible. | 181 | + args[3], TCG_TYPE_I32); |
182 | + } | ||
183 | break; | ||
184 | case INDEX_op_qemu_st_i64: | ||
185 | - tcg_out_qemu_st(s, args, true); | ||
186 | + if (TCG_TARGET_REG_BITS == 64) { | ||
187 | + tcg_out_qemu_st(s, args[0], -1, args[1], -1, | ||
188 | + args[2], TCG_TYPE_I64); | ||
189 | + } else if (TARGET_LONG_BITS == 32) { | ||
190 | + tcg_out_qemu_st(s, args[0], args[1], args[2], -1, | ||
191 | + args[3], TCG_TYPE_I64); | ||
192 | + } else { | ||
193 | + tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], | ||
194 | + args[4], TCG_TYPE_I64); | ||
195 | + } | ||
196 | break; | ||
197 | |||
198 | case INDEX_op_setcond_i32: | ||
85 | -- | 199 | -- |
86 | 2.25.1 | 200 | 2.34.1 |
87 | 201 | ||
88 | 202 | ||
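
One detail worth noting in the ppc conversion above: the unused hi halves
are passed as -1, where the corresponding mips patch passed 0. A plausible
reading, not stated in the patch itself:

    /*
     * -1 is not a valid TCGReg, so an accidental use of a dead datahi or
     * addrhi argument is easy to catch, whereas 0 silently aliases a
     * real register.
     */
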
1 | Prepare for tracking different masks by renaming this one. | 1 | Collect the parts of the host address into a struct. |
---|---|---|---|
2 | Reorg tcg_out_qemu_{ld,st} to use it. | ||
2 | 3 | ||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 6 | --- |
8 | tcg/optimize.c | 142 +++++++++++++++++++++++++------------------------ | 7 | tcg/ppc/tcg-target.c.inc | 90 +++++++++++++++++++++------------------- |
9 | 1 file changed, 72 insertions(+), 70 deletions(-) | 8 | 1 file changed, 47 insertions(+), 43 deletions(-) |
10 | 9 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
12 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 12 | --- a/tcg/ppc/tcg-target.c.inc |
14 | +++ b/tcg/optimize.c | 13 | +++ b/tcg/ppc/tcg-target.c.inc |
15 | @@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo { | 14 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
16 | TCGTemp *prev_copy; | 15 | { |
17 | TCGTemp *next_copy; | 16 | return tcg_out_fail_alignment(s, l); |
18 | uint64_t val; | ||
19 | - uint64_t mask; | ||
20 | + uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */ | ||
21 | } TempOptInfo; | ||
22 | |||
23 | static inline TempOptInfo *ts_info(TCGTemp *ts) | ||
24 | @@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts) | ||
25 | ti->next_copy = ts; | ||
26 | ti->prev_copy = ts; | ||
27 | ti->is_const = false; | ||
28 | - ti->mask = -1; | ||
29 | + ti->z_mask = -1; | ||
30 | } | 17 | } |
31 | 18 | - | |
32 | static void reset_temp(TCGArg arg) | 19 | #endif /* SOFTMMU */ |
33 | @@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts) | 20 | |
34 | if (ts->kind == TEMP_CONST) { | 21 | +typedef struct { |
35 | ti->is_const = true; | 22 | + TCGReg base; |
36 | ti->val = ts->val; | 23 | + TCGReg index; |
37 | - ti->mask = ts->val; | 24 | +} HostAddress; |
38 | + ti->z_mask = ts->val; | 25 | + |
39 | if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) { | 26 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, |
40 | /* High bits of a 32-bit quantity are garbage. */ | 27 | TCGReg addrlo, TCGReg addrhi, |
41 | - ti->mask |= ~0xffffffffull; | 28 | MemOpIdx oi, TCGType data_type) |
42 | + ti->z_mask |= ~0xffffffffull; | 29 | { |
30 | MemOp opc = get_memop(oi); | ||
31 | MemOp s_bits = opc & MO_SIZE; | ||
32 | - TCGReg rbase; | ||
33 | + HostAddress h; | ||
34 | |||
35 | #ifdef CONFIG_SOFTMMU | ||
36 | tcg_insn_unit *label_ptr; | ||
37 | |||
38 | - addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true); | ||
39 | + h.index = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true); | ||
40 | + h.base = TCG_REG_R3; | ||
41 | |||
42 | /* Load a pointer into the current opcode w/conditional branch-link. */ | ||
43 | label_ptr = s->code_ptr; | ||
44 | tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); | ||
45 | - | ||
46 | - rbase = TCG_REG_R3; | ||
47 | #else /* !CONFIG_SOFTMMU */ | ||
48 | unsigned a_bits = get_alignment_bits(opc); | ||
49 | if (a_bits) { | ||
50 | tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
51 | } | ||
52 | - rbase = guest_base ? TCG_GUEST_BASE_REG : 0; | ||
53 | + h.base = guest_base ? TCG_GUEST_BASE_REG : 0; | ||
54 | + h.index = addrlo; | ||
55 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { | ||
56 | tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); | ||
57 | - addrlo = TCG_REG_TMP1; | ||
58 | + h.index = TCG_REG_TMP1; | ||
59 | } | ||
60 | #endif | ||
61 | |||
62 | if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) { | ||
63 | if (opc & MO_BSWAP) { | ||
64 | - tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); | ||
65 | - tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); | ||
66 | - tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0)); | ||
67 | - } else if (rbase != 0) { | ||
68 | - tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); | ||
69 | - tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo)); | ||
70 | - tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0)); | ||
71 | - } else if (addrlo == datahi) { | ||
72 | - tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); | ||
73 | - tcg_out32(s, LWZ | TAI(datahi, addrlo, 0)); | ||
74 | + tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); | ||
75 | + tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index)); | ||
76 | + tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0)); | ||
77 | + } else if (h.base != 0) { | ||
78 | + tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); | ||
79 | + tcg_out32(s, LWZX | TAB(datahi, h.base, h.index)); | ||
80 | + tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0)); | ||
81 | + } else if (h.index == datahi) { | ||
82 | + tcg_out32(s, LWZ | TAI(datalo, h.index, 4)); | ||
83 | + tcg_out32(s, LWZ | TAI(datahi, h.index, 0)); | ||
84 | } else { | ||
85 | - tcg_out32(s, LWZ | TAI(datahi, addrlo, 0)); | ||
86 | - tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); | ||
87 | + tcg_out32(s, LWZ | TAI(datahi, h.index, 0)); | ||
88 | + tcg_out32(s, LWZ | TAI(datalo, h.index, 4)); | ||
43 | } | 89 | } |
44 | } else { | 90 | } else { |
45 | ti->is_const = false; | 91 | uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)]; |
46 | - ti->mask = -1; | 92 | if (!have_isa_2_06 && insn == LDBRX) { |
47 | + ti->z_mask = -1; | 93 | - tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); |
94 | - tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); | ||
95 | - tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0)); | ||
96 | + tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); | ||
97 | + tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index)); | ||
98 | + tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0)); | ||
99 | tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0); | ||
100 | } else if (insn) { | ||
101 | - tcg_out32(s, insn | TAB(datalo, rbase, addrlo)); | ||
102 | + tcg_out32(s, insn | TAB(datalo, h.base, h.index)); | ||
103 | } else { | ||
104 | insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)]; | ||
105 | - tcg_out32(s, insn | TAB(datalo, rbase, addrlo)); | ||
106 | + tcg_out32(s, insn | TAB(datalo, h.base, h.index)); | ||
107 | tcg_out_movext(s, TCG_TYPE_REG, datalo, | ||
108 | TCG_TYPE_REG, opc & MO_SSIZE, datalo); | ||
109 | } | ||
110 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
111 | { | ||
112 | MemOp opc = get_memop(oi); | ||
113 | MemOp s_bits = opc & MO_SIZE; | ||
114 | - TCGReg rbase; | ||
115 | + HostAddress h; | ||
116 | |||
117 | #ifdef CONFIG_SOFTMMU | ||
118 | tcg_insn_unit *label_ptr; | ||
119 | |||
120 | - addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false); | ||
121 | + h.index = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false); | ||
122 | + h.base = TCG_REG_R3; | ||
123 | |||
124 | /* Load a pointer into the current opcode w/conditional branch-link. */ | ||
125 | label_ptr = s->code_ptr; | ||
126 | tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); | ||
127 | - | ||
128 | - rbase = TCG_REG_R3; | ||
129 | #else /* !CONFIG_SOFTMMU */ | ||
130 | unsigned a_bits = get_alignment_bits(opc); | ||
131 | if (a_bits) { | ||
132 | tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); | ||
48 | } | 133 | } |
49 | } | 134 | - rbase = guest_base ? TCG_GUEST_BASE_REG : 0; |
50 | 135 | + h.base = guest_base ? TCG_GUEST_BASE_REG : 0; | |
51 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | 136 | + h.index = addrlo; |
52 | const TCGOpDef *def; | 137 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { |
53 | TempOptInfo *di; | 138 | tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); |
54 | TempOptInfo *si; | 139 | - addrlo = TCG_REG_TMP1; |
55 | - uint64_t mask; | 140 | + h.index = TCG_REG_TMP1; |
56 | + uint64_t z_mask; | ||
57 | TCGOpcode new_op; | ||
58 | |||
59 | if (ts_are_copies(dst_ts, src_ts)) { | ||
60 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | ||
61 | op->args[0] = dst; | ||
62 | op->args[1] = src; | ||
63 | |||
64 | - mask = si->mask; | ||
65 | + z_mask = si->z_mask; | ||
66 | if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { | ||
67 | /* High bits of the destination are now garbage. */ | ||
68 | - mask |= ~0xffffffffull; | ||
69 | + z_mask |= ~0xffffffffull; | ||
70 | } | 141 | } |
71 | - di->mask = mask; | 142 | #endif |
72 | + di->z_mask = z_mask; | 143 | |
73 | 144 | if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) { | |
74 | if (src_ts->type == dst_ts->type) { | 145 | if (opc & MO_BSWAP) { |
75 | TempOptInfo *ni = ts_info(si->next_copy); | 146 | - tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); |
76 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 147 | - tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); |
148 | - tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0)); | ||
149 | - } else if (rbase != 0) { | ||
150 | - tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); | ||
151 | - tcg_out32(s, STWX | SAB(datahi, rbase, addrlo)); | ||
152 | - tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0)); | ||
153 | + tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); | ||
154 | + tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index)); | ||
155 | + tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0)); | ||
156 | + } else if (h.base != 0) { | ||
157 | + tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); | ||
158 | + tcg_out32(s, STWX | SAB(datahi, h.base, h.index)); | ||
159 | + tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0)); | ||
160 | } else { | ||
161 | - tcg_out32(s, STW | TAI(datahi, addrlo, 0)); | ||
162 | - tcg_out32(s, STW | TAI(datalo, addrlo, 4)); | ||
163 | + tcg_out32(s, STW | TAI(datahi, h.index, 0)); | ||
164 | + tcg_out32(s, STW | TAI(datalo, h.index, 4)); | ||
165 | } | ||
166 | } else { | ||
167 | uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)]; | ||
168 | if (!have_isa_2_06 && insn == STDBRX) { | ||
169 | - tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); | ||
170 | - tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4)); | ||
171 | + tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index)); | ||
172 | + tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4)); | ||
173 | tcg_out_shri64(s, TCG_REG_R0, datalo, 32); | ||
174 | - tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1)); | ||
175 | + tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1)); | ||
176 | } else { | ||
177 | - tcg_out32(s, insn | SAB(datalo, rbase, addrlo)); | ||
178 | + tcg_out32(s, insn | SAB(datalo, h.base, h.index)); | ||
179 | } | ||
77 | } | 180 | } |
78 | 181 | ||
79 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { | ||
80 | - uint64_t mask, partmask, affected, tmp; | ||
81 | + uint64_t z_mask, partmask, affected, tmp; | ||
82 | int nb_oargs, nb_iargs; | ||
83 | TCGOpcode opc = op->opc; | ||
84 | const TCGOpDef *def = &tcg_op_defs[opc]; | ||
85 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
86 | |||
87 | /* Simplify using known-zero bits. Currently only ops with a single | ||
88 | output argument is supported. */ | ||
89 | - mask = -1; | ||
90 | + z_mask = -1; | ||
91 | affected = -1; | ||
92 | switch (opc) { | ||
93 | CASE_OP_32_64(ext8s): | ||
94 | - if ((arg_info(op->args[1])->mask & 0x80) != 0) { | ||
95 | + if ((arg_info(op->args[1])->z_mask & 0x80) != 0) { | ||
96 | break; | ||
97 | } | ||
98 | QEMU_FALLTHROUGH; | ||
99 | CASE_OP_32_64(ext8u): | ||
100 | - mask = 0xff; | ||
101 | + z_mask = 0xff; | ||
102 | goto and_const; | ||
103 | CASE_OP_32_64(ext16s): | ||
104 | - if ((arg_info(op->args[1])->mask & 0x8000) != 0) { | ||
105 | + if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) { | ||
106 | break; | ||
107 | } | ||
108 | QEMU_FALLTHROUGH; | ||
109 | CASE_OP_32_64(ext16u): | ||
110 | - mask = 0xffff; | ||
111 | + z_mask = 0xffff; | ||
112 | goto and_const; | ||
113 | case INDEX_op_ext32s_i64: | ||
114 | - if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { | ||
115 | + if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) { | ||
116 | break; | ||
117 | } | ||
118 | QEMU_FALLTHROUGH; | ||
119 | case INDEX_op_ext32u_i64: | ||
120 | - mask = 0xffffffffU; | ||
121 | + z_mask = 0xffffffffU; | ||
122 | goto and_const; | ||
123 | |||
124 | CASE_OP_32_64(and): | ||
125 | - mask = arg_info(op->args[2])->mask; | ||
126 | + z_mask = arg_info(op->args[2])->z_mask; | ||
127 | if (arg_is_const(op->args[2])) { | ||
128 | and_const: | ||
129 | - affected = arg_info(op->args[1])->mask & ~mask; | ||
130 | + affected = arg_info(op->args[1])->z_mask & ~z_mask; | ||
131 | } | ||
132 | - mask = arg_info(op->args[1])->mask & mask; | ||
133 | + z_mask = arg_info(op->args[1])->z_mask & z_mask; | ||
134 | break; | ||
135 | |||
136 | case INDEX_op_ext_i32_i64: | ||
137 | - if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { | ||
138 | + if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) { | ||
139 | break; | ||
140 | } | ||
141 | QEMU_FALLTHROUGH; | ||
142 | case INDEX_op_extu_i32_i64: | ||
143 | /* We do not compute affected as it is a size changing op. */ | ||
144 | - mask = (uint32_t)arg_info(op->args[1])->mask; | ||
145 | + z_mask = (uint32_t)arg_info(op->args[1])->z_mask; | ||
146 | break; | ||
147 | |||
148 | CASE_OP_32_64(andc): | ||
149 | /* Known-zeros does not imply known-ones. Therefore unless | ||
150 | op->args[2] is constant, we can't infer anything from it. */ | ||
151 | if (arg_is_const(op->args[2])) { | ||
152 | - mask = ~arg_info(op->args[2])->mask; | ||
153 | + z_mask = ~arg_info(op->args[2])->z_mask; | ||
154 | goto and_const; | ||
155 | } | ||
156 | /* But we certainly know nothing outside args[1] may be set. */ | ||
157 | - mask = arg_info(op->args[1])->mask; | ||
158 | + z_mask = arg_info(op->args[1])->z_mask; | ||
159 | break; | ||
160 | |||
161 | case INDEX_op_sar_i32: | ||
162 | if (arg_is_const(op->args[2])) { | ||
163 | tmp = arg_info(op->args[2])->val & 31; | ||
164 | - mask = (int32_t)arg_info(op->args[1])->mask >> tmp; | ||
165 | + z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp; | ||
166 | } | ||
167 | break; | ||
168 | case INDEX_op_sar_i64: | ||
169 | if (arg_is_const(op->args[2])) { | ||
170 | tmp = arg_info(op->args[2])->val & 63; | ||
171 | - mask = (int64_t)arg_info(op->args[1])->mask >> tmp; | ||
172 | + z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp; | ||
173 | } | ||
174 | break; | ||
175 | |||
176 | case INDEX_op_shr_i32: | ||
177 | if (arg_is_const(op->args[2])) { | ||
178 | tmp = arg_info(op->args[2])->val & 31; | ||
179 | - mask = (uint32_t)arg_info(op->args[1])->mask >> tmp; | ||
180 | + z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp; | ||
181 | } | ||
182 | break; | ||
183 | case INDEX_op_shr_i64: | ||
184 | if (arg_is_const(op->args[2])) { | ||
185 | tmp = arg_info(op->args[2])->val & 63; | ||
186 | - mask = (uint64_t)arg_info(op->args[1])->mask >> tmp; | ||
187 | + z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp; | ||
188 | } | ||
189 | break; | ||
190 | |||
191 | case INDEX_op_extrl_i64_i32: | ||
192 | - mask = (uint32_t)arg_info(op->args[1])->mask; | ||
193 | + z_mask = (uint32_t)arg_info(op->args[1])->z_mask; | ||
194 | break; | ||
195 | case INDEX_op_extrh_i64_i32: | ||
196 | - mask = (uint64_t)arg_info(op->args[1])->mask >> 32; | ||
197 | + z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32; | ||
198 | break; | ||
199 | |||
200 | CASE_OP_32_64(shl): | ||
201 | if (arg_is_const(op->args[2])) { | ||
202 | tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1); | ||
203 | - mask = arg_info(op->args[1])->mask << tmp; | ||
204 | + z_mask = arg_info(op->args[1])->z_mask << tmp; | ||
205 | } | ||
206 | break; | ||
207 | |||
208 | CASE_OP_32_64(neg): | ||
209 | /* Set to 1 all bits to the left of the rightmost. */ | ||
210 | - mask = -(arg_info(op->args[1])->mask | ||
211 | - & -arg_info(op->args[1])->mask); | ||
212 | + z_mask = -(arg_info(op->args[1])->z_mask | ||
213 | + & -arg_info(op->args[1])->z_mask); | ||
214 | break; | ||
215 | |||
216 | CASE_OP_32_64(deposit): | ||
217 | - mask = deposit64(arg_info(op->args[1])->mask, | ||
218 | - op->args[3], op->args[4], | ||
219 | - arg_info(op->args[2])->mask); | ||
220 | + z_mask = deposit64(arg_info(op->args[1])->z_mask, | ||
221 | + op->args[3], op->args[4], | ||
222 | + arg_info(op->args[2])->z_mask); | ||
223 | break; | ||
224 | |||
225 | CASE_OP_32_64(extract): | ||
226 | - mask = extract64(arg_info(op->args[1])->mask, | ||
227 | - op->args[2], op->args[3]); | ||
228 | + z_mask = extract64(arg_info(op->args[1])->z_mask, | ||
229 | + op->args[2], op->args[3]); | ||
230 | if (op->args[2] == 0) { | ||
231 | - affected = arg_info(op->args[1])->mask & ~mask; | ||
232 | + affected = arg_info(op->args[1])->z_mask & ~z_mask; | ||
233 | } | ||
234 | break; | ||
235 | CASE_OP_32_64(sextract): | ||
236 | - mask = sextract64(arg_info(op->args[1])->mask, | ||
237 | - op->args[2], op->args[3]); | ||
238 | - if (op->args[2] == 0 && (tcg_target_long)mask >= 0) { | ||
239 | - affected = arg_info(op->args[1])->mask & ~mask; | ||
240 | + z_mask = sextract64(arg_info(op->args[1])->z_mask, | ||
241 | + op->args[2], op->args[3]); | ||
242 | + if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) { | ||
243 | + affected = arg_info(op->args[1])->z_mask & ~z_mask; | ||
244 | } | ||
245 | break; | ||
246 | |||
247 | CASE_OP_32_64(or): | ||
248 | CASE_OP_32_64(xor): | ||
249 | - mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask; | ||
250 | + z_mask = arg_info(op->args[1])->z_mask | ||
251 | + | arg_info(op->args[2])->z_mask; | ||
252 | break; | ||
253 | |||
254 | case INDEX_op_clz_i32: | ||
255 | case INDEX_op_ctz_i32: | ||
256 | - mask = arg_info(op->args[2])->mask | 31; | ||
257 | + z_mask = arg_info(op->args[2])->z_mask | 31; | ||
258 | break; | ||
259 | |||
260 | case INDEX_op_clz_i64: | ||
261 | case INDEX_op_ctz_i64: | ||
262 | - mask = arg_info(op->args[2])->mask | 63; | ||
263 | + z_mask = arg_info(op->args[2])->z_mask | 63; | ||
264 | break; | ||
265 | |||
266 | case INDEX_op_ctpop_i32: | ||
267 | - mask = 32 | 31; | ||
268 | + z_mask = 32 | 31; | ||
269 | break; | ||
270 | case INDEX_op_ctpop_i64: | ||
271 | - mask = 64 | 63; | ||
272 | + z_mask = 64 | 63; | ||
273 | break; | ||
274 | |||
275 | CASE_OP_32_64(setcond): | ||
276 | case INDEX_op_setcond2_i32: | ||
277 | - mask = 1; | ||
278 | + z_mask = 1; | ||
279 | break; | ||
280 | |||
281 | CASE_OP_32_64(movcond): | ||
282 | - mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask; | ||
283 | + z_mask = arg_info(op->args[3])->z_mask | ||
284 | + | arg_info(op->args[4])->z_mask; | ||
285 | break; | ||
286 | |||
287 | CASE_OP_32_64(ld8u): | ||
288 | - mask = 0xff; | ||
289 | + z_mask = 0xff; | ||
290 | break; | ||
291 | CASE_OP_32_64(ld16u): | ||
292 | - mask = 0xffff; | ||
293 | + z_mask = 0xffff; | ||
294 | break; | ||
295 | case INDEX_op_ld32u_i64: | ||
296 | - mask = 0xffffffffu; | ||
297 | + z_mask = 0xffffffffu; | ||
298 | break; | ||
299 | |||
300 | CASE_OP_32_64(qemu_ld): | ||
301 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
302 | MemOpIdx oi = op->args[nb_oargs + nb_iargs]; | ||
303 | MemOp mop = get_memop(oi); | ||
304 | if (!(mop & MO_SIGN)) { | ||
305 | - mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; | ||
306 | + z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; | ||
307 | } | ||
308 | } | ||
309 | break; | ||
310 | |||
311 | CASE_OP_32_64(bswap16): | ||
312 | - mask = arg_info(op->args[1])->mask; | ||
313 | - if (mask <= 0xffff) { | ||
314 | + z_mask = arg_info(op->args[1])->z_mask; | ||
315 | + if (z_mask <= 0xffff) { | ||
316 | op->args[2] |= TCG_BSWAP_IZ; | ||
317 | } | ||
318 | - mask = bswap16(mask); | ||
319 | + z_mask = bswap16(z_mask); | ||
320 | switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { | ||
321 | case TCG_BSWAP_OZ: | ||
322 | break; | ||
323 | case TCG_BSWAP_OS: | ||
324 | - mask = (int16_t)mask; | ||
325 | + z_mask = (int16_t)z_mask; | ||
326 | break; | ||
327 | default: /* undefined high bits */ | ||
328 | - mask |= MAKE_64BIT_MASK(16, 48); | ||
329 | + z_mask |= MAKE_64BIT_MASK(16, 48); | ||
330 | break; | ||
331 | } | ||
332 | break; | ||
333 | |||
334 | case INDEX_op_bswap32_i64: | ||
335 | - mask = arg_info(op->args[1])->mask; | ||
336 | - if (mask <= 0xffffffffu) { | ||
337 | + z_mask = arg_info(op->args[1])->z_mask; | ||
338 | + if (z_mask <= 0xffffffffu) { | ||
339 | op->args[2] |= TCG_BSWAP_IZ; | ||
340 | } | ||
341 | - mask = bswap32(mask); | ||
342 | + z_mask = bswap32(z_mask); | ||
343 | switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { | ||
344 | case TCG_BSWAP_OZ: | ||
345 | break; | ||
346 | case TCG_BSWAP_OS: | ||
347 | - mask = (int32_t)mask; | ||
348 | + z_mask = (int32_t)z_mask; | ||
349 | break; | ||
350 | default: /* undefined high bits */ | ||
351 | - mask |= MAKE_64BIT_MASK(32, 32); | ||
352 | + z_mask |= MAKE_64BIT_MASK(32, 32); | ||
353 | break; | ||
354 | } | ||
355 | break; | ||
356 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
357 | /* 32-bit ops generate 32-bit results. For the result is zero test | ||
358 | below, we can ignore high bits, but for further optimizations we | ||
359 | need to record that the high bits contain garbage. */ | ||
360 | - partmask = mask; | ||
361 | + partmask = z_mask; | ||
362 | if (!(def->flags & TCG_OPF_64BIT)) { | ||
363 | - mask |= ~(tcg_target_ulong)0xffffffffu; | ||
364 | + z_mask |= ~(tcg_target_ulong)0xffffffffu; | ||
365 | partmask &= 0xffffffffu; | ||
366 | affected &= 0xffffffffu; | ||
367 | } | ||
368 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
369 | vs the high word of the input. */ | ||
370 | do_setcond_high: | ||
371 | reset_temp(op->args[0]); | ||
372 | - arg_info(op->args[0])->mask = 1; | ||
373 | + arg_info(op->args[0])->z_mask = 1; | ||
374 | op->opc = INDEX_op_setcond_i32; | ||
375 | op->args[1] = op->args[2]; | ||
376 | op->args[2] = op->args[4]; | ||
377 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
378 | } | ||
379 | do_setcond_low: | ||
380 | reset_temp(op->args[0]); | ||
381 | - arg_info(op->args[0])->mask = 1; | ||
382 | + arg_info(op->args[0])->z_mask = 1; | ||
383 | op->opc = INDEX_op_setcond_i32; | ||
384 | op->args[2] = op->args[3]; | ||
385 | op->args[3] = op->args[5]; | ||
386 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
387 | /* Default case: we know nothing about operation (or were unable | ||
388 | to compute the operation result) so no propagation is done. | ||
389 | We trash everything if the operation is the end of a basic | ||
390 | - block, otherwise we only trash the output args. "mask" is | ||
391 | + block, otherwise we only trash the output args. "z_mask" is | ||
392 | the non-zero bits mask for the first output arg. */ | ||
393 | if (def->flags & TCG_OPF_BB_END) { | ||
394 | memset(&temps_used, 0, sizeof(temps_used)); | ||
395 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
396 | /* Save the corresponding known-zero bits mask for the | ||
397 | first output argument (only one supported so far). */ | ||
398 | if (i == 0) { | ||
399 | - arg_info(op->args[i])->mask = mask; | ||
400 | + arg_info(op->args[i])->z_mask = z_mask; | ||
401 | } | ||
402 | } | ||
403 | } | ||
404 | -- | 182 | -- |
405 | 2.25.1 | 183 | 2.34.1 |
406 | 184 | ||
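The rename in the patch above is mechanical, but the known-zeros arithmetic it exposes is easy to sanity-check outside QEMU. What follows is only a minimal standalone sketch (not part of the series; the variable names are illustrative) replaying the and/and_const and shr rules visible in the hunks above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* CASE_OP_32_64(and): a result bit can be 1 only if it can be 1
     * in both inputs, so z_mask = z1 & z2. */
    uint64_t z1 = 0x00000000ffffffffull;  /* arg1 known to fit in 32 bits */
    uint64_t z2 = 0x00000000000000ffull;  /* arg2 is the constant 0xff */
    uint64_t z_mask = z1 & z2;
    /* and_const: the input bits that the AND could actually clear. */
    uint64_t affected = z1 & ~z2;
    printf("and: z_mask=%#018llx affected=%#018llx\n",
           (unsigned long long)z_mask, (unsigned long long)affected);

    /* INDEX_op_shr_i64: known zeros shift right along with the value. */
    printf("shr 8: z_mask=%#018llx\n", (unsigned long long)(z1 >> 8));
    return 0;
}

When affected comes out as 0, every bit the AND could clear is already known to be zero, which is what lets the optimizer later downgrade the op to a plain copy.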
1 | Certain targets, like riscv, produce signed 32-bit results. | 1 | The port currently does not support "oversize" guests, which |
---|---|---|---|
2 | This can lead to lots of redundant extensions as values are | 2 | means riscv32 can only target 32-bit guests. We will soon be |
3 | manipulated. | 3 | building TCG once for all guests. This implies that we can |
4 | only support riscv64. | ||
4 | 5 | ||
5 | Begin by tracking only the obvious sign-extensions, and | 6 | Since all Linux distributions target riscv64, not riscv32, |
6 | converting them to simple copies when possible. | 7 | this is not much of a restriction and simplifies the code. |
7 | 8 | ||
8 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 9 | The brcond2 and setcond2 opcodes are exclusive to 32-bit hosts, |
9 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 10 | so we can and should remove the stubs. |
11 | |||
12 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
13 | Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 15 | --- |
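Note past the commit message: the s_mask encoding is compact enough that a standalone sketch helps. This is only an illustration of smask_from_value() from the patch below, with QEMU's clrsb64() swapped for the GCC/Clang __builtin_clrsbll() so it builds on its own; the test values are arbitrary.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* s_mask: a left-aligned mask of clrsb(value) bits, i.e. the high
 * bits known to be copies of the sign bit. */
static uint64_t smask_from_value(uint64_t value)
{
    int rep = __builtin_clrsbll((long long)value);  /* clrsb64() in QEMU */
    return ~(~0ull >> rep);
}

int main(void)
{
    /* Sign-extended from 32 bits: bits 63..32 are known sign copies,
     * so this prints ffffffff00000000. */
    printf("%016" PRIx64 "\n", smask_from_value(0xffffffff80000000ull));
    /* Small positive value: bits 63..8 repeat the (zero) sign bit,
     * so this prints ffffffffffffff00. */
    printf("%016" PRIx64 "\n", smask_from_value(0x7full));
    return 0;
}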
12 | tcg/optimize.c | 123 ++++++++++++++++++++++++++++++++++++++++--------- | 16 | tcg/riscv/tcg-target-con-set.h | 8 -- |
13 | 1 file changed, 102 insertions(+), 21 deletions(-) | 17 | tcg/riscv/tcg-target.h | 22 ++-- |
18 | tcg/riscv/tcg-target.c.inc | 232 +++++++++------------------------ | ||
19 | 3 files changed, 72 insertions(+), 190 deletions(-) | ||
14 | 20 | ||
15 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 21 | diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h |
16 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/tcg/optimize.c | 23 | --- a/tcg/riscv/tcg-target-con-set.h |
18 | +++ b/tcg/optimize.c | 24 | +++ b/tcg/riscv/tcg-target-con-set.h |
19 | @@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo { | 25 | @@ -XXX,XX +XXX,XX @@ C_O0_I1(r) |
20 | TCGTemp *next_copy; | 26 | C_O0_I2(LZ, L) |
21 | uint64_t val; | 27 | C_O0_I2(rZ, r) |
22 | uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */ | 28 | C_O0_I2(rZ, rZ) |
23 | + uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */ | 29 | -C_O0_I3(LZ, L, L) |
24 | } TempOptInfo; | 30 | -C_O0_I3(LZ, LZ, L) |
25 | 31 | -C_O0_I4(LZ, LZ, L, L) | |
26 | typedef struct OptContext { | 32 | -C_O0_I4(rZ, rZ, rZ, rZ) |
27 | @@ -XXX,XX +XXX,XX @@ typedef struct OptContext { | 33 | C_O1_I1(r, L) |
28 | /* In flight values from optimization. */ | 34 | C_O1_I1(r, r) |
29 | uint64_t a_mask; /* mask bit is 0 iff value identical to first input */ | 35 | -C_O1_I2(r, L, L) |
30 | uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */ | 36 | C_O1_I2(r, r, ri) |
31 | + uint64_t s_mask; /* mask of clrsb(value) bits */ | 37 | C_O1_I2(r, r, rI) |
32 | TCGType type; | 38 | C_O1_I2(r, rZ, rN) |
33 | } OptContext; | 39 | C_O1_I2(r, rZ, rZ) |
34 | 40 | -C_O1_I4(r, rZ, rZ, rZ, rZ) | |
35 | +/* Calculate the smask for a specific value. */ | 41 | -C_O2_I1(r, r, L) |
36 | +static uint64_t smask_from_value(uint64_t value) | 42 | -C_O2_I2(r, r, L, L) |
37 | +{ | 43 | C_O2_I4(r, r, rZ, rZ, rM, rM) |
38 | + int rep = clrsb64(value); | 44 | diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h |
39 | + return ~(~0ull >> rep); | 45 | index XXXXXXX..XXXXXXX 100644 |
40 | +} | 46 | --- a/tcg/riscv/tcg-target.h |
41 | + | 47 | +++ b/tcg/riscv/tcg-target.h |
48 | @@ -XXX,XX +XXX,XX @@ | ||
49 | #ifndef RISCV_TCG_TARGET_H | ||
50 | #define RISCV_TCG_TARGET_H | ||
51 | |||
52 | -#if __riscv_xlen == 32 | ||
53 | -# define TCG_TARGET_REG_BITS 32 | ||
54 | -#elif __riscv_xlen == 64 | ||
55 | -# define TCG_TARGET_REG_BITS 64 | ||
42 | +/* | 56 | +/* |
43 | + * Calculate the smask for a given set of known-zeros. | 57 | + * We don't support oversize guests. |
44 | + * If there are lots of zeros on the left, we can consider the remainder | 58 | + * Since we will only build tcg once, this in turn requires a 64-bit host. |
45 | + * an unsigned field, and thus the corresponding signed field is one bit | ||
46 | + * larger. | ||
47 | + */ | 59 | + */ |
48 | +static uint64_t smask_from_zmask(uint64_t zmask) | 60 | +#if __riscv_xlen != 64 |
49 | +{ | 61 | +#error "unsupported code generation mode" |
50 | + /* | 62 | #endif |
51 | + * Only the 0 bits are significant for zmask, thus the msb itself | 63 | +#define TCG_TARGET_REG_BITS 64 |
52 | + * must be zero, else we have no sign information. | 64 | |
53 | + */ | 65 | #define TCG_TARGET_INSN_UNIT_SIZE 4 |
54 | + int rep = clz64(zmask); | 66 | #define TCG_TARGET_TLB_DISPLACEMENT_BITS 20 |
55 | + if (rep == 0) { | 67 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
56 | + return 0; | 68 | #define TCG_TARGET_STACK_ALIGN 16 |
57 | + } | 69 | #define TCG_TARGET_CALL_STACK_OFFSET 0 |
58 | + rep -= 1; | 70 | #define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL |
59 | + return ~(~0ull >> rep); | 71 | -#if TCG_TARGET_REG_BITS == 32 |
60 | +} | 72 | -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN |
61 | + | 73 | -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN |
62 | static inline TempOptInfo *ts_info(TCGTemp *ts) | 74 | -#else |
63 | { | 75 | #define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL |
64 | return ts->state_ptr; | 76 | #define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL |
65 | @@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts) | 77 | -#endif |
66 | ti->prev_copy = ts; | 78 | #define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL |
67 | ti->is_const = false; | 79 | |
68 | ti->z_mask = -1; | 80 | /* optional instructions */ |
69 | + ti->s_mask = 0; | 81 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
70 | } | 82 | #define TCG_TARGET_HAS_sub2_i32 1 |
71 | 83 | #define TCG_TARGET_HAS_mulu2_i32 0 | |
72 | static void reset_temp(TCGArg arg) | 84 | #define TCG_TARGET_HAS_muls2_i32 0 |
73 | @@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts) | 85 | -#define TCG_TARGET_HAS_muluh_i32 (TCG_TARGET_REG_BITS == 32) |
74 | ti->is_const = true; | 86 | -#define TCG_TARGET_HAS_mulsh_i32 (TCG_TARGET_REG_BITS == 32) |
75 | ti->val = ts->val; | 87 | +#define TCG_TARGET_HAS_muluh_i32 0 |
76 | ti->z_mask = ts->val; | 88 | +#define TCG_TARGET_HAS_mulsh_i32 0 |
77 | + ti->s_mask = smask_from_value(ts->val); | 89 | #define TCG_TARGET_HAS_ext8s_i32 1 |
90 | #define TCG_TARGET_HAS_ext16s_i32 1 | ||
91 | #define TCG_TARGET_HAS_ext8u_i32 1 | ||
92 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
93 | #define TCG_TARGET_HAS_setcond2 1 | ||
94 | #define TCG_TARGET_HAS_qemu_st8_i32 0 | ||
95 | |||
96 | -#if TCG_TARGET_REG_BITS == 64 | ||
97 | #define TCG_TARGET_HAS_movcond_i64 0 | ||
98 | #define TCG_TARGET_HAS_div_i64 1 | ||
99 | #define TCG_TARGET_HAS_rem_i64 1 | ||
100 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
101 | #define TCG_TARGET_HAS_muls2_i64 0 | ||
102 | #define TCG_TARGET_HAS_muluh_i64 1 | ||
103 | #define TCG_TARGET_HAS_mulsh_i64 1 | ||
104 | -#endif | ||
105 | |||
106 | #define TCG_TARGET_DEFAULT_MO (0) | ||
107 | |||
108 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc | ||
109 | index XXXXXXX..XXXXXXX 100644 | ||
110 | --- a/tcg/riscv/tcg-target.c.inc | ||
111 | +++ b/tcg/riscv/tcg-target.c.inc | ||
112 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) | ||
113 | #define SOFTMMU_RESERVE_REGS 0 | ||
114 | #endif | ||
115 | |||
116 | - | ||
117 | -static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) | ||
118 | -{ | ||
119 | - if (TCG_TARGET_REG_BITS == 32) { | ||
120 | - return sextract32(val, pos, len); | ||
121 | - } else { | ||
122 | - return sextract64(val, pos, len); | ||
123 | - } | ||
124 | -} | ||
125 | +#define sextreg sextract64 | ||
126 | |||
127 | /* test if a constant matches the constraint */ | ||
128 | static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
129 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
130 | OPC_XOR = 0x4033, | ||
131 | OPC_XORI = 0x4013, | ||
132 | |||
133 | -#if TCG_TARGET_REG_BITS == 64 | ||
134 | OPC_ADDIW = 0x1b, | ||
135 | OPC_ADDW = 0x3b, | ||
136 | OPC_DIVUW = 0x200503b, | ||
137 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
138 | OPC_SRLIW = 0x501b, | ||
139 | OPC_SRLW = 0x503b, | ||
140 | OPC_SUBW = 0x4000003b, | ||
141 | -#else | ||
142 | - /* Simplify code throughout by defining aliases for RV32. */ | ||
143 | - OPC_ADDIW = OPC_ADDI, | ||
144 | - OPC_ADDW = OPC_ADD, | ||
145 | - OPC_DIVUW = OPC_DIVU, | ||
146 | - OPC_DIVW = OPC_DIV, | ||
147 | - OPC_MULW = OPC_MUL, | ||
148 | - OPC_REMUW = OPC_REMU, | ||
149 | - OPC_REMW = OPC_REM, | ||
150 | - OPC_SLLIW = OPC_SLLI, | ||
151 | - OPC_SLLW = OPC_SLL, | ||
152 | - OPC_SRAIW = OPC_SRAI, | ||
153 | - OPC_SRAW = OPC_SRA, | ||
154 | - OPC_SRLIW = OPC_SRLI, | ||
155 | - OPC_SRLW = OPC_SRL, | ||
156 | - OPC_SUBW = OPC_SUB, | ||
157 | -#endif | ||
158 | |||
159 | OPC_FENCE = 0x0000000f, | ||
160 | OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */ | ||
161 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, | ||
162 | tcg_target_long lo, hi, tmp; | ||
163 | int shift, ret; | ||
164 | |||
165 | - if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { | ||
166 | + if (type == TCG_TYPE_I32) { | ||
167 | val = (int32_t)val; | ||
168 | } | ||
169 | |||
170 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, | ||
171 | } | ||
172 | |||
173 | hi = val - lo; | ||
174 | - if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) { | ||
175 | + if (val == (int32_t)val) { | ||
176 | tcg_out_opc_upper(s, OPC_LUI, rd, hi); | ||
177 | if (lo != 0) { | ||
178 | tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo); | ||
179 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, | ||
180 | return; | ||
181 | } | ||
182 | |||
183 | - /* We can only be here if TCG_TARGET_REG_BITS != 32 */ | ||
184 | tmp = tcg_pcrel_diff(s, (void *)val); | ||
185 | if (tmp == (int32_t)tmp) { | ||
186 | tcg_out_opc_upper(s, OPC_AUIPC, rd, 0); | ||
187 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data, | ||
188 | static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, | ||
189 | TCGReg arg1, intptr_t arg2) | ||
190 | { | ||
191 | - bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32); | ||
192 | - tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2); | ||
193 | + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD; | ||
194 | + tcg_out_ldst(s, insn, arg, arg1, arg2); | ||
195 | } | ||
196 | |||
197 | static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, | ||
198 | TCGReg arg1, intptr_t arg2) | ||
199 | { | ||
200 | - bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32); | ||
201 | - tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2); | ||
202 | + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD; | ||
203 | + tcg_out_ldst(s, insn, arg, arg1, arg2); | ||
204 | } | ||
205 | |||
206 | static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, | ||
207 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, | ||
208 | } | ||
209 | } | ||
210 | |||
211 | -static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, | ||
212 | - TCGReg bl, TCGReg bh, TCGLabel *l) | ||
213 | -{ | ||
214 | - /* todo */ | ||
215 | - g_assert_not_reached(); | ||
216 | -} | ||
217 | - | ||
218 | -static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, | ||
219 | - TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) | ||
220 | -{ | ||
221 | - /* todo */ | ||
222 | - g_assert_not_reached(); | ||
223 | -} | ||
224 | - | ||
225 | static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) | ||
226 | { | ||
227 | TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; | ||
228 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) | ||
229 | if (offset == sextreg(offset, 0, 20)) { | ||
230 | /* short jump: -2097150 to 2097152 */ | ||
231 | tcg_out_opc_jump(s, OPC_JAL, link, offset); | ||
232 | - } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) { | ||
233 | + } else if (offset == (int32_t)offset) { | ||
234 | /* long jump: -2147483646 to 2147483648 */ | ||
235 | tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0); | ||
236 | tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0); | ||
237 | ret = reloc_call(s->code_ptr - 2, arg); | ||
238 | tcg_debug_assert(ret == true); | ||
239 | - } else if (TCG_TARGET_REG_BITS == 64) { | ||
240 | + } else { | ||
241 | /* far jump: 64-bit */ | ||
242 | tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12); | ||
243 | tcg_target_long base = (tcg_target_long)arg - imm; | ||
244 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base); | ||
245 | tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm); | ||
246 | - } else { | ||
247 | - g_assert_not_reached(); | ||
248 | } | ||
249 | } | ||
250 | |||
251 | @@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[MO_SIZE + 1] = { | ||
252 | #endif | ||
253 | }; | ||
254 | |||
255 | -/* We don't support oversize guests */ | ||
256 | -QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS); | ||
257 | - | ||
258 | /* We expect to use a 12-bit negative offset from ENV. */ | ||
259 | QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); | ||
260 | QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); | ||
261 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) | ||
262 | tcg_debug_assert(ok); | ||
263 | } | ||
264 | |||
265 | -static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl, | ||
266 | - TCGReg addrh, MemOpIdx oi, | ||
267 | +static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, MemOpIdx oi, | ||
268 | tcg_insn_unit **label_ptr, bool is_load) | ||
269 | { | ||
270 | MemOp opc = get_memop(oi); | ||
271 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl, | ||
272 | tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs); | ||
273 | tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs); | ||
274 | |||
275 | - tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl, | ||
276 | + tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr, | ||
277 | TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); | ||
278 | tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); | ||
279 | tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); | ||
280 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl, | ||
281 | /* Clear the non-page, non-alignment bits from the address. */ | ||
282 | compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); | ||
283 | if (compare_mask == sextreg(compare_mask, 0, 12)) { | ||
284 | - tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask); | ||
285 | + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr, compare_mask); | ||
78 | } else { | 286 | } else { |
79 | ti->is_const = false; | 287 | tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); |
80 | ti->z_mask = -1; | 288 | - tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl); |
81 | + ti->s_mask = 0; | 289 | + tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr); |
82 | } | 290 | } |
83 | } | 291 | |
84 | 292 | /* Compare masked address with the TLB entry. */ | |
85 | @@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) | 293 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl, |
86 | op->args[1] = src; | 294 | tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); |
87 | 295 | ||
88 | di->z_mask = si->z_mask; | 296 | /* TLB Hit - translate address using addend. */ |
89 | + di->s_mask = si->s_mask; | 297 | - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { |
90 | 298 | - tcg_out_ext32u(s, TCG_REG_TMP0, addrl); | |
91 | if (src_ts->type == dst_ts->type) { | 299 | - addrl = TCG_REG_TMP0; |
92 | TempOptInfo *ni = ts_info(si->next_copy); | 300 | + if (TARGET_LONG_BITS == 32) { |
93 | @@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op) | 301 | + tcg_out_ext32u(s, TCG_REG_TMP0, addr); |
94 | 302 | + addr = TCG_REG_TMP0; | |
95 | nb_oargs = def->nb_oargs; | 303 | } |
96 | for (i = 0; i < nb_oargs; i++) { | 304 | - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl); |
97 | - reset_temp(op->args[i]); | 305 | + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr); |
98 | + TCGTemp *ts = arg_temp(op->args[i]); | 306 | return TCG_REG_TMP0; |
99 | + reset_ts(ts); | 307 | } |
100 | /* | 308 | |
101 | - * Save the corresponding known-zero bits mask for the | 309 | static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, |
102 | + * Save the corresponding known-zero/sign bits mask for the | 310 | - TCGType ext, |
103 | * first output argument (only one supported so far). | 311 | - TCGReg datalo, TCGReg datahi, |
104 | */ | 312 | - TCGReg addrlo, TCGReg addrhi, |
105 | if (i == 0) { | 313 | - void *raddr, tcg_insn_unit **label_ptr) |
106 | - arg_info(op->args[i])->z_mask = ctx->z_mask; | 314 | + TCGType data_type, TCGReg data_reg, |
107 | + ts_info(ts)->z_mask = ctx->z_mask; | 315 | + TCGReg addr_reg, void *raddr, |
108 | + ts_info(ts)->s_mask = ctx->s_mask; | 316 | + tcg_insn_unit **label_ptr) |
317 | { | ||
318 | TCGLabelQemuLdst *label = new_ldst_label(s); | ||
319 | |||
320 | label->is_ld = is_ld; | ||
321 | label->oi = oi; | ||
322 | - label->type = ext; | ||
323 | - label->datalo_reg = datalo; | ||
324 | - label->datahi_reg = datahi; | ||
325 | - label->addrlo_reg = addrlo; | ||
326 | - label->addrhi_reg = addrhi; | ||
327 | + label->type = data_type; | ||
328 | + label->datalo_reg = data_reg; | ||
329 | + label->addrlo_reg = addr_reg; | ||
330 | label->raddr = tcg_splitwx_to_rx(raddr); | ||
331 | label->label_ptr[0] = label_ptr[0]; | ||
332 | } | ||
333 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
334 | TCGReg a2 = tcg_target_call_iarg_regs[2]; | ||
335 | TCGReg a3 = tcg_target_call_iarg_regs[3]; | ||
336 | |||
337 | - /* We don't support oversize guests */ | ||
338 | - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { | ||
339 | - g_assert_not_reached(); | ||
340 | - } | ||
341 | - | ||
342 | /* resolve label address */ | ||
343 | if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { | ||
344 | return false; | ||
345 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
346 | TCGReg a3 = tcg_target_call_iarg_regs[3]; | ||
347 | TCGReg a4 = tcg_target_call_iarg_regs[4]; | ||
348 | |||
349 | - /* We don't support oversize guests */ | ||
350 | - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { | ||
351 | - g_assert_not_reached(); | ||
352 | - } | ||
353 | - | ||
354 | /* resolve label address */ | ||
355 | if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { | ||
356 | return false; | ||
357 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
358 | |||
359 | #endif /* CONFIG_SOFTMMU */ | ||
360 | |||
361 | -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
362 | +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val, | ||
363 | TCGReg base, MemOp opc, bool is_64) | ||
364 | { | ||
365 | /* Byte swapping is left to middle-end expansion. */ | ||
366 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
367 | |||
368 | switch (opc & (MO_SSIZE)) { | ||
369 | case MO_UB: | ||
370 | - tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); | ||
371 | + tcg_out_opc_imm(s, OPC_LBU, val, base, 0); | ||
372 | break; | ||
373 | case MO_SB: | ||
374 | - tcg_out_opc_imm(s, OPC_LB, lo, base, 0); | ||
375 | + tcg_out_opc_imm(s, OPC_LB, val, base, 0); | ||
376 | break; | ||
377 | case MO_UW: | ||
378 | - tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); | ||
379 | + tcg_out_opc_imm(s, OPC_LHU, val, base, 0); | ||
380 | break; | ||
381 | case MO_SW: | ||
382 | - tcg_out_opc_imm(s, OPC_LH, lo, base, 0); | ||
383 | + tcg_out_opc_imm(s, OPC_LH, val, base, 0); | ||
384 | break; | ||
385 | case MO_UL: | ||
386 | - if (TCG_TARGET_REG_BITS == 64 && is_64) { | ||
387 | - tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); | ||
388 | + if (is_64) { | ||
389 | + tcg_out_opc_imm(s, OPC_LWU, val, base, 0); | ||
390 | break; | ||
109 | } | 391 | } |
110 | } | 392 | /* FALLTHRU */ |
111 | } | 393 | case MO_SL: |
112 | @@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op) | 394 | - tcg_out_opc_imm(s, OPC_LW, lo, base, 0); |
113 | { | 395 | + tcg_out_opc_imm(s, OPC_LW, val, base, 0); |
114 | uint64_t a_mask = ctx->a_mask; | 396 | break; |
115 | uint64_t z_mask = ctx->z_mask; | 397 | case MO_UQ: |
116 | + uint64_t s_mask = ctx->s_mask; | 398 | - /* Prefer to load from offset 0 first, but allow for overlap. */ |
117 | 399 | - if (TCG_TARGET_REG_BITS == 64) { | |
118 | /* | 400 | - tcg_out_opc_imm(s, OPC_LD, lo, base, 0); |
119 | * 32-bit ops generate 32-bit results, which for the purpose of | 401 | - } else if (lo != base) { |
120 | @@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op) | 402 | - tcg_out_opc_imm(s, OPC_LW, lo, base, 0); |
121 | if (ctx->type == TCG_TYPE_I32) { | 403 | - tcg_out_opc_imm(s, OPC_LW, hi, base, 4); |
122 | a_mask = (int32_t)a_mask; | 404 | - } else { |
123 | z_mask = (int32_t)z_mask; | 405 | - tcg_out_opc_imm(s, OPC_LW, hi, base, 4); |
124 | + s_mask |= MAKE_64BIT_MASK(32, 32); | 406 | - tcg_out_opc_imm(s, OPC_LW, lo, base, 0); |
125 | ctx->z_mask = z_mask; | 407 | - } |
126 | + ctx->s_mask = s_mask; | 408 | + tcg_out_opc_imm(s, OPC_LD, val, base, 0); |
127 | } | 409 | break; |
128 | |||
129 | if (z_mask == 0) { | ||
130 | @@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) | ||
131 | |||
132 | static bool fold_bswap(OptContext *ctx, TCGOp *op) | ||
133 | { | ||
134 | - uint64_t z_mask, sign; | ||
135 | + uint64_t z_mask, s_mask, sign; | ||
136 | |||
137 | if (arg_is_const(op->args[1])) { | ||
138 | uint64_t t = arg_info(op->args[1])->val; | ||
139 | @@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op) | ||
140 | } | ||
141 | |||
142 | z_mask = arg_info(op->args[1])->z_mask; | ||
143 | + | ||
144 | switch (op->opc) { | ||
145 | case INDEX_op_bswap16_i32: | ||
146 | case INDEX_op_bswap16_i64: | ||
147 | @@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op) | ||
148 | default: | 410 | default: |
149 | g_assert_not_reached(); | 411 | g_assert_not_reached(); |
150 | } | 412 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, |
151 | + s_mask = smask_from_zmask(z_mask); | 413 | |
152 | 414 | static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | |
153 | switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { | 415 | { |
154 | case TCG_BSWAP_OZ: | 416 | - TCGReg addr_regl, addr_regh __attribute__((unused)); |
155 | @@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op) | 417 | - TCGReg data_regl, data_regh; |
156 | /* If the sign bit may be 1, force all the bits above to 1. */ | 418 | + TCGReg addr_reg, data_reg; |
157 | if (z_mask & sign) { | 419 | MemOpIdx oi; |
158 | z_mask |= sign; | 420 | MemOp opc; |
159 | + s_mask = sign << 1; | 421 | #if defined(CONFIG_SOFTMMU) |
160 | } | 422 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) |
161 | break; | 423 | #endif |
162 | default: | 424 | TCGReg base; |
163 | /* The high bits are undefined: force all bits above the sign to 1. */ | 425 | |
164 | z_mask |= sign << 1; | 426 | - data_regl = *args++; |
165 | + s_mask = 0; | 427 | - data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); |
166 | break; | 428 | - addr_regl = *args++; |
167 | } | 429 | - addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); |
168 | ctx->z_mask = z_mask; | 430 | + data_reg = *args++; |
169 | + ctx->s_mask = s_mask; | 431 | + addr_reg = *args++; |
170 | 432 | oi = *args++; | |
171 | return fold_masks(ctx, op); | 433 | opc = get_memop(oi); |
172 | } | 434 | |
173 | @@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op) | 435 | #if defined(CONFIG_SOFTMMU) |
174 | static bool fold_extract(OptContext *ctx, TCGOp *op) | 436 | - base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1); |
175 | { | 437 | - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); |
176 | uint64_t z_mask_old, z_mask; | 438 | - add_qemu_ldst_label(s, 1, oi, |
177 | + int pos = op->args[2]; | 439 | - (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), |
178 | + int len = op->args[3]; | 440 | - data_regl, data_regh, addr_regl, addr_regh, |
179 | 441 | - s->code_ptr, label_ptr); | |
180 | if (arg_is_const(op->args[1])) { | 442 | + base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1); |
181 | uint64_t t; | 443 | + tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64); |
182 | 444 | + add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), | |
183 | t = arg_info(op->args[1])->val; | 445 | + data_reg, addr_reg, s->code_ptr, label_ptr); |
184 | - t = extract64(t, op->args[2], op->args[3]); | 446 | #else |
185 | + t = extract64(t, pos, len); | 447 | a_bits = get_alignment_bits(opc); |
186 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | 448 | if (a_bits) { |
187 | } | 449 | - tcg_out_test_alignment(s, true, addr_regl, a_bits); |
188 | 450 | + tcg_out_test_alignment(s, true, addr_reg, a_bits); | |
189 | z_mask_old = arg_info(op->args[1])->z_mask; | 451 | } |
190 | - z_mask = extract64(z_mask_old, op->args[2], op->args[3]); | 452 | - base = addr_regl; |
191 | - if (op->args[2] == 0) { | 453 | - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { |
192 | + z_mask = extract64(z_mask_old, pos, len); | 454 | + base = addr_reg; |
193 | + if (pos == 0) { | 455 | + if (TARGET_LONG_BITS == 32) { |
194 | ctx->a_mask = z_mask_old ^ z_mask; | 456 | tcg_out_ext32u(s, TCG_REG_TMP0, base); |
195 | } | 457 | base = TCG_REG_TMP0; |
196 | ctx->z_mask = z_mask; | 458 | } |
197 | + ctx->s_mask = smask_from_zmask(z_mask); | 459 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) |
198 | 460 | tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base); | |
199 | return fold_masks(ctx, op); | 461 | base = TCG_REG_TMP0; |
200 | } | 462 | } |
201 | @@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op) | 463 | - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); |
202 | 464 | + tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64); | |
203 | static bool fold_exts(OptContext *ctx, TCGOp *op) | 465 | #endif |
204 | { | 466 | } |
205 | - uint64_t z_mask_old, z_mask, sign; | 467 | |
206 | + uint64_t s_mask_old, s_mask, z_mask, sign; | 468 | -static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, |
207 | bool type_change = false; | 469 | +static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val, |
208 | 470 | TCGReg base, MemOp opc) | |
209 | if (fold_const1(ctx, op)) { | 471 | { |
210 | return true; | 472 | /* Byte swapping is left to middle-end expansion. */ |
211 | } | 473 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, |
212 | 474 | ||
213 | - z_mask_old = z_mask = arg_info(op->args[1])->z_mask; | 475 | switch (opc & (MO_SSIZE)) { |
214 | + z_mask = arg_info(op->args[1])->z_mask; | 476 | case MO_8: |
215 | + s_mask = arg_info(op->args[1])->s_mask; | 477 | - tcg_out_opc_store(s, OPC_SB, base, lo, 0); |
216 | + s_mask_old = s_mask; | 478 | + tcg_out_opc_store(s, OPC_SB, base, val, 0); |
217 | 479 | break; | |
218 | switch (op->opc) { | 480 | case MO_16: |
219 | CASE_OP_32_64(ext8s): | 481 | - tcg_out_opc_store(s, OPC_SH, base, lo, 0); |
220 | @@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op) | 482 | + tcg_out_opc_store(s, OPC_SH, base, val, 0); |
221 | 483 | break; | |
222 | if (z_mask & sign) { | 484 | case MO_32: |
223 | z_mask |= sign; | 485 | - tcg_out_opc_store(s, OPC_SW, base, lo, 0); |
224 | - } else if (!type_change) { | 486 | + tcg_out_opc_store(s, OPC_SW, base, val, 0); |
225 | - ctx->a_mask = z_mask_old ^ z_mask; | 487 | break; |
226 | } | 488 | case MO_64: |
227 | + s_mask |= sign << 1; | 489 | - if (TCG_TARGET_REG_BITS == 64) { |
228 | + | 490 | - tcg_out_opc_store(s, OPC_SD, base, lo, 0); |
229 | ctx->z_mask = z_mask; | 491 | - } else { |
230 | + ctx->s_mask = s_mask; | 492 | - tcg_out_opc_store(s, OPC_SW, base, lo, 0); |
231 | + if (!type_change) { | 493 | - tcg_out_opc_store(s, OPC_SW, base, hi, 4); |
232 | + ctx->a_mask = s_mask & ~s_mask_old; | 494 | - } |
233 | + } | 495 | + tcg_out_opc_store(s, OPC_SD, base, val, 0); |
234 | |||
235 | return fold_masks(ctx, op); | ||
236 | } | ||
237 | @@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op) | ||
238 | } | ||
239 | |||
240 | ctx->z_mask = z_mask; | ||
241 | + ctx->s_mask = smask_from_zmask(z_mask); | ||
242 | if (!type_change) { | ||
243 | ctx->a_mask = z_mask_old ^ z_mask; | ||
244 | } | ||
245 | @@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) | ||
246 | MemOp mop = get_memop(oi); | ||
247 | int width = 8 * memop_size(mop); | ||
248 | |||
249 | - if (!(mop & MO_SIGN) && width < 64) { | ||
250 | - ctx->z_mask = MAKE_64BIT_MASK(0, width); | ||
251 | + if (width < 64) { | ||
252 | + ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width); | ||
253 | + if (!(mop & MO_SIGN)) { | ||
254 | + ctx->z_mask = MAKE_64BIT_MASK(0, width); | ||
255 | + ctx->s_mask <<= 1; | ||
256 | + } | ||
257 | } | ||
258 | |||
259 | /* Opcodes that touch guest memory stop the mb optimization. */ | ||
260 | @@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
261 | |||
262 | static bool fold_sextract(OptContext *ctx, TCGOp *op) | ||
263 | { | ||
264 | - int64_t z_mask_old, z_mask; | ||
265 | + uint64_t z_mask, s_mask, s_mask_old; | ||
266 | + int pos = op->args[2]; | ||
267 | + int len = op->args[3]; | ||
268 | |||
269 | if (arg_is_const(op->args[1])) { | ||
270 | uint64_t t; | ||
271 | |||
272 | t = arg_info(op->args[1])->val; | ||
273 | - t = sextract64(t, op->args[2], op->args[3]); | ||
274 | + t = sextract64(t, pos, len); | ||
275 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
276 | } | ||
277 | |||
278 | - z_mask_old = arg_info(op->args[1])->z_mask; | ||
279 | - z_mask = sextract64(z_mask_old, op->args[2], op->args[3]); | ||
280 | - if (op->args[2] == 0 && z_mask >= 0) { | ||
281 | - ctx->a_mask = z_mask_old ^ z_mask; | ||
282 | - } | ||
283 | + z_mask = arg_info(op->args[1])->z_mask; | ||
284 | + z_mask = sextract64(z_mask, pos, len); | ||
285 | ctx->z_mask = z_mask; | ||
286 | |||
287 | + s_mask_old = arg_info(op->args[1])->s_mask; | ||
288 | + s_mask = sextract64(s_mask_old, pos, len); | ||
289 | + s_mask |= MAKE_64BIT_MASK(len, 64 - len); | ||
290 | + ctx->s_mask = s_mask; | ||
291 | + | ||
292 | + if (pos == 0) { | ||
293 | + ctx->a_mask = s_mask & ~s_mask_old; | ||
294 | + } | ||
295 | + | ||
296 | return fold_masks(ctx, op); | ||
297 | } | ||
298 | |||
299 | @@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) | ||
300 | { | ||
301 | /* We can't do any folding with a load, but we can record bits. */ | ||
302 | switch (op->opc) { | ||
303 | + CASE_OP_32_64(ld8s): | ||
304 | + ctx->s_mask = MAKE_64BIT_MASK(8, 56); | ||
305 | + break; | ||
306 | CASE_OP_32_64(ld8u): | ||
307 | ctx->z_mask = MAKE_64BIT_MASK(0, 8); | ||
308 | + ctx->s_mask = MAKE_64BIT_MASK(9, 55); | ||
309 | + break; | ||
310 | + CASE_OP_32_64(ld16s): | ||
311 | + ctx->s_mask = MAKE_64BIT_MASK(16, 48); | ||
312 | break; | ||
313 | CASE_OP_32_64(ld16u): | ||
314 | ctx->z_mask = MAKE_64BIT_MASK(0, 16); | ||
315 | + ctx->s_mask = MAKE_64BIT_MASK(17, 47); | ||
316 | + break; | ||
317 | + case INDEX_op_ld32s_i64: | ||
318 | + ctx->s_mask = MAKE_64BIT_MASK(32, 32); | ||
319 | break; | ||
320 | case INDEX_op_ld32u_i64: | ||
321 | ctx->z_mask = MAKE_64BIT_MASK(0, 32); | ||
322 | + ctx->s_mask = MAKE_64BIT_MASK(33, 31); | ||
323 | break; | 496 | break; |
324 | default: | 497 | default: |
325 | g_assert_not_reached(); | 498 | g_assert_not_reached(); |
326 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 499 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, |
327 | ctx.type = TCG_TYPE_I32; | 500 | |
328 | } | 501 | static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) |
329 | 502 | { | |
330 | - /* Assume all bits affected, and no bits known zero. */ | 503 | - TCGReg addr_regl, addr_regh __attribute__((unused)); |
331 | + /* Assume all bits affected, no bits known zero, no sign reps. */ | 504 | - TCGReg data_regl, data_regh; |
332 | ctx.a_mask = -1; | 505 | + TCGReg addr_reg, data_reg; |
333 | ctx.z_mask = -1; | 506 | MemOpIdx oi; |
334 | + ctx.s_mask = 0; | 507 | MemOp opc; |
335 | 508 | #if defined(CONFIG_SOFTMMU) | |
336 | /* | 509 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) |
337 | * Process each opcode. | 510 | #endif |
338 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 511 | TCGReg base; |
339 | case INDEX_op_extrh_i64_i32: | 512 | |
340 | done = fold_extu(&ctx, op); | 513 | - data_regl = *args++; |
341 | break; | 514 | - data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); |
342 | + CASE_OP_32_64(ld8s): | 515 | - addr_regl = *args++; |
343 | CASE_OP_32_64(ld8u): | 516 | - addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); |
344 | + CASE_OP_32_64(ld16s): | 517 | + data_reg = *args++; |
345 | CASE_OP_32_64(ld16u): | 518 | + addr_reg = *args++; |
346 | + case INDEX_op_ld32s_i64: | 519 | oi = *args++; |
347 | case INDEX_op_ld32u_i64: | 520 | opc = get_memop(oi); |
348 | done = fold_tcg_ld(&ctx, op); | 521 | |
349 | break; | 522 | #if defined(CONFIG_SOFTMMU) |
523 | - base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0); | ||
524 | - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
525 | - add_qemu_ldst_label(s, 0, oi, | ||
526 | - (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), | ||
527 | - data_regl, data_regh, addr_regl, addr_regh, | ||
528 | - s->code_ptr, label_ptr); | ||
529 | + base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0); | ||
530 | + tcg_out_qemu_st_direct(s, data_reg, base, opc); | ||
531 | + add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), | ||
532 | + data_reg, addr_reg, s->code_ptr, label_ptr); | ||
533 | #else | ||
534 | a_bits = get_alignment_bits(opc); | ||
535 | if (a_bits) { | ||
536 | - tcg_out_test_alignment(s, false, addr_regl, a_bits); | ||
537 | + tcg_out_test_alignment(s, false, addr_reg, a_bits); | ||
538 | } | ||
539 | - base = addr_regl; | ||
540 | - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { | ||
541 | + base = addr_reg; | ||
542 | + if (TARGET_LONG_BITS == 32) { | ||
543 | tcg_out_ext32u(s, TCG_REG_TMP0, base); | ||
544 | base = TCG_REG_TMP0; | ||
545 | } | ||
546 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
547 | tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base); | ||
548 | base = TCG_REG_TMP0; | ||
549 | } | ||
550 | - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
551 | + tcg_out_qemu_st_direct(s, data_reg, base, opc); | ||
552 | #endif | ||
553 | } | ||
554 | |||
555 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | ||
556 | case INDEX_op_brcond_i64: | ||
557 | tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); | ||
558 | break; | ||
559 | - case INDEX_op_brcond2_i32: | ||
560 | - tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5])); | ||
561 | - break; | ||
562 | |||
563 | case INDEX_op_setcond_i32: | ||
564 | case INDEX_op_setcond_i64: | ||
565 | tcg_out_setcond(s, args[3], a0, a1, a2); | ||
566 | break; | ||
567 | - case INDEX_op_setcond2_i32: | ||
568 | - tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); | ||
569 | - break; | ||
570 | |||
571 | case INDEX_op_qemu_ld_i32: | ||
572 | tcg_out_qemu_ld(s, args, false); | ||
573 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) | ||
574 | case INDEX_op_sub2_i64: | ||
575 | return C_O2_I4(r, r, rZ, rZ, rM, rM); | ||
576 | |||
577 | - case INDEX_op_brcond2_i32: | ||
578 | - return C_O0_I4(rZ, rZ, rZ, rZ); | ||
579 | - | ||
580 | - case INDEX_op_setcond2_i32: | ||
581 | - return C_O1_I4(r, rZ, rZ, rZ, rZ); | ||
582 | - | ||
583 | case INDEX_op_qemu_ld_i32: | ||
584 | - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS | ||
585 | - ? C_O1_I1(r, L) : C_O1_I2(r, L, L)); | ||
586 | - case INDEX_op_qemu_st_i32: | ||
587 | - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS | ||
588 | - ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L)); | ||
589 | case INDEX_op_qemu_ld_i64: | ||
590 | - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) | ||
591 | - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L) | ||
592 | - : C_O2_I2(r, r, L, L)); | ||
593 | + return C_O1_I1(r, L); | ||
594 | + case INDEX_op_qemu_st_i32: | ||
595 | case INDEX_op_qemu_st_i64: | ||
596 | - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L) | ||
597 | - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L) | ||
598 | - : C_O0_I4(LZ, LZ, L, L)); | ||
599 | + return C_O0_I2(LZ, L); | ||
600 | |||
601 | default: | ||
602 | g_assert_not_reached(); | ||
603 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
604 | static void tcg_target_init(TCGContext *s) | ||
605 | { | ||
606 | tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; | ||
607 | - if (TCG_TARGET_REG_BITS == 64) { | ||
608 | - tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; | ||
609 | - } | ||
610 | + tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; | ||
611 | |||
612 | tcg_target_call_clobber_regs = -1u; | ||
613 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); | ||
350 | -- | 614 | -- |
351 | 2.25.1 | 615 | 2.34.1 |
352 | 616 | ||
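One more standalone check of the sign-tracking patch above, this time of smask_from_zmask(), which derives sign information from known zeros. Again only a sketch: clz64() is swapped for the GCC/Clang __builtin_clzll() so it compiles outside QEMU, and the ld8u input is taken from fold_tcg_ld() in the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* If the top bit of z_mask is clear, the value fits in an unsigned
 * field, so the corresponding signed field is one bit larger. */
static uint64_t smask_from_zmask(uint64_t zmask)
{
    int rep = __builtin_clzll(zmask);  /* clz64() in QEMU; zmask != 0 here */
    if (rep == 0) {
        return 0;                      /* msb unknown: no sign information */
    }
    rep -= 1;
    return ~(~0ull >> rep);
}

int main(void)
{
    /* ld8u: z_mask = 0xff, so bits 63..9 are known sign copies,
     * matching the patch's MAKE_64BIT_MASK(9, 55); prints
     * fffffffffffffe00. */
    printf("%016" PRIx64 "\n", smask_from_zmask(0xff));
    return 0;
}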
1 | From: Luis Pires <luis.pires@eldorado.org.br> | 1 | Interpret the variable argument placement in the caller. Pass data_type |
---|---|---|---|
2 | instead of is64 -- there are several places where we already convert back | ||
3 | from bool to type. Clean things up by using type throughout. | ||
2 | 4 | ||
3 | These will be used to implement new decimal floating point | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | instructions from Power ISA 3.1. | 6 | Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> |
5 | |||
6 | The remainder is now returned directly by divu128/divs128, | ||
7 | freeing up phigh to receive the high 64 bits of the quotient. | ||
8 | |||
9 | Signed-off-by: Luis Pires <luis.pires@eldorado.org.br> | ||
10 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Message-Id: <20211025191154.350831-4-luis.pires@eldorado.org.br> | ||
12 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
13 | --- | 8 | --- |
14 | include/hw/clock.h | 6 +- | 9 | tcg/riscv/tcg-target.c.inc | 66 ++++++++++++++------------------------ |
15 | include/qemu/host-utils.h | 20 ++++-- | 10 | 1 file changed, 24 insertions(+), 42 deletions(-) |
16 | target/ppc/int_helper.c | 9 +-- | ||
17 | util/host-utils.c | 133 +++++++++++++++++++++++++------------- | ||
18 | 4 files changed, 108 insertions(+), 60 deletions(-) | ||
19 | 11 | ||
20 | diff --git a/include/hw/clock.h b/include/hw/clock.h | 12 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc |
21 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/include/hw/clock.h | 14 | --- a/tcg/riscv/tcg-target.c.inc |
23 | +++ b/include/hw/clock.h | 15 | +++ b/tcg/riscv/tcg-target.c.inc |
24 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns) | 16 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
25 | if (clk->period == 0) { | 17 | #endif /* CONFIG_SOFTMMU */ |
26 | return 0; | 18 | |
27 | } | 19 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val, |
28 | - /* | 20 | - TCGReg base, MemOp opc, bool is_64) |
29 | - * BUG: when CONFIG_INT128 is not defined, the current implementation of | 21 | + TCGReg base, MemOp opc, TCGType type) |
30 | - * divu128 does not return a valid truncated quotient, so the result will | ||
31 | - * be wrong. | ||
32 | - */ | ||
33 | + | ||
34 | divu128(&lo, &hi, clk->period); | ||
35 | return lo; | ||
36 | } | ||
37 | diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h | ||
38 | index XXXXXXX..XXXXXXX 100644 | ||
39 | --- a/include/qemu/host-utils.h | ||
40 | +++ b/include/qemu/host-utils.h | ||
41 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) | ||
42 | return (__int128_t)a * b / c; | ||
43 | } | ||
44 | |||
45 | -static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) | ||
46 | +static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh, | ||
47 | + uint64_t divisor) | ||
48 | { | 22 | { |
49 | __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; | 23 | /* Byte swapping is left to middle-end expansion. */ |
50 | __uint128_t result = dividend / divisor; | 24 | tcg_debug_assert((opc & MO_BSWAP) == 0); |
51 | + | 25 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val, |
52 | *plow = result; | 26 | tcg_out_opc_imm(s, OPC_LH, val, base, 0); |
53 | - *phigh = dividend % divisor; | 27 | break; |
54 | + *phigh = result >> 64; | 28 | case MO_UL: |
55 | + return dividend % divisor; | 29 | - if (is_64) { |
56 | } | 30 | + if (type == TCG_TYPE_I64) { |
57 | 31 | tcg_out_opc_imm(s, OPC_LWU, val, base, 0); | |
58 | -static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor) | 32 | break; |
59 | +static inline int64_t divs128(uint64_t *plow, int64_t *phigh, | ||
60 | + int64_t divisor) | ||
61 | { | ||
62 | - __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow; | ||
63 | + __int128_t dividend = ((__int128_t)*phigh << 64) | *plow; | ||
64 | __int128_t result = dividend / divisor; | ||
65 | + | ||
66 | *plow = result; | ||
67 | - *phigh = dividend % divisor; | ||
68 | + *phigh = result >> 64; | ||
69 | + return dividend % divisor; | ||
70 | } | ||
71 | #else | ||
72 | void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b); | ||
73 | void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b); | ||
74 | -void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); | ||
75 | -void divs128(int64_t *plow, int64_t *phigh, int64_t divisor); | ||
76 | +uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); | ||
77 | +int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor); | ||
78 | |||
79 | static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) | ||
80 | { | ||
81 | diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c | ||
82 | index XXXXXXX..XXXXXXX 100644 | ||
83 | --- a/target/ppc/int_helper.c | ||
84 | +++ b/target/ppc/int_helper.c | ||
85 | @@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) | ||
86 | |||
87 | uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) | ||
88 | { | ||
89 | - int64_t rt = 0; | ||
90 | + uint64_t rt = 0; | ||
91 | int64_t ra = (int64_t)rau; | ||
92 | int64_t rb = (int64_t)rbu; | ||
93 | int overflow = 0; | ||
94 | @@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) | ||
95 | int cr; | ||
96 | uint64_t lo_value; | ||
97 | uint64_t hi_value; | ||
98 | + uint64_t rem; | ||
99 | ppc_avr_t ret = { .u64 = { 0, 0 } }; | ||
100 | |||
101 | if (b->VsrSD(0) < 0) { | ||
102 | @@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) | ||
103 | * In that case, we leave r unchanged. | ||
104 | */ | ||
105 | } else { | ||
106 | - divu128(&lo_value, &hi_value, 1000000000000000ULL); | ||
107 | + rem = divu128(&lo_value, &hi_value, 1000000000000000ULL); | ||
108 | |||
109 | - for (i = 1; i < 16; hi_value /= 10, i++) { | ||
110 | - bcd_put_digit(&ret, hi_value % 10, i); | ||
111 | + for (i = 1; i < 16; rem /= 10, i++) { | ||
112 | + bcd_put_digit(&ret, rem % 10, i); | ||
113 | } | 33 | } |
114 | 34 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val, | |
115 | for (; i < 32; lo_value /= 10, i++) { | ||
116 | diff --git a/util/host-utils.c b/util/host-utils.c | ||
117 | index XXXXXXX..XXXXXXX 100644 | ||
118 | --- a/util/host-utils.c | ||
119 | +++ b/util/host-utils.c | ||
120 | @@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | - * Unsigned 128-by-64 division. Returns quotient via plow and | ||
125 | - * remainder via phigh. | ||
126 | - * The result must fit in 64 bits (plow) - otherwise, the result | ||
127 | - * is undefined. | ||
128 | - * This function will cause a division by zero if passed a zero divisor. | ||
129 | + * Unsigned 128-by-64 division. | ||
130 | + * Returns the quotient via plow and phigh. ||
131 | + * Returns the remainder via the function return value. ||
132 | + * Will cause a division by zero if passed a zero divisor. ||
133 | */ | ||
134 | -void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) | ||
135 | +uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) | ||
136 | { | ||
137 | uint64_t dhi = *phigh; | ||
138 | uint64_t dlo = *plow; | ||
139 | - unsigned i; | ||
140 | - uint64_t carry = 0; | ||
141 | + uint64_t rem, dhighest; | ||
142 | + int sh; | ||
143 | |||
144 | if (divisor == 0 || dhi == 0) { | ||
145 | *plow = dlo / divisor; | ||
146 | - *phigh = dlo % divisor; | ||
147 | + *phigh = 0; | ||
148 | + return dlo % divisor; | ||
149 | } else { | ||
150 | + sh = clz64(divisor); | ||
151 | |||
152 | - for (i = 0; i < 64; i++) { | ||
153 | - carry = dhi >> 63; | ||
154 | - dhi = (dhi << 1) | (dlo >> 63); | ||
155 | - if (carry || (dhi >= divisor)) { | ||
156 | - dhi -= divisor; | ||
157 | - carry = 1; | ||
158 | - } else { | ||
159 | - carry = 0; | ||
160 | + if (dhi < divisor) { | ||
161 | + if (sh != 0) { | ||
162 | + /* normalize the divisor, shifting the dividend accordingly */ | ||
163 | + divisor <<= sh; | ||
164 | + dhi = (dhi << sh) | (dlo >> (64 - sh)); | ||
165 | + dlo <<= sh; | ||
166 | } | ||
167 | - dlo = (dlo << 1) | carry; | ||
168 | + | ||
169 | + *phigh = 0; | ||
170 | + *plow = udiv_qrnnd(&rem, dhi, dlo, divisor); | ||
171 | + } else { | ||
172 | + if (sh != 0) { | ||
173 | + /* normalize the divisor, shifting the dividend accordingly */ | ||
174 | + divisor <<= sh; | ||
175 | + dhighest = dhi >> (64 - sh); | ||
176 | + dhi = (dhi << sh) | (dlo >> (64 - sh)); | ||
177 | + dlo <<= sh; | ||
178 | + | ||
179 | + *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor); | ||
180 | + } else { | ||
181 | + /* ||
182 | + * dhi >= divisor ||
183 | + * Since the MSB of divisor is set (sh == 0), ||
184 | + * (dhi - divisor) < divisor. ||
185 | + * ||
186 | + * Thus, the high part of the quotient is 1, and we can ||
187 | + * calculate the low part with a single call to udiv_qrnnd ||
188 | + * after subtracting divisor from dhi. ||
189 | + */ ||
190 | + dhi -= divisor; | ||
191 | + *phigh = 1; | ||
192 | + } | ||
193 | + | ||
194 | + *plow = udiv_qrnnd(&rem, dhi, dlo, divisor); | ||
195 | } | ||
196 | |||
197 | - *plow = dlo; | ||
198 | - *phigh = dhi; | ||
199 | + /* | ||
200 | + * Since the dividend/divisor might have been normalized, ||
201 | + * the remainder might also have to be shifted back. ||
202 | + */ | ||
203 | + return rem >> sh; | ||
204 | } | 35 | } |
205 | } | 36 | } |
206 | 37 | ||
207 | /* | 38 | -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) |
208 | - * Signed 128-by-64 division. Returns quotient via plow and | 39 | +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
209 | - * remainder via phigh. | 40 | + MemOpIdx oi, TCGType data_type) |
210 | - * The result must fit in 64 bits (plow) - otherwise, the result | ||
211 | - * is undefined. | ||
212 | - * This function will cause a division by zero if passed a zero divisor. | ||
213 | + * Signed 128-by-64 division. | ||
214 | + * Returns the quotient via plow and phigh. ||
215 | + * Returns the remainder via the function return value. ||
216 | */ | ||
217 | -void divs128(int64_t *plow, int64_t *phigh, int64_t divisor) | ||
218 | +int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor) | ||
219 | { | 41 | { |
220 | - int sgn_dvdnd = *phigh < 0; | 42 | - TCGReg addr_reg, data_reg; |
221 | - int sgn_divsr = divisor < 0; | 43 | - MemOpIdx oi; |
222 | + bool neg_quotient = false, neg_remainder = false; | 44 | - MemOp opc; |
223 | + uint64_t unsig_hi = *phigh, unsig_lo = *plow; | 45 | -#if defined(CONFIG_SOFTMMU) |
224 | + uint64_t rem; | 46 | - tcg_insn_unit *label_ptr[1]; |
225 | 47 | -#else | |
226 | - if (sgn_dvdnd) { | 48 | - unsigned a_bits; |
227 | - *plow = ~(*plow); | 49 | -#endif |
228 | - *phigh = ~(*phigh); | 50 | + MemOp opc = get_memop(oi); |
229 | - if (*plow == (int64_t)-1) { | 51 | TCGReg base; |
230 | + if (*phigh < 0) { | 52 | |
231 | + neg_quotient = !neg_quotient; | 53 | - data_reg = *args++; |
232 | + neg_remainder = !neg_remainder; | 54 | - addr_reg = *args++; |
55 | - oi = *args++; | ||
56 | - opc = get_memop(oi); | ||
57 | - | ||
58 | #if defined(CONFIG_SOFTMMU) | ||
59 | + tcg_insn_unit *label_ptr[1]; | ||
233 | + | 60 | + |
234 | + if (unsig_lo == 0) { | 61 | base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1); |
235 | + unsig_hi = -unsig_hi; | 62 | - tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64); |
236 | + } else { | 63 | - add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), |
237 | + unsig_hi = ~unsig_hi; | 64 | - data_reg, addr_reg, s->code_ptr, label_ptr); |
238 | + unsig_lo = -unsig_lo; | 65 | + tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type); |
239 | + } | 66 | + add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg, |
240 | + } | 67 | + s->code_ptr, label_ptr); |
241 | + | 68 | #else |
242 | + if (divisor < 0) { | 69 | - a_bits = get_alignment_bits(opc); |
243 | + neg_quotient = !neg_quotient; | 70 | + unsigned a_bits = get_alignment_bits(opc); |
244 | + | 71 | if (a_bits) { |
245 | + divisor = -divisor; | 72 | tcg_out_test_alignment(s, true, addr_reg, a_bits); |
246 | + } | ||
247 | + | ||
248 | + rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor); | ||
249 | + | ||
250 | + if (neg_quotient) { | ||
251 | + if (unsig_lo == 0) { | ||
252 | + *phigh = -unsig_hi; | ||
253 | *plow = 0; | ||
254 | - (*phigh)++; | ||
255 | - } else { | ||
256 | - (*plow)++; | ||
257 | - } | ||
258 | + } else { | ||
259 | + *phigh = ~unsig_hi; | ||
260 | + *plow = -unsig_lo; | ||
261 | + } | ||
262 | + } else { | ||
263 | + *phigh = unsig_hi; | ||
264 | + *plow = unsig_lo; | ||
265 | } | 73 | } |
266 | 74 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | |
267 | - if (sgn_divsr) { | 75 | tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base); |
268 | - divisor = 0 - divisor; | 76 | base = TCG_REG_TMP0; |
269 | - } | 77 | } |
270 | - | 78 | - tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64); |
271 | - divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor); | 79 | + tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type); |
272 | - | 80 | #endif |
273 | - if (sgn_dvdnd ^ sgn_divsr) { | 81 | } |
274 | - *plow = 0 - *plow; | 82 | |
275 | + if (neg_remainder) { | 83 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val, |
276 | + return -rem; | ||
277 | + } else { | ||
278 | + return rem; | ||
279 | } | 84 | } |
280 | } | 85 | } |
281 | #endif | 86 | |
87 | -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
88 | +static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
89 | + MemOpIdx oi, TCGType data_type) | ||
90 | { | ||
91 | - TCGReg addr_reg, data_reg; | ||
92 | - MemOpIdx oi; | ||
93 | - MemOp opc; | ||
94 | -#if defined(CONFIG_SOFTMMU) | ||
95 | - tcg_insn_unit *label_ptr[1]; | ||
96 | -#else | ||
97 | - unsigned a_bits; | ||
98 | -#endif | ||
99 | + MemOp opc = get_memop(oi); | ||
100 | TCGReg base; | ||
101 | |||
102 | - data_reg = *args++; | ||
103 | - addr_reg = *args++; | ||
104 | - oi = *args++; | ||
105 | - opc = get_memop(oi); | ||
106 | - | ||
107 | #if defined(CONFIG_SOFTMMU) | ||
108 | + tcg_insn_unit *label_ptr[1]; | ||
109 | + | ||
110 | base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0); | ||
111 | tcg_out_qemu_st_direct(s, data_reg, base, opc); | ||
112 | - add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), | ||
113 | - data_reg, addr_reg, s->code_ptr, label_ptr); | ||
114 | + add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg, | ||
115 | + s->code_ptr, label_ptr); | ||
116 | #else | ||
117 | - a_bits = get_alignment_bits(opc); | ||
118 | + unsigned a_bits = get_alignment_bits(opc); | ||
119 | if (a_bits) { | ||
120 | tcg_out_test_alignment(s, false, addr_reg, a_bits); | ||
121 | } | ||
122 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | ||
123 | break; | ||
124 | |||
125 | case INDEX_op_qemu_ld_i32: | ||
126 | - tcg_out_qemu_ld(s, args, false); | ||
127 | + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); | ||
128 | break; | ||
129 | case INDEX_op_qemu_ld_i64: | ||
130 | - tcg_out_qemu_ld(s, args, true); | ||
131 | + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); | ||
132 | break; | ||
133 | case INDEX_op_qemu_st_i32: | ||
134 | - tcg_out_qemu_st(s, args, false); | ||
135 | + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); | ||
136 | break; | ||
137 | case INDEX_op_qemu_st_i64: | ||
138 | - tcg_out_qemu_st(s, args, true); | ||
139 | + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); | ||
140 | break; | ||
141 | |||
142 | case INDEX_op_extrh_i64_i32: | ||
282 | -- | 143 | -- |
283 | 2.25.1 | 144 | 2.34.1 |
284 | 145 | ||
285 | 146 | diff view generated by jsdifflib |
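A note on the divs128 rewrite in the patch above: the neg_quotient and neg_remainder flags reproduce C's truncating division, where the quotient is negative exactly when the operand signs differ and the remainder takes the sign of the dividend. A minimal standalone check of those rules (plain C, no QEMU dependencies):

    #include <assert.h>

    int main(void)
    {
        /* C99 '/' truncates toward zero; '%' follows the dividend's sign.
         * These are the identities the neg_quotient / neg_remainder
         * bookkeeping in divs128 extends to 128-bit operands. */
        assert(-7 / 2 == -3 && -7 % 2 == -1);
        assert(7 / -2 == -3 && 7 % -2 == 1);
        assert(-7 / -2 == 3 && -7 % -2 == -1);
        return 0;
    }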
1 | Add two additional helpers, fold_add2_i32 and fold_sub2_i32, | 1 | We need to set this in TCGLabelQemuLdst, so plumb this |
---|---|---|---|
2 | which will not be simple wrappers forever. | 2 | all the way through from tcg_out_op. |
3 | 3 | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 6 | --- |
8 | tcg/optimize.c | 70 +++++++++++++++++++++++++++++++------------------- | 7 | tcg/s390x/tcg-target.c.inc | 22 ++++++++++++++-------- |
9 | 1 file changed, 44 insertions(+), 26 deletions(-) | 8 | 1 file changed, 14 insertions(+), 8 deletions(-) |
10 | 9 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc |
12 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 12 | --- a/tcg/s390x/tcg-target.c.inc |
14 | +++ b/tcg/optimize.c | 13 | +++ b/tcg/s390x/tcg-target.c.inc |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op) | 14 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, |
16 | return fold_const2(ctx, op); | ||
17 | } | 15 | } |
18 | 16 | ||
19 | +static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add) | 17 | static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, |
20 | +{ | 18 | - TCGReg data, TCGReg addr, |
21 | + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) && | 19 | + TCGType type, TCGReg data, TCGReg addr, |
22 | + arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { | 20 | tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) |
23 | + uint32_t al = arg_info(op->args[2])->val; | ||
24 | + uint32_t ah = arg_info(op->args[3])->val; | ||
25 | + uint32_t bl = arg_info(op->args[4])->val; | ||
26 | + uint32_t bh = arg_info(op->args[5])->val; | ||
27 | + uint64_t a = ((uint64_t)ah << 32) | al; | ||
28 | + uint64_t b = ((uint64_t)bh << 32) | bl; | ||
29 | + TCGArg rl, rh; | ||
30 | + TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32); | ||
31 | + | ||
32 | + if (add) { | ||
33 | + a += b; | ||
34 | + } else { | ||
35 | + a -= b; | ||
36 | + } | ||
37 | + | ||
38 | + rl = op->args[0]; | ||
39 | + rh = op->args[1]; | ||
40 | + tcg_opt_gen_movi(ctx, op, rl, (int32_t)a); | ||
41 | + tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32)); | ||
42 | + return true; | ||
43 | + } | ||
44 | + return false; | ||
45 | +} | ||
46 | + | ||
47 | +static bool fold_add2_i32(OptContext *ctx, TCGOp *op) | ||
48 | +{ | ||
49 | + return fold_addsub2_i32(ctx, op, true); | ||
50 | +} | ||
51 | + | ||
52 | static bool fold_and(OptContext *ctx, TCGOp *op) | ||
53 | { | 21 | { |
54 | return fold_const2(ctx, op); | 22 | TCGLabelQemuLdst *label = new_ldst_label(s); |
55 | @@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op) | 23 | |
56 | return fold_const2(ctx, op); | 24 | label->is_ld = is_ld; |
25 | label->oi = oi; | ||
26 | + label->type = type; | ||
27 | label->datalo_reg = data; | ||
28 | label->addrlo_reg = addr; | ||
29 | label->raddr = tcg_splitwx_to_rx(raddr); | ||
30 | @@ -XXX,XX +XXX,XX @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, | ||
31 | #endif /* CONFIG_SOFTMMU */ | ||
32 | |||
33 | static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
34 | - MemOpIdx oi) | ||
35 | + MemOpIdx oi, TCGType data_type) | ||
36 | { | ||
37 | MemOp opc = get_memop(oi); | ||
38 | #ifdef CONFIG_SOFTMMU | ||
39 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
40 | |||
41 | tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); | ||
42 | |||
43 | - add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr); | ||
44 | + add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg, | ||
45 | + s->code_ptr, label_ptr); | ||
46 | #else | ||
47 | TCGReg index_reg; | ||
48 | tcg_target_long disp; | ||
49 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
57 | } | 50 | } |
58 | 51 | ||
59 | +static bool fold_sub2_i32(OptContext *ctx, TCGOp *op) | 52 | static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, |
60 | +{ | 53 | - MemOpIdx oi) |
61 | + return fold_addsub2_i32(ctx, op, false); | 54 | + MemOpIdx oi, TCGType data_type) |
62 | +} | ||
63 | + | ||
64 | static bool fold_xor(OptContext *ctx, TCGOp *op) | ||
65 | { | 55 | { |
66 | return fold_const2(ctx, op); | 56 | MemOp opc = get_memop(oi); |
67 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 57 | #ifdef CONFIG_SOFTMMU |
68 | } | 58 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, |
69 | break; | 59 | |
70 | 60 | tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); | |
71 | - case INDEX_op_add2_i32: | 61 | |
72 | - case INDEX_op_sub2_i32: | 62 | - add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr); |
73 | - if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) | 63 | + add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg, |
74 | - && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { | 64 | + s->code_ptr, label_ptr); |
75 | - uint32_t al = arg_info(op->args[2])->val; | 65 | #else |
76 | - uint32_t ah = arg_info(op->args[3])->val; | 66 | TCGReg index_reg; |
77 | - uint32_t bl = arg_info(op->args[4])->val; | 67 | tcg_target_long disp; |
78 | - uint32_t bh = arg_info(op->args[5])->val; | 68 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, |
79 | - uint64_t a = ((uint64_t)ah << 32) | al; | 69 | break; |
80 | - uint64_t b = ((uint64_t)bh << 32) | bl; | 70 | |
81 | - TCGArg rl, rh; | 71 | case INDEX_op_qemu_ld_i32: |
82 | - TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32); | 72 | - /* ??? Technically we can use a non-extending instruction. */ |
83 | - | 73 | + tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32); |
84 | - if (opc == INDEX_op_add2_i32) { | 74 | + break; |
85 | - a += b; | 75 | case INDEX_op_qemu_ld_i64: |
86 | - } else { | 76 | - tcg_out_qemu_ld(s, args[0], args[1], args[2]); |
87 | - a -= b; | 77 | + tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64); |
88 | - } | 78 | break; |
89 | - | 79 | case INDEX_op_qemu_st_i32: |
90 | - rl = op->args[0]; | 80 | + tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32); |
91 | - rh = op->args[1]; | 81 | + break; |
92 | - tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a); | 82 | case INDEX_op_qemu_st_i64: |
93 | - tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32)); | 83 | - tcg_out_qemu_st(s, args[0], args[1], args[2]); |
94 | - continue; | 84 | + tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64); |
95 | - } | 85 | break; |
96 | - break; | 86 | |
97 | 87 | case INDEX_op_ld16s_i64: | |
98 | default: | ||
99 | break; | ||
100 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
101 | CASE_OP_32_64_VEC(add): | ||
102 | done = fold_add(&ctx, op); | ||
103 | break; | ||
104 | + case INDEX_op_add2_i32: | ||
105 | + done = fold_add2_i32(&ctx, op); | ||
106 | + break; | ||
107 | CASE_OP_32_64_VEC(and): | ||
108 | done = fold_and(&ctx, op); | ||
109 | break; | ||
110 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
111 | CASE_OP_32_64_VEC(sub): | ||
112 | done = fold_sub(&ctx, op); | ||
113 | break; | ||
114 | + case INDEX_op_sub2_i32: | ||
115 | + done = fold_sub2_i32(&ctx, op); | ||
116 | + break; | ||
117 | CASE_OP_32_64_VEC(xor): | ||
118 | done = fold_xor(&ctx, op); | ||
119 | break; | ||
120 | -- | 88 | -- |
121 | 2.25.1 | 89 | 2.34.1 |
122 | 90 | ||
123 | 91 | diff view generated by jsdifflib |
1 | Sign repetitions are perforce all identical, whether they are 1 or 0. | 1 | Collect the 3 potential parts of the host address into a struct. |
---|---|---|---|
2 | Bitwise operations preserve the relative quantity of the repetitions. | 2 | Reorg tcg_out_qemu_{ld,st}_direct to use it. |
3 | 3 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 6 | --- |
9 | tcg/optimize.c | 29 +++++++++++++++++++++++++++++ | 7 | tcg/s390x/tcg-target.c.inc | 109 ++++++++++++++++++++----------------- |
10 | 1 file changed, 29 insertions(+) | 8 | 1 file changed, 60 insertions(+), 49 deletions(-) |
11 | 9 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc |
13 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 12 | --- a/tcg/s390x/tcg-target.c.inc |
15 | +++ b/tcg/optimize.c | 13 | +++ b/tcg/s390x/tcg-target.c.inc |
16 | @@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op) | 14 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest, |
17 | z2 = arg_info(op->args[2])->z_mask; | 15 | tcg_out_call_int(s, dest); |
18 | ctx->z_mask = z1 & z2; | 16 | } |
19 | 17 | ||
20 | + /* | 18 | +typedef struct { |
21 | + * Sign repetitions are perforce all identical, whether they are 1 or 0. | 19 | + TCGReg base; |
22 | + * Bitwise operations preserve the relative quantity of the repetitions. | 20 | + TCGReg index; |
23 | + */ | 21 | + int disp; |
24 | + ctx->s_mask = arg_info(op->args[1])->s_mask | 22 | +} HostAddress; |
25 | + & arg_info(op->args[2])->s_mask; | 23 | + |
26 | + | 24 | static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, |
27 | /* | 25 | - TCGReg base, TCGReg index, int disp) |
28 | * Known-zeros does not imply known-ones. Therefore unless | 26 | + HostAddress h) |
29 | * arg2 is constant, we can't infer affected bits from it. | 27 | { |
30 | @@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op) | 28 | switch (opc & (MO_SSIZE | MO_BSWAP)) { |
31 | } | 29 | case MO_UB: |
32 | ctx->z_mask = z1; | 30 | - tcg_out_insn(s, RXY, LLGC, data, base, index, disp); |
33 | 31 | + tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp); | |
34 | + ctx->s_mask = arg_info(op->args[1])->s_mask | 32 | break; |
35 | + & arg_info(op->args[2])->s_mask; | 33 | case MO_SB: |
36 | return fold_masks(ctx, op); | 34 | - tcg_out_insn(s, RXY, LGB, data, base, index, disp); |
37 | } | 35 | + tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp); |
38 | 36 | break; | |
39 | @@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op) | 37 | |
40 | fold_xi_to_not(ctx, op, 0)) { | 38 | case MO_UW | MO_BSWAP: |
41 | return true; | 39 | /* swapped unsigned halfword load with upper bits zeroed */ |
42 | } | 40 | - tcg_out_insn(s, RXY, LRVH, data, base, index, disp); |
43 | + | 41 | + tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp); |
44 | + ctx->s_mask = arg_info(op->args[1])->s_mask | 42 | tcg_out_ext16u(s, data, data); |
45 | + & arg_info(op->args[2])->s_mask; | 43 | break; |
46 | return false; | 44 | case MO_UW: |
47 | } | 45 | - tcg_out_insn(s, RXY, LLGH, data, base, index, disp); |
48 | 46 | + tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp); | |
49 | @@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op) | 47 | break; |
50 | 48 | ||
51 | ctx->z_mask = arg_info(op->args[3])->z_mask | 49 | case MO_SW | MO_BSWAP: |
52 | | arg_info(op->args[4])->z_mask; | 50 | /* swapped sign-extended halfword load */ |
53 | + ctx->s_mask = arg_info(op->args[3])->s_mask | 51 | - tcg_out_insn(s, RXY, LRVH, data, base, index, disp); |
54 | + & arg_info(op->args[4])->s_mask; | 52 | + tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp); |
55 | 53 | tcg_out_ext16s(s, TCG_TYPE_REG, data, data); | |
56 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | 54 | break; |
57 | uint64_t tv = arg_info(op->args[3])->val; | 55 | case MO_SW: |
58 | @@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op) | 56 | - tcg_out_insn(s, RXY, LGH, data, base, index, disp); |
59 | fold_xi_to_not(ctx, op, -1)) { | 57 | + tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp); |
60 | return true; | 58 | break; |
61 | } | 59 | |
62 | + | 60 | case MO_UL | MO_BSWAP: |
63 | + ctx->s_mask = arg_info(op->args[1])->s_mask | 61 | /* swapped unsigned int load with upper bits zeroed */ |
64 | + & arg_info(op->args[2])->s_mask; | 62 | - tcg_out_insn(s, RXY, LRV, data, base, index, disp); |
65 | return false; | 63 | + tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp); |
66 | } | 64 | tcg_out_ext32u(s, data, data); |
67 | 65 | break; | |
68 | @@ -XXX,XX +XXX,XX @@ static bool fold_nor(OptContext *ctx, TCGOp *op) | 66 | case MO_UL: |
69 | fold_xi_to_not(ctx, op, 0)) { | 67 | - tcg_out_insn(s, RXY, LLGF, data, base, index, disp); |
70 | return true; | 68 | + tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp); |
71 | } | 69 | break; |
72 | + | 70 | |
73 | + ctx->s_mask = arg_info(op->args[1])->s_mask | 71 | case MO_SL | MO_BSWAP: |
74 | + & arg_info(op->args[2])->s_mask; | 72 | /* swapped sign-extended int load */ |
75 | return false; | 73 | - tcg_out_insn(s, RXY, LRV, data, base, index, disp); |
76 | } | 74 | + tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp); |
77 | 75 | tcg_out_ext32s(s, data, data); | |
78 | @@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op) | 76 | break; |
79 | return true; | 77 | case MO_SL: |
80 | } | 78 | - tcg_out_insn(s, RXY, LGF, data, base, index, disp); |
81 | 79 | + tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp); | |
82 | + ctx->s_mask = arg_info(op->args[1])->s_mask; | 80 | break; |
83 | + | 81 | |
84 | /* Because of fold_to_not, we want to always return true, via finish. */ | 82 | case MO_UQ | MO_BSWAP: |
85 | finish_folding(ctx, op); | 83 | - tcg_out_insn(s, RXY, LRVG, data, base, index, disp); |
86 | return true; | 84 | + tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp); |
87 | @@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op) | 85 | break; |
88 | 86 | case MO_UQ: | |
89 | ctx->z_mask = arg_info(op->args[1])->z_mask | 87 | - tcg_out_insn(s, RXY, LG, data, base, index, disp); |
90 | | arg_info(op->args[2])->z_mask; | 88 | + tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp); |
91 | + ctx->s_mask = arg_info(op->args[1])->s_mask | 89 | break; |
92 | + & arg_info(op->args[2])->s_mask; | 90 | |
93 | return fold_masks(ctx, op); | 91 | default: |
94 | } | 92 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, |
95 | 93 | } | |
96 | @@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op) | 94 | |
97 | fold_ix_to_not(ctx, op, 0)) { | 95 | static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, |
98 | return true; | 96 | - TCGReg base, TCGReg index, int disp) |
99 | } | 97 | + HostAddress h) |
100 | + | 98 | { |
101 | + ctx->s_mask = arg_info(op->args[1])->s_mask | 99 | switch (opc & (MO_SIZE | MO_BSWAP)) { |
102 | + & arg_info(op->args[2])->s_mask; | 100 | case MO_UB: |
103 | return false; | 101 | - if (disp >= 0 && disp < 0x1000) { |
104 | } | 102 | - tcg_out_insn(s, RX, STC, data, base, index, disp); |
105 | 103 | + if (h.disp >= 0 && h.disp < 0x1000) { | |
106 | @@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op) | 104 | + tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp); |
107 | 105 | } else { | |
108 | ctx->z_mask = arg_info(op->args[1])->z_mask | 106 | - tcg_out_insn(s, RXY, STCY, data, base, index, disp); |
109 | | arg_info(op->args[2])->z_mask; | 107 | + tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp); |
110 | + ctx->s_mask = arg_info(op->args[1])->s_mask | 108 | } |
111 | + & arg_info(op->args[2])->s_mask; | 109 | break; |
112 | return fold_masks(ctx, op); | 110 | |
113 | } | 111 | case MO_UW | MO_BSWAP: |
112 | - tcg_out_insn(s, RXY, STRVH, data, base, index, disp); | ||
113 | + tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp); | ||
114 | break; | ||
115 | case MO_UW: | ||
116 | - if (disp >= 0 && disp < 0x1000) { | ||
117 | - tcg_out_insn(s, RX, STH, data, base, index, disp); | ||
118 | + if (h.disp >= 0 && h.disp < 0x1000) { | ||
119 | + tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp); | ||
120 | } else { | ||
121 | - tcg_out_insn(s, RXY, STHY, data, base, index, disp); | ||
122 | + tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp); | ||
123 | } | ||
124 | break; | ||
125 | |||
126 | case MO_UL | MO_BSWAP: | ||
127 | - tcg_out_insn(s, RXY, STRV, data, base, index, disp); | ||
128 | + tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp); | ||
129 | break; | ||
130 | case MO_UL: | ||
131 | - if (disp >= 0 && disp < 0x1000) { | ||
132 | - tcg_out_insn(s, RX, ST, data, base, index, disp); | ||
133 | + if (h.disp >= 0 && h.disp < 0x1000) { | ||
134 | + tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp); | ||
135 | } else { | ||
136 | - tcg_out_insn(s, RXY, STY, data, base, index, disp); | ||
137 | + tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp); | ||
138 | } | ||
139 | break; | ||
140 | |||
141 | case MO_UQ | MO_BSWAP: | ||
142 | - tcg_out_insn(s, RXY, STRVG, data, base, index, disp); | ||
143 | + tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp); | ||
144 | break; | ||
145 | case MO_UQ: | ||
146 | - tcg_out_insn(s, RXY, STG, data, base, index, disp); | ||
147 | + tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp); | ||
148 | break; | ||
149 | |||
150 | default: | ||
151 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
152 | return tcg_out_fail_alignment(s, l); | ||
153 | } | ||
154 | |||
155 | -static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, | ||
156 | - TCGReg *index_reg, tcg_target_long *disp) | ||
157 | +static HostAddress tcg_prepare_user_ldst(TCGContext *s, TCGReg addr_reg) | ||
158 | { | ||
159 | + TCGReg index; | ||
160 | + int disp; | ||
161 | + | ||
162 | if (TARGET_LONG_BITS == 32) { | ||
163 | - tcg_out_ext32u(s, TCG_TMP0, *addr_reg); | ||
164 | - *addr_reg = TCG_TMP0; | ||
165 | + tcg_out_ext32u(s, TCG_TMP0, addr_reg); | ||
166 | + addr_reg = TCG_TMP0; | ||
167 | } | ||
168 | if (guest_base < 0x80000) { | ||
169 | - *index_reg = TCG_REG_NONE; | ||
170 | - *disp = guest_base; | ||
171 | + index = TCG_REG_NONE; | ||
172 | + disp = guest_base; | ||
173 | } else { | ||
174 | - *index_reg = TCG_GUEST_BASE_REG; | ||
175 | - *disp = 0; | ||
176 | + index = TCG_GUEST_BASE_REG; | ||
177 | + disp = 0; | ||
178 | } | ||
179 | + return (HostAddress){ .base = addr_reg, .index = index, .disp = disp }; | ||
180 | } | ||
181 | #endif /* CONFIG_SOFTMMU */ | ||
182 | |||
183 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
184 | MemOpIdx oi, TCGType data_type) | ||
185 | { | ||
186 | MemOp opc = get_memop(oi); | ||
187 | + HostAddress h; | ||
188 | + | ||
189 | #ifdef CONFIG_SOFTMMU | ||
190 | unsigned mem_index = get_mmuidx(oi); | ||
191 | tcg_insn_unit *label_ptr; | ||
192 | - TCGReg base_reg; | ||
193 | |||
194 | - base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1); | ||
195 | + h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1); | ||
196 | + h.index = TCG_REG_R2; | ||
197 | + h.disp = 0; | ||
198 | |||
199 | tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); | ||
200 | label_ptr = s->code_ptr; | ||
201 | s->code_ptr += 1; | ||
202 | |||
203 | - tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); | ||
204 | + tcg_out_qemu_ld_direct(s, opc, data_reg, h); | ||
205 | |||
206 | add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg, | ||
207 | s->code_ptr, label_ptr); | ||
208 | #else | ||
209 | - TCGReg index_reg; | ||
210 | - tcg_target_long disp; | ||
211 | unsigned a_bits = get_alignment_bits(opc); | ||
212 | |||
213 | if (a_bits) { | ||
214 | tcg_out_test_alignment(s, true, addr_reg, a_bits); | ||
215 | } | ||
216 | - tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); | ||
217 | - tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp); | ||
218 | + h = tcg_prepare_user_ldst(s, addr_reg); | ||
219 | + tcg_out_qemu_ld_direct(s, opc, data_reg, h); | ||
220 | #endif | ||
221 | } | ||
222 | |||
223 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
224 | MemOpIdx oi, TCGType data_type) | ||
225 | { | ||
226 | MemOp opc = get_memop(oi); | ||
227 | + HostAddress h; | ||
228 | + | ||
229 | #ifdef CONFIG_SOFTMMU | ||
230 | unsigned mem_index = get_mmuidx(oi); | ||
231 | tcg_insn_unit *label_ptr; | ||
232 | - TCGReg base_reg; | ||
233 | |||
234 | - base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0); | ||
235 | + h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0); | ||
236 | + h.index = TCG_REG_R2; | ||
237 | + h.disp = 0; | ||
238 | |||
239 | tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); | ||
240 | label_ptr = s->code_ptr; | ||
241 | s->code_ptr += 1; | ||
242 | |||
243 | - tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); | ||
244 | + tcg_out_qemu_st_direct(s, opc, data_reg, h); | ||
245 | |||
246 | add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg, | ||
247 | s->code_ptr, label_ptr); | ||
248 | #else | ||
249 | - TCGReg index_reg; | ||
250 | - tcg_target_long disp; | ||
251 | unsigned a_bits = get_alignment_bits(opc); | ||
252 | |||
253 | if (a_bits) { | ||
254 | tcg_out_test_alignment(s, false, addr_reg, a_bits); | ||
255 | } | ||
256 | - tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); | ||
257 | - tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp); | ||
258 | + h = tcg_prepare_user_ldst(s, addr_reg); | ||
259 | + tcg_out_qemu_st_direct(s, opc, data_reg, h); | ||
260 | #endif | ||
261 | } | ||
114 | 262 | ||
115 | -- | 263 | -- |
116 | 2.25.1 | 264 | 2.34.1 |
117 | 265 | ||
118 | 266 | diff view generated by jsdifflib |
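The HostAddress refactor above replaces three parallel base/index/disp parameters with one struct built via compound literals. A toy model of the pattern (the register numbers and the base + index + disp addressing are illustrative, not the real TCGReg machinery):

    #include <stdint.h>

    typedef struct {
        int base;   /* TCGReg in the real code */
        int index;
        int disp;
    } HostAddr;

    /* what an RXY-format access computes: base + index + displacement */
    static int64_t effective(const int64_t *regs, HostAddr h)
    {
        return regs[h.base] + regs[h.index] + h.disp;
    }

    int main(void)
    {
        int64_t regs[16] = { [2] = 0x1000, [5] = 0x20 };
        HostAddr h = { .base = 2, .index = 5, .disp = 8 };
        return effective(regs, h) == 0x1028 ? 0 : 1;
    }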
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 1 | In tcg_canonicalize_memop, we remove MO_SIGN from MO_32 operations |
---|---|---|---|
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 2 | with TCG_TYPE_I32. Thus this is never set. We already have an |
3 | identical test just above which does not include is_64 | ||
4 | |||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | --- | 7 | --- |
5 | tcg/optimize.c | 53 +++++++++++++++++++++++++++++--------------------- | 8 | tcg/sparc64/tcg-target.c.inc | 2 +- |
6 | 1 file changed, 31 insertions(+), 22 deletions(-) | 9 | 1 file changed, 1 insertion(+), 1 deletion(-) |
7 | 10 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc |
9 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
10 | --- a/tcg/optimize.c | 13 | --- a/tcg/sparc64/tcg-target.c.inc |
11 | +++ b/tcg/optimize.c | 14 | +++ b/tcg/sparc64/tcg-target.c.inc |
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op) | 15 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, |
13 | return fold_const2(ctx, op); | 16 | tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi); |
14 | } | 17 | |
15 | 18 | /* We let the helper sign-extend SB and SW, but leave SL for here. */ | |
16 | +static bool fold_dup(OptContext *ctx, TCGOp *op) | 19 | - if (is_64 && (memop & MO_SSIZE) == MO_SL) { |
17 | +{ | 20 | + if ((memop & MO_SSIZE) == MO_SL) { |
18 | + if (arg_is_const(op->args[1])) { | 21 | tcg_out_ext32s(s, data, TCG_REG_O0); |
19 | + uint64_t t = arg_info(op->args[1])->val; | 22 | } else { |
20 | + t = dup_const(TCGOP_VECE(op), t); | 23 | tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); |
21 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
22 | + } | ||
23 | + return false; | ||
24 | +} | ||
25 | + | ||
26 | +static bool fold_dup2(OptContext *ctx, TCGOp *op) | ||
27 | +{ | ||
28 | + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
29 | + uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32, | ||
30 | + arg_info(op->args[2])->val); | ||
31 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
32 | + } | ||
33 | + | ||
34 | + if (args_are_copies(op->args[1], op->args[2])) { | ||
35 | + op->opc = INDEX_op_dup_vec; | ||
36 | + TCGOP_VECE(op) = MO_32; | ||
37 | + } | ||
38 | + return false; | ||
39 | +} | ||
40 | + | ||
41 | static bool fold_eqv(OptContext *ctx, TCGOp *op) | ||
42 | { | ||
43 | return fold_const2(ctx, op); | ||
44 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
45 | done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
46 | break; | ||
47 | |||
48 | - case INDEX_op_dup_vec: | ||
49 | - if (arg_is_const(op->args[1])) { | ||
50 | - tmp = arg_info(op->args[1])->val; | ||
51 | - tmp = dup_const(TCGOP_VECE(op), tmp); | ||
52 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
53 | - continue; | ||
54 | - } | ||
55 | - break; | ||
56 | - | ||
57 | - case INDEX_op_dup2_vec: | ||
58 | - assert(TCG_TARGET_REG_BITS == 32); | ||
59 | - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
60 | - tcg_opt_gen_movi(&ctx, op, op->args[0], | ||
61 | - deposit64(arg_info(op->args[1])->val, 32, 32, | ||
62 | - arg_info(op->args[2])->val)); | ||
63 | - continue; | ||
64 | - } else if (args_are_copies(op->args[1], op->args[2])) { | ||
65 | - op->opc = INDEX_op_dup_vec; | ||
66 | - TCGOP_VECE(op) = MO_32; | ||
67 | - } | ||
68 | - break; | ||
69 | - | ||
70 | default: | ||
71 | break; | ||
72 | |||
73 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
74 | CASE_OP_32_64(divu): | ||
75 | done = fold_divide(&ctx, op); | ||
76 | break; | ||
77 | + case INDEX_op_dup_vec: | ||
78 | + done = fold_dup(&ctx, op); | ||
79 | + break; | ||
80 | + case INDEX_op_dup2_vec: | ||
81 | + done = fold_dup2(&ctx, op); | ||
82 | + break; | ||
83 | CASE_OP_32_64(eqv): | ||
84 | done = fold_eqv(&ctx, op); | ||
85 | break; | ||
86 | -- | 24 | -- |
87 | 2.25.1 | 25 | 2.34.1 |
88 | 26 | ||
89 | 27 | diff view generated by jsdifflib |
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 1 | We need to set this in TCGLabelQemuLdst, so plumb this |
---|---|---|---|
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 2 | all the way through from tcg_out_op. |
3 | |||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | --- | 6 | --- |
5 | tcg/optimize.c | 27 ++++++++++++++++----------- | 7 | tcg/sparc64/tcg-target.c.inc | 6 +++--- |
6 | 1 file changed, 16 insertions(+), 11 deletions(-) | 8 | 1 file changed, 3 insertions(+), 3 deletions(-) |
7 | 9 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc |
9 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
10 | --- a/tcg/optimize.c | 12 | --- a/tcg/sparc64/tcg-target.c.inc |
11 | +++ b/tcg/optimize.c | 13 | +++ b/tcg/sparc64/tcg-target.c.inc |
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) | 14 | @@ -XXX,XX +XXX,XX @@ static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = { |
13 | return false; | 15 | }; |
14 | } | 16 | |
15 | 17 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, | |
16 | +static bool fold_bswap(OptContext *ctx, TCGOp *op) | 18 | - MemOpIdx oi, bool is_64) |
17 | +{ | 19 | + MemOpIdx oi, TCGType data_type) |
18 | + if (arg_is_const(op->args[1])) { | ||
19 | + uint64_t t = arg_info(op->args[1])->val; | ||
20 | + | ||
21 | + t = do_constant_folding(op->opc, t, op->args[2]); | ||
22 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t); | ||
23 | + } | ||
24 | + return false; | ||
25 | +} | ||
26 | + | ||
27 | static bool fold_call(OptContext *ctx, TCGOp *op) | ||
28 | { | 20 | { |
29 | TCGContext *s = ctx->tcg; | 21 | MemOp memop = get_memop(oi); |
30 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 22 | tcg_insn_unit *label_ptr; |
31 | } | 23 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, |
32 | break; | 24 | break; |
33 | 25 | ||
34 | - CASE_OP_32_64(bswap16): | 26 | case INDEX_op_qemu_ld_i32: |
35 | - CASE_OP_32_64(bswap32): | 27 | - tcg_out_qemu_ld(s, a0, a1, a2, false); |
36 | - case INDEX_op_bswap64_i64: | 28 | + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); |
37 | - if (arg_is_const(op->args[1])) { | 29 | break; |
38 | - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | 30 | case INDEX_op_qemu_ld_i64: |
39 | - op->args[2]); | 31 | - tcg_out_qemu_ld(s, a0, a1, a2, true); |
40 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | 32 | + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); |
41 | - continue; | 33 | break; |
42 | - } | 34 | case INDEX_op_qemu_st_i32: |
43 | - break; | 35 | tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); |
44 | - | ||
45 | default: | ||
46 | break; | ||
47 | |||
48 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
49 | case INDEX_op_brcond2_i32: | ||
50 | done = fold_brcond2(&ctx, op); | ||
51 | break; | ||
52 | + CASE_OP_32_64(bswap16): | ||
53 | + CASE_OP_32_64(bswap32): | ||
54 | + case INDEX_op_bswap64_i64: | ||
55 | + done = fold_bswap(&ctx, op); | ||
56 | + break; | ||
57 | CASE_OP_32_64(clz): | ||
58 | CASE_OP_32_64(ctz): | ||
59 | done = fold_count_zeros(&ctx, op); | ||
60 | -- | 36 | -- |
61 | 2.25.1 | 37 | 2.34.1 |
62 | 38 | ||
63 | 39 | diff view generated by jsdifflib |
1 | From: Luis Pires <luis.pires@eldorado.org.br> | 1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
---|---|---|---|
2 | |||
3 | Move udiv_qrnnd() from include/fpu/softfloat-macros.h to host-utils, | ||
4 | so it can be reused by divu128(). | ||
5 | |||
6 | Signed-off-by: Luis Pires <luis.pires@eldorado.org.br> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <20211025191154.350831-3-luis.pires@eldorado.org.br> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 3 | --- |
11 | include/fpu/softfloat-macros.h | 82 ---------------------------------- | 4 | tcg/tcg.c | 13 +++++++++++++ |
12 | include/qemu/host-utils.h | 81 +++++++++++++++++++++++++++++++++ | 5 | tcg/tcg-ldst.c.inc | 14 -------------- |
13 | 2 files changed, 81 insertions(+), 82 deletions(-) | 6 | 2 files changed, 13 insertions(+), 14 deletions(-) |
14 | 7 | ||
15 | diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h | 8 | diff --git a/tcg/tcg.c b/tcg/tcg.c |
16 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/fpu/softfloat-macros.h | 10 | --- a/tcg/tcg.c |
18 | +++ b/include/fpu/softfloat-macros.h | 11 | +++ b/tcg/tcg.c |
19 | @@ -XXX,XX +XXX,XX @@ | 12 | @@ -XXX,XX +XXX,XX @@ typedef struct QEMU_PACKED { |
20 | * so some portions are provided under: | 13 | DebugFrameFDEHeader fde; |
21 | * the SoftFloat-2a license | 14 | } DebugFrameHeader; |
22 | * the BSD license | 15 | |
23 | - * GPL-v2-or-later | 16 | +typedef struct TCGLabelQemuLdst { |
24 | * | 17 | + bool is_ld; /* qemu_ld: true, qemu_st: false */ |
25 | * Any future contributions to this file after December 1st 2014 will be | 18 | + MemOpIdx oi; |
26 | * taken to be licensed under the Softfloat-2a license unless specifically | 19 | + TCGType type; /* result type of a load */ |
27 | @@ -XXX,XX +XXX,XX @@ this code that are retained. | 20 | + TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ |
28 | * THE POSSIBILITY OF SUCH DAMAGE. | 21 | + TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ |
29 | */ | 22 | + TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ |
30 | 23 | + TCGReg datahi_reg; /* reg index for high word to be loaded or stored */ | |
31 | -/* Portions of this work are licensed under the terms of the GNU GPL, | 24 | + const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */ |
32 | - * version 2 or later. See the COPYING file in the top-level directory. | 25 | + tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */ |
33 | - */ | 26 | + QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next; |
34 | - | 27 | +} TCGLabelQemuLdst; |
35 | #ifndef FPU_SOFTFLOAT_MACROS_H | 28 | + |
36 | #define FPU_SOFTFLOAT_MACROS_H | 29 | static void tcg_register_jit_int(const void *buf, size_t size, |
37 | 30 | const void *debug_frame, | |
38 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b) | 31 | size_t debug_frame_size) |
39 | 32 | diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc | |
40 | } | ||
41 | |||
42 | -/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd | ||
43 | - * (https://gmplib.org/repo/gmp/file/tip/longlong.h) | ||
44 | - * | ||
45 | - * Licensed under the GPLv2/LGPLv3 | ||
46 | - */ | ||
47 | -static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1, | ||
48 | - uint64_t n0, uint64_t d) | ||
49 | -{ | ||
50 | -#if defined(__x86_64__) | ||
51 | - uint64_t q; | ||
52 | - asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d)); | ||
53 | - return q; | ||
54 | -#elif defined(__s390x__) && !defined(__clang__) | ||
55 | - /* Need to use a TImode type to get an even register pair for DLGR. */ | ||
56 | - unsigned __int128 n = (unsigned __int128)n1 << 64 | n0; | ||
57 | - asm("dlgr %0, %1" : "+r"(n) : "r"(d)); | ||
58 | - *r = n >> 64; | ||
59 | - return n; | ||
60 | -#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7) | ||
61 | - /* From Power ISA 2.06, programming note for divdeu. */ | ||
62 | - uint64_t q1, q2, Q, r1, r2, R; | ||
63 | - asm("divdeu %0,%2,%4; divdu %1,%3,%4" | ||
64 | - : "=&r"(q1), "=r"(q2) | ||
65 | - : "r"(n1), "r"(n0), "r"(d)); | ||
66 | - r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */ | ||
67 | - r2 = n0 - (q2 * d); | ||
68 | - Q = q1 + q2; | ||
69 | - R = r1 + r2; | ||
70 | - if (R >= d || R < r2) { /* overflow implies R > d */ | ||
71 | - Q += 1; | ||
72 | - R -= d; | ||
73 | - } | ||
74 | - *r = R; | ||
75 | - return Q; | ||
76 | -#else | ||
77 | - uint64_t d0, d1, q0, q1, r1, r0, m; | ||
78 | - | ||
79 | - d0 = (uint32_t)d; | ||
80 | - d1 = d >> 32; | ||
81 | - | ||
82 | - r1 = n1 % d1; | ||
83 | - q1 = n1 / d1; | ||
84 | - m = q1 * d0; | ||
85 | - r1 = (r1 << 32) | (n0 >> 32); | ||
86 | - if (r1 < m) { | ||
87 | - q1 -= 1; | ||
88 | - r1 += d; | ||
89 | - if (r1 >= d) { | ||
90 | - if (r1 < m) { | ||
91 | - q1 -= 1; | ||
92 | - r1 += d; | ||
93 | - } | ||
94 | - } | ||
95 | - } | ||
96 | - r1 -= m; | ||
97 | - | ||
98 | - r0 = r1 % d1; | ||
99 | - q0 = r1 / d1; | ||
100 | - m = q0 * d0; | ||
101 | - r0 = (r0 << 32) | (uint32_t)n0; | ||
102 | - if (r0 < m) { | ||
103 | - q0 -= 1; | ||
104 | - r0 += d; | ||
105 | - if (r0 >= d) { | ||
106 | - if (r0 < m) { | ||
107 | - q0 -= 1; | ||
108 | - r0 += d; | ||
109 | - } | ||
110 | - } | ||
111 | - } | ||
112 | - r0 -= m; | ||
113 | - | ||
114 | - *r = r0; | ||
115 | - return (q1 << 32) | q0; | ||
116 | -#endif | ||
117 | -} | ||
118 | - | ||
119 | /*---------------------------------------------------------------------------- | ||
120 | | Returns an approximation to the square root of the 32-bit significand given | ||
121 | | by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of | ||
122 | diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h | ||
123 | index XXXXXXX..XXXXXXX 100644 | 33 | index XXXXXXX..XXXXXXX 100644 |
124 | --- a/include/qemu/host-utils.h | 34 | --- a/tcg/tcg-ldst.c.inc |
125 | +++ b/include/qemu/host-utils.h | 35 | +++ b/tcg/tcg-ldst.c.inc |
126 | @@ -XXX,XX +XXX,XX @@ | 36 | @@ -XXX,XX +XXX,XX @@ |
127 | * THE SOFTWARE. | 37 | * THE SOFTWARE. |
128 | */ | 38 | */ |
129 | 39 | ||
130 | +/* Portions of this work are licensed under the terms of the GNU GPL, | 40 | -typedef struct TCGLabelQemuLdst { |
131 | + * version 2 or later. See the COPYING file in the top-level directory. | 41 | - bool is_ld; /* qemu_ld: true, qemu_st: false */ |
132 | + */ | 42 | - MemOpIdx oi; |
133 | + | 43 | - TCGType type; /* result type of a load */ |
134 | #ifndef HOST_UTILS_H | 44 | - TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ |
135 | #define HOST_UTILS_H | 45 | - TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ |
136 | 46 | - TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ | |
137 | @@ -XXX,XX +XXX,XX @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift); | 47 | - TCGReg datahi_reg; /* reg index for high word to be loaded or stored */ |
48 | - const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */ | ||
49 | - tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */ | ||
50 | - QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next; | ||
51 | -} TCGLabelQemuLdst; | ||
52 | - | ||
53 | - | ||
54 | /* | ||
55 | * Generate TB finalization at the end of block | ||
138 | */ | 56 | */ |
139 | void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow); | ||
140 | |||
141 | +/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd | ||
142 | + * (https://gmplib.org/repo/gmp/file/tip/longlong.h) | ||
143 | + * | ||
144 | + * Licensed under the GPLv2/LGPLv3 | ||
145 | + */ | ||
146 | +static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1, | ||
147 | + uint64_t n0, uint64_t d) | ||
148 | +{ | ||
149 | +#if defined(__x86_64__) | ||
150 | + uint64_t q; | ||
151 | + asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d)); | ||
152 | + return q; | ||
153 | +#elif defined(__s390x__) && !defined(__clang__) | ||
154 | + /* Need to use a TImode type to get an even register pair for DLGR. */ | ||
155 | + unsigned __int128 n = (unsigned __int128)n1 << 64 | n0; | ||
156 | + asm("dlgr %0, %1" : "+r"(n) : "r"(d)); | ||
157 | + *r = n >> 64; | ||
158 | + return n; | ||
159 | +#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7) | ||
160 | + /* From Power ISA 2.06, programming note for divdeu. */ | ||
161 | + uint64_t q1, q2, Q, r1, r2, R; | ||
162 | + asm("divdeu %0,%2,%4; divdu %1,%3,%4" | ||
163 | + : "=&r"(q1), "=r"(q2) | ||
164 | + : "r"(n1), "r"(n0), "r"(d)); | ||
165 | + r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */ | ||
166 | + r2 = n0 - (q2 * d); | ||
167 | + Q = q1 + q2; | ||
168 | + R = r1 + r2; | ||
169 | + if (R >= d || R < r2) { /* overflow implies R > d */ | ||
170 | + Q += 1; | ||
171 | + R -= d; | ||
172 | + } | ||
173 | + *r = R; | ||
174 | + return Q; | ||
175 | +#else | ||
176 | + uint64_t d0, d1, q0, q1, r1, r0, m; | ||
177 | + | ||
178 | + d0 = (uint32_t)d; | ||
179 | + d1 = d >> 32; | ||
180 | + | ||
181 | + r1 = n1 % d1; | ||
182 | + q1 = n1 / d1; | ||
183 | + m = q1 * d0; | ||
184 | + r1 = (r1 << 32) | (n0 >> 32); | ||
185 | + if (r1 < m) { | ||
186 | + q1 -= 1; | ||
187 | + r1 += d; | ||
188 | + if (r1 >= d) { | ||
189 | + if (r1 < m) { | ||
190 | + q1 -= 1; | ||
191 | + r1 += d; | ||
192 | + } | ||
193 | + } | ||
194 | + } | ||
195 | + r1 -= m; | ||
196 | + | ||
197 | + r0 = r1 % d1; | ||
198 | + q0 = r1 / d1; | ||
199 | + m = q0 * d0; | ||
200 | + r0 = (r0 << 32) | (uint32_t)n0; | ||
201 | + if (r0 < m) { | ||
202 | + q0 -= 1; | ||
203 | + r0 += d; | ||
204 | + if (r0 >= d) { | ||
205 | + if (r0 < m) { | ||
206 | + q0 -= 1; | ||
207 | + r0 += d; | ||
208 | + } | ||
209 | + } | ||
210 | + } | ||
211 | + r0 -= m; | ||
212 | + | ||
213 | + *r = r0; | ||
214 | + return (q1 << 32) | q0; | ||
215 | +#endif | ||
216 | +} | ||
217 | + | ||
218 | #endif | ||
219 | -- | 57 | -- |
220 | 2.25.1 | 58 | 2.34.1 |
221 | 59 | ||
222 | 60 | diff view generated by jsdifflib |
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 1 | An inline function is safer than a macro, and REG_P |
---|---|---|---|
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 2 | was rather too generic. |
3 | |||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | --- | 6 | --- |
5 | tcg/optimize.c | 32 ++++++++++++++++++-------------- | 7 | tcg/tcg-internal.h | 4 ---- |
6 | 1 file changed, 18 insertions(+), 14 deletions(-) | 8 | tcg/tcg.c | 16 +++++++++++++--- |
9 | 2 files changed, 13 insertions(+), 7 deletions(-) | ||
7 | 10 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h |
9 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
10 | --- a/tcg/optimize.c | 13 | --- a/tcg/tcg-internal.h |
11 | +++ b/tcg/optimize.c | 14 | +++ b/tcg/tcg-internal.h |
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op) | 15 | @@ -XXX,XX +XXX,XX @@ typedef struct TCGCallArgumentLoc { |
13 | return true; | 16 | unsigned tmp_subindex : 2; |
17 | } TCGCallArgumentLoc; | ||
18 | |||
19 | -/* Avoid "unsigned < 0 is always false" Werror, when iarg_regs is empty. */ | ||
20 | -#define REG_P(L) \ | ||
21 | - ((int)(L)->arg_slot < (int)ARRAY_SIZE(tcg_target_call_iarg_regs)) | ||
22 | - | ||
23 | typedef struct TCGHelperInfo { | ||
24 | void *func; | ||
25 | const char *name; | ||
26 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/tcg/tcg.c | ||
29 | +++ b/tcg/tcg.c | ||
30 | @@ -XXX,XX +XXX,XX @@ static void init_ffi_layouts(void) | ||
14 | } | 31 | } |
15 | 32 | #endif /* CONFIG_TCG_INTERPRETER */ | |
16 | +static bool fold_count_zeros(OptContext *ctx, TCGOp *op) | 33 | |
34 | +static inline bool arg_slot_reg_p(unsigned arg_slot) | ||
17 | +{ | 35 | +{ |
18 | + if (arg_is_const(op->args[1])) { | 36 | + /* |
19 | + uint64_t t = arg_info(op->args[1])->val; | 37 | + * Split the sizeof away from the comparison to avoid Werror from |
20 | + | 38 | + * "unsigned < 0 is always false", when iarg_regs is empty. |
21 | + if (t != 0) { | 39 | + */ |
22 | + t = do_constant_folding(op->opc, t, 0); | 40 | + unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs); |
23 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t); | 41 | + return arg_slot < nreg; |
24 | + } | ||
25 | + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); | ||
26 | + } | ||
27 | + return false; | ||
28 | +} | 42 | +} |
29 | + | 43 | + |
30 | static bool fold_ctpop(OptContext *ctx, TCGOp *op) | 44 | typedef struct TCGCumulativeArgs { |
45 | int arg_idx; /* tcg_gen_callN args[] */ | ||
46 | int info_in_idx; /* TCGHelperInfo in[] */ | ||
47 | @@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s) | ||
48 | case TCG_CALL_ARG_NORMAL: | ||
49 | case TCG_CALL_ARG_EXTEND_U: | ||
50 | case TCG_CALL_ARG_EXTEND_S: | ||
51 | - if (REG_P(loc)) { | ||
52 | + if (arg_slot_reg_p(loc->arg_slot)) { | ||
53 | *la_temp_pref(ts) = 0; | ||
54 | break; | ||
55 | } | ||
56 | @@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s) | ||
57 | case TCG_CALL_ARG_NORMAL: | ||
58 | case TCG_CALL_ARG_EXTEND_U: | ||
59 | case TCG_CALL_ARG_EXTEND_S: | ||
60 | - if (REG_P(loc)) { | ||
61 | + if (arg_slot_reg_p(loc->arg_slot)) { | ||
62 | tcg_regset_set_reg(*la_temp_pref(ts), | ||
63 | tcg_target_call_iarg_regs[loc->arg_slot]); | ||
64 | } | ||
65 | @@ -XXX,XX +XXX,XX @@ static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts, | ||
66 | static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l, | ||
67 | TCGTemp *ts, TCGRegSet *allocated_regs) | ||
31 | { | 68 | { |
32 | return fold_const1(ctx, op); | 69 | - if (REG_P(l)) { |
33 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 70 | + if (arg_slot_reg_p(l->arg_slot)) { |
34 | } | 71 | TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot]; |
35 | break; | 72 | load_arg_reg(s, reg, ts, *allocated_regs); |
36 | 73 | tcg_regset_set_reg(*allocated_regs, reg); | |
37 | - CASE_OP_32_64(clz): | ||
38 | - CASE_OP_32_64(ctz): | ||
39 | - if (arg_is_const(op->args[1])) { | ||
40 | - TCGArg v = arg_info(op->args[1])->val; | ||
41 | - if (v != 0) { | ||
42 | - tmp = do_constant_folding(opc, v, 0); | ||
43 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
44 | - } else { | ||
45 | - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]); | ||
46 | - } | ||
47 | - continue; | ||
48 | - } | ||
49 | - break; | ||
50 | - | ||
51 | default: | ||
52 | break; | ||
53 | |||
54 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
55 | case INDEX_op_brcond2_i32: | ||
56 | done = fold_brcond2(&ctx, op); | ||
57 | break; | ||
58 | + CASE_OP_32_64(clz): | ||
59 | + CASE_OP_32_64(ctz): | ||
60 | + done = fold_count_zeros(&ctx, op); | ||
61 | + break; | ||
62 | CASE_OP_32_64(ctpop): | ||
63 | done = fold_ctpop(&ctx, op); | ||
64 | break; | ||
65 | -- | 74 | -- |
66 | 2.25.1 | 75 | 2.34.1 |
67 | 76 | ||
68 | 77 | diff view generated by jsdifflib |
1 | Pull the "op r, a, a => mov r, a" optimization into a function, | 1 | Unify all computation of argument stack offset in one function. |
---|---|---|---|
2 | and use it in the outer opcode fold functions. | 2 | This requires that we adjust ref_slot to be in the same units, |
3 | by adding max_reg_slots during init_call_layout. | ||
3 | 4 | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 7 | --- |
8 | tcg/optimize.c | 39 ++++++++++++++++++++++++--------------- | 8 | tcg/tcg.c | 29 +++++++++++++++++------------ |
9 | 1 file changed, 24 insertions(+), 15 deletions(-) | 9 | 1 file changed, 17 insertions(+), 12 deletions(-) |
10 | 10 | ||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 11 | diff --git a/tcg/tcg.c b/tcg/tcg.c |
12 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/optimize.c | 13 | --- a/tcg/tcg.c |
14 | +++ b/tcg/optimize.c | 14 | +++ b/tcg/tcg.c |
15 | @@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | 15 | @@ -XXX,XX +XXX,XX @@ static inline bool arg_slot_reg_p(unsigned arg_slot) |
16 | return false; | 16 | return arg_slot < nreg; |
17 | } | 17 | } |
18 | 18 | ||
19 | +/* If the binary operation has both arguments equal, fold to identity. */ | 19 | +static inline int arg_slot_stk_ofs(unsigned arg_slot) |
20 | +static bool fold_xx_to_x(OptContext *ctx, TCGOp *op) | ||
21 | +{ | 20 | +{ |
22 | + if (args_are_copies(op->args[1], op->args[2])) { | 21 | + unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long); |
23 | + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); | 22 | + unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs); |
24 | + } | 23 | + |
25 | + return false; | 24 | + tcg_debug_assert(stk_slot < max); |
25 | + return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long); | ||
26 | +} | 26 | +} |
27 | + | 27 | + |
28 | /* | 28 | typedef struct TCGCumulativeArgs { |
29 | * These outermost fold_<op> functions are sorted alphabetically. | 29 | int arg_idx; /* tcg_gen_callN args[] */ |
30 | + * | 30 | int info_in_idx; /* TCGHelperInfo in[] */ |
31 | + * The ordering of the transformations should be: | 31 | @@ -XXX,XX +XXX,XX @@ static void init_call_layout(TCGHelperInfo *info) |
32 | + * 1) those that produce a constant | 32 | } |
33 | + * 2) those that produce a copy | 33 | } |
34 | + * 3) those that produce information about the result value. | 34 | assert(ref_base + cum.ref_slot <= max_stk_slots); |
35 | */ | 35 | + ref_base += max_reg_slots; |
36 | 36 | ||
37 | static bool fold_add(OptContext *ctx, TCGOp *op) | 37 | if (ref_base != 0) { |
38 | @@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op) | 38 | for (int i = cum.info_in_idx - 1; i >= 0; --i) { |
39 | 39 | @@ -XXX,XX +XXX,XX @@ static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts, | |
40 | static bool fold_and(OptContext *ctx, TCGOp *op) | 40 | } |
41 | } | ||
42 | |||
43 | -static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts, | ||
44 | +static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts, | ||
45 | TCGRegSet allocated_regs) | ||
41 | { | 46 | { |
42 | - return fold_const2(ctx, op); | 47 | /* |
43 | + if (fold_const2(ctx, op) || | 48 | @@ -XXX,XX +XXX,XX @@ static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts, |
44 | + fold_xx_to_x(ctx, op)) { | 49 | */ |
45 | + return true; | 50 | temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0); |
46 | + } | 51 | tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, |
47 | + return false; | 52 | - TCG_TARGET_CALL_STACK_OFFSET + |
53 | - stk_slot * sizeof(tcg_target_long)); | ||
54 | + arg_slot_stk_ofs(arg_slot)); | ||
48 | } | 55 | } |
49 | 56 | ||
50 | static bool fold_andc(OptContext *ctx, TCGOp *op) | 57 | static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l, |
51 | @@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op) | 58 | @@ -XXX,XX +XXX,XX @@ static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l, |
52 | 59 | load_arg_reg(s, reg, ts, *allocated_regs); | |
53 | static bool fold_or(OptContext *ctx, TCGOp *op) | 60 | tcg_regset_set_reg(*allocated_regs, reg); |
61 | } else { | ||
62 | - load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs), | ||
63 | - ts, *allocated_regs); | ||
64 | + load_arg_stk(s, l->arg_slot, ts, *allocated_regs); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | -static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base, | ||
69 | +static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base, | ||
70 | intptr_t ref_off, TCGRegSet *allocated_regs) | ||
54 | { | 71 | { |
55 | - return fold_const2(ctx, op); | 72 | TCGReg reg; |
56 | + if (fold_const2(ctx, op) || | 73 | - int stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs); |
57 | + fold_xx_to_x(ctx, op)) { | 74 | |
58 | + return true; | 75 | - if (stk_slot < 0) { |
59 | + } | 76 | + if (arg_slot_reg_p(arg_slot)) { |
60 | + return false; | 77 | reg = tcg_target_call_iarg_regs[arg_slot]; |
78 | tcg_reg_free(s, reg, *allocated_regs); | ||
79 | tcg_out_addi_ptr(s, reg, ref_base, ref_off); | ||
80 | @@ -XXX,XX +XXX,XX @@ static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base, | ||
81 | *allocated_regs, 0, false); | ||
82 | tcg_out_addi_ptr(s, reg, ref_base, ref_off); | ||
83 | tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK, | ||
84 | - TCG_TARGET_CALL_STACK_OFFSET | ||
85 | - + stk_slot * sizeof(tcg_target_long)); | ||
86 | + arg_slot_stk_ofs(arg_slot)); | ||
87 | } | ||
61 | } | 88 | } |
62 | 89 | ||
63 | static bool fold_orc(OptContext *ctx, TCGOp *op) | 90 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) |
64 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 91 | case TCG_CALL_ARG_BY_REF: |
92 | load_arg_stk(s, loc->ref_slot, ts, allocated_regs); | ||
93 | load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK, | ||
94 | - TCG_TARGET_CALL_STACK_OFFSET | ||
95 | - + loc->ref_slot * sizeof(tcg_target_long), | ||
96 | + arg_slot_stk_ofs(loc->ref_slot), | ||
97 | &allocated_regs); | ||
65 | break; | 98 | break; |
66 | } | 99 | case TCG_CALL_ARG_BY_REF_N: |
67 | |||
68 | - /* Simplify expression for "op r, a, a => mov r, a" cases */ | ||
69 | - switch (opc) { | ||
70 | - CASE_OP_32_64_VEC(or): | ||
71 | - CASE_OP_32_64_VEC(and): | ||
72 | - if (args_are_copies(op->args[1], op->args[2])) { | ||
73 | - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
74 | - continue; | ||
75 | - } | ||
76 | - break; | ||
77 | - default: | ||
78 | - break; | ||
79 | - } | ||
80 | - | ||
81 | /* | ||
82 | * Process each opcode. | ||
83 | * Sorted alphabetically by opcode as much as possible. | ||
84 | -- | 100 | -- |
85 | 2.25.1 | 101 | 2.34.1 |
86 | 102 | ||
87 | 103 | diff view generated by jsdifflib |
1 | From: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr> | 1 | While the old type was correct in the ideal sense, some ABIs require |
---|---|---|---|
2 | the argument to be zero-extended. Using uint32_t for all such values | ||
3 | is a decent compromise. | ||
2 | 4 | ||
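The calling-convention point, reduced to a sketch: once a helper declares its small operand as uint32_t, the truncation happens at the store itself, so callers on ABIs without implicit sub-word zero-extension need no extra masking. store_byte below is a hypothetical stand-in, not a QEMU helper:

    #include <stdint.h>

    /* Hypothetical store-byte helper widened from uint8_t to uint32_t:
     * only the low 8 bits matter, and the store truncates naturally. */
    static void store_byte(uint8_t *p, uint32_t val)
    {
        *p = (uint8_t)val;
    }
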
3 | Addition of not and xor on 128-bit integers. | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | |||
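As a self-contained model of the fallback arithmetic being added, bitwise NOT and XOR distribute over the two 64-bit halves of the struct representation. I128 here is a simplified stand-in for QEMU's Int128:

    #include <stdint.h>

    typedef struct { uint64_t lo; int64_t hi; } I128;   /* simplified */

    static I128 i128_not(I128 a)
    {
        return (I128){ ~a.lo, ~a.hi };              /* per-half complement */
    }

    static I128 i128_xor(I128 a, I128 b)
    {
        return (I128){ a.lo ^ b.lo, a.hi ^ b.hi };  /* per-half xor */
    }
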
5 | Signed-off-by: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr> | ||
6 | Co-authored-by: Fabien Portas <fabien.portas@grenoble-inp.org> | ||
7 | Message-Id: <20211025122818.168890-3-frederic.petrot@univ-grenoble-alpes.fr> | ||
8 | [rth: Split out logical operations.] | ||
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 7 | --- |
12 | include/qemu/int128.h | 20 ++++++++++++++++++++ | 8 | include/tcg/tcg-ldst.h | 10 +++++++--- |
13 | 1 file changed, 20 insertions(+) | 9 | accel/tcg/cputlb.c | 6 +++--- |
10 | 2 files changed, 10 insertions(+), 6 deletions(-) | ||
14 | 11 | ||
15 | diff --git a/include/qemu/int128.h b/include/qemu/int128.h | 12 | diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h |
16 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/qemu/int128.h | 14 | --- a/include/tcg/tcg-ldst.h |
18 | +++ b/include/qemu/int128.h | 15 | +++ b/include/tcg/tcg-ldst.h |
19 | @@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a) | 16 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, |
20 | return a; | 17 | tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, |
18 | MemOpIdx oi, uintptr_t retaddr); | ||
19 | |||
20 | -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, | ||
21 | +/* | ||
22 | + * Value extended to at least uint32_t, so that some ABIs do not require | ||
23 | + * zero-extension from uint8_t or uint16_t. | ||
24 | + */ | ||
25 | +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
26 | MemOpIdx oi, uintptr_t retaddr); | ||
27 | -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, | ||
28 | +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
29 | MemOpIdx oi, uintptr_t retaddr); | ||
30 | void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
31 | MemOpIdx oi, uintptr_t retaddr); | ||
32 | void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | ||
33 | MemOpIdx oi, uintptr_t retaddr); | ||
34 | -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, | ||
35 | +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
36 | MemOpIdx oi, uintptr_t retaddr); | ||
37 | void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
38 | MemOpIdx oi, uintptr_t retaddr); | ||
39 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/accel/tcg/cputlb.c | ||
42 | +++ b/accel/tcg/cputlb.c | ||
43 | @@ -XXX,XX +XXX,XX @@ full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | ||
44 | store_helper(env, addr, val, oi, retaddr, MO_UB); | ||
21 | } | 45 | } |
22 | 46 | ||
23 | +static inline Int128 int128_not(Int128 a) | 47 | -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, |
24 | +{ | 48 | +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
25 | + return ~a; | 49 | MemOpIdx oi, uintptr_t retaddr) |
26 | +} | ||
27 | + | ||
28 | static inline Int128 int128_and(Int128 a, Int128 b) | ||
29 | { | 50 | { |
30 | return a & b; | 51 | full_stb_mmu(env, addr, val, oi, retaddr); |
31 | @@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b) | 52 | @@ -XXX,XX +XXX,XX @@ static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
32 | return a | b; | 53 | store_helper(env, addr, val, oi, retaddr, MO_LEUW); |
33 | } | 54 | } |
34 | 55 | ||
35 | +static inline Int128 int128_xor(Int128 a, Int128 b) | 56 | -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
36 | +{ | 57 | +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
37 | + return a ^ b; | 58 | MemOpIdx oi, uintptr_t retaddr) |
38 | +} | ||
39 | + | ||
40 | static inline Int128 int128_rshift(Int128 a, int n) | ||
41 | { | 59 | { |
42 | return a >> n; | 60 | full_le_stw_mmu(env, addr, val, oi, retaddr); |
43 | @@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a) | 61 | @@ -XXX,XX +XXX,XX @@ static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
44 | return int128_make128(a, (a < 0) ? -1 : 0); | 62 | store_helper(env, addr, val, oi, retaddr, MO_BEUW); |
45 | } | 63 | } |
46 | 64 | ||
47 | +static inline Int128 int128_not(Int128 a) | 65 | -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
48 | +{ | 66 | +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
49 | + return int128_make128(~a.lo, ~a.hi); | 67 | MemOpIdx oi, uintptr_t retaddr) |
50 | +} | ||
51 | + | ||
52 | static inline Int128 int128_and(Int128 a, Int128 b) | ||
53 | { | 68 | { |
54 | return int128_make128(a.lo & b.lo, a.hi & b.hi); | 69 | full_be_stw_mmu(env, addr, val, oi, retaddr); |
55 | @@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b) | ||
56 | return int128_make128(a.lo | b.lo, a.hi | b.hi); | ||
57 | } | ||
58 | |||
59 | +static inline Int128 int128_xor(Int128 a, Int128 b) | ||
60 | +{ | ||
61 | + return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi); | ||
62 | +} | ||
63 | + | ||
64 | static inline Int128 int128_rshift(Int128 a, int n) | ||
65 | { | ||
66 | int64_t h; | ||
67 | -- | 70 | -- |
68 | 2.25.1 | 71 | 2.34.1 |
69 | 72 | ||
70 | 73 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Luis Pires <luis.pires@eldorado.org.br> | ||
2 | 1 | ||
3 | Signed-off-by: Luis Pires <luis.pires@eldorado.org.br> | ||
4 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Message-Id: <20211025191154.350831-5-luis.pires@eldorado.org.br> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tests/unit/test-div128.c | 197 +++++++++++++++++++++++++++++++++++++++ | ||
9 | tests/unit/meson.build | 1 + | ||
10 | 2 files changed, 198 insertions(+) | ||
11 | create mode 100644 tests/unit/test-div128.c | ||
12 | |||
13 | diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c | ||
14 | new file mode 100644 | ||
15 | index XXXXXXX..XXXXXXX | ||
16 | --- /dev/null | ||
17 | +++ b/tests/unit/test-div128.c | ||
18 | @@ -XXX,XX +XXX,XX @@ | ||
19 | +/* | ||
20 | + * Test 128-bit division functions | ||
21 | + * | ||
22 | + * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br) | ||
23 | + * | ||
24 | + * This library is free software; you can redistribute it and/or | ||
25 | + * modify it under the terms of the GNU Lesser General Public | ||
26 | + * License as published by the Free Software Foundation; either | ||
27 | + * version 2.1 of the License, or (at your option) any later version. | ||
28 | + * | ||
29 | + * This library is distributed in the hope that it will be useful, | ||
30 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
31 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
32 | + * Lesser General Public License for more details. | ||
33 | + * | ||
34 | + * You should have received a copy of the GNU Lesser General Public | ||
35 | + * License along with this library; if not, see <http://www.gnu.org/licenses/>. | ||
36 | + */ | ||
37 | + | ||
38 | +#include "qemu/osdep.h" | ||
39 | +#include "qemu/host-utils.h" | ||
40 | + | ||
41 | +typedef struct { | ||
42 | + uint64_t high; | ||
43 | + uint64_t low; | ||
44 | + uint64_t rhigh; | ||
45 | + uint64_t rlow; | ||
46 | + uint64_t divisor; | ||
47 | + uint64_t remainder; | ||
48 | +} test_data_unsigned; | ||
49 | + | ||
50 | +typedef struct { | ||
51 | + int64_t high; | ||
52 | + uint64_t low; | ||
53 | + int64_t rhigh; | ||
54 | + uint64_t rlow; | ||
55 | + int64_t divisor; | ||
56 | + int64_t remainder; | ||
57 | +} test_data_signed; | ||
58 | + | ||
59 | +static const test_data_unsigned test_table_unsigned[] = { | ||
60 | + /* Dividend fits in 64 bits */ | ||
61 | + { 0x0000000000000000ULL, 0x0000000000000000ULL, | ||
62 | + 0x0000000000000000ULL, 0x0000000000000000ULL, | ||
63 | + 0x0000000000000001ULL, 0x0000000000000000ULL}, | ||
64 | + { 0x0000000000000000ULL, 0x0000000000000001ULL, | ||
65 | + 0x0000000000000000ULL, 0x0000000000000001ULL, | ||
66 | + 0x0000000000000001ULL, 0x0000000000000000ULL}, | ||
67 | + { 0x0000000000000000ULL, 0x0000000000000003ULL, | ||
68 | + 0x0000000000000000ULL, 0x0000000000000001ULL, | ||
69 | + 0x0000000000000002ULL, 0x0000000000000001ULL}, | ||
70 | + { 0x0000000000000000ULL, 0x8000000000000000ULL, | ||
71 | + 0x0000000000000000ULL, 0x8000000000000000ULL, | ||
72 | + 0x0000000000000001ULL, 0x0000000000000000ULL}, | ||
73 | + { 0x0000000000000000ULL, 0xa000000000000000ULL, | ||
74 | + 0x0000000000000000ULL, 0x0000000000000002ULL, | ||
75 | + 0x4000000000000000ULL, 0x2000000000000000ULL}, | ||
76 | + { 0x0000000000000000ULL, 0x8000000000000000ULL, | ||
77 | + 0x0000000000000000ULL, 0x0000000000000001ULL, | ||
78 | + 0x8000000000000000ULL, 0x0000000000000000ULL}, | ||
79 | + | ||
80 | + /* Dividend > 64 bits, with MSB 0 */ | ||
81 | + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, | ||
82 | + 0x123456789abcdefeULL, 0xefedcba987654321ULL, | ||
83 | + 0x0000000000000001ULL, 0x0000000000000000ULL}, | ||
84 | + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, | ||
85 | + 0x0000000000000001ULL, 0x000000000000000dULL, | ||
86 | + 0x123456789abcdefeULL, 0x03456789abcdf03bULL}, | ||
87 | + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, | ||
88 | + 0x0123456789abcdefULL, 0xeefedcba98765432ULL, | ||
89 | + 0x0000000000000010ULL, 0x0000000000000001ULL}, | ||
90 | + | ||
91 | + /* Dividend > 64 bits, with MSB 1 */ | ||
92 | + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, | ||
93 | + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, | ||
94 | + 0x0000000000000001ULL, 0x0000000000000000ULL}, | ||
95 | + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, | ||
96 | + 0x0000000000000001ULL, 0x0000000000000000ULL, | ||
97 | + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL}, | ||
98 | + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, | ||
99 | + 0x0feeddccbbaa9988ULL, 0x7766554433221100ULL, | ||
100 | + 0x0000000000000010ULL, 0x000000000000000fULL}, | ||
101 | + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, | ||
102 | + 0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL, | ||
103 | + 0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL}, | ||
104 | + | ||
105 | + /** | ||
106 | + * Divisor == 64 bits, with MSB 1 | ||
107 | + * and high 64 bits of dividend >= divisor | ||
108 | + * (for testing normalization) | ||
109 | + */ | ||
110 | + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, | ||
111 | + 0x0000000000000001ULL, 0x0000000000000000ULL, | ||
112 | + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL}, | ||
113 | + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, | ||
114 | + 0x0000000000000001ULL, 0xfddbb9977553310aULL, | ||
115 | + 0x8000000000000001ULL, 0x78899aabbccddf05ULL}, | ||
116 | + | ||
117 | + /* Dividend > 64 bits, divisor almost as big */ | ||
118 | + { 0x0000000000000001ULL, 0x23456789abcdef01ULL, | ||
119 | + 0x0000000000000000ULL, 0x000000000000000fULL, | ||
120 | + 0x123456789abcdefeULL, 0x123456789abcde1fULL}, | ||
121 | +}; | ||
122 | + | ||
123 | +static const test_data_signed test_table_signed[] = { | ||
124 | + /* Positive dividend, positive/negative divisors */ | ||
125 | + { 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
126 | + 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
127 | + 0x0000000000000001LL, 0x0000000000000000LL}, | ||
128 | + { 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
129 | + 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
130 | + 0xffffffffffffffffLL, 0x0000000000000000LL}, | ||
131 | + { 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
132 | + 0x0000000000000000LL, 0x00000000005e30a7ULL, | ||
133 | + 0x0000000000000002LL, 0x0000000000000000LL}, | ||
134 | + { 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
135 | + 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL, | ||
136 | + 0xfffffffffffffffeLL, 0x0000000000000000LL}, | ||
137 | + { 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
138 | + 0x0000000000000000LL, 0x0000000000178c29ULL, | ||
139 | + 0x0000000000000008LL, 0x0000000000000006LL}, | ||
140 | + { 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
141 | + 0xffffffffffffffffLL, 0xffffffffffe873d7ULL, | ||
142 | + 0xfffffffffffffff8LL, 0x0000000000000006LL}, | ||
143 | + { 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
144 | + 0x0000000000000000LL, 0x000000000000550dULL, | ||
145 | + 0x0000000000000237LL, 0x0000000000000183LL}, | ||
146 | + { 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
147 | + 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL, | ||
148 | + 0xfffffffffffffdc9LL, 0x0000000000000183LL}, | ||
149 | + | ||
150 | + /* Negative dividend, positive/negative divisors */ | ||
151 | + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
152 | + 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
153 | + 0x0000000000000001LL, 0x0000000000000000LL}, | ||
154 | + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
155 | + 0x0000000000000000LL, 0x0000000000bc614eULL, | ||
156 | + 0xffffffffffffffffLL, 0x0000000000000000LL}, | ||
157 | + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
158 | + 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL, | ||
159 | + 0x0000000000000002LL, 0x0000000000000000LL}, | ||
160 | + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
161 | + 0x0000000000000000LL, 0x00000000005e30a7ULL, | ||
162 | + 0xfffffffffffffffeLL, 0x0000000000000000LL}, | ||
163 | + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
164 | + 0xffffffffffffffffLL, 0xffffffffffe873d7ULL, | ||
165 | + 0x0000000000000008LL, 0xfffffffffffffffaLL}, | ||
166 | + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
167 | + 0x0000000000000000LL, 0x0000000000178c29ULL, | ||
168 | + 0xfffffffffffffff8LL, 0xfffffffffffffffaLL}, | ||
169 | + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
170 | + 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL, | ||
171 | + 0x0000000000000237LL, 0xfffffffffffffe7dLL}, | ||
172 | + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, | ||
173 | + 0x0000000000000000LL, 0x000000000000550dULL, | ||
174 | + 0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL}, | ||
175 | +}; | ||
176 | + | ||
177 | +static void test_divu128(void) | ||
178 | +{ | ||
179 | + int i; | ||
180 | + uint64_t rem; | ||
181 | + test_data_unsigned tmp; | ||
182 | + | ||
183 | + for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) { | ||
184 | + tmp = test_table_unsigned[i]; | ||
185 | + | ||
186 | + rem = divu128(&tmp.low, &tmp.high, tmp.divisor); | ||
187 | + g_assert_cmpuint(tmp.low, ==, tmp.rlow); | ||
188 | + g_assert_cmpuint(tmp.high, ==, tmp.rhigh); | ||
189 | + g_assert_cmpuint(rem, ==, tmp.remainder); | ||
190 | + } | ||
191 | +} | ||
192 | + | ||
193 | +static void test_divs128(void) | ||
194 | +{ | ||
195 | + int i; | ||
196 | + int64_t rem; | ||
197 | + test_data_signed tmp; | ||
198 | + | ||
199 | + for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) { | ||
200 | + tmp = test_table_signed[i]; | ||
201 | + | ||
202 | + rem = divs128(&tmp.low, &tmp.high, tmp.divisor); | ||
203 | + g_assert_cmpuint(tmp.low, ==, tmp.rlow); | ||
204 | + g_assert_cmpuint(tmp.high, ==, tmp.rhigh); | ||
205 | + g_assert_cmpuint(rem, ==, tmp.remainder); | ||
206 | + } | ||
207 | +} | ||
208 | + | ||
209 | +int main(int argc, char **argv) | ||
210 | +{ | ||
211 | + g_test_init(&argc, &argv, NULL); | ||
212 | + g_test_add_func("/host-utils/test_divu128", test_divu128); | ||
213 | + g_test_add_func("/host-utils/test_divs128", test_divs128); | ||
214 | + return g_test_run(); | ||
215 | +} | ||
216 | diff --git a/tests/unit/meson.build b/tests/unit/meson.build | ||
217 | index XXXXXXX..XXXXXXX 100644 | ||
218 | --- a/tests/unit/meson.build | ||
219 | +++ b/tests/unit/meson.build | ||
220 | @@ -XXX,XX +XXX,XX @@ tests = { | ||
221 | # all code tested by test-x86-cpuid is inside topology.h | ||
222 | 'test-x86-cpuid': [], | ||
223 | 'test-cutils': [], | ||
224 | + 'test-div128': [], | ||
225 | 'test-shift128': [], | ||
226 | 'test-mul64': [], | ||
227 | # all code tested by test-int128 is inside int128.h | ||
228 | -- | ||
229 | 2.25.1 | ||
230 | |||
231 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | This will expose the variable to subroutines that | ||
2 | will be broken out of tcg_optimize. | ||
3 | 1 | ||
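The shape of the change, as a reduced model assuming nothing beyond what the hunk shows: state that used to be a local of tcg_optimize becomes a member of the context struct, so helpers split out of the main loop can read and update it through the ctx pointer:

    typedef struct Op Op;            /* opaque stand-in for TCGOp */

    typedef struct Ctx {
        Op *prev_mb;                 /* last barrier seen, NULL if none */
    } Ctx;

    static void stop_mb_window(Ctx *ctx)
    {
        ctx->prev_mb = NULL;         /* a helper updating shared state */
    }
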
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | --- | ||
9 | tcg/optimize.c | 11 ++++++----- | ||
10 | 1 file changed, 6 insertions(+), 5 deletions(-) | ||
11 | |||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/tcg/optimize.c | ||
15 | +++ b/tcg/optimize.c | ||
16 | @@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo { | ||
17 | |||
18 | typedef struct OptContext { | ||
19 | TCGContext *tcg; | ||
20 | + TCGOp *prev_mb; | ||
21 | TCGTempSet temps_used; | ||
22 | } OptContext; | ||
23 | |||
24 | @@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2) | ||
25 | void tcg_optimize(TCGContext *s) | ||
26 | { | ||
27 | int nb_temps, nb_globals, i; | ||
28 | - TCGOp *op, *op_next, *prev_mb = NULL; | ||
29 | + TCGOp *op, *op_next; | ||
30 | OptContext ctx = { .tcg = s }; | ||
31 | |||
32 | /* Array VALS has an element for each temp. | ||
33 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
34 | } | ||
35 | |||
36 | /* Eliminate duplicate and redundant fence instructions. */ | ||
37 | - if (prev_mb) { | ||
38 | + if (ctx.prev_mb) { | ||
39 | switch (opc) { | ||
40 | case INDEX_op_mb: | ||
41 | /* Merge two barriers of the same type into one, | ||
42 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
43 | * barrier. This is stricter than specified but for | ||
44 | * the purposes of TCG is better than not optimizing. | ||
45 | */ | ||
46 | - prev_mb->args[0] |= op->args[0]; | ||
47 | + ctx.prev_mb->args[0] |= op->args[0]; | ||
48 | tcg_op_remove(s, op); | ||
49 | break; | ||
50 | |||
51 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
52 | case INDEX_op_qemu_st_i64: | ||
53 | case INDEX_op_call: | ||
54 | /* Opcodes that touch guest memory stop the optimization. */ | ||
55 | - prev_mb = NULL; | ||
56 | + ctx.prev_mb = NULL; | ||
57 | break; | ||
58 | } | ||
59 | } else if (opc == INDEX_op_mb) { | ||
60 | - prev_mb = op; | ||
61 | + ctx.prev_mb = op; | ||
62 | } | ||
63 | } | ||
64 | } | ||
65 | -- | ||
66 | 2.25.1 | ||
67 | |||
68 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Continue splitting tcg_optimize. | ||
2 | 1 | ||
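Copy propagation itself, in miniature: when a temp is known to be a copy of another, later uses are rewritten to the original, which exposes further folding. A toy model, not TCG's actual representation:

    #include <stdio.h>

    enum { NTEMPS = 4 };
    static int copy_of[NTEMPS];        /* copy_of[i] == i: not a copy */

    static int propagate(int t)        /* follow the copy chain back */
    {
        while (copy_of[t] != t) {
            t = copy_of[t];
        }
        return t;
    }

    int main(void)
    {
        for (int i = 0; i < NTEMPS; i++) {
            copy_of[i] = i;
        }
        copy_of[2] = 1;                /* mov t2, t1 */
        copy_of[3] = 2;                /* mov t3, t2 */
        printf("t3 -> t%d\n", propagate(3));   /* prints: t3 -> t1 */
        return 0;
    }
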
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tcg/optimize.c | 22 ++++++++++++++-------- | ||
9 | 1 file changed, 14 insertions(+), 8 deletions(-) | ||
10 | |||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/tcg/optimize.c | ||
14 | +++ b/tcg/optimize.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args) | ||
16 | } | ||
17 | } | ||
18 | |||
19 | +static void copy_propagate(OptContext *ctx, TCGOp *op, | ||
20 | + int nb_oargs, int nb_iargs) | ||
21 | +{ | ||
22 | + TCGContext *s = ctx->tcg; | ||
23 | + | ||
24 | + for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) { | ||
25 | + TCGTemp *ts = arg_temp(op->args[i]); | ||
26 | + if (ts && ts_is_copy(ts)) { | ||
27 | + op->args[i] = temp_arg(find_better_copy(s, ts)); | ||
28 | + } | ||
29 | + } | ||
30 | +} | ||
31 | + | ||
32 | /* Propagate constants and copies, fold constant expressions. */ | ||
33 | void tcg_optimize(TCGContext *s) | ||
34 | { | ||
35 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
36 | nb_iargs = def->nb_iargs; | ||
37 | } | ||
38 | init_arguments(&ctx, op, nb_oargs + nb_iargs); | ||
39 | - | ||
40 | - /* Do copy propagation */ | ||
41 | - for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { | ||
42 | - TCGTemp *ts = arg_temp(op->args[i]); | ||
43 | - if (ts && ts_is_copy(ts)) { | ||
44 | - op->args[i] = temp_arg(find_better_copy(s, ts)); | ||
45 | - } | ||
46 | - } | ||
47 | + copy_propagate(&ctx, op, nb_oargs, nb_iargs); | ||
48 | |||
49 | /* For commutative operations make constant second argument */ | ||
50 | switch (opc) { | ||
51 | -- | ||
52 | 2.25.1 | ||
53 | |||
54 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Calls are special in that they have a variable number | ||
2 | of arguments, and need to be able to clobber globals. | ||
3 | 1 | ||
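The invalidation rule in isolation: unless a call is flagged as neither reading nor writing globals, every fact recorded about a global may be stale once it returns, so the tracked state is dropped. A toy model, with the flag and array as illustrative stand-ins:

    #include <stdbool.h>

    enum { NGLOBALS = 8 };
    static bool value_known[NGLOBALS];

    static void after_call(bool no_rw_globals)
    {
        if (!no_rw_globals) {
            for (int i = 0; i < NGLOBALS; i++) {
                value_known[i] = false;   /* forget all global facts */
            }
        }
    }
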
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------ | ||
9 | 1 file changed, 41 insertions(+), 22 deletions(-) | ||
10 | |||
11 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/tcg/optimize.c | ||
14 | +++ b/tcg/optimize.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op, | ||
16 | } | ||
17 | } | ||
18 | |||
19 | +static bool fold_call(OptContext *ctx, TCGOp *op) | ||
20 | +{ | ||
21 | + TCGContext *s = ctx->tcg; | ||
22 | + int nb_oargs = TCGOP_CALLO(op); | ||
23 | + int nb_iargs = TCGOP_CALLI(op); | ||
24 | + int flags, i; | ||
25 | + | ||
26 | + init_arguments(ctx, op, nb_oargs + nb_iargs); | ||
27 | + copy_propagate(ctx, op, nb_oargs, nb_iargs); | ||
28 | + | ||
29 | + /* If the function reads or writes globals, reset temp data. */ | ||
30 | + flags = tcg_call_flags(op); | ||
31 | + if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { | ||
32 | + int nb_globals = s->nb_globals; | ||
33 | + | ||
34 | + for (i = 0; i < nb_globals; i++) { | ||
35 | + if (test_bit(i, ctx->temps_used.l)) { | ||
36 | + reset_ts(&ctx->tcg->temps[i]); | ||
37 | + } | ||
38 | + } | ||
39 | + } | ||
40 | + | ||
41 | + /* Reset temp data for outputs. */ | ||
42 | + for (i = 0; i < nb_oargs; i++) { | ||
43 | + reset_temp(op->args[i]); | ||
44 | + } | ||
45 | + | ||
46 | + /* Stop optimizing MB across calls. */ | ||
47 | + ctx->prev_mb = NULL; | ||
48 | + return true; | ||
49 | +} | ||
50 | + | ||
51 | /* Propagate constants and copies, fold constant expressions. */ | ||
52 | void tcg_optimize(TCGContext *s) | ||
53 | { | ||
54 | - int nb_temps, nb_globals, i; | ||
55 | + int nb_temps, i; | ||
56 | TCGOp *op, *op_next; | ||
57 | OptContext ctx = { .tcg = s }; | ||
58 | |||
59 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
60 | available through the doubly linked circular list. */ | ||
61 | |||
62 | nb_temps = s->nb_temps; | ||
63 | - nb_globals = s->nb_globals; | ||
64 | - | ||
65 | for (i = 0; i < nb_temps; ++i) { | ||
66 | s->temps[i].state_ptr = NULL; | ||
67 | } | ||
68 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
69 | uint64_t z_mask, partmask, affected, tmp; | ||
70 | int nb_oargs, nb_iargs; | ||
71 | TCGOpcode opc = op->opc; | ||
72 | - const TCGOpDef *def = &tcg_op_defs[opc]; | ||
73 | + const TCGOpDef *def; | ||
74 | |||
75 | - /* Count the arguments, and initialize the temps that are | ||
76 | - going to be used */ | ||
77 | + /* Calls are special. */ | ||
78 | if (opc == INDEX_op_call) { | ||
79 | - nb_oargs = TCGOP_CALLO(op); | ||
80 | - nb_iargs = TCGOP_CALLI(op); | ||
81 | - } else { | ||
82 | - nb_oargs = def->nb_oargs; | ||
83 | - nb_iargs = def->nb_iargs; | ||
84 | + fold_call(&ctx, op); | ||
85 | + continue; | ||
86 | } | ||
87 | + | ||
88 | + def = &tcg_op_defs[opc]; | ||
89 | + nb_oargs = def->nb_oargs; | ||
90 | + nb_iargs = def->nb_iargs; | ||
91 | init_arguments(&ctx, op, nb_oargs + nb_iargs); | ||
92 | copy_propagate(&ctx, op, nb_oargs, nb_iargs); | ||
93 | |||
94 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
95 | if (def->flags & TCG_OPF_BB_END) { | ||
96 | memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
97 | } else { | ||
98 | - if (opc == INDEX_op_call && | ||
99 | - !(tcg_call_flags(op) | ||
100 | - & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { | ||
101 | - for (i = 0; i < nb_globals; i++) { | ||
102 | - if (test_bit(i, ctx.temps_used.l)) { | ||
103 | - reset_ts(&s->temps[i]); | ||
104 | - } | ||
105 | - } | ||
106 | - } | ||
107 | - | ||
108 | for (i = 0; i < nb_oargs; i++) { | ||
109 | reset_temp(op->args[i]); | ||
110 | /* Save the corresponding known-zero bits mask for the | ||
111 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
112 | case INDEX_op_qemu_st_i32: | ||
113 | case INDEX_op_qemu_st8_i32: | ||
114 | case INDEX_op_qemu_st_i64: | ||
115 | - case INDEX_op_call: | ||
116 | /* Opcodes that touch guest memory stop the optimization. */ | ||
117 | ctx.prev_mb = NULL; | ||
118 | break; | ||
119 | -- | ||
120 | 2.25.1 | ||
121 | |||
122 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Rather than try to keep these up to date across folding,
2 | re-read nb_oargs at the end, after re-reading the opcode. | ||
3 | 1 | ||
4 | A couple of asserts need dropping, but that will take care | ||
5 | of itself as we split the function further. | ||
6 | |||
7 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
8 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | --- | ||
11 | tcg/optimize.c | 14 ++++---------- | ||
12 | 1 file changed, 4 insertions(+), 10 deletions(-) | ||
13 | |||
14 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/tcg/optimize.c | ||
17 | +++ b/tcg/optimize.c | ||
18 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
19 | |||
20 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { | ||
21 | uint64_t z_mask, partmask, affected, tmp; | ||
22 | - int nb_oargs, nb_iargs; | ||
23 | TCGOpcode opc = op->opc; | ||
24 | const TCGOpDef *def; | ||
25 | |||
26 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
27 | } | ||
28 | |||
29 | def = &tcg_op_defs[opc]; | ||
30 | - nb_oargs = def->nb_oargs; | ||
31 | - nb_iargs = def->nb_iargs; | ||
32 | - init_arguments(&ctx, op, nb_oargs + nb_iargs); | ||
33 | - copy_propagate(&ctx, op, nb_oargs, nb_iargs); | ||
34 | + init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs); | ||
35 | + copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); | ||
36 | |||
37 | /* For commutative operations make constant second argument */ | ||
38 | switch (opc) { | ||
39 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
40 | |||
41 | CASE_OP_32_64(qemu_ld): | ||
42 | { | ||
43 | - MemOpIdx oi = op->args[nb_oargs + nb_iargs]; | ||
44 | + MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; | ||
45 | MemOp mop = get_memop(oi); | ||
46 | if (!(mop & MO_SIGN)) { | ||
47 | z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; | ||
48 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
49 | } | ||
50 | |||
51 | if (partmask == 0) { | ||
52 | - tcg_debug_assert(nb_oargs == 1); | ||
53 | tcg_opt_gen_movi(&ctx, op, op->args[0], 0); | ||
54 | continue; | ||
55 | } | ||
56 | if (affected == 0) { | ||
57 | - tcg_debug_assert(nb_oargs == 1); | ||
58 | tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); | ||
59 | continue; | ||
60 | } | ||
61 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
62 | } else if (args_are_copies(op->args[1], op->args[2])) { | ||
63 | op->opc = INDEX_op_dup_vec; | ||
64 | TCGOP_VECE(op) = MO_32; | ||
65 | - nb_iargs = 1; | ||
66 | } | ||
67 | break; | ||
68 | |||
69 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
70 | op->opc = opc = (opc == INDEX_op_movcond_i32 | ||
71 | ? INDEX_op_setcond_i32 | ||
72 | : INDEX_op_setcond_i64); | ||
73 | - nb_iargs = 2; | ||
74 | } | ||
75 | break; | ||
76 | |||
77 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
78 | if (def->flags & TCG_OPF_BB_END) { | ||
79 | memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
80 | } else { | ||
81 | + int nb_oargs = def->nb_oargs; | ||
82 | for (i = 0; i < nb_oargs; i++) { | ||
83 | reset_temp(op->args[i]); | ||
84 | /* Save the corresponding known-zero bits mask for the | ||
85 | -- | ||
86 | 2.25.1 | ||
87 | |||
88 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | This puts the separate mb optimization into the same framework | ||
2 | as the others. While fold_qemu_{ld,st} are currently identical, | ||
3 | that won't last as more code gets moved. | ||
4 | 1 | ||
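The merge rule, mb X; mb Y => mb X|Y, taken on its own: two adjacent barriers collapse into one whose kind is the bitwise OR of both, which is at least as strong as either input. Mask values below are illustrative, not TCG's actual encoding:

    #include <stdint.h>

    enum { BAR_LD = 1, BAR_ST = 2 };   /* illustrative barrier kinds */

    static uint32_t merge_barriers(uint32_t x, uint32_t y)
    {
        return x | y;   /* possibly stricter than required, never weaker */
    }
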
5 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | --- | ||
9 | tcg/optimize.c | 89 +++++++++++++++++++++++++++++--------------------- | ||
10 | 1 file changed, 51 insertions(+), 38 deletions(-) | ||
11 | |||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/tcg/optimize.c | ||
15 | +++ b/tcg/optimize.c | ||
16 | @@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op) | ||
17 | return true; | ||
18 | } | ||
19 | |||
20 | +static bool fold_mb(OptContext *ctx, TCGOp *op) | ||
21 | +{ | ||
22 | + /* Eliminate duplicate and redundant fence instructions. */ | ||
23 | + if (ctx->prev_mb) { | ||
24 | + /* | ||
25 | + * Merge two barriers of the same type into one, | ||
26 | + * or a weaker barrier into a stronger one, | ||
27 | + * or two weaker barriers into a stronger one. | ||
28 | + * mb X; mb Y => mb X|Y | ||
29 | + * mb; strl => mb; st | ||
30 | + * ldaq; mb => ld; mb | ||
31 | + * ldaq; strl => ld; mb; st | ||
32 | + * Other combinations are also merged into a strong | ||
33 | + * barrier. This is stricter than specified but for | ||
34 | + * the purposes of TCG is better than not optimizing. | ||
35 | + */ | ||
36 | + ctx->prev_mb->args[0] |= op->args[0]; | ||
37 | + tcg_op_remove(ctx->tcg, op); | ||
38 | + } else { | ||
39 | + ctx->prev_mb = op; | ||
40 | + } | ||
41 | + return true; | ||
42 | +} | ||
43 | + | ||
44 | +static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) | ||
45 | +{ | ||
46 | + /* Opcodes that touch guest memory stop the mb optimization. */ | ||
47 | + ctx->prev_mb = NULL; | ||
48 | + return false; | ||
49 | +} | ||
50 | + | ||
51 | +static bool fold_qemu_st(OptContext *ctx, TCGOp *op) | ||
52 | +{ | ||
53 | + /* Opcodes that touch guest memory stop the mb optimization. */ | ||
54 | + ctx->prev_mb = NULL; | ||
55 | + return false; | ||
56 | +} | ||
57 | + | ||
58 | /* Propagate constants and copies, fold constant expressions. */ | ||
59 | void tcg_optimize(TCGContext *s) | ||
60 | { | ||
61 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
62 | } | ||
63 | break; | ||
64 | |||
65 | + case INDEX_op_mb: | ||
66 | + done = fold_mb(&ctx, op); | ||
67 | + break; | ||
68 | + case INDEX_op_qemu_ld_i32: | ||
69 | + case INDEX_op_qemu_ld_i64: | ||
70 | + done = fold_qemu_ld(&ctx, op); | ||
71 | + break; | ||
72 | + case INDEX_op_qemu_st_i32: | ||
73 | + case INDEX_op_qemu_st8_i32: | ||
74 | + case INDEX_op_qemu_st_i64: | ||
75 | + done = fold_qemu_st(&ctx, op); | ||
76 | + break; | ||
77 | + | ||
78 | default: | ||
79 | break; | ||
80 | } | ||
81 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
82 | if (!done) { | ||
83 | finish_folding(&ctx, op); | ||
84 | } | ||
85 | - | ||
86 | - /* Eliminate duplicate and redundant fence instructions. */ | ||
87 | - if (ctx.prev_mb) { | ||
88 | - switch (opc) { | ||
89 | - case INDEX_op_mb: | ||
90 | - /* Merge two barriers of the same type into one, | ||
91 | - * or a weaker barrier into a stronger one, | ||
92 | - * or two weaker barriers into a stronger one. | ||
93 | - * mb X; mb Y => mb X|Y | ||
94 | - * mb; strl => mb; st | ||
95 | - * ldaq; mb => ld; mb | ||
96 | - * ldaq; strl => ld; mb; st | ||
97 | - * Other combinations are also merged into a strong | ||
98 | - * barrier. This is stricter than specified but for | ||
99 | - * the purposes of TCG is better than not optimizing. | ||
100 | - */ | ||
101 | - ctx.prev_mb->args[0] |= op->args[0]; | ||
102 | - tcg_op_remove(s, op); | ||
103 | - break; | ||
104 | - | ||
105 | - default: | ||
106 | - /* Opcodes that end the block stop the optimization. */ | ||
107 | - if ((def->flags & TCG_OPF_BB_END) == 0) { | ||
108 | - break; | ||
109 | - } | ||
110 | - /* fallthru */ | ||
111 | - case INDEX_op_qemu_ld_i32: | ||
112 | - case INDEX_op_qemu_ld_i64: | ||
113 | - case INDEX_op_qemu_st_i32: | ||
114 | - case INDEX_op_qemu_st8_i32: | ||
115 | - case INDEX_op_qemu_st_i64: | ||
116 | - /* Opcodes that touch guest memory stop the optimization. */ | ||
117 | - ctx.prev_mb = NULL; | ||
118 | - break; | ||
119 | - } | ||
120 | - } else if (opc == INDEX_op_mb) { | ||
121 | - ctx.prev_mb = op; | ||
122 | - } | ||
123 | } | ||
124 | } | ||
125 | -- | ||
126 | 2.25.1 | ||
127 | |||
128 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Reduce some code duplication by folding the NE and EQ cases. | ||
2 | 1 | ||
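The deduplication rests on one observation: whenever the EQ outcome of a half-comparison is known, the NE outcome is its complement, so xor-ing with an inv flag lets a single switch serve both conditions. In isolation, with -1 standing for unknown:

    /* eq_result: 1, 0, or -1 for unknown; inv: 1 when folding NE. */
    static int fold_eq_ne(int eq_result, int inv)
    {
        if (eq_result < 0) {
            return -1;               /* nothing known, no folding */
        }
        return eq_result ^ inv;      /* inv == 1 turns EQ into NE */
    }
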
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
4 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | tcg/optimize.c | 145 ++++++++++++++++++++++++------------------------- | ||
8 | 1 file changed, 72 insertions(+), 73 deletions(-) | ||
9 | |||
10 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tcg/optimize.c | ||
13 | +++ b/tcg/optimize.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op) | ||
15 | return fold_const2(ctx, op); | ||
16 | } | ||
17 | |||
18 | +static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
19 | +{ | ||
20 | + TCGCond cond = op->args[5]; | ||
21 | + int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond); | ||
22 | + int inv = 0; | ||
23 | + | ||
24 | + if (i >= 0) { | ||
25 | + goto do_setcond_const; | ||
26 | + } | ||
27 | + | ||
28 | + switch (cond) { | ||
29 | + case TCG_COND_LT: | ||
30 | + case TCG_COND_GE: | ||
31 | + /* | ||
32 | + * Simplify LT/GE comparisons vs zero to a single compare | ||
33 | + * vs the high word of the input. | ||
34 | + */ | ||
35 | + if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 && | ||
36 | + arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) { | ||
37 | + goto do_setcond_high; | ||
38 | + } | ||
39 | + break; | ||
40 | + | ||
41 | + case TCG_COND_NE: | ||
42 | + inv = 1; | ||
43 | + QEMU_FALLTHROUGH; | ||
44 | + case TCG_COND_EQ: | ||
45 | + /* | ||
46 | + * Simplify EQ/NE comparisons where one of the pairs | ||
47 | + * can be simplified. | ||
48 | + */ | ||
49 | + i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1], | ||
50 | + op->args[3], cond); | ||
51 | + switch (i ^ inv) { | ||
52 | + case 0: | ||
53 | + goto do_setcond_const; | ||
54 | + case 1: | ||
55 | + goto do_setcond_high; | ||
56 | + } | ||
57 | + | ||
58 | + i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2], | ||
59 | + op->args[4], cond); | ||
60 | + switch (i ^ inv) { | ||
61 | + case 0: | ||
62 | + goto do_setcond_const; | ||
63 | + case 1: | ||
64 | + op->args[2] = op->args[3]; | ||
65 | + op->args[3] = cond; | ||
66 | + op->opc = INDEX_op_setcond_i32; | ||
67 | + break; | ||
68 | + } | ||
69 | + break; | ||
70 | + | ||
71 | + default: | ||
72 | + break; | ||
73 | + | ||
74 | + do_setcond_high: | ||
75 | + op->args[1] = op->args[2]; | ||
76 | + op->args[2] = op->args[4]; | ||
77 | + op->args[3] = cond; | ||
78 | + op->opc = INDEX_op_setcond_i32; | ||
79 | + break; | ||
80 | + } | ||
81 | + return false; | ||
82 | + | ||
83 | + do_setcond_const: | ||
84 | + return tcg_opt_gen_movi(ctx, op, op->args[0], i); | ||
85 | +} | ||
86 | + | ||
87 | static bool fold_shift(OptContext *ctx, TCGOp *op) | ||
88 | { | ||
89 | return fold_const2(ctx, op); | ||
90 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
91 | } | ||
92 | break; | ||
93 | |||
94 | - case INDEX_op_setcond2_i32: | ||
95 | - i = do_constant_folding_cond2(&op->args[1], &op->args[3], | ||
96 | - op->args[5]); | ||
97 | - if (i >= 0) { | ||
98 | - do_setcond_const: | ||
99 | - tcg_opt_gen_movi(&ctx, op, op->args[0], i); | ||
100 | - continue; | ||
101 | - } | ||
102 | - if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE) | ||
103 | - && arg_is_const(op->args[3]) | ||
104 | - && arg_info(op->args[3])->val == 0 | ||
105 | - && arg_is_const(op->args[4]) | ||
106 | - && arg_info(op->args[4])->val == 0) { | ||
107 | - /* Simplify LT/GE comparisons vs zero to a single compare | ||
108 | - vs the high word of the input. */ | ||
109 | - do_setcond_high: | ||
110 | - reset_temp(op->args[0]); | ||
111 | - arg_info(op->args[0])->z_mask = 1; | ||
112 | - op->opc = INDEX_op_setcond_i32; | ||
113 | - op->args[1] = op->args[2]; | ||
114 | - op->args[2] = op->args[4]; | ||
115 | - op->args[3] = op->args[5]; | ||
116 | - break; | ||
117 | - } | ||
118 | - if (op->args[5] == TCG_COND_EQ) { | ||
119 | - /* Simplify EQ comparisons where one of the pairs | ||
120 | - can be simplified. */ | ||
121 | - i = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
122 | - op->args[1], op->args[3], | ||
123 | - TCG_COND_EQ); | ||
124 | - if (i == 0) { | ||
125 | - goto do_setcond_const; | ||
126 | - } else if (i > 0) { | ||
127 | - goto do_setcond_high; | ||
128 | - } | ||
129 | - i = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
130 | - op->args[2], op->args[4], | ||
131 | - TCG_COND_EQ); | ||
132 | - if (i == 0) { | ||
133 | - goto do_setcond_high; | ||
134 | - } else if (i < 0) { | ||
135 | - break; | ||
136 | - } | ||
137 | - do_setcond_low: | ||
138 | - reset_temp(op->args[0]); | ||
139 | - arg_info(op->args[0])->z_mask = 1; | ||
140 | - op->opc = INDEX_op_setcond_i32; | ||
141 | - op->args[2] = op->args[3]; | ||
142 | - op->args[3] = op->args[5]; | ||
143 | - break; | ||
144 | - } | ||
145 | - if (op->args[5] == TCG_COND_NE) { | ||
146 | - /* Simplify NE comparisons where one of the pairs | ||
147 | - can be simplified. */ | ||
148 | - i = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
149 | - op->args[1], op->args[3], | ||
150 | - TCG_COND_NE); | ||
151 | - if (i == 0) { | ||
152 | - goto do_setcond_high; | ||
153 | - } else if (i > 0) { | ||
154 | - goto do_setcond_const; | ||
155 | - } | ||
156 | - i = do_constant_folding_cond(INDEX_op_setcond_i32, | ||
157 | - op->args[2], op->args[4], | ||
158 | - TCG_COND_NE); | ||
159 | - if (i == 0) { | ||
160 | - goto do_setcond_low; | ||
161 | - } else if (i > 0) { | ||
162 | - goto do_setcond_const; | ||
163 | - } | ||
164 | - } | ||
165 | - break; | ||
166 | - | ||
167 | default: | ||
168 | break; | ||
169 | |||
170 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
171 | CASE_OP_32_64(shr): | ||
172 | done = fold_shift(&ctx, op); | ||
173 | break; | ||
174 | + case INDEX_op_setcond2_i32: | ||
175 | + done = fold_setcond2(&ctx, op); | ||
176 | + break; | ||
177 | CASE_OP_32_64_VEC(sub): | ||
178 | done = fold_sub(&ctx, op); | ||
179 | break; | ||
180 | -- | ||
181 | 2.25.1 | ||
182 | |||
183 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Reduce some code duplication by folding the NE and EQ cases. | ||
2 | 1 | ||
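The LT/GE-versus-zero simplification works because for a double-word value the sign bit lives entirely in the high word, so one 32-bit compare suffices:

    #include <stdint.h>

    static int is_negative_64(uint32_t lo, int32_t hi)
    {
        (void)lo;        /* the low word cannot affect the sign */
        return hi < 0;   /* x < 0 iff the high word is negative */
    }
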
3 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/optimize.c | 159 +++++++++++++++++++++++++------------------------ | ||
7 | 1 file changed, 81 insertions(+), 78 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/optimize.c | ||
12 | +++ b/tcg/optimize.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op) | ||
14 | return fold_const2(ctx, op); | ||
15 | } | ||
16 | |||
17 | +static bool fold_brcond2(OptContext *ctx, TCGOp *op) | ||
18 | +{ | ||
19 | + TCGCond cond = op->args[4]; | ||
20 | + int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond); | ||
21 | + TCGArg label = op->args[5]; | ||
22 | + int inv = 0; | ||
23 | + | ||
24 | + if (i >= 0) { | ||
25 | + goto do_brcond_const; | ||
26 | + } | ||
27 | + | ||
28 | + switch (cond) { | ||
29 | + case TCG_COND_LT: | ||
30 | + case TCG_COND_GE: | ||
31 | + /* | ||
32 | + * Simplify LT/GE comparisons vs zero to a single compare | ||
33 | + * vs the high word of the input. | ||
34 | + */ | ||
35 | + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 && | ||
36 | + arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) { | ||
37 | + goto do_brcond_high; | ||
38 | + } | ||
39 | + break; | ||
40 | + | ||
41 | + case TCG_COND_NE: | ||
42 | + inv = 1; | ||
43 | + QEMU_FALLTHROUGH; | ||
44 | + case TCG_COND_EQ: | ||
45 | + /* | ||
46 | + * Simplify EQ/NE comparisons where one of the pairs | ||
47 | + * can be simplified. | ||
48 | + */ | ||
49 | + i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0], | ||
50 | + op->args[2], cond); | ||
51 | + switch (i ^ inv) { | ||
52 | + case 0: | ||
53 | + goto do_brcond_const; | ||
54 | + case 1: | ||
55 | + goto do_brcond_high; | ||
56 | + } | ||
57 | + | ||
58 | + i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1], | ||
59 | + op->args[3], cond); | ||
60 | + switch (i ^ inv) { | ||
61 | + case 0: | ||
62 | + goto do_brcond_const; | ||
63 | + case 1: | ||
64 | + op->opc = INDEX_op_brcond_i32; | ||
65 | + op->args[1] = op->args[2]; | ||
66 | + op->args[2] = cond; | ||
67 | + op->args[3] = label; | ||
68 | + break; | ||
69 | + } | ||
70 | + break; | ||
71 | + | ||
72 | + default: | ||
73 | + break; | ||
74 | + | ||
75 | + do_brcond_high: | ||
76 | + op->opc = INDEX_op_brcond_i32; | ||
77 | + op->args[0] = op->args[1]; | ||
78 | + op->args[1] = op->args[3]; | ||
79 | + op->args[2] = cond; | ||
80 | + op->args[3] = label; | ||
81 | + break; | ||
82 | + | ||
83 | + do_brcond_const: | ||
84 | + if (i == 0) { | ||
85 | + tcg_op_remove(ctx->tcg, op); | ||
86 | + return true; | ||
87 | + } | ||
88 | + op->opc = INDEX_op_br; | ||
89 | + op->args[0] = label; | ||
90 | + break; | ||
91 | + } | ||
92 | + return false; | ||
93 | +} | ||
94 | + | ||
95 | static bool fold_call(OptContext *ctx, TCGOp *op) | ||
96 | { | ||
97 | TCGContext *s = ctx->tcg; | ||
98 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
99 | } | ||
100 | break; | ||
101 | |||
102 | - case INDEX_op_brcond2_i32: | ||
103 | - i = do_constant_folding_cond2(&op->args[0], &op->args[2], | ||
104 | - op->args[4]); | ||
105 | - if (i == 0) { | ||
106 | - do_brcond_false: | ||
107 | - tcg_op_remove(s, op); | ||
108 | - continue; | ||
109 | - } | ||
110 | - if (i > 0) { | ||
111 | - do_brcond_true: | ||
112 | - op->opc = opc = INDEX_op_br; | ||
113 | - op->args[0] = op->args[5]; | ||
114 | - break; | ||
115 | - } | ||
116 | - if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE) | ||
117 | - && arg_is_const(op->args[2]) | ||
118 | - && arg_info(op->args[2])->val == 0 | ||
119 | - && arg_is_const(op->args[3]) | ||
120 | - && arg_info(op->args[3])->val == 0) { | ||
121 | - /* Simplify LT/GE comparisons vs zero to a single compare | ||
122 | - vs the high word of the input. */ | ||
123 | - do_brcond_high: | ||
124 | - op->opc = opc = INDEX_op_brcond_i32; | ||
125 | - op->args[0] = op->args[1]; | ||
126 | - op->args[1] = op->args[3]; | ||
127 | - op->args[2] = op->args[4]; | ||
128 | - op->args[3] = op->args[5]; | ||
129 | - break; | ||
130 | - } | ||
131 | - if (op->args[4] == TCG_COND_EQ) { | ||
132 | - /* Simplify EQ comparisons where one of the pairs | ||
133 | - can be simplified. */ | ||
134 | - i = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
135 | - op->args[0], op->args[2], | ||
136 | - TCG_COND_EQ); | ||
137 | - if (i == 0) { | ||
138 | - goto do_brcond_false; | ||
139 | - } else if (i > 0) { | ||
140 | - goto do_brcond_high; | ||
141 | - } | ||
142 | - i = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
143 | - op->args[1], op->args[3], | ||
144 | - TCG_COND_EQ); | ||
145 | - if (i == 0) { | ||
146 | - goto do_brcond_false; | ||
147 | - } else if (i < 0) { | ||
148 | - break; | ||
149 | - } | ||
150 | - do_brcond_low: | ||
151 | - memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
152 | - op->opc = INDEX_op_brcond_i32; | ||
153 | - op->args[1] = op->args[2]; | ||
154 | - op->args[2] = op->args[4]; | ||
155 | - op->args[3] = op->args[5]; | ||
156 | - break; | ||
157 | - } | ||
158 | - if (op->args[4] == TCG_COND_NE) { | ||
159 | - /* Simplify NE comparisons where one of the pairs | ||
160 | - can be simplified. */ | ||
161 | - i = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
162 | - op->args[0], op->args[2], | ||
163 | - TCG_COND_NE); | ||
164 | - if (i == 0) { | ||
165 | - goto do_brcond_high; | ||
166 | - } else if (i > 0) { | ||
167 | - goto do_brcond_true; | ||
168 | - } | ||
169 | - i = do_constant_folding_cond(INDEX_op_brcond_i32, | ||
170 | - op->args[1], op->args[3], | ||
171 | - TCG_COND_NE); | ||
172 | - if (i == 0) { | ||
173 | - goto do_brcond_low; | ||
174 | - } else if (i > 0) { | ||
175 | - goto do_brcond_true; | ||
176 | - } | ||
177 | - } | ||
178 | - break; | ||
179 | - | ||
180 | default: | ||
181 | break; | ||
182 | |||
183 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
184 | CASE_OP_32_64_VEC(andc): | ||
185 | done = fold_andc(&ctx, op); | ||
186 | break; | ||
187 | + case INDEX_op_brcond2_i32: | ||
188 | + done = fold_brcond2(&ctx, op); | ||
189 | + break; | ||
190 | CASE_OP_32_64(ctpop): | ||
191 | done = fold_ctpop(&ctx, op); | ||
192 | break; | ||
193 | -- | ||
194 | 2.25.1 | ||
195 | |||
196 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | --- | ||
5 | tcg/optimize.c | 33 +++++++++++++++++++-------------- | ||
6 | 1 file changed, 19 insertions(+), 14 deletions(-) | ||
7 | 1 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
9 | index XXXXXXX..XXXXXXX 100644 | ||
10 | --- a/tcg/optimize.c | ||
11 | +++ b/tcg/optimize.c | ||
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op) | ||
13 | return fold_const2(ctx, op); | ||
14 | } | ||
15 | |||
16 | +static bool fold_brcond(OptContext *ctx, TCGOp *op) | ||
17 | +{ | ||
18 | + TCGCond cond = op->args[2]; | ||
19 | + int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond); | ||
20 | + | ||
21 | + if (i == 0) { | ||
22 | + tcg_op_remove(ctx->tcg, op); | ||
23 | + return true; | ||
24 | + } | ||
25 | + if (i > 0) { | ||
26 | + op->opc = INDEX_op_br; | ||
27 | + op->args[0] = op->args[3]; | ||
28 | + } | ||
29 | + return false; | ||
30 | +} | ||
31 | + | ||
32 | static bool fold_brcond2(OptContext *ctx, TCGOp *op) | ||
33 | { | ||
34 | TCGCond cond = op->args[4]; | ||
35 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
36 | } | ||
37 | break; | ||
38 | |||
39 | - CASE_OP_32_64(brcond): | ||
40 | - i = do_constant_folding_cond(opc, op->args[0], | ||
41 | - op->args[1], op->args[2]); | ||
42 | - if (i == 0) { | ||
43 | - tcg_op_remove(s, op); | ||
44 | - continue; | ||
45 | - } else if (i > 0) { | ||
46 | - memset(&ctx.temps_used, 0, sizeof(ctx.temps_used)); | ||
47 | - op->opc = opc = INDEX_op_br; | ||
48 | - op->args[0] = op->args[3]; | ||
49 | - break; | ||
50 | - } | ||
51 | - break; | ||
52 | - | ||
53 | CASE_OP_32_64(movcond): | ||
54 | i = do_constant_folding_cond(opc, op->args[1], | ||
55 | op->args[2], op->args[5]); | ||
56 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
57 | CASE_OP_32_64_VEC(andc): | ||
58 | done = fold_andc(&ctx, op); | ||
59 | break; | ||
60 | + CASE_OP_32_64(brcond): | ||
61 | + done = fold_brcond(&ctx, op); | ||
62 | + break; | ||
63 | case INDEX_op_brcond2_i32: | ||
64 | done = fold_brcond2(&ctx, op); | ||
65 | break; | ||
66 | -- | ||
67 | 2.25.1 | ||
68 | |||
69 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | --- | ||
5 | tcg/optimize.c | 23 ++++++++++++++--------- | ||
6 | 1 file changed, 14 insertions(+), 9 deletions(-) | ||
7 | 1 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
9 | index XXXXXXX..XXXXXXX 100644 | ||
10 | --- a/tcg/optimize.c | ||
11 | +++ b/tcg/optimize.c | ||
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op) | ||
13 | return fold_const2(ctx, op); | ||
14 | } | ||
15 | |||
16 | +static bool fold_setcond(OptContext *ctx, TCGOp *op) | ||
17 | +{ | ||
18 | + TCGCond cond = op->args[3]; | ||
19 | + int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond); | ||
20 | + | ||
21 | + if (i >= 0) { | ||
22 | + return tcg_opt_gen_movi(ctx, op, op->args[0], i); | ||
23 | + } | ||
24 | + return false; | ||
25 | +} | ||
26 | + | ||
27 | static bool fold_setcond2(OptContext *ctx, TCGOp *op) | ||
28 | { | ||
29 | TCGCond cond = op->args[5]; | ||
30 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
31 | } | ||
32 | break; | ||
33 | |||
34 | - CASE_OP_32_64(setcond): | ||
35 | - i = do_constant_folding_cond(opc, op->args[1], | ||
36 | - op->args[2], op->args[3]); | ||
37 | - if (i >= 0) { | ||
38 | - tcg_opt_gen_movi(&ctx, op, op->args[0], i); | ||
39 | - continue; | ||
40 | - } | ||
41 | - break; | ||
42 | - | ||
43 | CASE_OP_32_64(movcond): | ||
44 | i = do_constant_folding_cond(opc, op->args[1], | ||
45 | op->args[2], op->args[5]); | ||
46 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
47 | CASE_OP_32_64(shr): | ||
48 | done = fold_shift(&ctx, op); | ||
49 | break; | ||
50 | + CASE_OP_32_64(setcond): | ||
51 | + done = fold_setcond(&ctx, op); | ||
52 | + break; | ||
53 | case INDEX_op_setcond2_i32: | ||
54 | done = fold_setcond2(&ctx, op); | ||
55 | break; | ||
56 | -- | ||
57 | 2.25.1 | ||
58 | |||
59 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | --- | ||
5 | tcg/optimize.c | 37 +++++++++++++++++++++---------------- | ||
6 | 1 file changed, 21 insertions(+), 16 deletions(-) | ||
7 | 1 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
9 | index XXXXXXX..XXXXXXX 100644 | ||
10 | --- a/tcg/optimize.c | ||
11 | +++ b/tcg/optimize.c | ||
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | ||
13 | return fold_const2(ctx, op); | ||
14 | } | ||
15 | |||
16 | +static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op) | ||
17 | +{ | ||
18 | + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { | ||
19 | + uint32_t a = arg_info(op->args[2])->val; | ||
20 | + uint32_t b = arg_info(op->args[3])->val; | ||
21 | + uint64_t r = (uint64_t)a * b; | ||
22 | + TCGArg rl, rh; | ||
23 | + TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32); | ||
24 | + | ||
25 | + rl = op->args[0]; | ||
26 | + rh = op->args[1]; | ||
27 | + tcg_opt_gen_movi(ctx, op, rl, (int32_t)r); | ||
28 | + tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32)); | ||
29 | + return true; | ||
30 | + } | ||
31 | + return false; | ||
32 | +} | ||
33 | + | ||
34 | static bool fold_nand(OptContext *ctx, TCGOp *op) | ||
35 | { | ||
36 | return fold_const2(ctx, op); | ||
37 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
38 | } | ||
39 | break; | ||
40 | |||
41 | - case INDEX_op_mulu2_i32: | ||
42 | - if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { | ||
43 | - uint32_t a = arg_info(op->args[2])->val; | ||
44 | - uint32_t b = arg_info(op->args[3])->val; | ||
45 | - uint64_t r = (uint64_t)a * b; | ||
46 | - TCGArg rl, rh; | ||
47 | - TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32); | ||
48 | - | ||
49 | - rl = op->args[0]; | ||
50 | - rh = op->args[1]; | ||
51 | - tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r); | ||
52 | - tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32)); | ||
53 | - continue; | ||
54 | - } | ||
55 | - break; | ||
56 | - | ||
57 | default: | ||
58 | break; | ||
59 | |||
60 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
61 | CASE_OP_32_64(muluh): | ||
62 | done = fold_mul_highpart(&ctx, op); | ||
63 | break; | ||
64 | + case INDEX_op_mulu2_i32: | ||
65 | + done = fold_mulu2_i32(&ctx, op); | ||
66 | + break; | ||
67 | CASE_OP_32_64(nand): | ||
68 | done = fold_nand(&ctx, op); | ||
69 | break; | ||
70 | -- | ||
71 | 2.25.1 | ||
72 | |||
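(A standalone sketch of the arithmetic the new helper folds -- plain C, illustrative values, not QEMU code. mulu2_i32 forms the full 64-bit product of two 32-bit inputs and splits it across the low and high destinations, which become two movi ops when both inputs are constant; hence the extra mov_i32 op inserted before the original one.)

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t a = 0xdeadbeef, b = 0x12345678;   /* both constant */
        uint64_t r = (uint64_t)a * b;              /* full 64-bit product */
        uint32_t rl = (uint32_t)r;                 /* -> movi rl */
        uint32_t rh = (uint32_t)(r >> 32);         /* -> movi rh */
        printf("rl=0x%08" PRIx32 " rh=0x%08" PRIx32 "\n", rl, rh);
        return 0;
    }
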
Deleted patch | |||
---|---|---|---|
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | --- | ||
5 | tcg/optimize.c | 56 ++++++++++++++++++++++++++++---------------------- | ||
6 | 1 file changed, 31 insertions(+), 25 deletions(-) | ||
7 | 1 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
9 | index XXXXXXX..XXXXXXX 100644 | ||
10 | --- a/tcg/optimize.c | ||
11 | +++ b/tcg/optimize.c | ||
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op) | ||
13 | return true; | ||
14 | } | ||
15 | |||
16 | +static bool fold_movcond(OptContext *ctx, TCGOp *op) | ||
17 | +{ | ||
18 | + TCGOpcode opc = op->opc; | ||
19 | + TCGCond cond = op->args[5]; | ||
20 | + int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond); | ||
21 | + | ||
22 | + if (i >= 0) { | ||
23 | + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); | ||
24 | + } | ||
25 | + | ||
26 | + if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | ||
27 | + uint64_t tv = arg_info(op->args[3])->val; | ||
28 | + uint64_t fv = arg_info(op->args[4])->val; | ||
29 | + | ||
30 | + opc = (opc == INDEX_op_movcond_i32 | ||
31 | + ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64); | ||
32 | + | ||
33 | + if (tv == 1 && fv == 0) { | ||
34 | + op->opc = opc; | ||
35 | + op->args[3] = cond; | ||
36 | + } else if (fv == 1 && tv == 0) { | ||
37 | + op->opc = opc; | ||
38 | + op->args[3] = tcg_invert_cond(cond); | ||
39 | + } | ||
40 | + } | ||
41 | + return false; | ||
42 | +} | ||
43 | + | ||
44 | static bool fold_mul(OptContext *ctx, TCGOp *op) | ||
45 | { | ||
46 | return fold_const2(ctx, op); | ||
47 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
48 | } | ||
49 | break; | ||
50 | |||
51 | - CASE_OP_32_64(movcond): | ||
52 | - i = do_constant_folding_cond(opc, op->args[1], | ||
53 | - op->args[2], op->args[5]); | ||
54 | - if (i >= 0) { | ||
55 | - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]); | ||
56 | - continue; | ||
57 | - } | ||
58 | - if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | ||
59 | - uint64_t tv = arg_info(op->args[3])->val; | ||
60 | - uint64_t fv = arg_info(op->args[4])->val; | ||
61 | - TCGCond cond = op->args[5]; | ||
62 | - | ||
63 | - if (fv == 1 && tv == 0) { | ||
64 | - cond = tcg_invert_cond(cond); | ||
65 | - } else if (!(tv == 1 && fv == 0)) { | ||
66 | - break; | ||
67 | - } | ||
68 | - op->args[3] = cond; | ||
69 | - op->opc = opc = (opc == INDEX_op_movcond_i32 | ||
70 | - ? INDEX_op_setcond_i32 | ||
71 | - : INDEX_op_setcond_i64); | ||
72 | - } | ||
73 | - break; | ||
74 | - | ||
75 | - | ||
76 | default: | ||
77 | break; | ||
78 | |||
79 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
80 | case INDEX_op_mb: | ||
81 | done = fold_mb(&ctx, op); | ||
82 | break; | ||
83 | + CASE_OP_32_64(movcond): | ||
84 | + done = fold_movcond(&ctx, op); | ||
85 | + break; | ||
86 | CASE_OP_32_64(mul): | ||
87 | done = fold_mul(&ctx, op); | ||
88 | break; | ||
89 | -- | ||
90 | 2.25.1 | ||
91 | |||
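(Two transformations are centralized here; a standalone sketch of each in plain C, with invented values -- not QEMU code. First, a constant condition reduces movcond to a plain mov: the fold result i selects op->args[4 - i], i.e. the true arm for i == 1 and the false arm for i == 0. Second, arms that are the constants 1 and 0 turn the op into setcond, inverting the condition when the arms are swapped.)

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 1. movcond d, x, y, t, f, GE with x, y constant:
         *    i = (x >= y) picks args[4 - i], and the op becomes mov. */
        int64_t x = 8, y = 3;
        int i = (x >= y);                  /* 1: select the true arm */
        uint64_t t = 0x1111, f = 0x2222;
        printf("mov d, 0x%" PRIx64 "\n", i ? t : f);

        /* 2. movcond d, x, y, 1, 0, cond computes exactly
         *    setcond d, x, y, cond; with arms 0, 1 instead, the
         *    same holds for the inverted condition. */
        printf("setcond d, x, y, GE -> %d\n", (int)(x >= y));
        return 0;
    }
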
Deleted patch | |||
---|---|---|---|
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | --- | ||
5 | tcg/optimize.c | 39 ++++++++++++++++++++++----------------- | ||
6 | 1 file changed, 22 insertions(+), 17 deletions(-) | ||
7 | 1 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
9 | index XXXXXXX..XXXXXXX 100644 | ||
10 | --- a/tcg/optimize.c | ||
11 | +++ b/tcg/optimize.c | ||
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op) | ||
13 | return fold_const2(ctx, op); | ||
14 | } | ||
15 | |||
16 | +static bool fold_extract2(OptContext *ctx, TCGOp *op) | ||
17 | +{ | ||
18 | + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
19 | + uint64_t v1 = arg_info(op->args[1])->val; | ||
20 | + uint64_t v2 = arg_info(op->args[2])->val; | ||
21 | + int shr = op->args[3]; | ||
22 | + | ||
23 | + if (op->opc == INDEX_op_extract2_i64) { | ||
24 | + v1 >>= shr; | ||
25 | + v2 <<= 64 - shr; | ||
26 | + } else { | ||
27 | + v1 = (uint32_t)v1 >> shr; | ||
28 | + v2 = (int32_t)v2 << (32 - shr); | ||
29 | + } | ||
30 | + return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2); | ||
31 | + } | ||
32 | + return false; | ||
33 | +} | ||
34 | + | ||
35 | static bool fold_exts(OptContext *ctx, TCGOp *op) | ||
36 | { | ||
37 | return fold_const1(ctx, op); | ||
38 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
39 | } | ||
40 | break; | ||
41 | |||
42 | - CASE_OP_32_64(extract2): | ||
43 | - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
44 | - uint64_t v1 = arg_info(op->args[1])->val; | ||
45 | - uint64_t v2 = arg_info(op->args[2])->val; | ||
46 | - int shr = op->args[3]; | ||
47 | - | ||
48 | - if (opc == INDEX_op_extract2_i64) { | ||
49 | - tmp = (v1 >> shr) | (v2 << (64 - shr)); | ||
50 | - } else { | ||
51 | - tmp = (int32_t)(((uint32_t)v1 >> shr) | | ||
52 | - ((uint32_t)v2 << (32 - shr))); | ||
53 | - } | ||
54 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
55 | - continue; | ||
56 | - } | ||
57 | - break; | ||
58 | - | ||
59 | default: | ||
60 | break; | ||
61 | |||
62 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
63 | CASE_OP_32_64(eqv): | ||
64 | done = fold_eqv(&ctx, op); | ||
65 | break; | ||
66 | + CASE_OP_32_64(extract2): | ||
67 | + done = fold_extract2(&ctx, op); | ||
68 | + break; | ||
69 | CASE_OP_32_64(ext8s): | ||
70 | CASE_OP_32_64(ext16s): | ||
71 | case INDEX_op_ext32s_i64: | ||
72 | -- | ||
73 | 2.25.1 | ||
74 | |||
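(A standalone sketch of the extract2 arithmetic -- plain C with invented values, not QEMU code. extract2 d, lo, hi, shr concatenates hi:lo and extracts one word starting shr bits up, i.e. (lo >> shr) | (hi << (width - shr)); with both inputs constant the result folds to a movi. The 64-bit case is shown, with 0 < shr < 64 so neither shift is out of range.)

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t v1 = 0x1122334455667788ull;   /* low part */
        uint64_t v2 = 0xaabbccddeeff0011ull;   /* high part */
        int shr = 16;

        uint64_t d = (v1 >> shr) | (v2 << (64 - shr));
        printf("movi d, 0x%016" PRIx64 "\n", d);   /* 0x0011112233445566 */
        return 0;
    }
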
Deleted patch | |||
---|---|---|---|
1 | Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> | ||
2 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | --- | ||
5 | tcg/optimize.c | 25 +++++++++++++++---------- | ||
6 | 1 file changed, 15 insertions(+), 10 deletions(-) | ||
7 | 1 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
9 | index XXXXXXX..XXXXXXX 100644 | ||
10 | --- a/tcg/optimize.c | ||
11 | +++ b/tcg/optimize.c | ||
12 | @@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op) | ||
13 | return fold_const1(ctx, op); | ||
14 | } | ||
15 | |||
16 | +static bool fold_deposit(OptContext *ctx, TCGOp *op) | ||
17 | +{ | ||
18 | + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
19 | + uint64_t t1 = arg_info(op->args[1])->val; | ||
20 | + uint64_t t2 = arg_info(op->args[2])->val; | ||
21 | + | ||
22 | + t1 = deposit64(t1, op->args[3], op->args[4], t2); | ||
23 | + return tcg_opt_gen_movi(ctx, op, op->args[0], t1); | ||
24 | + } | ||
25 | + return false; | ||
26 | +} | ||
27 | + | ||
28 | static bool fold_divide(OptContext *ctx, TCGOp *op) | ||
29 | { | ||
30 | return fold_const2(ctx, op); | ||
31 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
32 | } | ||
33 | break; | ||
34 | |||
35 | - CASE_OP_32_64(deposit): | ||
36 | - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
37 | - tmp = deposit64(arg_info(op->args[1])->val, | ||
38 | - op->args[3], op->args[4], | ||
39 | - arg_info(op->args[2])->val); | ||
40 | - tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); | ||
41 | - continue; | ||
42 | - } | ||
43 | - break; | ||
44 | - | ||
45 | default: | ||
46 | break; | ||
47 | |||
48 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
49 | CASE_OP_32_64(ctpop): | ||
50 | done = fold_ctpop(&ctx, op); | ||
51 | break; | ||
52 | + CASE_OP_32_64(deposit): | ||
53 | + done = fold_deposit(&ctx, op); | ||
54 | + break; | ||
55 | CASE_OP_32_64(div): | ||
56 | CASE_OP_32_64(divu): | ||
57 | done = fold_divide(&ctx, op); | ||
58 | -- | ||
59 | 2.25.1 | ||
60 | |||
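(deposit64(t1, pos, len, t2) inserts the low len bits of t2 into t1 at bit position pos; with both inputs constant the op folds to a single movi. A self-contained sketch of the semantics follows -- deposit64_demo is written out here for illustration and mirrors what QEMU's bitops helper computes, but is not the QEMU implementation.)

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Insert the low 'len' bits of 'val' into 'dst' at bit 'pos'. */
    static uint64_t deposit64_demo(uint64_t dst, int pos, int len, uint64_t val)
    {
        uint64_t mask = (~0ULL >> (64 - len)) << pos;
        return (dst & ~mask) | ((val << pos) & mask);
    }

    int main(void)
    {
        /* deposit d, t1, t2 with args[3] = 8, args[4] = 16 and both
         * values constant: the op becomes movi d, <folded value>. */
        uint64_t t1 = deposit64_demo(0xffffffffffffffffull, 8, 16, 0xabcd);
        printf("movi d, 0x%016" PRIx64 "\n", t1);   /* 0xffffffffffabcdff */
        return 0;
    }
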